@lobehub/chat 1.79.3 → 1.79.5

This diff shows the changes between publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (30)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/package.json +3 -3
  4. package/src/app/(backend)/webapi/chat/[provider]/route.ts +1 -4
  5. package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -0
  6. package/src/app/(backend)/webapi/trace/route.ts +6 -1
  7. package/src/config/aiModels/xai.ts +79 -18
  8. package/src/const/trace.ts +2 -4
  9. package/src/database/models/__tests__/_util.ts +4 -2
  10. package/src/libs/agent-runtime/AgentRuntime.test.ts +11 -17
  11. package/src/libs/agent-runtime/helpers/index.ts +1 -0
  12. package/src/{utils/fetch/__tests__ → libs/agent-runtime/helpers}/parseToolCalls.test.ts +1 -2
  13. package/src/libs/agent-runtime/index.ts +1 -0
  14. package/src/libs/agent-runtime/types/chat.ts +41 -9
  15. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +4 -2
  16. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +4 -7
  17. package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +6 -14
  18. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +3 -6
  19. package/src/libs/agent-runtime/utils/streams/ollama.test.ts +3 -9
  20. package/src/libs/agent-runtime/utils/streams/openai.test.ts +5 -8
  21. package/src/libs/agent-runtime/utils/streams/protocol.ts +55 -10
  22. package/src/libs/agent-runtime/utils/streams/qwen.test.ts +3 -6
  23. package/src/libs/agent-runtime/utils/streams/spark.test.ts +63 -60
  24. package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts +3 -7
  25. package/src/libs/agent-runtime/zhipu/index.test.ts +2 -2
  26. package/src/server/modules/AgentRuntime/index.ts +4 -75
  27. package/src/server/modules/AgentRuntime/trace.ts +107 -0
  28. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +6 -0
  29. package/src/utils/fetch/fetchSSE.ts +1 -1
  30. package/src/{utils/fetch → libs/agent-runtime/helpers}/parseToolCalls.ts +0 -0
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.79.5](https://github.com/lobehub/lobe-chat/compare/v1.79.4...v1.79.5)
+
+<sup>Released on **2025-04-10**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix langfuse intergation.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix langfuse intergation, closes [#7367](https://github.com/lobehub/lobe-chat/issues/7367) ([22b5236](https://github.com/lobehub/lobe-chat/commit/22b5236))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
+### [Version 1.79.4](https://github.com/lobehub/lobe-chat/compare/v1.79.3...v1.79.4)
+
+<sup>Released on **2025-04-10**</sup>
+
+#### 💄 Styles
+
+- **misc**: Update Grok 3 models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Update Grok 3 models, closes [#7360](https://github.com/lobehub/lobe-chat/issues/7360) ([d2b9120](https://github.com/lobehub/lobe-chat/commit/d2b9120))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.79.3](https://github.com/lobehub/lobe-chat/compare/v1.79.2...v1.79.3)
 
 <sup>Released on **2025-04-10**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Fix langfuse intergation."
+      ]
+    },
+    "date": "2025-04-10",
+    "version": "1.79.5"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Update Grok 3 models."
+      ]
+    },
+    "date": "2025-04-10",
+    "version": "1.79.4"
+  },
   {
     "children": {
       "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.79.3",
+  "version": "1.79.5",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -189,8 +189,8 @@
     "js-sha256": "^0.11.0",
     "jsonl-parse-stringify": "^1.0.3",
     "langchain": "^0.3.19",
-    "langfuse": "3.29.1",
-    "langfuse-core": "3.29.1",
+    "langfuse": "^3.37.1",
+    "langfuse-core": "^3.37.1",
     "lodash-es": "^4.17.21",
     "lucide-react": "^0.487.0",
     "mammoth": "^1.9.0",
package/src/app/(backend)/webapi/chat/[provider]/route.ts CHANGED
@@ -33,10 +33,7 @@ export const POST = checkAuth(async (req: Request, { params, jwtPayload, createR
   let traceOptions = {};
   // If user enable trace
   if (tracePayload?.enabled) {
-    traceOptions = createTraceOptions(data, {
-      provider,
-      trace: tracePayload,
-    });
+    traceOptions = createTraceOptions(data, { provider, trace: tracePayload });
   }
 
   return await agentRuntime.chat(data, {
package/src/app/(backend)/webapi/plugin/gateway/route.ts CHANGED
@@ -52,6 +52,7 @@ export const POST = async (req: Request) => {
     return createErrorResponse(result.error as ErrorType);
   }
 
+  // TODO: need to be replace by better telemetry system
   // add trace
   const tracePayload = getTracePayload(req);
   const traceClient = new TraceClient();
package/src/app/(backend)/webapi/trace/route.ts CHANGED
@@ -1,3 +1,5 @@
+import { after } from 'next/server';
+
 import { TraceEventType } from '@/const/trace';
 import { TraceClient } from '@/libs/traces';
 import { TraceEventBasePayload, TraceEventPayloads } from '@/types/trace';
@@ -35,6 +37,9 @@ export const POST = async (req: Request) => {
     }
   }
 
-  await traceClient.shutdownAsync();
+  after(async () => {
+    await traceClient.shutdownAsync();
+  });
+
   return new Response(undefined, { status: 201 });
 };
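The change above moves the Langfuse flush off the critical path: instead of awaiting `shutdownAsync()` before responding, the handler defers it with Next.js's `after()`, which runs once the response has been sent. A minimal sketch of the same pattern, assuming Next.js 15+ (where `after` is exported from `next/server`); `flushTraces` is a hypothetical stand-in for `traceClient.shutdownAsync()`:

```ts
import { after } from 'next/server';

// Hypothetical stand-in for traceClient.shutdownAsync().
const flushTraces = async () => {
  /* send buffered trace events to the telemetry backend */
};

export const POST = async (req: Request) => {
  const event = await req.json(); // record the trace event (handling elided)
  void event;

  // Awaiting the flush here would delay the 201; `after` schedules the
  // callback to run after the response has been streamed to the client.
  after(async () => {
    await flushTraces();
  });

  return new Response(undefined, { status: 201 });
};
```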
package/src/config/aiModels/xai.ts CHANGED
@@ -1,51 +1,68 @@
 import { AIChatModelCard } from '@/types/aiModel';
-
+// https://docs.x.ai/docs/models
 const xaiChatModels: AIChatModelCard[] = [
   {
     abilities: {
       functionCall: true,
     },
     contextWindowTokens: 131_072,
-    description: '拥有与 Grok 2 相当的性能,但具有更高的效率、速度和功能。',
-    displayName: 'Grok Beta',
+    description: '旗舰级模型,擅长数据提取、编程和文本摘要等企业级应用,拥有金融、医疗、法律和科学等领域的深厚知识。',
+    displayName: 'Grok 3 Beta',
     enabled: true,
-    id: 'grok-beta',
+    id: 'grok-3-beta',
     pricing: {
-      input: 5,
+      input: 3,
       output: 15,
     },
+    releasedAt: '2025-04-03',
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
-      vision: true,
     },
-    contextWindowTokens: 8192,
-    description: '最新的图像理解模型,可以处理各种各样的视觉信息,包括文档、图表、截图和照片等。',
-    displayName: 'Grok Vision Beta',
-    enabled: true,
-    id: 'grok-vision-beta',
+    contextWindowTokens: 131_072,
+    description: '旗舰级模型,擅长数据提取、编程和文本摘要等企业级应用,拥有金融、医疗、法律和科学等领域的深厚知识。',
+    displayName: 'Grok 3 Beta (Fast mode)',
+    id: 'grok-3-fast-beta',
     pricing: {
       input: 5,
-      output: 15,
+      output: 25,
     },
+    releasedAt: '2025-04-03',
     type: 'chat',
   },
   {
     abilities: {
       functionCall: true,
+      reasoning: true,
     },
     contextWindowTokens: 131_072,
-    description: '该模型在准确性、指令遵循和多语言能力方面有所改进。',
-    displayName: 'Grok 2 1212',
+    description: '轻量级模型,回话前会先思考。运行快速、智能,适用于不需要深层领域知识的逻辑任务,并能获取原始的思维轨迹。',
+    displayName: 'Grok 3 Mini Beta',
     enabled: true,
-    id: 'grok-2-1212',
+    id: 'grok-3-mini-beta',
     pricing: {
-      input: 2,
-      output: 10,
+      input: 0.3,
+      output: 0.5,
     },
-    releasedAt: '2024-12-12',
+    releasedAt: '2025-04-03',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      reasoning: true,
+    },
+    contextWindowTokens: 131_072,
+    description: '轻量级模型,回话前会先思考。运行快速、智能,适用于不需要深层领域知识的逻辑任务,并能获取原始的思维轨迹。',
+    displayName: 'Grok 3 Mini Beta (Fast mode)',
+    id: 'grok-3-mini-fast-beta',
+    pricing: {
+      input: 0.6,
+      output: 4,
+    },
+    releasedAt: '2025-04-03',
     type: 'chat',
   },
   {
@@ -65,6 +82,50 @@ const xaiChatModels: AIChatModelCard[] = [
     releasedAt: '2024-12-12',
     type: 'chat',
   },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description: '该模型在准确性、指令遵循和多语言能力方面有所改进。',
+    displayName: 'Grok 2 1212',
+    id: 'grok-2-1212', // legacy
+    pricing: {
+      input: 2,
+      output: 10,
+    },
+    releasedAt: '2024-12-12',
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+    },
+    contextWindowTokens: 131_072,
+    description: '拥有与 Grok 2 相当的性能,但具有更高的效率、速度和功能。',
+    displayName: 'Grok Beta',
+    id: 'grok-beta', // legacy
+    pricing: {
+      input: 5,
+      output: 15,
+    },
+    type: 'chat',
+  },
+  {
+    abilities: {
+      functionCall: true,
+      vision: true,
+    },
+    contextWindowTokens: 8192,
+    description: '最新的图像理解模型,可以处理各种各样的视觉信息,包括文档、图表、截图和照片等。',
+    displayName: 'Grok Vision Beta',
+    id: 'grok-vision-beta', // legacy
+    pricing: {
+      input: 5,
+      output: 15,
+    },
+    type: 'chat',
+  },
 ];
 
 export const allModels = [...xaiChatModels];
package/src/const/trace.ts CHANGED
@@ -7,15 +7,13 @@ export enum TraceNameMap {
   Conversation = 'Conversation',
   EmojiPicker = 'Emoji Picker',
   FetchPluginAPI = 'Fetch Plugin API',
-  InvokePlugin = 'Invoke Plugin',
   LanguageDetect = 'Language Detect',
+  SearchIntentRecognition = 'Search Intent Recognition',
   SummaryAgentDescription = 'Summary Agent Description',
   SummaryAgentTags = 'Summary Agent Tags',
   SummaryAgentTitle = 'Summary Agent Title',
   SummaryTopicTitle = 'Summary Topic Title',
   Translator = 'Translator',
-  // mean user have relative events
-  UserEvents = 'User Events',
 }
 
 export enum TraceEventType {
@@ -33,7 +31,7 @@ export enum TraceTagMap {
   Chat = 'Chat Competition',
   SystemChain = 'System Chain',
   ToolCalling = 'Tool Calling',
-  ToolsCall = 'Tools Call',
+  ToolsCalling = 'Tools Calling',
 }
 
 export interface TracePayload {
package/src/database/models/__tests__/_util.ts CHANGED
@@ -1,11 +1,13 @@
 import { clientDB, initializeDB } from '@/database/client/db';
-import { getTestDBInstance } from '@/database/core/dbForTest';
 import { LobeChatDatabase } from '@/database/type';
 
 const isServerDBMode = process.env.TEST_SERVER_DB === '1';
 
 export const getTestDB = async () => {
-  if (isServerDBMode) return await getTestDBInstance();
+  if (isServerDBMode) {
+    const { getTestDBInstance } = await import('@/database/core/dbForTest');
+    return await getTestDBInstance();
+  }
 
   await initializeDB();
   return clientDB as LobeChatDatabase;
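The helper above swaps a static import of `dbForTest` for a dynamic `import()` inside the server-only branch, so client-mode test runs never evaluate that module. A minimal sketch of the pattern, with hypothetical module names:

```ts
// Hypothetical modules: './serverDB' has server-only side effects, so it is
// loaded lazily and only when the server branch is actually taken.
export const getDB = async (useServerDB: boolean) => {
  if (useServerDB) {
    const { createServerDB } = await import('./serverDB');
    return createServerDB();
  }
  const { createClientDB } = await import('./clientDB');
  return createClientDB();
};
```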
package/src/libs/agent-runtime/AgentRuntime.test.ts CHANGED
@@ -66,10 +66,7 @@ const testRuntime = (providerId: string, payload?: any) => {
 let mockModelRuntime: AgentRuntime;
 beforeEach(async () => {
   const jwtPayload: JWTPayload = { apiKey: 'user-openai-key', baseURL: 'user-endpoint' };
-  mockModelRuntime = await AgentRuntime.initializeWithProvider(
-    ModelProvider.OpenAI,
-    jwtPayload,
-  );
+  mockModelRuntime = await AgentRuntime.initializeWithProvider(ModelProvider.OpenAI, jwtPayload);
 });
 
 describe('AgentRuntime', () => {
@@ -112,7 +109,7 @@ describe('AgentRuntime', () => {
       provider: 'openai',
       trace: {
         traceId: 'test-trace-id',
-        traceName: TraceNameMap.Conversation,
+        traceName: TraceNameMap.SummaryTopicTitle,
         sessionId: 'test-session-id',
         topicId: 'test-topic-id',
         tags: [],
@@ -136,7 +133,7 @@ describe('AgentRuntime', () => {
       provider: 'openai',
       trace: {
         traceId: 'test-trace-id',
-        traceName: TraceNameMap.Conversation,
+        traceName: TraceNameMap.SummaryTopicTitle,
         sessionId: 'test-session-id',
         topicId: 'test-topic-id',
         tags: [],
@@ -147,7 +144,7 @@ describe('AgentRuntime', () => {
 
     const updateMock = vi.fn();
 
-    it('should call experimental_onToolCall correctly', async () => {
+    it('should call onToolsCalling correctly', async () => {
       vi.spyOn(langfuseCfg, 'getLangfuseConfig').mockReturnValue({
         ENABLE_LANGFUSE: true,
         LANGFUSE_PUBLIC_KEY: 'abc',
@@ -157,9 +154,9 @@ describe('AgentRuntime', () => {
       // 使用 spyOn 模拟 chat 方法
      vi.spyOn(LobeOpenAI.prototype, 'chat').mockImplementation(
         async (payload, { callback }: any) => {
-          // 模拟 experimental_onToolCall 回调的触发
-          if (callback?.experimental_onToolCall) {
-            await callback.experimental_onToolCall();
+          // 模拟 onToolCall 回调的触发
+          if (callback?.onToolsCalling) {
+            await callback.onToolsCalling();
           }
           return new Response('abc');
         },
@@ -168,7 +165,7 @@ describe('AgentRuntime', () => {
 
       await mockModelRuntime.chat(payload, createTraceOptions(payload, options));
 
-      expect(updateMock).toHaveBeenCalledWith({ tags: ['Tools Call'] });
+      expect(updateMock).toHaveBeenCalledWith({ tags: ['Tools Calling'] });
     });
     it('should call onStart correctly', async () => {
       vi.spyOn(langfuseCfg, 'getLangfuseConfig').mockReturnValue({
@@ -204,7 +201,7 @@ describe('AgentRuntime', () => {
       vi.spyOn(LobeOpenAI.prototype, 'chat').mockImplementation(
         async (payload, { callback }: any) => {
           if (callback?.onCompletion) {
-            await callback.onCompletion('Test completion');
+            await callback.onCompletion({ text: 'Test completion' });
           }
           return new Response('Success');
         },
@@ -215,14 +212,11 @@ describe('AgentRuntime', () => {
       // Verify onCompletion was called with expected output
       expect(updateMock).toHaveBeenCalledWith({
         endTime: expect.any(Date),
-        metadata: {
-          provider: 'openai',
-          tools: undefined,
-        },
+        metadata: {},
         output: 'Test completion',
       });
     });
-    it('should call onFinal correctly', async () => {
+    it.skip('should call onFinal correctly', async () => {
      vi.spyOn(langfuseCfg, 'getLangfuseConfig').mockReturnValue({
        ENABLE_LANGFUSE: true,
        LANGFUSE_PUBLIC_KEY: 'abc',
package/src/libs/agent-runtime/helpers/index.ts ADDED
@@ -0,0 +1 @@
+export * from './parseToolCalls';
package/src/{utils/fetch/__tests__ → libs/agent-runtime/helpers}/parseToolCalls.test.ts RENAMED
@@ -1,7 +1,6 @@
 import { describe, expect, it } from 'vitest';
-import { ZodError } from 'zod';
 
-import { parseToolCalls } from '../parseToolCalls';
+import { parseToolCalls } from './parseToolCalls';
 
 describe('parseToolCalls', () => {
   it('should create add new item', () => {
package/src/libs/agent-runtime/index.ts CHANGED
@@ -8,6 +8,7 @@ export { LobeDeepSeekAI } from './deepseek';
 export * from './error';
 export { LobeGoogleAI } from './google';
 export { LobeGroq } from './groq';
+export * from './helpers';
 export { LobeMinimaxAI } from './minimax';
 export { LobeMistralAI } from './mistral';
 export { LobeMoonshotAI } from './moonshot';
package/src/libs/agent-runtime/types/chat.ts CHANGED
@@ -1,4 +1,25 @@
-import { MessageToolCall } from '@/types/message';
+import { DeepPartial } from 'utility-types';
+
+import { ModelTokensUsage, ToolFunction } from '@/types/message';
+
+export interface MessageToolCall {
+  /**
+   * The function that the model called.
+   */
+  function: ToolFunction;
+
+  /**
+   * The ID of the tool call.
+   */
+  id: string;
+
+  /**
+   * The type of the tool. Currently, only `function` is supported.
+   */
+  type: 'function' | string;
+}
+
+export type MessageToolCallChunk = DeepPartial<MessageToolCall> & { index: number };
 
 export type LLMRoleType = 'user' | 'system' | 'assistant' | 'function' | 'tool';
 
@@ -165,18 +186,29 @@ export interface ChatCompletionTool {
   type: 'function';
 }
 
+interface OnFinishData {
+  grounding?: any;
+  text: string;
+  thinking?: string;
+  toolsCalling?: MessageToolCall[];
+  usage?: ModelTokensUsage;
+}
+
 export interface ChatStreamCallbacks {
+  onCompletion?: (data: OnFinishData) => Promise<void> | void;
   /**
-   * `onCompletion`: Called for each tokenized message.
+   * `onFinal`: Called once when the stream is closed with the final completion message.
   **/
-  onCompletion?: (completion: string) => Promise<void> | void;
-  /** `onFinal`: Called once when the stream is closed with the final completion message. */
-  onFinal?: (completion: string) => Promise<void> | void;
+  onFinal?: (data: OnFinishData) => Promise<void> | void;
+  onGrounding?: (grounding: any) => Promise<void> | void;
   /** `onStart`: Called once when the stream is initialized. */
   onStart?: () => Promise<void> | void;
   /** `onText`: Called for each text chunk. */
-  onText?: (text: string) => Promise<void> | void;
-  /** `onToken`: Called for each tokenized message. */
-  onToken?: (token: string) => Promise<void> | void;
-  onToolCall?: () => Promise<void> | void;
+  onText?: (content: string) => Promise<void> | void;
+  onThinking?: (content: string) => Promise<void> | void;
+  onToolsCalling?: (data: {
+    chunk: MessageToolCallChunk[];
+    toolsCalling: MessageToolCall[];
+  }) => Promise<void> | void;
+  onUsage?: (usage: ModelTokensUsage) => Promise<void> | void;
 }
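This hunk reshapes the callback surface: `onToken` and `onToolCall` are gone, `onCompletion` and `onFinal` now receive a structured `OnFinishData` instead of a bare string, and thinking, grounding, tool calling, and usage each get a dedicated hook. A minimal consumer sketch against the types above (the relative import path and `saveMessage` are hypothetical):

```ts
import type { ChatStreamCallbacks } from './types/chat';

// Hypothetical persistence helper, not part of the package.
declare function saveMessage(message: { text: string; tools?: unknown[] }): void;

export const callbacks: ChatStreamCallbacks = {
  // Text chunks still arrive incrementally...
  onText: (content) => process.stdout.write(content),
  // ...and reasoning traces now have their own channel.
  onThinking: (content) => console.debug('[thinking]', content),
  onToolsCalling: ({ chunk, toolsCalling }) => {
    // `chunk` is the incremental delta; `toolsCalling` is the accumulated list.
    console.log(`tool calls so far: ${toolsCalling.length} (+${chunk.length})`);
  },
  onUsage: (usage) => console.log('token usage:', usage),
  // onCompletion receives a structured object instead of a bare string.
  onCompletion: ({ text, toolsCalling }) => saveMessage({ text, tools: toolsCalling }),
};
```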
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts CHANGED
@@ -776,7 +776,7 @@ describe('LobeOpenAICompatibleFactory', () => {
       // 准备 callback 和 headers
       const mockCallback: ChatStreamCallbacks = {
         onStart: vi.fn(),
-        onToken: vi.fn(),
+        onCompletion: vi.fn(),
       };
       const mockHeaders = { 'Custom-Header': 'TestValue' };
 
@@ -793,7 +793,9 @@ describe('LobeOpenAICompatibleFactory', () => {
       // 验证 callback 被调用
       await result.text(); // 确保流被消费
       expect(mockCallback.onStart).toHaveBeenCalled();
-      expect(mockCallback.onToken).toHaveBeenCalledWith('hello');
+      expect(mockCallback.onCompletion).toHaveBeenCalledWith({
+        text: 'hello',
+      });
 
       // 验证 headers 被正确传递
       expect(result.headers.get('Custom-Header')).toEqual('TestValue');
package/src/libs/agent-runtime/utils/streams/anthropic.test.ts CHANGED
@@ -58,13 +58,11 @@ describe('AnthropicStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
     const onCompletionMock = vi.fn();
 
     const protocolStream = AnthropicStream(mockAnthropicStream, {
       onStart: onStartMock,
       onText: onTextMock,
-      onToken: onTokenMock,
       onCompletion: onCompletionMock,
     });
 
@@ -92,9 +90,8 @@ describe('AnthropicStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });
 
@@ -168,7 +165,7 @@ describe('AnthropicStream', () => {
     const onToolCallMock = vi.fn();
 
     const protocolStream = AnthropicStream(mockReadableStream, {
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
     });
 
     const decoder = new TextDecoder();
@@ -320,7 +317,7 @@ describe('AnthropicStream', () => {
     const onToolCallMock = vi.fn();
 
     const protocolStream = AnthropicStream(mockReadableStream, {
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
     });
 
     const decoder = new TextDecoder();
package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts CHANGED
@@ -20,13 +20,11 @@ describe('AWSBedrockLlamaStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
     const onCompletionMock = vi.fn();
 
     const protocolStream = AWSBedrockLlamaStream(mockBedrockStream, {
       onStart: onStartMock,
       onText: onTextMock,
-      onToken: onTokenMock,
       onCompletion: onCompletionMock,
     });
 
@@ -51,9 +49,8 @@ describe('AWSBedrockLlamaStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });
 
@@ -73,13 +70,11 @@ describe('AWSBedrockLlamaStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
     const onCompletionMock = vi.fn();
 
     const protocolStream = AWSBedrockLlamaStream(mockBedrockStream, {
       onStart: onStartMock,
       onText: onTextMock,
-      onToken: onTokenMock,
       onCompletion: onCompletionMock,
     });
 
@@ -104,9 +99,8 @@ describe('AWSBedrockLlamaStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });
 
@@ -143,7 +137,6 @@ describe('AWSBedrockLlamaStream', () => {
     const protocolStream = AWSBedrockLlamaStream(mockBedrockStream, {
       onStart: onStartMock,
       onText: onTextMock,
-      onToken: onTokenMock,
       onCompletion: onCompletionMock,
     });
 
@@ -168,9 +161,8 @@ describe('AWSBedrockLlamaStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });
 
package/src/libs/agent-runtime/utils/streams/google-ai.test.ts CHANGED
@@ -30,15 +30,13 @@ describe('GoogleGenerativeAIStream', () => {
 
     const onStartMock = vi.fn();
     const onTextMock = vi.fn();
-    const onTokenMock = vi.fn();
     const onToolCallMock = vi.fn();
     const onCompletionMock = vi.fn();
 
     const protocolStream = GoogleGenerativeAIStream(mockGoogleStream, {
       onStart: onStartMock,
       onText: onTextMock,
-      onToken: onTokenMock,
-      onToolCall: onToolCallMock,
+      onToolsCalling: onToolCallMock,
       onCompletion: onCompletionMock,
     });
 
@@ -68,9 +66,8 @@ describe('GoogleGenerativeAIStream', () => {
     ]);
 
     expect(onStartMock).toHaveBeenCalledTimes(1);
-    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
-    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
-    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
     expect(onToolCallMock).toHaveBeenCalledTimes(1);
     expect(onCompletionMock).toHaveBeenCalledTimes(1);
   });