@lobehub/chat 1.68.9 → 1.68.11
This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +51 -0
- package/changelog/v1.json +18 -0
- package/docs/usage/providers/ppio.mdx +5 -5
- package/docs/usage/providers/ppio.zh-CN.mdx +7 -7
- package/locales/ar/chat.json +5 -1
- package/locales/ar/models.json +12 -9
- package/locales/bg-BG/chat.json +5 -1
- package/locales/bg-BG/models.json +12 -9
- package/locales/de-DE/chat.json +5 -1
- package/locales/de-DE/models.json +12 -9
- package/locales/en-US/chat.json +5 -1
- package/locales/en-US/models.json +12 -9
- package/locales/es-ES/chat.json +5 -1
- package/locales/es-ES/models.json +12 -9
- package/locales/fa-IR/chat.json +5 -1
- package/locales/fa-IR/models.json +12 -9
- package/locales/fr-FR/chat.json +5 -1
- package/locales/fr-FR/models.json +12 -9
- package/locales/it-IT/chat.json +5 -1
- package/locales/it-IT/models.json +12 -9
- package/locales/ja-JP/chat.json +5 -1
- package/locales/ja-JP/models.json +12 -9
- package/locales/ko-KR/chat.json +5 -1
- package/locales/ko-KR/models.json +12 -9
- package/locales/nl-NL/chat.json +5 -1
- package/locales/nl-NL/models.json +12 -9
- package/locales/pl-PL/chat.json +5 -1
- package/locales/pl-PL/models.json +12 -9
- package/locales/pt-BR/chat.json +5 -1
- package/locales/pt-BR/models.json +12 -9
- package/locales/ru-RU/chat.json +5 -1
- package/locales/ru-RU/models.json +12 -9
- package/locales/tr-TR/chat.json +5 -1
- package/locales/tr-TR/models.json +12 -9
- package/locales/vi-VN/chat.json +5 -1
- package/locales/vi-VN/models.json +12 -9
- package/locales/zh-CN/chat.json +5 -1
- package/locales/zh-CN/models.json +12 -9
- package/locales/zh-TW/chat.json +5 -1
- package/locales/zh-TW/models.json +12 -9
- package/package.json +1 -1
- package/src/config/aiModels/google.ts +37 -0
- package/src/config/aiModels/perplexity.ts +36 -20
- package/src/config/aiModels/qwen.ts +64 -25
- package/src/config/modelProviders/ppio.ts +1 -1
- package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +27 -9
- package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +77 -35
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +253 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +65 -46
- package/src/libs/agent-runtime/baichuan/index.test.ts +58 -1
- package/src/libs/agent-runtime/groq/index.test.ts +36 -284
- package/src/libs/agent-runtime/mistral/index.test.ts +39 -300
- package/src/libs/agent-runtime/perplexity/index.test.ts +12 -10
- package/src/libs/agent-runtime/providerTestUtils.ts +58 -0
- package/src/libs/agent-runtime/togetherai/index.test.ts +7 -295
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +3 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +5 -2
- package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +89 -5
- package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -8
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +188 -84
- package/src/libs/agent-runtime/utils/streams/openai.ts +8 -17
- package/src/libs/agent-runtime/utils/usageConverter.test.ts +249 -0
- package/src/libs/agent-runtime/utils/usageConverter.ts +50 -0
- package/src/libs/agent-runtime/zeroone/index.test.ts +7 -294
- package/src/locales/default/chat.ts +4 -0
- package/src/types/message/base.ts +14 -4
- package/src/utils/filter.test.ts +0 -122
- package/src/utils/filter.ts +0 -29
package/src/libs/agent-runtime/togetherai/index.test.ts
@@ -1,300 +1,12 @@
 // @vitest-environment node
-import
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { testProvider } from '@/libs/agent-runtime/providerTestUtils';
 
-import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
-
-import * as debugStreamModule from '../utils/debugStream';
-import models from './fixtures/models.json';
 import { LobeTogetherAI } from './index';
 
-
-
-
-
-
-
-// Mock the console.error to avoid polluting test output
-vi.spyOn(console, 'error').mockImplementation(() => {});
-
-let instance: LobeOpenAICompatibleRuntime;
-
-beforeEach(() => {
-  instance = new LobeTogetherAI({ apiKey: 'test' });
-
-  // use vi.spyOn to mock the chat.completions.create method
-  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-    new ReadableStream() as any,
-  );
-});
-
-afterEach(() => {
-  vi.clearAllMocks();
-});
-
-describe('LobeTogetherAI', () => {
-  describe('init', () => {
-    it('should correctly initialize with an API key', async () => {
-      const instance = new LobeTogetherAI({ apiKey: 'test_api_key' });
-      expect(instance).toBeInstanceOf(LobeTogetherAI);
-      expect(instance.baseURL).toEqual(defaultBaseURL);
-    });
-  });
-
-  describe('chat', () => {
-    it('should return a StreamingTextResponse on successful API call', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
-        messages: [{ content: 'Hello', role: 'user' }],
-        model: 'mistralai/mistral-7b-instruct:free',
-        temperature: 0,
-      });
-
-      // Assert
-      expect(result).toBeInstanceOf(Response);
-    });
-
-    it('should call TogetherAI API with corresponding options', async () => {
-      // Arrange
-      const mockStream = new ReadableStream();
-      const mockResponse = Promise.resolve(mockStream);
-
-      (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-      // Act
-      const result = await instance.chat({
-        max_tokens: 1024,
-        messages: [{ content: 'Hello', role: 'user' }],
-        model: 'mistralai/mistral-7b-instruct:free',
-        temperature: 0.7,
-        top_p: 1,
-      });
-
-      // Assert
-      expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-        {
-          max_tokens: 1024,
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'mistralai/mistral-7b-instruct:free',
-          temperature: 0.7,
-          stream: true,
-          top_p: 1,
-        },
-        { headers: { Accept: '*/*' } },
-      );
-      expect(result).toBeInstanceOf(Response);
-    });
-
-    describe('Error', () => {
-      it('should return TogetherAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
-        // Arrange
-        const apiError = new OpenAI.APIError(
-          400,
-          {
-            status: 400,
-            error: {
-              message: 'Bad Request',
-            },
-          },
-          'Error message',
-          {},
-        );
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              error: { message: 'Bad Request' },
-              status: 400,
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw AgentRuntimeError with InvalidTogetherAIAPIKey if no apiKey is provided', async () => {
-        try {
-          new LobeTogetherAI({});
-        } catch (e) {
-          expect(e).toEqual({ errorType: invalidErrorType });
-        }
-      });
-
-      it('should return TogetherAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: {
-            message: 'api is undefined',
-          },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return TogetherAIBizError with an cause response with desensitize Url', async () => {
-        // Arrange
-        const errorInfo = {
-          stack: 'abc',
-          cause: { message: 'api is undefined' },
-        };
-        const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
-
-        instance = new LobeTogetherAI({
-          apiKey: 'test',
-
-          baseURL: 'https://api.abc.com/v1',
-        });
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: 'https://api.***.com/v1',
-            error: {
-              cause: { message: 'api is undefined' },
-              stack: 'abc',
-            },
-            errorType: bizErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should throw an InvalidTogetherAIAPIKey error type on 401 status code', async () => {
-        // Mock the API call to simulate a 401 error
-        const error = new Error('Unauthorized') as any;
-        error.status = 401;
-        vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
-
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          // Expect the chat method to throw an error with InvalidTogetherAIAPIKey
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            error: new Error('Unauthorized'),
-            errorType: invalidErrorType,
-            provider,
-          });
-        }
-      });
-
-      it('should return AgentRuntimeError for non-OpenAI errors', async () => {
-        // Arrange
-        const genericError = new Error('Generic Error');
-
-        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
-
-        // Act
-        try {
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'mistralai/mistral-7b-instruct:free',
-            temperature: 0,
-          });
-        } catch (e) {
-          expect(e).toEqual({
-            endpoint: defaultBaseURL,
-            errorType: 'AgentRuntimeError',
-            provider,
-            error: {
-              name: genericError.name,
-              cause: genericError.cause,
-              message: genericError.message,
-              stack: genericError.stack,
-            },
-          });
-        }
-      });
-    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_TOGETHERAI_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // add a toReadableStream method
-
-        // mock the chat.completions.create return value, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // save the original env var value
-        const originalDebugValue = process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION;
-
-        // mock the env var
-        process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // run the test
-        // run your test function and make sure it calls debugStream when the condition is met
-        // hypothetical invocation; adjust to your actual setup
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'mistralai/mistral-7b-instruct:free',
-          temperature: 0,
-        });
-
-        // verify debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // restore the original env var value
-        process.env.DEBUG_TOGETHERAI_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
-  });
-});
+testProvider({
+  provider: 'togetherai',
+  defaultBaseURL: 'https://api.together.xyz/v1',
+  chatModel: 'mistralai/mistral-7b-instruct:free',
+  Runtime: LobeTogetherAI,
+  chatDebugEnv: 'DEBUG_TOGETHERAI_CHAT_COMPLETION',
 });
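Note: the entire bespoke suite above collapses into one call to the shared `testProvider` helper introduced in package/src/libs/agent-runtime/providerTestUtils.ts (+58 lines); the groq, mistral, togetherai, and zeroone test files in this release all adopt it. The helper's body is not shown in this diff, so the following is only a minimal sketch of what such a factory could look like, using the option names visible at the call site:

import { describe, expect, it, vi } from 'vitest';

interface TestProviderParams {
  Runtime: new (options: { apiKey?: string; baseURL?: string }) => any;
  chatDebugEnv: string;
  chatModel: string;
  defaultBaseURL: string;
  provider: string;
}

// Hypothetical reimplementation; the real providerTestUtils.ts may cover
// more cases (error mapping, debug streams, URL desensitization, ...).
export const testProvider = ({ Runtime, chatModel, defaultBaseURL }: TestProviderParams) => {
  describe('init', () => {
    it('should correctly initialize with an API key', () => {
      const instance = new Runtime({ apiKey: 'test_api_key' });
      expect(instance.baseURL).toEqual(defaultBaseURL);
    });
  });

  describe('chat', () => {
    it('should return a Response on a successful streaming call', async () => {
      const instance = new Runtime({ apiKey: 'test' });
      vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
        new ReadableStream() as any,
      );

      const result = await instance.chat({
        messages: [{ content: 'Hello', role: 'user' }],
        model: chatModel,
        temperature: 0,
      });

      expect(result).toBeInstanceOf(Response);
    });
  });
};

Factoring the suite this way removes a near-identical ~290-line block from each provider's test file (compare the -284/-300/-295/-294 counts in the file list above).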
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
@@ -101,8 +101,9 @@ interface OpenAICompatibleFactoryOptions<T extends Record<string, any> = any> {
 export function transformResponseToStream(data: OpenAI.ChatCompletion) {
   return new ReadableStream({
     start(controller) {
+      const choices = data.choices || [];
       const chunk: OpenAI.ChatCompletionChunk = {
-        choices:
+        choices: choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
          delta: {
            content: choice.message.content,
            role: choice.message.role,
@@ -128,7 +129,7 @@ export function transformResponseToStream(data: OpenAI.ChatCompletion) {
       controller.enqueue(chunk);
 
       controller.enqueue({
-        choices:
+        choices: choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
          delta: {
            content: null,
            role: choice.message.role,
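Note: the two hunks above make `transformResponseToStream` tolerate completion payloads that omit `choices`, which some OpenAI-compatible endpoints return for empty or error responses. A hypothetical call showing the fixed failure mode (import path assumed):

import { transformResponseToStream } from '@/libs/agent-runtime/utils/openaiCompatibleFactory';

// Previously `data.choices.map(...)` threw
// `TypeError: Cannot read properties of undefined (reading 'map')`;
// with the `data.choices || []` guard the stream simply emits chunks
// whose `choices` is an empty array.
const stream = transformResponseToStream({ id: 'cmpl-1' } as any);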
@@ -219,7 +220,9 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
       ...postPayload,
       messages,
       ...(chatCompletion?.noUserId ? {} : { user: options?.user }),
+      stream_options: postPayload.stream ? { include_usage: true } : undefined,
     };
+
     if (debug?.chatCompletion?.()) {
       console.log('[requestPayload]:', JSON.stringify(finalPayload, null, 2));
     }
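Note: `stream_options: { include_usage: true }` is the standard OpenAI Chat Completions flag that makes the final streamed chunk carry a `usage` object, which is what feeds the new usageConverter and the reworked UsageDetail UI in this release. It is attached only when `postPayload.stream` is set, so non-streaming requests are unchanged (`undefined` is dropped on serialization). Illustrative final payload for a streaming call (values hypothetical):

const finalPayload = {
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'mistralai/mistral-7b-instruct:free',
  stream: true,
  stream_options: { include_usage: true },
  user: 'user-id',
};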
package/src/libs/agent-runtime/utils/streams/anthropic.test.ts
@@ -225,7 +225,7 @@ describe('AnthropicStream', () => {
 
         'id: msg_017aTuY86wNxth5TE544yqJq',
         'event: usage',
-        'data: {"
+        'data: {"inputCacheMissTokens":457,"totalInputTokens":457,"totalOutputTokens":84,"totalTokens":541}\n',
       ].map((item) => `${item}\n`),
     );
 
@@ -381,8 +381,7 @@ describe('AnthropicStream', () => {
 
         'id: msg_0175ryA67RbGrnRrGBXFQEYK',
         'event: usage',
-        'data: {"
-
+        'data: {"inputCacheMissTokens":485,"totalInputTokens":485,"totalOutputTokens":154,"totalTokens":639}\n',
         'id: msg_0175ryA67RbGrnRrGBXFQEYK',
         'event: stop',
         'data: "message_stop"\n',
@@ -392,6 +391,91 @@ describe('AnthropicStream', () => {
     expect(onToolCallMock).toHaveBeenCalledTimes(6);
   });
 
+  it('should handle prompts context caching', async () => {
+    const streams = [
+      {
+        type: 'message_start',
+        message: {
+          id: 'msg_01Vxc4yQTEjkDSba3N3BMbH8',
+          type: 'message',
+          role: 'assistant',
+          model: 'claude-3-7-sonnet-20250219',
+          content: [],
+          stop_reason: null,
+          stop_sequence: null,
+          usage: {
+            input_tokens: 6,
+            cache_creation_input_tokens: 457,
+            cache_read_input_tokens: 17918,
+            output_tokens: 2,
+          },
+        },
+      },
+      { type: 'content_block_start', index: 0, content_block: { type: 'text', text: '' } },
+      { type: 'content_block_delta', index: 0, delta: { type: 'text_delta', text: '\n\n根' } },
+      {
+        type: 'content_block_delta',
+        index: 0,
+        delta: { type: 'text_delta', text: '/\n[^20]: https://s' },
+      },
+      { type: 'content_block_stop', index: 0 },
+      {
+        type: 'message_delta',
+        delta: { stop_reason: 'end_turn', stop_sequence: null },
+        usage: { output_tokens: 3222 },
+      },
+      { type: 'message_stop' },
+    ];
+
+    const mockReadableStream = new ReadableStream({
+      start(controller) {
+        streams.forEach((chunk) => {
+          controller.enqueue(chunk);
+        });
+        controller.close();
+      },
+    });
+
+    const protocolStream = AnthropicStream(mockReadableStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(
+      [
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: data',
+        'data: {"id":"msg_01Vxc4yQTEjkDSba3N3BMbH8","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":6,"cache_creation_input_tokens":457,"cache_read_input_tokens":17918,"output_tokens":2}}\n',
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: data',
+        'data: ""\n',
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: text',
+        'data: "\\n\\n根"\n',
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: text',
+        'data: "/\\n[^20]: https://s"\n',
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: data',
+        'data: {"type":"content_block_stop","index":0}\n',
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: stop',
+        'data: "end_turn"\n',
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: usage',
+        'data: {"inputCacheMissTokens":6,"inputCachedTokens":17918,"inputWriteCacheTokens":457,"totalInputTokens":18381,"totalOutputTokens":3224,"totalTokens":21605}\n',
+
+        'id: msg_01Vxc4yQTEjkDSba3N3BMbH8',
+        'event: stop',
+        'data: "message_stop"\n',
+      ].map((item) => `${item}\n`),
+    );
+  });
   describe('thinking', () => {
     it('should handle normal thinking ', async () => {
       const streams = [
@@ -515,7 +599,7 @@ describe('AnthropicStream', () => {
         'data: "end_turn"\n',
         'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
         'event: usage',
-        'data: {"
+        'data: {"inputCacheMissTokens":46,"totalInputTokens":46,"totalOutputTokens":365,"totalTokens":411}\n',
         'id: msg_01MNsLe7n1uVLtu6W8rCFujD',
         'event: stop',
         'data: "message_stop"\n',
@@ -675,7 +759,7 @@ describe('AnthropicStream', () => {
         'data: "end_turn"\n',
         'id: msg_019q32esPvu3TftzZnL6JPys',
         'event: usage',
-        'data: {"
+        'data: {"inputCacheMissTokens":92,"totalInputTokens":92,"totalOutputTokens":263,"totalTokens":355}\n',
         'id: msg_019q32esPvu3TftzZnL6JPys',
         'event: stop',
         'data: "message_stop"\n',
package/src/libs/agent-runtime/utils/streams/anthropic.ts
@@ -22,9 +22,24 @@ export const transformAnthropicStream = (
   switch (chunk.type) {
     case 'message_start': {
       context.id = chunk.message.id;
+      let totalInputTokens = chunk.message.usage?.input_tokens;
+
+      if (
+        chunk.message.usage?.cache_creation_input_tokens ||
+        chunk.message.usage?.cache_read_input_tokens
+      ) {
+        totalInputTokens =
+          chunk.message.usage?.input_tokens +
+          (chunk.message.usage.cache_creation_input_tokens || 0) +
+          (chunk.message.usage.cache_read_input_tokens || 0);
+      }
+
       context.usage = {
-
-
+        inputCacheMissTokens: chunk.message.usage?.input_tokens,
+        inputCachedTokens: chunk.message.usage?.cache_read_input_tokens || undefined,
+        inputWriteCacheTokens: chunk.message.usage?.cache_creation_input_tokens || undefined,
+        totalInputTokens,
+        totalOutputTokens: chunk.message.usage?.output_tokens,
       };
 
       return { data: chunk.message, id: chunk.message.id, type: 'data' };
@@ -140,18 +155,20 @@ export const transformAnthropicStream = (
     }
 
     case 'message_delta': {
-      const
-
-      const
+      const totalOutputTokens =
+        chunk.usage?.output_tokens + (context.usage?.totalOutputTokens || 0);
+      const totalInputTokens = context.usage?.totalInputTokens || 0;
+      const totalTokens = totalInputTokens + totalOutputTokens;
 
       if (totalTokens > 0) {
         return [
           { data: chunk.delta.stop_reason, id: context.id, type: 'stop' },
           {
             data: {
-
-
-
+              ...context.usage,
+              totalInputTokens,
+              totalOutputTokens,
+              totalTokens,
             } as ModelTokensUsage,
             id: context.id,
             type: 'usage',