@lobehub/chat 1.84.19 → 1.84.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
```diff
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.84.20](https://github.com/lobehub/lobe-chat/compare/v1.84.19...v1.84.20)
+
+<sup>Released on **2025-05-04**</sup>
+
+#### 💄 Styles
+
+- **misc**: Show Aliyun Bailian tokens usage tracking.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Show Aliyun Bailian tokens usage tracking, closes [#7660](https://github.com/lobehub/lobe-chat/issues/7660) ([3ef0542](https://github.com/lobehub/lobe-chat/commit/3ef0542))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.84.19](https://github.com/lobehub/lobe-chat/compare/v1.84.18...v1.84.19)
 
 <sup>Released on **2025-05-04**</sup>
```
package/changelog/v1.json
CHANGED
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.84.19",
+  "version": "1.84.20",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
```
```diff
@@ -24,16 +24,19 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
   chatCompletion: {
     handlePayload: (payload) => {
-      const { model, presence_penalty, temperature, thinking, top_p, enabledSearch, ...rest } = payload;
+      const { model, presence_penalty, temperature, thinking, top_p, enabledSearch, ...rest } =
+        payload;
 
       return {
         ...rest,
-        ...(
-
-
-
-
-
+        ...(['qwen3', 'qwen-turbo', 'qwen-plus'].some((keyword) =>
+          model.toLowerCase().includes(keyword),
+        )
+          ? {
+              enable_thinking: thinking !== undefined ? thinking.type === 'enabled' : false,
+              thinking_budget:
+                thinking?.budget_tokens === 0 ? 0 : thinking?.budget_tokens || undefined,
+            }
           : {}),
         frequency_penalty: undefined,
         model,
```
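This hunk (the dump omits the file path, but the context line shows it is the Qwen provider built on `LobeOpenAICompatibleFactory`; the removed branch's contents are truncated in the source diff) now attaches DashScope's thinking parameters for any model id containing `qwen3`, `qwen-turbo`, or `qwen-plus`. A standalone sketch of the new mapping; the `Thinking` shape here is an assumption for illustration, not the package's type:

```ts
// Standalone sketch, not lobe-chat's actual module.
type Thinking = { budget_tokens?: number; type: 'enabled' | 'disabled' };

const buildThinkingParams = (model: string, thinking?: Thinking) => {
  const matches = ['qwen3', 'qwen-turbo', 'qwen-plus'].some((keyword) =>
    model.toLowerCase().includes(keyword),
  );

  if (!matches) return {};

  return {
    // Thinking is off unless the caller explicitly enabled it.
    enable_thinking: thinking !== undefined ? thinking.type === 'enabled' : false,
    // An explicit budget of 0 is preserved; other falsy budgets become undefined.
    thinking_budget: thinking?.budget_tokens === 0 ? 0 : thinking?.budget_tokens || undefined,
  };
};

buildThinkingParams('qwen3-32b', { budget_tokens: 1024, type: 'enabled' });
// => { enable_thinking: true, thinking_budget: 1024 }
buildThinkingParams('qwen-max'); // => {}
```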
```diff
@@ -77,7 +80,16 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
   models: async ({ client }) => {
     const { LOBE_DEFAULT_MODEL_LIST } = await import('@/config/aiModels');
 
-    const functionCallKeywords = [
+    const functionCallKeywords = [
+      'qwen-max',
+      'qwen-plus',
+      'qwen-turbo',
+      'qwen-long',
+      'qwen1.5',
+      'qwen2',
+      'qwen2.5',
+      'qwen3',
+    ];
 
     const visionKeywords = ['qvq', 'vl'];
```
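The `models` hunk reflows `functionCallKeywords` into a multi-line array (the original single-line contents are truncated in this dump). A hypothetical sketch of how keyword lists like this and the adjacent `visionKeywords` typically drive capability flags on fetched model ids; `describeModel` is illustrative, not a function from the package:

```ts
const functionCallKeywords = [
  'qwen-max',
  'qwen-plus',
  'qwen-turbo',
  'qwen-long',
  'qwen1.5',
  'qwen2',
  'qwen2.5',
  'qwen3',
];
const visionKeywords = ['qvq', 'vl'];

// Flag capabilities by substring match on the lowercased model id.
const describeModel = (id: string) => ({
  functionCall: functionCallKeywords.some((keyword) => id.toLowerCase().includes(keyword)),
  id,
  vision: visionKeywords.some((keyword) => id.toLowerCase().includes(keyword)),
});

describeModel('qwen2.5-72b-instruct'); // { functionCall: true, vision: false, ... }
describeModel('qwen-vl-max'); // { functionCall: false, vision: true, ... }
```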
```diff
@@ -1,6 +1,8 @@
+import OpenAI from 'openai';
 import { beforeAll, describe, expect, it, vi } from 'vitest';
 
-import {
+import { StreamContext } from './protocol';
+import { QwenAIStream, transformQwenStream } from './qwen';
 
 describe('QwenAIStream', () => {
   beforeAll(() => {});
```
```diff
@@ -349,3 +351,76 @@ describe('QwenAIStream', () => {
     ]);
   });
 });
+
+describe('transformQwenStream', () => {
+  it('should handle usage chunk', () => {
+    const mockChunk: OpenAI.ChatCompletionChunk = {
+      choices: [],
+      id: 'usage-test-id',
+      model: 'qwen-test-model',
+      object: 'chat.completion.chunk',
+      created: Date.now(),
+      usage: {
+        completion_tokens: 50,
+        prompt_tokens: 100,
+        total_tokens: 150,
+        completion_tokens_details: {}, // Ensure these exist even if empty
+        prompt_tokens_details: {}, // Ensure these exist even if empty
+      },
+    };
+
+    const streamContext: StreamContext = { id: '' };
+
+    const result = transformQwenStream(mockChunk, streamContext);
+
+    expect(result).toEqual({
+      id: 'usage-test-id',
+      type: 'usage',
+      data: {
+        inputTextTokens: 100,
+        outputTextTokens: 50,
+        totalInputTokens: 100,
+        totalOutputTokens: 50,
+        totalTokens: 150,
+      },
+    });
+
+    // Verify streamContext is updated
+    expect(streamContext.usage).toEqual({
+      inputTextTokens: 100,
+      outputTextTokens: 50,
+      totalInputTokens: 100,
+      totalOutputTokens: 50,
+      totalTokens: 150,
+    });
+  });
+
+  it('should handle usage chunk without streamContext', () => {
+    const mockChunk: OpenAI.ChatCompletionChunk = {
+      choices: [],
+      id: 'usage-test-id-no-ctx',
+      model: 'qwen-test-model',
+      object: 'chat.completion.chunk',
+      created: Date.now(),
+      usage: {
+        completion_tokens: 55,
+        prompt_tokens: 105,
+        total_tokens: 160,
+      },
+    };
+
+    const result = transformQwenStream(mockChunk); // No streamContext passed
+
+    expect(result).toEqual({
+      id: 'usage-test-id-no-ctx',
+      type: 'usage',
+      data: {
+        inputTextTokens: 105,
+        outputTextTokens: 55,
+        totalInputTokens: 105,
+        totalOutputTokens: 55,
+        totalTokens: 160,
+      },
+    });
+  });
+});
```
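The new tests pin down the token-usage field mapping. That mapping, extracted as a standalone helper (a sketch of only the subset the tests exercise; the real conversion is `convertUsage` from `../usageConverter`, imported in the next file):

```ts
interface OpenAIUsage {
  completion_tokens: number;
  prompt_tokens: number;
  total_tokens: number;
}

// prompt_* maps to input fields, completion_* to output fields.
const toModelUsage = (usage: OpenAIUsage) => ({
  inputTextTokens: usage.prompt_tokens,
  outputTextTokens: usage.completion_tokens,
  totalInputTokens: usage.prompt_tokens,
  totalOutputTokens: usage.completion_tokens,
  totalTokens: usage.total_tokens,
});

toModelUsage({ completion_tokens: 50, prompt_tokens: 100, total_tokens: 150 });
// => matches the `data` object asserted in the first test
```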
```diff
@@ -4,17 +4,37 @@ import { ChatCompletionContentPart } from 'openai/resources/index.mjs';
 import type { Stream } from 'openai/streaming';
 
 import { ChatStreamCallbacks } from '../../types';
+import { convertUsage } from '../usageConverter';
 import {
+  StreamContext,
   StreamProtocolChunk,
   StreamProtocolToolCallChunk,
   StreamToolCallChunkData,
   convertIterableToStream,
   createCallbacksTransformer,
   createSSEProtocolTransformer,
+  createTokenSpeedCalculator,
   generateToolCallId,
 } from './protocol';
 
-export const transformQwenStream = (
+export const transformQwenStream = (
+  chunk: OpenAI.ChatCompletionChunk,
+  streamContext?: StreamContext,
+): StreamProtocolChunk | StreamProtocolChunk[] => {
+  if (Array.isArray(chunk.choices) && chunk.choices.length === 0 && chunk.usage) {
+    const usage = convertUsage({
+      ...chunk.usage,
+      completion_tokens_details: chunk.usage.completion_tokens_details || {},
+      prompt_tokens_details: chunk.usage.prompt_tokens_details || {},
+    });
+
+    if (streamContext) {
+      streamContext.usage = usage;
+    }
+
+    return { data: usage, id: chunk.id, type: 'usage' };
+  }
+
   const item = chunk.choices[0];
 
   if (!item) {
```
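The new branch at the top of `transformQwenStream` handles the terminal usage chunk. With OpenAI-compatible streaming APIs, requesting usage (for example via `stream_options: { include_usage: true }`) produces a final chunk whose `choices` array is empty and whose `usage` object is populated, which is the shape guarded here. A minimal sketch of the detection:

```ts
// Terminal usage chunks carry no choices but do carry usage totals.
const isUsageChunk = (chunk: { choices: unknown[]; usage?: unknown }) =>
  Array.isArray(chunk.choices) && chunk.choices.length === 0 && Boolean(chunk.usage);

isUsageChunk({ choices: [], usage: { total_tokens: 150 } }); // true
isUsageChunk({ choices: [{ delta: { content: 'hi' } }] }); // false
```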
```diff
@@ -96,10 +116,14 @@ export const QwenAIStream = (
   // eslint-disable-next-line @typescript-eslint/no-unused-vars, unused-imports/no-unused-vars
   { callbacks, inputStartAt }: { callbacks?: ChatStreamCallbacks; inputStartAt?: number } = {},
 ) => {
+  const streamContext: StreamContext = { id: '' };
   const readableStream =
     stream instanceof ReadableStream ? stream : convertIterableToStream(stream);
 
   return readableStream
-    .pipeThrough(
+    .pipeThrough(
+      createTokenSpeedCalculator(transformQwenStream, { inputStartAt, streamStack: streamContext }),
+    )
+    .pipeThrough(createSSEProtocolTransformer((c) => c, streamContext))
     .pipeThrough(createCallbacksTransformer(callbacks));
 };
```
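`QwenAIStream` now threads a shared `streamContext` through a three-stage pipeline: `createTokenSpeedCalculator` (wrapping `transformQwenStream`), then `createSSEProtocolTransformer`, then `createCallbacksTransformer`. A generic sketch of the same `pipeThrough` composition using only the standard Web Streams API; the stand-in transforms below are assumptions, not the package's actual stages:

```ts
// Stage 1: stand-in for protocol transformation (transformQwenStream).
const transform = new TransformStream<string, string>({
  transform(chunk, controller) {
    controller.enqueue(chunk.toUpperCase());
  },
});

// Stage 2: stand-in for SSE serialization (createSSEProtocolTransformer).
const serialize = new TransformStream<string, string>({
  transform(chunk, controller) {
    controller.enqueue(`data: ${chunk}\n\n`);
  },
});

const source = new ReadableStream<string>({
  start(controller) {
    controller.enqueue('hello');
    controller.close();
  },
});

// Stages are chained in order; each pipeThrough returns a new ReadableStream.
const out = source.pipeThrough(transform).pipeThrough(serialize);
// `out` emits "data: HELLO\n\n"
```

Because each stage receives the same `streamContext` object, state recorded upstream (token usage, token speed) is visible to the downstream SSE serializer without re-parsing the chunks.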