@lobehub/chat 1.4.3 → 1.5.1
This diff reflects the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +50 -0
- package/README.md +8 -8
- package/README.zh-CN.md +8 -8
- package/package.json +2 -1
- package/src/app/(main)/@nav/_layout/Desktop/BottomActions.tsx +2 -2
- package/src/components/BrandWatermark/index.tsx +7 -1
- package/src/config/modelProviders/qwen.ts +22 -0
- package/src/const/url.ts +5 -0
- package/src/features/AlertBanner/CloudBanner.tsx +3 -3
- package/src/features/User/UserPanel/useMenu.tsx +4 -3
- package/src/libs/agent-runtime/qwen/index.test.ts +114 -4
- package/src/libs/agent-runtime/qwen/index.ts +128 -28
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +54 -55
- package/src/libs/agent-runtime/utils/streams/index.ts +1 -0
- package/src/libs/agent-runtime/utils/streams/qwen.test.ts +350 -0
- package/src/libs/agent-runtime/utils/streams/qwen.ts +94 -0
- package/vitest.config.ts +5 -3
package/CHANGELOG.md
CHANGED

@@ -2,6 +2,56 @@

 # Changelog

+### [Version 1.5.1](https://github.com/lobehub/lobe-chat/compare/v1.5.0...v1.5.1)
+
+<sup>Released on **2024-07-17**</sup>
+
+#### 💄 Styles
+
+- **misc**: Improve brand url.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Improve brand url, closes [#3238](https://github.com/lobehub/lobe-chat/issues/3238) ([eef066f](https://github.com/lobehub/lobe-chat/commit/eef066f))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+## [Version 1.5.0](https://github.com/lobehub/lobe-chat/compare/v1.4.3...v1.5.0)
+
+<sup>Released on **2024-07-17**</sup>
+
+#### ✨ Features
+
+- **misc**: Support qwen-vl and tool call for qwen.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Support qwen-vl and tool call for qwen, closes [#3114](https://github.com/lobehub/lobe-chat/issues/3114) ([5216a85](https://github.com/lobehub/lobe-chat/commit/5216a85))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.4.3](https://github.com/lobehub/lobe-chat/compare/v1.4.2...v1.4.3)

 <sup>Released on **2024-07-15**</sup>
package/README.md
CHANGED

@@ -265,14 +265,14 @@ Our marketplace is not just a showcase platform but also a collaborative space.

 <!-- AGENT LIST -->

-| Recent Submits
-|
-| [
-| [
-| [
-| [
-
-> 📊 Total agents: [<kbd>**
+| Recent Submits | Description |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Convert SQL Table Structure to Dao and Mapper](https://chat-preview.lobehub.com/market?agent=my-batis-generator)<br/><sup>By **[MeYoung](https://github.com/MeYoung)** on **2024-07-17**</sup> | Generate entity class and MyBatis Mapper based on a table structure<br/>`sql` `sql` `mybatis` |
+| [Foreign Trade High EQ Response](https://chat-preview.lobehub.com/market?agent=reply-agent)<br/><sup>By **[zhushen12580](https://github.com/zhushen12580)** on **2024-07-13**</sup> | My goal is to provide professional responses with high emotional intelligence to help solve various foreign trade-related issues.<br/>`polishing` `high-eq` `response` |
+| [Ducky Programming Assistant](https://chat-preview.lobehub.com/market?agent=rubber-duck-programming)<br/><sup>By **[JiyuShao](https://github.com/JiyuShao)** on **2024-07-10**</sup> | Ducky Programming Assistant<br/>`programming` |
+| [B1 Level German Conversation Partner](https://chat-preview.lobehub.com/market?agent=deutsche-b-1)<br/><sup>By **[tayhe](https://github.com/tayhe)** on **2024-07-08**</sup> | Provides fluent German conversation partners for B1 level learners<br/>`language-exchange` `learning-support` `education` `german-learning` |
+
+> 📊 Total agents: [<kbd>**298**</kbd> ](https://github.com/lobehub/lobe-chat-agents)

 <!-- AGENT LIST -->

package/README.zh-CN.md
CHANGED

@@ -254,14 +254,14 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地

 <!-- AGENT LIST -->

-| 最近新增
-|
-| [
-| [
-| [
-| [
-
-> 📊 Total agents: [<kbd>**
+| 最近新增 | 助手说明 |
+| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- |
+| [SQL 表结构转 Dao 和 Mapper](https://chat-preview.lobehub.com/market?agent=my-batis-generator)<br/><sup>By **[MeYoung](https://github.com/MeYoung)** on **2024-07-17**</sup> | 给与一个表结构,生成表的实体和 MyBatis 的 Mapper<br/>`sql` `sql` `mybatis` |
+| [外贸高情商回复](https://chat-preview.lobehub.com/market?agent=reply-agent)<br/><sup>By **[zhushen12580](https://github.com/zhushen12580)** on **2024-07-13**</sup> | 我的目标是提供具有高情商的专业回复,帮助解决各种外贸相关的问题。<br/>`润色` `高情商` `回复` |
+| [小黄鸭编程助手](https://chat-preview.lobehub.com/market?agent=rubber-duck-programming)<br/><sup>By **[JiyuShao](https://github.com/JiyuShao)** on **2024-07-10**</sup> | 小黄鸭编程助手<br/>`programming` |
+| [B1 级德语会话伙伴](https://chat-preview.lobehub.com/market?agent=deutsche-b-1)<br/><sup>By **[tayhe](https://github.com/tayhe)** on **2024-07-08**</sup> | 为 B1 级学习者提供流利的德语会话伙伴<br/>`语言交流` `学习支持` `教育` `德语学习` |
+
+> 📊 Total agents: [<kbd>**298**</kbd> ](https://github.com/lobehub/lobe-chat-agents)

 <!-- AGENT LIST -->

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.4.3",
+  "version": "1.5.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -267,6 +267,7 @@
     "vitest": "~1.2.2",
     "vitest-canvas-mock": "^0.3.3"
   },
+  "packageManager": "pnpm@9.5.0",
   "publishConfig": {
     "access": "public",
     "registry": "https://registry.npmjs.org"
package/src/app/(main)/@nav/_layout/Desktop/BottomActions.tsx
CHANGED

@@ -4,7 +4,7 @@ import Link from 'next/link';
 import { memo } from 'react';
 import { useTranslation } from 'react-i18next';

-import {
+import { DOCUMENTS_REFER_URL, GITHUB } from '@/const/url';

 const BottomActions = memo(() => {
   const { t } = useTranslation('common');
@@ -14,7 +14,7 @@ const BottomActions = memo(() => {
       <Link aria-label={'GitHub'} href={GITHUB} target={'_blank'}>
         <ActionIcon icon={Github} placement={'right'} title={'GitHub'} />
       </Link>
-      <Link aria-label={t('document')} href={
+      <Link aria-label={t('document')} href={DOCUMENTS_REFER_URL} target={'_blank'}>
         <ActionIcon icon={Book} placement={'right'} title={t('document')} />
       </Link>
     </>
package/src/components/BrandWatermark/index.tsx
CHANGED

@@ -6,6 +6,8 @@ import Link from 'next/link';
 import { memo } from 'react';
 import { Flexbox, FlexboxProps } from 'react-layout-kit';

+import { UTM_SOURCE } from '@/const/url';
+
 const useStyles = createStyles(({ token, css }) => ({
   logoLink: css`
     height: 20px;
@@ -29,7 +31,11 @@ const BrandWatermark = memo<Omit<FlexboxProps, 'children'>>(({ style, ...rest })
       {...rest}
     >
       <span>Powered by</span>
-      <Link
+      <Link
+        className={styles.logoLink}
+        href={`https://lobehub.com?utm_source=${UTM_SOURCE}&utm_content=brand_watermark`}
+        target={'_blank'}
+      >
         <LobeHub size={20} type={'text'} />
       </Link>
     </Flexbox>
package/src/config/modelProviders/qwen.ts
CHANGED

@@ -7,6 +7,7 @@ const Qwen: ModelProviderCard = {
     description: '通义千问超大规模语言模型,支持中文、英文等不同语言输入',
     displayName: 'Qwen Turbo',
     enabled: true,
+    functionCall: true,
     id: 'qwen-turbo',
     tokens: 8000,
   },
@@ -14,6 +15,7 @@ const Qwen: ModelProviderCard = {
     description: '通义千问超大规模语言模型增强版,支持中文、英文等不同语言输入',
     displayName: 'Qwen Plus',
     enabled: true,
+    functionCall: true,
     id: 'qwen-plus',
     tokens: 32_000,
   },
@@ -22,6 +24,7 @@ const Qwen: ModelProviderCard = {
       '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,当前通义千问2.5产品版本背后的API模型',
     displayName: 'Qwen Max',
     enabled: true,
+    functionCall: true,
     id: 'qwen-max',
     tokens: 8000,
   },
@@ -29,6 +32,7 @@ const Qwen: ModelProviderCard = {
     description:
       '通义千问千亿级别超大规模语言模型,支持中文、英文等不同语言输入,扩展了上下文窗口',
     displayName: 'Qwen Max LongContext',
+    functionCall: true,
     id: 'qwen-max-longcontext',
     tokens: 30_000,
   },
@@ -50,6 +54,24 @@ const Qwen: ModelProviderCard = {
     id: 'qwen2-72b-instruct',
     tokens: 131_072,
   },
+  {
+    description:
+      '通义千问大规模视觉语言模型增强版。大幅提升细节识别能力和文字识别能力,支持超百万像素分辨率和任意长宽比规格的图像。',
+    displayName: 'Qwen VL Plus',
+    enabled: true,
+    id: 'qwen-vl-plus',
+    tokens: 6144,
+    vision: true,
+  },
+  {
+    description:
+      '通义千问超大规模视觉语言模型。相比增强版,再次提升视觉推理能力和指令遵循能力,提供更高的视觉感知和认知水平。',
+    displayName: 'Qwen VL Max',
+    enabled: true,
+    id: 'qwen-vl-max',
+    tokens: 6144,
+    vision: true,
+  },
 ],
 checkModel: 'qwen-turbo',
 disableBrowserRequest: true,
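The flags added above are plain metadata on the provider card: `functionCall` marks models that may receive OpenAI-style tool definitions, and `vision` marks models that accept image parts. A minimal sketch of how a client might consume these flags; the `ChatModelCard` interface is simplified here and the filtering helper is illustrative, not part of the package:

```ts
// Simplified stand-in for the package's model card shape (illustrative only).
interface ChatModelCard {
  displayName: string;
  functionCall?: boolean;
  id: string;
  tokens: number;
  vision?: boolean;
}

const qwenModels: ChatModelCard[] = [
  { displayName: 'Qwen Turbo', functionCall: true, id: 'qwen-turbo', tokens: 8000 },
  { displayName: 'Qwen VL Plus', id: 'qwen-vl-plus', tokens: 6144, vision: true },
];

// Only tool-capable models should be offered tool definitions;
// only vision models should be offered image content parts.
const toolCapable = qwenModels.filter((m) => m.functionCall).map((m) => m.id);
const imageCapable = qwenModels.filter((m) => m.vision).map((m) => m.id);

console.log(toolCapable); // ['qwen-turbo']
console.log(imageCapable); // ['qwen-vl-plus']
```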
package/src/const/url.ts
CHANGED

@@ -6,6 +6,8 @@ import { withBasePath } from '@/utils/basePath';
 import pkg from '../../package.json';
 import { INBOX_SESSION_ID } from './session';

+export const UTM_SOURCE = 'chat_preview';
+
 export const OFFICIAL_URL = 'https://lobechat.com/';
 export const OFFICIAL_PREVIEW_URL = 'https://chat-preview.lobehub.com/';
 export const OFFICIAL_SITE = 'https://lobehub.com/';
@@ -24,6 +26,9 @@ export const USAGE_DOCUMENTS = urlJoin(DOCUMENTS, '/usage');
 export const SELF_HOSTING_DOCUMENTS = urlJoin(DOCUMENTS, '/self-hosting');
 export const WEBRTC_SYNC_DOCUMENTS = urlJoin(SELF_HOSTING_DOCUMENTS, '/advanced/webrtc');

+// use this for the link
+export const DOCUMENTS_REFER_URL = `${DOCUMENTS}?utm_source=${UTM_SOURCE}`;
+
 export const WIKI = urlJoin(GITHUB, 'wiki');
 export const WIKI_PLUGIN_GUIDE = urlJoin(USAGE_DOCUMENTS, '/plugins/development');
 export const MANUAL_UPGRADE_URL = urlJoin(SELF_HOSTING_DOCUMENTS, '/advanced/upstream-sync');
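`UTM_SOURCE` and `DOCUMENTS_REFER_URL` centralize attribution tagging so the callers below stop hard-coding query strings. A sketch of the resulting URL shapes, with `DOCUMENTS` assumed to resolve to the docs root (in the package it is built via `urlJoin` elsewhere in this file):

```ts
// Values assumed for illustration; in the package DOCUMENTS is derived via urlJoin.
const UTM_SOURCE = 'chat_preview';
const DOCUMENTS = 'https://lobehub.com/docs';

const DOCUMENTS_REFER_URL = `${DOCUMENTS}?utm_source=${UTM_SOURCE}`;
console.log(DOCUMENTS_REFER_URL); // https://lobehub.com/docs?utm_source=chat_preview

// Callers append extra campaign fields the same way, e.g. the banner link below:
console.log(`https://lobechat.com/?utm_source=${UTM_SOURCE}&utm_medium=banner`);
```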
package/src/features/AlertBanner/CloudBanner.tsx
CHANGED

@@ -11,7 +11,7 @@ import Marquee from 'react-fast-marquee';
 import { useTranslation } from 'react-i18next';
 import { Center, Flexbox } from 'react-layout-kit';

-import { OFFICIAL_URL } from '@/const/url';
+import { OFFICIAL_URL, UTM_SOURCE } from '@/const/url';
 import { isOnServerSide } from '@/utils/env';

 export const BANNER_HEIGHT = 40;
@@ -60,7 +60,7 @@ const CloudBanner = memo<{ mobile?: boolean }>(({ mobile }) => {
       <b>{t('alert.cloud.title', { name: 'LobeChat Cloud' })}:</b>
       <span>
         {t(mobile ? 'alert.cloud.descOnMobile' : 'alert.cloud.desc', {
-          credit: new Intl.NumberFormat('en-US').format(
+          credit: new Intl.NumberFormat('en-US').format(450_000),
           name: 'LobeChat Cloud',
         })}
       </span>
@@ -78,7 +78,7 @@ const CloudBanner = memo<{ mobile?: boolean }>(({ mobile }) => {
       <div className={styles.background} />
       <Center className={styles.wrapper} gap={16} horizontal width={'100%'}>
         {isTruncated ? <Marquee pauseOnHover>{content}</Marquee> : content}
-        <Link href={OFFICIAL_URL} target={'_blank'}>
+        <Link href={`${OFFICIAL_URL}?utm_source=${UTM_SOURCE}&utm_medium=banner`} target={'_blank'}>
           <Button size={'small'} type="primary">
             {t('alert.cloud.action')} <Icon icon={ArrowRightIcon} />
           </Button>
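One detail worth flagging in the banner change: the credit figure is formatted with a pinned `en-US` locale, so the rendered string is stable across user locales. A quick check of what `Intl.NumberFormat` produces here:

```ts
// Locale is pinned, so every user sees the same grouping separators.
const credit = new Intl.NumberFormat('en-US').format(450_000);
console.log(credit); // "450,000"
```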
package/src/features/User/UserPanel/useMenu.tsx
CHANGED

@@ -24,10 +24,11 @@ import urlJoin from 'url-join';
 import type { MenuProps } from '@/components/Menu';
 import {
   DISCORD,
-
+  DOCUMENTS_REFER_URL,
   EMAIL_SUPPORT,
   GITHUB_ISSUES,
   OFFICIAL_URL,
+  UTM_SOURCE,
   mailTo,
 } from '@/const/url';
 import { isServerMode } from '@/const/version';
@@ -177,7 +178,7 @@ export const useMenu = () => {
       icon: <Icon icon={Cloudy} />,
       key: 'cloud',
       label: (
-        <Link href={OFFICIAL_URL} target={'_blank'}>
+        <Link href={`${OFFICIAL_URL}?utm_source=${UTM_SOURCE}`} target={'_blank'}>
           {t('userPanel.cloud', { name: 'LobeChat Cloud' })}
         </Link>
       ),
@@ -197,7 +198,7 @@ export const useMenu = () => {
       icon: <Icon icon={Book} />,
       key: 'docs',
       label: (
-        <Link href={
+        <Link href={DOCUMENTS_REFER_URL} target={'_blank'}>
           {t('userPanel.docs')}
         </Link>
       ),
package/src/libs/agent-runtime/qwen/index.test.ts
CHANGED

@@ -2,6 +2,7 @@
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

+import Qwen from '@/config/modelProviders/qwen';
 import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
 import { ModelProvider } from '@/libs/agent-runtime';
 import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
@@ -17,7 +18,7 @@ const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});

-let instance:
+let instance: LobeQwenAI;

 beforeEach(() => {
   instance = new LobeQwenAI({ apiKey: 'test' });
@@ -41,7 +42,116 @@ describe('LobeQwenAI', () => {
     });
   });

+  describe('models', () => {
+    it('should correctly list available models', async () => {
+      const instance = new LobeQwenAI({ apiKey: 'test_api_key' });
+      vi.spyOn(instance, 'models').mockResolvedValue(Qwen.chatModels);
+
+      const models = await instance.models();
+      expect(models).toEqual(Qwen.chatModels);
+    });
+  });
+
   describe('chat', () => {
+    describe('Params', () => {
+      it('should call llms with proper options', async () => {
+        const mockStream = new ReadableStream();
+        const mockResponse = Promise.resolve(mockStream);
+
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+        const result = await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'qwen-turbo',
+          temperature: 0.6,
+          top_p: 0.7,
+        });
+
+        // Assert
+        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+          {
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'qwen-turbo',
+            temperature: 0.6,
+            stream: true,
+            top_p: 0.7,
+            result_format: 'message',
+          },
+          { headers: { Accept: '*/*' } },
+        );
+        expect(result).toBeInstanceOf(Response);
+      });
+
+      it('should call vlms with proper options', async () => {
+        const mockStream = new ReadableStream();
+        const mockResponse = Promise.resolve(mockStream);
+
+        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
+
+        const result = await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'qwen-vl-plus',
+          temperature: 0.6,
+          top_p: 0.7,
+        });
+
+        // Assert
+        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
+          {
+            messages: [{ content: 'Hello', role: 'user' }],
+            model: 'qwen-vl-plus',
+            stream: true,
+          },
+          { headers: { Accept: '*/*' } },
+        );
+        expect(result).toBeInstanceOf(Response);
+      });
+
+      it('should transform non-streaming response to stream correctly', async () => {
+        const mockResponse: OpenAI.ChatCompletion = {
+          id: 'chatcmpl-fc539f49-51a8-94be-8061',
+          object: 'chat.completion',
+          created: 1719901794,
+          model: 'qwen-turbo',
+          choices: [
+            {
+              index: 0,
+              message: { role: 'assistant', content: 'Hello' },
+              finish_reason: 'stop',
+              logprobs: null,
+            },
+          ],
+        };
+        vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+          mockResponse as any,
+        );
+
+        const result = await instance.chat({
+          messages: [{ content: 'Hello', role: 'user' }],
+          model: 'qwen-turbo',
+          temperature: 0.6,
+          stream: false,
+        });
+
+        const decoder = new TextDecoder();
+
+        const reader = result.body!.getReader();
+        expect(decoder.decode((await reader.read()).value)).toContain(
+          'id: chatcmpl-fc539f49-51a8-94be-8061\n',
+        );
+        expect(decoder.decode((await reader.read()).value)).toContain('event: text\n');
+        expect(decoder.decode((await reader.read()).value)).toContain('data: "Hello"\n\n');
+
+        expect(decoder.decode((await reader.read()).value)).toContain(
+          'id: chatcmpl-fc539f49-51a8-94be-8061\n',
+        );
+        expect(decoder.decode((await reader.read()).value)).toContain('event: stop\n');
+        expect(decoder.decode((await reader.read()).value)).toContain('');
+
+        expect((await reader.read()).done).toBe(true);
+      });
+    });
+
     describe('Error', () => {
       it('should return QwenBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
@@ -129,8 +239,7 @@ describe('LobeQwenAI', () => {

       instance = new LobeQwenAI({
         apiKey: 'test',
-
-        baseURL: 'https://api.abc.com/v1',
+        baseURL: defaultBaseURL,
       });

       vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
@@ -144,7 +253,8 @@ describe('LobeQwenAI', () => {
       });
     } catch (e) {
       expect(e).toEqual({
-        endpoint
+        /* Desensitizing is unnecessary for a public-accessible gateway endpoint. */
+        endpoint: defaultBaseURL,
         error: {
           cause: { message: 'api is undefined' },
           stack: 'abc',
package/src/libs/agent-runtime/qwen/index.ts
CHANGED

@@ -1,28 +1,128 @@
-import …
-import …
-… (the remaining removed lines of the previous implementation were truncated in the source diff)
+import { omit } from 'lodash-es';
+import OpenAI, { ClientOptions } from 'openai';
+
+import Qwen from '@/config/modelProviders/qwen';
+
+import { LobeOpenAICompatibleRuntime, LobeRuntimeAI } from '../BaseAI';
+import { AgentRuntimeErrorType } from '../error';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+import { AgentRuntimeError } from '../utils/createError';
+import { debugStream } from '../utils/debugStream';
+import { handleOpenAIError } from '../utils/handleOpenAIError';
+import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
+import { StreamingResponse } from '../utils/response';
+import { QwenAIStream } from '../utils/streams';
+
+const DEFAULT_BASE_URL = 'https://dashscope.aliyuncs.com/compatible-mode/v1';
+
+/**
+ * Use DashScope OpenAI compatible mode for now.
+ * DashScope OpenAI [compatible mode](https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-vl-plus-api) currently supports base64 image input for vision models e.g. qwen-vl-plus.
+ * You can use images input either:
+ * 1. Use qwen-vl-* out of box with base64 image_url input;
+ * or
+ * 2. Set S3-* environment variables properly to store all uploaded files.
+ */
+export class LobeQwenAI extends LobeOpenAICompatibleRuntime implements LobeRuntimeAI {
+  client: OpenAI;
+  baseURL: string;
+
+  constructor({
+    apiKey,
+    baseURL = DEFAULT_BASE_URL,
+    ...res
+  }: ClientOptions & Record<string, any> = {}) {
+    super();
+    if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
+    this.client = new OpenAI({ apiKey, baseURL, ...res });
+    this.baseURL = this.client.baseURL;
+  }
+
+  async models() {
+    return Qwen.chatModels;
+  }
+
+  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
+    try {
+      const params = this.buildCompletionParamsByModel(payload);
+
+      const response = await this.client.chat.completions.create(
+        params as OpenAI.ChatCompletionCreateParamsStreaming & { result_format: string },
+        {
+          headers: { Accept: '*/*' },
+          signal: options?.signal,
+        },
+      );
+
+      if (params.stream) {
+        const [prod, debug] = response.tee();
+
+        if (process.env.DEBUG_QWEN_CHAT_COMPLETION === '1') {
+          debugStream(debug.toReadableStream()).catch(console.error);
+        }
+
+        return StreamingResponse(QwenAIStream(prod, options?.callback), {
+          headers: options?.headers,
+        });
+      }
+
+      const stream = transformResponseToStream(response as unknown as OpenAI.ChatCompletion);
+
+      return StreamingResponse(QwenAIStream(stream, options?.callback), {
+        headers: options?.headers,
+      });
+    } catch (error) {
+      if ('status' in (error as any)) {
+        switch ((error as Response).status) {
+          case 401: {
+            throw AgentRuntimeError.chat({
+              endpoint: this.baseURL,
+              error: error as any,
+              errorType: AgentRuntimeErrorType.InvalidProviderAPIKey,
+              provider: ModelProvider.Qwen,
+            });
+          }
+
+          default: {
+            break;
+          }
+        }
+      }
+      const { errorResult, RuntimeError } = handleOpenAIError(error);
+      const errorType = RuntimeError || AgentRuntimeErrorType.ProviderBizError;
+
+      throw AgentRuntimeError.chat({
+        endpoint: this.baseURL,
+        error: errorResult,
+        errorType,
+        provider: ModelProvider.Qwen,
+      });
+    }
+  }
+
+  private buildCompletionParamsByModel(payload: ChatStreamPayload) {
+    const { model, top_p, stream, messages, tools } = payload;
+    const isVisionModel = model.startsWith('qwen-vl');
+
+    const params = {
+      ...payload,
+      messages,
+      result_format: 'message',
+      stream: !!tools?.length ? false : stream ?? true,
+      top_p: top_p && top_p >= 1 ? 0.999 : top_p,
+    };
+
+    /* Qwen-vl models temporarily do not support the parameters below. */
+    /* Notice: `top_p` imposes significant impact on the result, the default 1 or 0.999 is not a proper choice. */
+    return isVisionModel
+      ? omit(
+          params,
+          'presence_penalty',
+          'frequency_penalty',
+          'temperature',
+          'result_format',
+          'top_p',
+        )
+      : params;
+  }
+}
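A usage sketch for the new runtime. The env variable name and the standalone-call setup are assumptions for illustration; the payload mirrors the tests above, and the param shaping in `buildCompletionParamsByModel` happens transparently:

```ts
import { LobeQwenAI } from '@/libs/agent-runtime/qwen';

const main = async () => {
  // The constructor throws InvalidProviderAPIKey when no key is supplied.
  const runtime = new LobeQwenAI({ apiKey: process.env.DASHSCOPE_API_KEY });

  // For qwen-vl-* models, temperature/top_p/result_format are stripped;
  // when `tools` are present, streaming is forced off and the JSON reply is re-streamed.
  const response = await runtime.chat({
    messages: [{ content: 'Hello', role: 'user' }],
    model: 'qwen-turbo',
    temperature: 0.6,
    top_p: 0.7,
  });

  console.log(response.status); // a fetch Response wrapping an SSE protocol stream
};

main().catch(console.error);
```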
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
CHANGED

@@ -54,6 +54,59 @@ interface OpenAICompatibleFactoryOptions<T extends Record<string, any> = any> {
   provider: string;
 }

+/**
+ * make the OpenAI response data as a stream
+ */
+export function transformResponseToStream(data: OpenAI.ChatCompletion) {
+  return new ReadableStream({
+    start(controller) {
+      const chunk: OpenAI.ChatCompletionChunk = {
+        choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
+          delta: {
+            content: choice.message.content,
+            role: choice.message.role,
+            tool_calls: choice.message.tool_calls?.map(
+              (tool, index): OpenAI.ChatCompletionChunk.Choice.Delta.ToolCall => ({
+                function: tool.function,
+                id: tool.id,
+                index,
+                type: tool.type,
+              }),
+            ),
+          },
+          finish_reason: null,
+          index: choice.index,
+          logprobs: choice.logprobs,
+        })),
+        created: data.created,
+        id: data.id,
+        model: data.model,
+        object: 'chat.completion.chunk',
+      };
+
+      controller.enqueue(chunk);
+
+      controller.enqueue({
+        choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
+          delta: {
+            content: choice.message.content,
+            role: choice.message.role,
+          },
+          finish_reason: choice.finish_reason,
+          index: choice.index,
+          logprobs: choice.logprobs,
+        })),
+        created: data.created,
+        id: data.id,
+        model: data.model,
+        object: 'chat.completion.chunk',
+        system_fingerprint: data.system_fingerprint,
+      } as OpenAI.ChatCompletionChunk);
+      controller.close();
+    },
+  });
+}
+
 export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>({
   provider,
   baseURL: DEFAULT_BASE_URL,
@@ -117,7 +170,7 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
         debugResponse(response);
       }

-      const stream =
+      const stream = transformResponseToStream(response as unknown as OpenAI.ChatCompletion);

       return StreamingResponse(OpenAIStream(stream, options?.callback), {
         headers: options?.headers,
@@ -162,60 +215,6 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
     }
   }

-  /**
-   * make the OpenAI response data as a stream
-   * @private
-   */
-  private transformResponseToStream(data: OpenAI.ChatCompletion) {
-    return new ReadableStream({
-      start(controller) {
-        const chunk: OpenAI.ChatCompletionChunk = {
-          choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
-            delta: {
-              content: choice.message.content,
-              role: choice.message.role,
-              tool_calls: choice.message.tool_calls?.map(
-                (tool, index): OpenAI.ChatCompletionChunk.Choice.Delta.ToolCall => ({
-                  function: tool.function,
-                  id: tool.id,
-                  index,
-                  type: tool.type,
-                }),
-              ),
-            },
-            finish_reason: null,
-            index: choice.index,
-            logprobs: choice.logprobs,
-          })),
-          created: data.created,
-          id: data.id,
-          model: data.model,
-          object: 'chat.completion.chunk',
-        };
-
-        controller.enqueue(chunk);
-
-        controller.enqueue({
-          choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
-            delta: {
-              content: choice.message.content,
-              role: choice.message.role,
-            },
-            finish_reason: choice.finish_reason,
-            index: choice.index,
-            logprobs: choice.logprobs,
-          })),
-          created: data.created,
-          id: data.id,
-          model: data.model,
-          object: 'chat.completion.chunk',
-          system_fingerprint: data.system_fingerprint,
-        } as OpenAI.ChatCompletionChunk);
-        controller.close();
-      },
-    });
-  }
-
   private handleError(error: any): ChatCompletionErrorPayload {
     let desensitizedEndpoint = this.baseURL;

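Because `transformResponseToStream` is now a module-level export rather than a private method, it can be exercised on its own. A sketch with a hand-built completion fixture (all values illustrative):

```ts
import OpenAI from 'openai';

// Exported in this diff from the factory module.
import { transformResponseToStream } from '@/libs/agent-runtime/utils/openaiCompatibleFactory';

// Hand-built non-streaming completion fixture.
const completion = {
  choices: [
    {
      finish_reason: 'stop',
      index: 0,
      logprobs: null,
      message: { content: 'Hi', role: 'assistant' },
    },
  ],
  created: 1719901794,
  id: 'chatcmpl-demo',
  model: 'qwen-turbo',
  object: 'chat.completion',
} as OpenAI.ChatCompletion;

const main = async () => {
  const reader = transformResponseToStream(completion).getReader();
  // First chunk: the whole message as a delta with finish_reason: null…
  console.log((await reader.read()).value);
  // …second chunk: the same content plus the real finish_reason, then the stream closes.
  console.log((await reader.read()).value);
};

main().catch(console.error);
```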
package/src/libs/agent-runtime/utils/streams/qwen.test.ts
ADDED

@@ -0,0 +1,350 @@
+import { beforeAll, describe, expect, it, vi } from 'vitest';
+
+import { QwenAIStream } from './qwen';
+
+describe('QwenAIStream', () => {
+  beforeAll(() => {});
+
+  it('should transform OpenAI stream to protocol stream', async () => {
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: { content: 'Hello' },
+              index: 0,
+            },
+          ],
+          id: '1',
+        });
+        controller.enqueue({
+          choices: [
+            {
+              delta: { content: ' world!' },
+              index: 1,
+            },
+          ],
+          id: '1',
+        });
+        controller.enqueue({
+          choices: [
+            {
+              delta: null,
+              finish_reason: 'stop',
+              index: 2,
+            },
+          ],
+          id: '1',
+        });
+
+        controller.close();
+      },
+    });
+
+    const onStartMock = vi.fn();
+    const onTextMock = vi.fn();
+    const onTokenMock = vi.fn();
+    const onCompletionMock = vi.fn();
+
+    const protocolStream = QwenAIStream(mockOpenAIStream, {
+      onStart: onStartMock,
+      onText: onTextMock,
+      onToken: onTokenMock,
+      onCompletion: onCompletionMock,
+    });
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      'id: 1\n',
+      'event: text\n',
+      `data: "Hello"\n\n`,
+      'id: 1\n',
+      'event: text\n',
+      `data: " world!"\n\n`,
+      'id: 1\n',
+      'event: stop\n',
+      `data: "stop"\n\n`,
+    ]);
+
+    expect(onStartMock).toHaveBeenCalledTimes(1);
+    expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
+    expect(onTextMock).toHaveBeenNthCalledWith(2, '" world!"');
+    expect(onTokenMock).toHaveBeenCalledTimes(2);
+    expect(onCompletionMock).toHaveBeenCalledTimes(1);
+  });
+
+  it('should handle tool calls', async () => {
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: {
+                tool_calls: [
+                  {
+                    function: { name: 'tool1', arguments: '{}' },
+                    id: 'call_1',
+                    index: 0,
+                    type: 'function',
+                  },
+                  {
+                    function: { name: 'tool2', arguments: '{}' },
+                    id: 'call_2',
+                    index: 1,
+                  },
+                ],
+              },
+              index: 0,
+            },
+          ],
+          id: '2',
+        });
+
+        controller.close();
+      },
+    });
+
+    const onToolCallMock = vi.fn();
+
+    const protocolStream = QwenAIStream(mockOpenAIStream, {
+      onToolCall: onToolCallMock,
+    });
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      'id: 2\n',
+      'event: tool_calls\n',
+      `data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+    ]);
+
+    expect(onToolCallMock).toHaveBeenCalledTimes(1);
+  });
+
+  it('should handle empty stream', async () => {
+    const mockStream = new ReadableStream({
+      start(controller) {
+        controller.close();
+      },
+    });
+
+    const protocolStream = QwenAIStream(mockStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([]);
+  });
+
+  it('should handle chunk with no choices', async () => {
+    const mockStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [],
+          id: '1',
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = QwenAIStream(mockStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(['id: 1\n', 'event: data\n', 'data: {"choices":[],"id":"1"}\n\n']);
+  });
+
+  it('should handle vision model stream', async () => {
+    const mockStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: {
+                content: [
+                  {
+                    text: '图中是一只小狗',
+                  },
+                ],
+              },
+            },
+          ],
+          id: '3',
+        });
+
+        /**
+         * Just for test against the description of 'output.choices[x].message.content' in [documents](https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-vl-plus-api)
+         * You're not likely to get image outputs from current versions of vl models.
+         */
+        controller.enqueue({
+          choices: [
+            {
+              delta: {
+                content: [
+                  {
+                    image: 'https://hello.mock/test.png',
+                  },
+                ],
+              },
+            },
+          ],
+          id: '3',
+        });
+        controller.close();
+      },
+    });
+
+    const protocolStream = QwenAIStream(mockStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      'id: 3\n',
+      'event: text\n',
+      'data: "图中是一只小狗"\n\n',
+      'id: 3\n',
+      'event: text\n',
+      'data: ""\n\n',
+    ]);
+  });
+
+  it('should delta content null', async () => {
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: { content: null },
+              index: 0,
+            },
+          ],
+          id: '3',
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = QwenAIStream(mockOpenAIStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(['id: 3\n', 'event: data\n', `data: {"content":null}\n\n`]);
+  });
+
+  it('should handle other delta data', async () => {
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: { custom_field: 'custom_value' },
+              index: 0,
+            },
+          ],
+          id: '4',
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = QwenAIStream(mockOpenAIStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      'id: 4\n',
+      'event: data\n',
+      `data: {"delta":{"custom_field":"custom_value"},"id":"4","index":0}\n\n`,
+    ]);
+  });
+
+  it('should handle tool calls without index and type', async () => {
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        controller.enqueue({
+          choices: [
+            {
+              delta: {
+                tool_calls: [
+                  {
+                    function: { name: 'tool1', arguments: '{}' },
+                    id: 'call_1',
+                  },
+                  {
+                    function: { name: 'tool2', arguments: '{}' },
+                    id: 'call_2',
+                  },
+                ],
+              },
+              index: 0,
+            },
+          ],
+          id: '5',
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = QwenAIStream(mockOpenAIStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual([
+      'id: 5\n',
+      'event: tool_calls\n',
+      `data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
+    ]);
+  });
+});
package/src/libs/agent-runtime/utils/streams/qwen.ts
ADDED

@@ -0,0 +1,94 @@
+import { readableFromAsyncIterable } from 'ai';
+import { ChatCompletionContentPartText } from 'ai/prompts';
+import OpenAI from 'openai';
+import { ChatCompletionContentPart } from 'openai/resources/index.mjs';
+import type { Stream } from 'openai/streaming';
+
+import { ChatStreamCallbacks } from '../../types';
+import {
+  StreamProtocolChunk,
+  StreamProtocolToolCallChunk,
+  StreamToolCallChunkData,
+  chatStreamable,
+  createCallbacksTransformer,
+  createSSEProtocolTransformer,
+  generateToolCallId,
+} from './protocol';
+
+export const transformQwenStream = (chunk: OpenAI.ChatCompletionChunk): StreamProtocolChunk => {
+  const item = chunk.choices[0];
+
+  if (!item) {
+    return { data: chunk, id: chunk.id, type: 'data' };
+  }
+
+  if (Array.isArray(item.delta?.content)) {
+    const part = item.delta.content[0];
+    const process = (part: ChatCompletionContentPart): ChatCompletionContentPartText => {
+      let [key, value] = Object.entries(part)[0];
+      if (key === 'image') {
+        return {
+          text: ``,
+          type: 'text',
+        };
+      }
+      return {
+        text: value,
+        type: 'text',
+      };
+    };
+
+    const data = process(part);
+
+    return {
+      data: data.text,
+      id: chunk.id,
+      type: 'text',
+    };
+  }
+
+  if (item.delta?.tool_calls) {
+    return {
+      data: item.delta.tool_calls.map(
+        (value, index): StreamToolCallChunkData => ({
+          function: value.function,
+          id: value.id || generateToolCallId(index, value.function?.name),
+          index: typeof value.index !== 'undefined' ? value.index : index,
+          type: value.type || 'function',
+        }),
+      ),
+      id: chunk.id,
+      type: 'tool_calls',
+    } as StreamProtocolToolCallChunk;
+  }
+
+  if (item.finish_reason) {
+    return { data: item.finish_reason, id: chunk.id, type: 'stop' };
+  }
+
+  if (typeof item.delta?.content === 'string') {
+    return { data: item.delta.content, id: chunk.id, type: 'text' };
+  }
+
+  if (item.delta?.content === null) {
+    return { data: item.delta, id: chunk.id, type: 'data' };
+  }
+
+  return {
+    data: { delta: item.delta, id: chunk.id, index: item.index },
+    id: chunk.id,
+    type: 'data',
+  };
+};
+
+export const QwenAIStream = (
+  stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
+  callbacks?: ChatStreamCallbacks,
+) => {
+  const readableStream =
+    stream instanceof ReadableStream ? stream : readableFromAsyncIterable(chatStreamable(stream));
+
+  return readableStream
+    .pipeThrough(createSSEProtocolTransformer(transformQwenStream))
+    .pipeThrough(createCallbacksTransformer(callbacks));
+};
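`transformQwenStream` maps each raw DashScope chunk to exactly one protocol event, which is what the test file above asserts end to end. A sketch of the per-chunk mapping in isolation (chunks are hand-built and loosely typed with `any`):

```ts
import { transformQwenStream } from '@/libs/agent-runtime/utils/streams/qwen';

// A plain string delta becomes a 'text' event…
console.log(
  transformQwenStream({ choices: [{ delta: { content: 'Hi' }, index: 0 }], id: '1' } as any),
);
// -> { data: 'Hi', id: '1', type: 'text' }

// …and a finish_reason becomes a 'stop' event.
console.log(
  transformQwenStream({
    choices: [{ delta: null, finish_reason: 'stop', index: 0 }],
    id: '1',
  } as any),
);
// -> { data: 'stop', id: '1', type: 'stop' }
```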
package/vitest.config.ts
CHANGED

@@ -24,9 +24,6 @@ export default defineConfig({
       reporter: ['text', 'json', 'lcov', 'text-summary'],
       reportsDirectory: './coverage/app',
     },
-    deps: {
-      inline: ['vitest-canvas-mock'],
-    },
     environment: 'happy-dom',
     exclude: [
       '**/node_modules/**',
@@ -36,6 +33,11 @@ export default defineConfig({
       'src/server/modules/**/**',
     ],
     globals: true,
+    server: {
+      deps: {
+        inline: ['vitest-canvas-mock'],
+      },
+    },
     setupFiles: './tests/setup.ts',
   },
 });