@lobehub/chat 1.16.1 → 1.16.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of @lobehub/chat might be problematic.
- package/CHANGELOG.md +50 -0
- package/README.md +8 -8
- package/README.zh-CN.md +8 -8
- package/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx +2 -2
- package/package.json +1 -1
- package/src/config/modelProviders/stepfun.ts +4 -0
- package/src/config/modelProviders/upstage.ts +10 -19
- package/src/libs/agent-runtime/google/index.test.ts +4 -1
- package/src/libs/agent-runtime/google/index.ts +3 -3
- package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +9 -3
- package/src/libs/agent-runtime/utils/anthropicHelpers.ts +2 -2
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +26 -2
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +4 -0
- package/src/libs/agent-runtime/utils/openaiHelpers.test.ts +146 -0
- package/src/libs/agent-runtime/utils/openaiHelpers.ts +40 -0
- package/src/libs/agent-runtime/zhipu/index.ts +5 -36
- package/src/utils/imageToBase64.test.ts +2 -1
- package/src/utils/imageToBase64.ts +16 -7
package/CHANGELOG.md
CHANGED
```diff
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.16.3](https://github.com/lobehub/lobe-chat/compare/v1.16.2...v1.16.3)
+
+<sup>Released on **2024-09-11**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Add `LLM_VISION_IMAGE_USE_BASE64` to support local s3 in vision model.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Add `LLM_VISION_IMAGE_USE_BASE64` to support local s3 in vision model, closes [#3887](https://github.com/lobehub/lobe-chat/issues/3887) ([16e57ed](https://github.com/lobehub/lobe-chat/commit/16e57ed))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.16.2](https://github.com/lobehub/lobe-chat/compare/v1.16.1...v1.16.2)
+
+<sup>Released on **2024-09-11**</sup>
+
+#### 💄 Styles
+
+- **misc**: Update Upstage model list.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Update Upstage model list, closes [#3890](https://github.com/lobehub/lobe-chat/issues/3890) ([82e2570](https://github.com/lobehub/lobe-chat/commit/82e2570))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.16.1](https://github.com/lobehub/lobe-chat/compare/v1.16.0...v1.16.1)
 
 <sup>Released on **2024-09-10**</sup>
```
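The fix above is opt-in. When a deployment sets `LLM_VISION_IMAGE_USE_BASE64=1`, image URLs that the model provider cannot reach (for example a local S3/MinIO endpoint) are fetched server-side and inlined as base64 data URLs before the request leaves LobeChat. A minimal sketch of the effect, using the `convertMessageContent` helper added later in this diff (the bucket URL is illustrative, and the snippet assumes it runs inside the LobeChat codebase):

```ts
import { convertMessageContent } from '@/libs/agent-runtime/utils/openaiHelpers';

const demo = async () => {
  // opt in to server-side inlining (the flag added in this release)
  process.env.LLM_VISION_IMAGE_USE_BASE64 = '1';

  // a URL that is only reachable from inside the deployment network
  const part = {
    image_url: { url: 'http://minio.internal:9000/lobe/uploads/cat.png' },
    type: 'image_url' as const,
  };

  // the provider now receives a self-contained data URL, e.g.
  // { type: 'image_url', image_url: { url: 'data:image/png;base64,...' } }
  console.log(await convertMessageContent(part));
};

void demo();
```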
package/README.md
CHANGED
```diff
@@ -285,14 +285,14 @@ Our marketplace is not just a showcase platform but also a collaborative space.
 
 <!-- AGENT LIST -->
 
-| Recent Submits
-|
-| [AI Agent Generator](https://chat-preview.lobehub.com/market?agent=ai-agent-generator)<br/><sup>By **[xyftw](https://github.com/xyftw)** on **2024-09-10**</sup>
-| [
-| [
-| [
-
-> 📊 Total agents: [<kbd>**
+| Recent Submits | Description |
+| ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [AI Agent Generator](https://chat-preview.lobehub.com/market?agent=ai-agent-generator)<br/><sup>By **[xyftw](https://github.com/xyftw)** on **2024-09-10**</sup> | Skilled at creating AI Agent character descriptions that meet the needs.<br/>`ai-agent` `character-creation` |
+| [Desolate Friend](https://chat-preview.lobehub.com/market?agent=meu)<br/><sup>By **[adminewacc](https://github.com/adminewacc)** on **2024-09-10**</sup> | Skilled at comforting and supporting friends<br/>`friendship` `sadness` `support` |
+| [NetMaster](https://chat-preview.lobehub.com/market?agent=net-master)<br/><sup>By **[erhuoyan](https://github.com/erhuoyan)** on **2024-09-10**</sup> | Network Engineer: Professional network topology construction and management<br/>`network-engineer` `network-configuration` `network-management` `network-topology` `network-security` |
+| [HTML to React](https://chat-preview.lobehub.com/market?agent=web-react)<br/><sup>By **[xingwang02](https://github.com/xingwang02)** on **2024-09-10**</sup> | Input HTML snippets and convert them into React components<br/>`react` `html` |
+
+> 📊 Total agents: [<kbd>**327**</kbd> ](https://github.com/lobehub/lobe-chat-agents)
 
 <!-- AGENT LIST -->
 
```
package/README.zh-CN.md
CHANGED
```diff
@@ -273,14 +273,14 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地
 
 <!-- AGENT LIST -->
 
-| 最近新增
-|
-| [AI 代理生成器](https://chat-preview.lobehub.com/market?agent=ai-agent-generator)<br/><sup>By **[xyftw](https://github.com/xyftw)** on **2024-09-10**</sup>
-| [
-| [
-| [
-
-> 📊 Total agents: [<kbd>**
+| 最近新增 | 助手说明 |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- |
+| [AI 代理生成器](https://chat-preview.lobehub.com/market?agent=ai-agent-generator)<br/><sup>By **[xyftw](https://github.com/xyftw)** on **2024-09-10**</sup> | 擅长创建满足需求的 AI 代理角色描述。<br/>`ai-agent` `角色创建` |
+| [孤独的朋友](https://chat-preview.lobehub.com/market?agent=meu)<br/><sup>By **[adminewacc](https://github.com/adminewacc)** on **2024-09-10**</sup> | 擅长安慰和支持朋友<br/>`友谊` `悲伤` `支持` |
+| [NetMaster](https://chat-preview.lobehub.com/market?agent=net-master)<br/><sup>By **[erhuoyan](https://github.com/erhuoyan)** on **2024-09-10**</sup> | 网络工程师:专业网络拓扑搭建与管理<br/>`网络工程师` `网络配置` `网络管理` `网络拓扑` `网络安全` |
+| [HTML to React](https://chat-preview.lobehub.com/market?agent=web-react)<br/><sup>By **[xingwang02](https://github.com/xingwang02)** on **2024-09-10**</sup> | 输入 HTML 片段,转化为 React 组件<br/>`react、-html` |
+
+> 📊 Total agents: [<kbd>**327**</kbd> ](https://github.com/lobehub/lobe-chat-agents)
 
 <!-- AGENT LIST -->
 
```
package/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx
CHANGED
```diff
@@ -51,7 +51,7 @@ tags:
   src="https://github.com/user-attachments/assets/15af6d94-af4f-4aa9-bbab-7a46e9f9e837"
 />
 
-将获取到的 `Client ID` 和 `Client Secret`,设为 LobeChat 环境变量中的 `LOGTO_CLIENT_ID` 和 `
+将获取到的 `Client ID` 和 `Client Secret`,设为 LobeChat 环境变量中的 `LOGTO_CLIENT_ID` 和 `LOGTO_CLIENT_SECRET`。
 
 配置 LobeChat 环境变量中 `LOGTO_ISSUER` 为:
 
@@ -75,4 +75,4 @@ tags:
 </Callout>
 </Steps>
 
-<Callout type={'info'}>部署成功后,用户将可以通过 Logto 身份认证并使用 LobeChat。</Callout>
+<Callout type={'info'}>部署成功后,用户将可以通过 Logto 身份认证并使用 LobeChat。</Callout>
```
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.16.1",
+  "version": "1.16.3",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
```
package/src/config/modelProviders/upstage.ts
CHANGED
```diff
@@ -3,9 +3,17 @@ import { ModelProviderCard } from '@/types/llm';
 // ref https://developers.upstage.ai/docs/getting-started/models
 const Upstage: ModelProviderCard = {
   chatModels: [
+    {
+      description: 'A more intelligent, instruction-following Solar LLM with IFEval 80+. The official version with expanded language support and longer context length will be released in November 2024. solar-pro supports English only at this time. solar-pro is an alias for our latest Solar Pro model. (Currently solar-pro-preview-240910)',
+      displayName: 'Solar Pro',
+      enabled: true,
+      functionCall: false,
+      id: 'solar-pro',
+      tokens: 4096,
+    },
     {
       description: 'A compact LLM offering superior performance to GPT-3.5, with robust multilingual capabilities for both English and Korean, delivering high efficiency in a smaller package. solar-1-mini-chat is alias for our latest solar-1-mini-chat model. (Currently solar-1-mini-chat-240612)',
-      displayName: 'Solar 1 Mini Chat',
+      displayName: 'Solar Mini',
       enabled: true,
       functionCall: true,
       id: 'solar-1-mini-chat',
@@ -13,28 +21,11 @@ const Upstage: ModelProviderCard = {
     },
     {
       description: 'A compact LLM that extends the capabilities of solar-mini-chat with specialization in Japanese, while maintaining high efficiency and performance in English and Korean. solar-1-mini-chat-ja is alias for our latest solar-1-mini-chat-ja model.(Currently solar-1-mini-chat-ja-240612)',
-      displayName: 'Solar 1 Mini Chat Ja',
-      enabled: true,
+      displayName: 'Solar Mini (Ja)',
       functionCall: false,
       id: 'solar-1-mini-chat-ja',
       tokens: 32_768,
     },
-    {
-      description: 'English-to-Korean translation specialized model based on the solar-mini. Maximum context length is 32k tokens. solar-1-mini-translate-enko is alias for our latest solar-1-mini-translate-enko model. (Currently solar-1-mini-translate-enko-240507)',
-      displayName: 'Solar 1 Mini Translate EnKo',
-      enabled: false,
-      functionCall: false,
-      id: 'solar-1-mini-translate-enko',
-      tokens: 32_768,
-    },
-    {
-      description: 'Korean-to-English translation specialized model based on the solar-mini. Maximum context length is 32k tokens. solar-1-mini-translate-koen is alias for our latest solar-1-mini-translate-koen model. (Currently solar-1-mini-translate-koen-240507)',
-      displayName: 'Solar 1 Mini Translate KoEn',
-      enabled: false,
-      functionCall: false,
-      id: 'solar-1-mini-translate-koen',
-      tokens: 32_768,
-    },
   ],
   checkModel: 'solar-1-mini-chat',
   id: 'upstage',
```
package/src/libs/agent-runtime/google/index.test.ts
CHANGED
```diff
@@ -309,7 +309,10 @@ describe('LobeGoogleAI', () => {
       const mockBase64 = 'mockBase64Data';
 
       // Mock the imageUrlToBase64 function
-      vi.spyOn(imageToBase64Module, 'imageUrlToBase64').mockResolvedValueOnce(mockBase64);
+      vi.spyOn(imageToBase64Module, 'imageUrlToBase64').mockResolvedValueOnce({
+        base64: mockBase64,
+        mimeType: 'image/png',
+      });
 
       const result = await instance['convertContentToGooglePart']({
         type: 'image_url',
```
package/src/libs/agent-runtime/google/index.ts
CHANGED
```diff
@@ -133,12 +133,12 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     }
 
     if (type === 'url') {
-      const base64 = await imageUrlToBase64(content.image_url.url);
+      const { base64, mimeType } = await imageUrlToBase64(content.image_url.url);
 
       return {
         inlineData: {
-          data: base64 as string,
-          mimeType: 'image/png',
+          data: base64,
+          mimeType,
         },
       };
     }
```
package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts
CHANGED
```diff
@@ -53,7 +53,10 @@ describe('anthropicHelpers', () => {
       base64: null,
       type: 'url',
     });
-    vi.mocked(imageUrlToBase64).mockResolvedValue('convertedBase64String');
+    vi.mocked(imageUrlToBase64).mockResolvedValue({
+      base64: 'convertedBase64String',
+      mimeType: 'image/jpg',
+    });
 
     const content = {
       type: 'image_url',
@@ -67,7 +70,7 @@ describe('anthropicHelpers', () => {
     expect(result).toEqual({
       source: {
         data: 'convertedBase64String',
-        media_type: 'image/png',
+        media_type: 'image/jpg',
         type: 'base64',
       },
       type: 'image',
@@ -80,7 +83,10 @@ describe('anthropicHelpers', () => {
       base64: null,
       type: 'url',
     });
-    vi.mocked(imageUrlToBase64).mockResolvedValue('convertedBase64String');
+    vi.mocked(imageUrlToBase64).mockResolvedValue({
+      base64: 'convertedBase64String',
+      mimeType: 'image/png',
+    });
 
     const content = {
       type: 'image_url',
```
package/src/libs/agent-runtime/utils/anthropicHelpers.ts
CHANGED
```diff
@@ -28,11 +28,11 @@ export const buildAnthropicBlock = async (
   };
 
   if (type === 'url') {
-    const base64 = await imageUrlToBase64(content.image_url.url);
+    const { base64, mimeType } = await imageUrlToBase64(content.image_url.url);
     return {
       source: {
         data: base64 as string,
-        media_type: 'image/png',
+        media_type: mimeType as Anthropic.ImageBlockParam.Source['media_type'],
         type: 'base64',
       },
       type: 'image',
```
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts
CHANGED
```diff
@@ -8,6 +8,7 @@ import {
   LobeOpenAICompatibleRuntime,
   ModelProvider,
 } from '@/libs/agent-runtime';
+import { sleep } from '@/utils/sleep';
 
 import * as debugStreamModule from '../debugStream';
 import { LobeOpenAICompatibleFactory } from './index';
@@ -512,9 +513,18 @@ describe('LobeOpenAICompatibleFactory', () => {
   describe('cancel request', () => {
     it('should cancel ongoing request correctly', async () => {
       const controller = new AbortController();
-      const mockCreateMethod = vi
+      const mockCreateMethod = vi
+        .spyOn(instance['client'].chat.completions, 'create')
+        .mockImplementation(
+          () =>
+            new Promise((_, reject) => {
+              setTimeout(() => {
+                reject(new DOMException('The user aborted a request.', 'AbortError'));
+              }, 100);
+            }) as any,
+        );
 
-      instance.chat(
+      const chatPromise = instance.chat(
        {
          messages: [{ content: 'Hello', role: 'user' }],
          model: 'mistralai/mistral-7b-instruct:free',
@@ -523,8 +533,22 @@ describe('LobeOpenAICompatibleFactory', () => {
        { signal: controller.signal },
      );
 
+      // give the request some time to start
+      await sleep(50);
+
       controller.abort();
 
+      // wait for the promise and assert that it is rejected
+      // use try-catch to capture and verify the error
+      try {
+        await chatPromise;
+        // if the promise is not rejected, the test should fail
+        expect.fail('Expected promise to be rejected');
+      } catch (error) {
+        expect((error as any).errorType).toBe('AgentRuntimeError');
+        expect((error as any).error.name).toBe('AbortError');
+        expect((error as any).error.message).toBe('The user aborted a request.');
+      }
       expect(mockCreateMethod).toHaveBeenCalledWith(
         expect.anything(),
         expect.objectContaining({
```
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
CHANGED
```diff
@@ -20,6 +20,7 @@ import { desensitizeUrl } from '../desensitizeUrl';
 import { handleOpenAIError } from '../handleOpenAIError';
 import { StreamingResponse } from '../response';
 import { OpenAIStream } from '../streams';
+import { convertOpenAIMessages } from '../openaiHelpers';
 
 // the model contains the following keywords is not a chat model, so we should filter them out
 const CHAT_MODELS_BLOCK_LIST = [
@@ -158,9 +159,12 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
         stream: payload.stream ?? true,
       } as OpenAI.ChatCompletionCreateParamsStreaming);
 
+      const messages = await convertOpenAIMessages(postPayload.messages);
+
       const response = await this.client.chat.completions.create(
         {
           ...postPayload,
+          messages,
           ...(chatCompletion?.noUserId ? {} : { user: options?.user }),
         },
         {
```
package/src/libs/agent-runtime/utils/openaiHelpers.test.ts
ADDED
```diff
@@ -0,0 +1,146 @@
+import OpenAI from 'openai';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { imageUrlToBase64 } from '@/utils/imageToBase64';
+
+import { convertMessageContent, convertOpenAIMessages } from './openaiHelpers';
+import { parseDataUri } from './uriParser';
+
+// mock dependencies
+vi.mock('@/utils/imageToBase64');
+vi.mock('./uriParser');
+
+describe('convertMessageContent', () => {
+  beforeEach(() => {
+    vi.resetAllMocks();
+  });
+
+  afterEach(() => {
+    vi.restoreAllMocks();
+  });
+
+  it('should return the same content if not image_url type', async () => {
+    const content = { type: 'text', text: 'Hello' } as OpenAI.ChatCompletionContentPart;
+    const result = await convertMessageContent(content);
+    expect(result).toEqual(content);
+  });
+
+  it('should convert image URL to base64 when necessary', async () => {
+    // set the environment variable
+    process.env.LLM_VISION_IMAGE_USE_BASE64 = '1';
+
+    const content = {
+      type: 'image_url',
+      image_url: { url: 'https://example.com/image.jpg' },
+    } as OpenAI.ChatCompletionContentPart;
+
+    vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
+    vi.mocked(imageUrlToBase64).mockResolvedValue({
+      base64: 'base64String',
+      mimeType: 'image/jpeg',
+    });
+
+    const result = await convertMessageContent(content);
+
+    expect(result).toEqual({
+      type: 'image_url',
+      image_url: { url: 'data:image/jpeg;base64,base64String' },
+    });
+
+    expect(parseDataUri).toHaveBeenCalledWith('https://example.com/image.jpg');
+    expect(imageUrlToBase64).toHaveBeenCalledWith('https://example.com/image.jpg');
+  });
+
+  it('should not convert image URL when not necessary', async () => {
+    process.env.LLM_VISION_IMAGE_USE_BASE64 = undefined;
+
+    const content = {
+      type: 'image_url',
+      image_url: { url: 'https://example.com/image.jpg' },
+    } as OpenAI.ChatCompletionContentPart;
+
+    vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
+
+    const result = await convertMessageContent(content);
+
+    expect(result).toEqual(content);
+    expect(imageUrlToBase64).not.toHaveBeenCalled();
+  });
+});
+
+describe('convertOpenAIMessages', () => {
+  it('should convert string content messages', async () => {
+    const messages = [
+      { role: 'user', content: 'Hello' },
+      { role: 'assistant', content: 'Hi there' },
+    ] as OpenAI.ChatCompletionMessageParam[];
+
+    const result = await convertOpenAIMessages(messages);
+
+    expect(result).toEqual(messages);
+  });
+
+  it('should convert array content messages', async () => {
+    const messages = [
+      {
+        role: 'user',
+        content: [
+          { type: 'text', text: 'Hello' },
+          { type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } },
+        ],
+      },
+    ] as OpenAI.ChatCompletionMessageParam[];
+
+    vi.spyOn(Promise, 'all');
+    vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
+    vi.mocked(imageUrlToBase64).mockResolvedValue({
+      base64: 'base64String',
+      mimeType: 'image/jpeg',
+    });
+
+    process.env.LLM_VISION_IMAGE_USE_BASE64 = '1';
+
+    const result = await convertOpenAIMessages(messages);
+
+    expect(result).toEqual([
+      {
+        role: 'user',
+        content: [
+          { type: 'text', text: 'Hello' },
+          {
+            type: 'image_url',
+            image_url: { url: 'data:image/jpeg;base64,base64String' },
+          },
+        ],
+      },
+    ]);
+
+    expect(Promise.all).toHaveBeenCalledTimes(2); // once for the message array, once for the content array
+
+    process.env.LLM_VISION_IMAGE_USE_BASE64 = undefined;
+  });
+  it('should convert array content messages', async () => {
+    const messages = [
+      {
+        role: 'user',
+        content: [
+          { type: 'text', text: 'Hello' },
+          { type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } },
+        ],
+      },
+    ] as OpenAI.ChatCompletionMessageParam[];
+
+    vi.spyOn(Promise, 'all');
+    vi.mocked(parseDataUri).mockReturnValue({ type: 'url', base64: null, mimeType: null });
+    vi.mocked(imageUrlToBase64).mockResolvedValue({
+      base64: 'base64String',
+      mimeType: 'image/jpeg',
+    });
+
+    const result = await convertOpenAIMessages(messages);
+
+    expect(result).toEqual(messages);
+
+    expect(Promise.all).toHaveBeenCalledTimes(2); // once for the message array, once for the content array
+  });
+});
```
package/src/libs/agent-runtime/utils/openaiHelpers.ts
ADDED
```diff
@@ -0,0 +1,40 @@
+import OpenAI from 'openai';
+
+import { imageUrlToBase64 } from '@/utils/imageToBase64';
+
+import { parseDataUri } from './uriParser';
+
+export const convertMessageContent = async (
+  content: OpenAI.ChatCompletionContentPart,
+): Promise<OpenAI.ChatCompletionContentPart> => {
+  if (content.type === 'image_url') {
+    const { type } = parseDataUri(content.image_url.url);
+
+    if (type === 'url' && process.env.LLM_VISION_IMAGE_USE_BASE64 === '1') {
+      const { base64, mimeType } = await imageUrlToBase64(content.image_url.url);
+
+      return {
+        ...content,
+        image_url: { ...content.image_url, url: `data:${mimeType};base64,${base64}` },
+      };
+    }
+  }
+
+  return content;
+};
+
+export const convertOpenAIMessages = async (messages: OpenAI.ChatCompletionMessageParam[]) => {
+  return (await Promise.all(
+    messages.map(async (message) => ({
+      ...message,
+      content:
+        typeof message.content === 'string'
+          ? message.content
+          : await Promise.all(
+              (message.content || []).map((c) =>
+                convertMessageContent(c as OpenAI.ChatCompletionContentPart),
+              ),
+            ),
+    })),
+  )) as OpenAI.ChatCompletionMessageParam[];
+};
```
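Together with the factory change above, every outgoing message array is now passed through `convertOpenAIMessages` just before the completion request is created. A small usage sketch (message contents are illustrative):

```ts
import OpenAI from 'openai';

import { convertOpenAIMessages } from './openaiHelpers';

const demo = async () => {
  const messages: OpenAI.ChatCompletionMessageParam[] = [
    { content: 'You are a helpful assistant.', role: 'system' },
    {
      content: [
        { text: 'What is in this image?', type: 'text' },
        { image_url: { url: 'https://example.com/image.jpg' }, type: 'image_url' },
      ],
      role: 'user',
    },
  ];

  // string contents pass through untouched; image_url parts are rewritten to
  // data URLs only when LLM_VISION_IMAGE_USE_BASE64 === '1'
  console.log(await convertOpenAIMessages(messages));
};

void demo();
```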
package/src/libs/agent-runtime/zhipu/index.ts
CHANGED
```diff
@@ -2,19 +2,14 @@ import OpenAI, { ClientOptions } from 'openai';
 
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
-import {
-  ChatCompetitionOptions,
-  ChatStreamPayload,
-  ModelProvider,
-  OpenAIChatMessage,
-} from '../types';
+import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { desensitizeUrl } from '../utils/desensitizeUrl';
 import { handleOpenAIError } from '../utils/handleOpenAIError';
+import { convertOpenAIMessages } from '../utils/openaiHelpers';
 import { StreamingResponse } from '../utils/response';
 import { OpenAIStream } from '../utils/streams';
-import { parseDataUri } from '../utils/uriParser';
 import { generateApiToken } from './authToken';
 
 const DEFAULT_BASE_URL = 'https://open.bigmodel.cn/api/paas/v4';
@@ -52,7 +47,7 @@ export class LobeZhipuAI implements LobeRuntimeAI {
 
   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
     try {
-      const params = this.buildCompletionsParams(payload);
+      const params = await this.buildCompletionsParams(payload);
 
       const response = await this.client.chat.completions.create(
         params as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
@@ -85,11 +80,11 @@ export class LobeZhipuAI implements LobeRuntimeAI {
     }
   }
 
-  private buildCompletionsParams(payload: ChatStreamPayload) {
+  private async buildCompletionsParams(payload: ChatStreamPayload) {
     const { messages, temperature, top_p, ...params } = payload;
 
     return {
-      messages: messages.map((m) => this.transformMessage(m)) as any,
+      messages: await convertOpenAIMessages(messages as any),
       ...params,
       do_sample: temperature === 0,
       stream: true,
@@ -99,32 +94,6 @@ export class LobeZhipuAI implements LobeRuntimeAI {
       top_p: top_p === 1 ? 0.99 : top_p,
     };
   }
-
-  // TODO: temporary handling, to be removed later
-  private transformMessage = (message: OpenAIChatMessage): OpenAIChatMessage => {
-    return {
-      ...message,
-      content:
-        typeof message.content === 'string'
-          ? message.content
-          : message.content.map((c) => {
-              switch (c.type) {
-                default:
-                case 'text': {
-                  return c;
-                }
-
-                case 'image_url': {
-                  const { base64 } = parseDataUri(c.image_url.url);
-                  return {
-                    image_url: { ...c.image_url, url: base64 || c.image_url.url },
-                    type: 'image_url',
-                  };
-                }
-              }
-            }),
-    };
-  };
 }
 
 export default LobeZhipuAI;
```
package/src/utils/imageToBase64.test.ts
CHANGED
```diff
@@ -72,13 +72,14 @@ describe('imageUrlToBase64', () => {
   it('should convert image URL to base64 string', async () => {
     mockFetch.mockResolvedValue({
       arrayBuffer: () => Promise.resolve(mockArrayBuffer),
+      blob: () => Promise.resolve(new Blob([mockArrayBuffer], { type: 'image/jpg' })),
     });
 
     const result = await imageUrlToBase64('https://example.com/image.jpg');
 
     expect(mockFetch).toHaveBeenCalledWith('https://example.com/image.jpg');
     expect(global.btoa).toHaveBeenCalled();
-    expect(result).toBe('mockBase64String');
+    expect(result).toEqual({ base64: 'mockBase64String', mimeType: 'image/jpg' });
   });
 
   it('should throw an error when fetch fails', async () => {
```
|
@@ -36,16 +36,25 @@ export const imageToBase64 = ({
|
|
36
36
|
return canvas.toDataURL(type);
|
37
37
|
};
|
38
38
|
|
39
|
-
export const imageUrlToBase64 = async (
|
39
|
+
export const imageUrlToBase64 = async (
|
40
|
+
imageUrl: string,
|
41
|
+
): Promise<{ base64: string; mimeType: string }> => {
|
40
42
|
try {
|
41
43
|
const res = await fetch(imageUrl);
|
42
|
-
const
|
44
|
+
const blob = await res.blob();
|
45
|
+
const arrayBuffer = await blob.arrayBuffer();
|
43
46
|
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
47
|
+
const base64 =
|
48
|
+
typeof btoa === 'function'
|
49
|
+
? btoa(
|
50
|
+
new Uint8Array(arrayBuffer).reduce(
|
51
|
+
(data, byte) => data + String.fromCharCode(byte),
|
52
|
+
'',
|
53
|
+
),
|
54
|
+
)
|
55
|
+
: Buffer.from(arrayBuffer).toString('base64');
|
56
|
+
|
57
|
+
return { base64, mimeType: blob.type };
|
49
58
|
} catch (error) {
|
50
59
|
console.error('Error converting image to base64:', error);
|
51
60
|
throw error;
|