@lobehub/chat 1.101.1 → 1.102.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/Dockerfile +2 -0
- package/README.md +1 -1
- package/README.zh-CN.md +1 -1
- package/changelog/v1.json +18 -0
- package/package.json +1 -1
- package/src/config/aiModels/google.ts +35 -2
- package/src/config/aiModels/index.ts +1 -0
- package/src/config/aiModels/lobehub.ts +7 -0
- package/src/config/modelProviders/index.ts +1 -0
- package/src/config/modelProviders/lobehub.ts +22 -0
- package/src/features/ModelSwitchPanel/index.tsx +1 -1
- package/src/libs/model-runtime/ModelRuntime.test.ts +1 -1
- package/src/libs/model-runtime/google/index.test.ts +340 -12
- package/src/libs/model-runtime/google/index.ts +47 -0
- package/src/libs/model-runtime/types/type.ts +7 -0
- package/src/libs/model-runtime/utils/createError.ts +6 -1
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+## [Version 1.102.0](https://github.com/lobehub/lobe-chat/compare/v1.101.2...v1.102.0)
+
+<sup>Released on **2025-07-21**</sup>
+
+#### ✨ Features
+
+- **misc**: Add image generation capabilities using Google AI Imagen API.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Add image generation capabilities using Google AI Imagen API, closes [#8503](https://github.com/lobehub/lobe-chat/issues/8503) ([cef8208](https://github.com/lobehub/lobe-chat/commit/cef8208))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.101.2](https://github.com/lobehub/lobe-chat/compare/v1.101.1...v1.101.2)
+
+<sup>Released on **2025-07-21**</sup>
+
+#### 💄 Styles
+
+- **misc**: Fix lobehub provider `/chat` in desktop.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Fix lobehub provider `/chat` in desktop, closes [#8508](https://github.com/lobehub/lobe-chat/issues/8508) ([c801f9c](https://github.com/lobehub/lobe-chat/commit/c801f9c))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.101.1](https://github.com/lobehub/lobe-chat/compare/v1.101.0...v1.101.1)
 
 <sup>Released on **2025-07-19**</sup>
package/Dockerfile
CHANGED
@@ -53,6 +53,8 @@ ENV NEXT_PUBLIC_SENTRY_DSN="${NEXT_PUBLIC_SENTRY_DSN}" \
     SENTRY_ORG="" \
     SENTRY_PROJECT=""
 
+ENV APP_URL="http://app.com"
+
 # Posthog
 ENV NEXT_PUBLIC_ANALYTICS_POSTHOG="${NEXT_PUBLIC_ANALYTICS_POSTHOG}" \
     NEXT_PUBLIC_POSTHOG_HOST="${NEXT_PUBLIC_POSTHOG_HOST}" \
package/README.md
CHANGED
@@ -383,8 +383,8 @@ In addition, these plugins are not limited to news aggregation, but can also ext
 
 | Recent Submits | Description |
 | ---------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- |
+| [PortfolioMeta](https://lobechat.com/discover/plugin/StockData)<br/><sup>By **portfoliometa** on **2025-07-21**</sup> | Analyze stocks and get comprehensive real-time investment data and analytics.<br/>`stock` |
 | [Speak](https://lobechat.com/discover/plugin/speak)<br/><sup>By **speak** on **2025-07-18**</sup> | Learn how to say anything in another language with Speak, your AI-powered language tutor.<br/>`education` `language` |
-| [PortfolioMeta](https://lobechat.com/discover/plugin/StockData)<br/><sup>By **portfoliometa** on **2025-05-27**</sup> | Analyze stocks and get comprehensive real-time investment data and analytics.<br/>`stock` |
 | [Web](https://lobechat.com/discover/plugin/web)<br/><sup>By **Proghit** on **2025-01-24**</sup> | Smart web search that reads and analyzes pages to deliver comprehensive answers from Google results.<br/>`web` `search` |
 | [Bing_websearch](https://lobechat.com/discover/plugin/Bingsearch-identifier)<br/><sup>By **FineHow** on **2024-12-22**</sup> | Search for information from the internet base BingApi<br/>`bingsearch` |
 
package/README.zh-CN.md
CHANGED
@@ -376,8 +376,8 @@ LobeChat 的插件生态系统是其核心功能的重要扩展，它极大地
 
 | 最近新增 | 描述 |
 | -------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- |
+| [PortfolioMeta](https://lobechat.com/discover/plugin/StockData)<br/><sup>By **portfoliometa** on **2025-07-21**</sup> | 分析股票并获取全面的实时投资数据和分析。<br/>`股票` |
 | [Speak](https://lobechat.com/discover/plugin/speak)<br/><sup>By **speak** on **2025-07-18**</sup> | 使用 Speak，您的 AI 语言导师，学习如何用另一种语言说任何事情。<br/>`教育` `语言` |
-| [PortfolioMeta](https://lobechat.com/discover/plugin/StockData)<br/><sup>By **portfoliometa** on **2025-05-27**</sup> | 分析股票并获取全面的实时投资数据和分析。<br/>`股票` |
 | [网页](https://lobechat.com/discover/plugin/web)<br/><sup>By **Proghit** on **2025-01-24**</sup> | 智能网页搜索，读取和分析页面，以提供来自 Google 结果的全面答案。<br/>`网页` `搜索` |
 | [必应网页搜索](https://lobechat.com/discover/plugin/Bingsearch-identifier)<br/><sup>By **FineHow** on **2024-12-22**</sup> | 通过 BingApi 搜索互联网上的信息<br/>`bingsearch` |
 
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "features": [
+        "Add image generation capabilities using Google AI Imagen API."
+      ]
+    },
+    "date": "2025-07-21",
+    "version": "1.102.0"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Fix lobehub provider /chat in desktop."
+      ]
+    },
+    "date": "2025-07-21",
+    "version": "1.101.2"
+  },
   {
     "children": {
       "fixes": [
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.101.1",
+  "version": "1.102.0",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/config/aiModels/google.ts
CHANGED
@@ -1,4 +1,5 @@
-import { AIChatModelCard } from '@/types/aiModel';
+import { ModelParamsSchema } from '@/libs/standard-parameters';
+import { AIChatModelCard, AIImageModelCard } from '@/types/aiModel';
 
 const googleChatModels: AIChatModelCard[] = [
   {
@@ -496,6 +497,38 @@ const googleChatModels: AIChatModelCard[] = [
   },
 ];
 
-export const allModels = [...googleChatModels];
+// Common parameters for Imagen models
+const imagenBaseParameters: ModelParamsSchema = {
+  aspectRatio: {
+    default: '1:1',
+    enum: ['1:1', '16:9', '9:16', '3:4', '4:3'],
+  },
+  prompt: { default: '' },
+};
+
+const googleImageModels: AIImageModelCard[] = [
+  {
+    description: 'Imagen 4th generation text-to-image model series',
+    displayName: 'Imagen4 Preview 06-06',
+    enabled: true,
+    id: 'imagen-4.0-generate-preview-06-06',
+    organization: 'Deepmind',
+    parameters: imagenBaseParameters,
+    releasedAt: '2024-06-06',
+    type: 'image',
+  },
+  {
+    description: 'Imagen 4th generation text-to-image model series Ultra version',
+    displayName: 'Imagen4 Ultra Preview 06-06',
+    enabled: true,
+    id: 'imagen-4.0-ultra-generate-preview-06-06',
+    organization: 'Deepmind',
+    parameters: imagenBaseParameters,
+    releasedAt: '2024-06-06',
+    type: 'image',
+  },
+];
+
+export const allModels = [...googleChatModels, ...googleImageModels];
 
 export default allModels;
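For orientation (not part of the diff), here is a minimal sketch of the payload shape these new image model cards expect, using the CreateImagePayload type that the test file further below imports; the prompt text is made up, and aspectRatio must be one of the enum values declared in imagenBaseParameters above.

// Illustration only — assumes the CreateImagePayload type exported from
// '@/libs/model-runtime/types/image' (imported the same way in the tests below).
import { CreateImagePayload } from '@/libs/model-runtime/types/image';

const examplePayload: CreateImagePayload = {
  model: 'imagen-4.0-generate-preview-06-06',
  params: {
    // prompt defaults to '' in imagenBaseParameters; aspectRatio must be one of
    // '1:1' | '16:9' | '9:16' | '3:4' | '4:3'
    prompt: 'A watercolor fox in a misty forest',
    aspectRatio: '16:9',
  },
};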
package/src/config/aiModels/index.ts
CHANGED
@@ -156,6 +156,7 @@ export { default as infiniai } from './infiniai';
 export { default as internlm } from './internlm';
 export { default as jina } from './jina';
 export { default as lmstudio } from './lmstudio';
+export { default as lobehub } from './lobehub';
 export { default as minimax } from './minimax';
 export { default as mistral } from './mistral';
 export { default as modelscope } from './modelscope';
package/src/config/modelProviders/index.ts
CHANGED
@@ -200,6 +200,7 @@ export { default as InfiniAIProviderCard } from './infiniai';
 export { default as InternLMProviderCard } from './internlm';
 export { default as JinaProviderCard } from './jina';
 export { default as LMStudioProviderCard } from './lmstudio';
+export { default as LobeHubProviderCard } from './lobehub';
 export { default as MinimaxProviderCard } from './minimax';
 export { default as MistralProviderCard } from './mistral';
 export { default as ModelScopeProviderCard } from './modelscope';
package/src/config/modelProviders/lobehub.ts
ADDED
@@ -0,0 +1,22 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const LobeHub: ModelProviderCard = {
+  chatModels: [],
+  description:
+    'LobeChat Cloud 通过官方部署的 API 来实现 AI 模型的调用，并采用 Credits 计算积分的方式来衡量 AI 模型的用量，对应大模型使用的 Tokens。',
+  enabled: true,
+  id: 'lobehub',
+  modelsUrl: 'https://lobehub.com/zh/docs/usage/subscription/model-pricing',
+  name: 'LobeHub',
+  settings: {
+    modelEditable: false,
+    showAddNewModel: false,
+    showModelFetcher: false,
+  },
+  showConfig: false,
+  url: 'https://lobehub.com',
+};
+
+export default LobeHub;
+
+export const planCardModels = ['gpt-4o-mini', 'deepseek-reasoner', 'claude-3-5-sonnet-latest'];
package/src/features/ModelSwitchPanel/index.tsx
CHANGED
@@ -148,7 +148,7 @@ const ModelSwitchPanel = memo<IProps>(({ children, onOpenChange, open }) => {
         // 不加限高就会导致面板超长,顶部的内容会被隐藏
         // https://github.com/user-attachments/assets/9c043c47-42c5-46ef-b5c1-bee89376f042
         style: {
-          maxHeight:
+          maxHeight: 550,
           overflowY: 'scroll',
         },
       }}
package/src/libs/model-runtime/ModelRuntime.test.ts
CHANGED
@@ -72,7 +72,7 @@ beforeEach(async () => {
 
 describe('AgentRuntime', () => {
   describe('should initialize with various providers', () => {
-    const providers = Object.values(ModelProvider);
+    const providers = Object.values(ModelProvider).filter((i) => i !== 'lobehub');
 
     const specialProviderIds = [ModelProvider.VertexAI, ...specialProviders.map((p) => p.id)];
 
package/src/libs/model-runtime/google/index.test.ts
CHANGED
@@ -4,6 +4,7 @@ import OpenAI from 'openai';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { OpenAIChatMessage } from '@/libs/model-runtime';
+import { CreateImagePayload } from '@/libs/model-runtime/types/image';
 import { ChatStreamPayload } from '@/types/openai/chat';
 import * as imageToBase64Module from '@/utils/imageToBase64';
 
@@ -22,7 +23,7 @@ let instance: LobeGoogleAI;
 beforeEach(() => {
   instance = new LobeGoogleAI({ apiKey: 'test' });
 
-  //
+  // Use vi.spyOn to mock the chat.completions.create method
   const mockStreamData = (async function* (): AsyncGenerator<GenerateContentResponse> {})();
   vi.spyOn(instance['client'].models, 'generateContentStream').mockResolvedValue(mockStreamData);
 });
@@ -53,7 +54,7 @@ describe('LobeGoogleAI', () => {
       expect(result).toBeInstanceOf(Response);
     });
     it('should handle text messages correctly', async () => {
-      //
+      // Mock Google AI SDK's generateContentStream method to return a successful response stream
      const mockStream = new ReadableStream({
        start(controller) {
          controller.enqueue('Hello, world!');
@@ -71,7 +72,7 @@ describe('LobeGoogleAI', () => {
      });
 
      expect(result).toBeInstanceOf(Response);
-      //
+      // Additional assertions can be added, such as verifying the returned stream content
    });
 
    it('should withGrounding', () => {
@@ -214,10 +215,10 @@ describe('LobeGoogleAI', () => {
    });
 
    it('should call debugStream in DEBUG mode', async () => {
-      //
+      // Set environment variable to enable DEBUG mode
      process.env.DEBUG_GOOGLE_CHAT_COMPLETION = '1';
 
-      //
+      // Mock Google AI SDK's generateContentStream method to return a successful response stream
      const mockStream = new ReadableStream({
        start(controller) {
          controller.enqueue('Debug mode test');
@@ -239,13 +240,13 @@ describe('LobeGoogleAI', () => {
 
      expect(debugStreamSpy).toHaveBeenCalled();
 
-      //
+      // Clean up environment variable
      delete process.env.DEBUG_GOOGLE_CHAT_COMPLETION;
    });
 
    describe('Error', () => {
      it('should throw InvalidGoogleAPIKey error on API_KEY_INVALID error', async () => {
-        //
+        // Mock Google AI SDK throwing an exception
        const message = `[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent?alt=sse: [400 Bad Request] API key not valid. Please pass a valid API key. [{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]`;
 
        const apiError = new Error(message);
@@ -264,7 +265,7 @@ describe('LobeGoogleAI', () => {
      });
 
      it('should throw LocationNotSupportError error on location not support error', async () => {
-        //
+        // Mock Google AI SDK throwing an exception
        const message = `[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent?alt=sse: [400 Bad Request] User location is not supported for the API use.`;
 
        const apiError = new Error(message);
@@ -283,7 +284,7 @@ describe('LobeGoogleAI', () => {
      });
 
      it('should throw BizError error', async () => {
-        //
+        // Mock Google AI SDK throwing an exception
        const message = `[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent?alt=sse: [400 Bad Request] API key not valid. Please pass a valid API key. [{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"Error","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]`;
 
        const apiError = new Error(message);
@@ -315,7 +316,7 @@ describe('LobeGoogleAI', () => {
      });
 
      it('should throw DefaultError error', async () => {
-        //
+        // Mock Google AI SDK throwing an exception
        const message = `[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/gemini-pro:streamGenerateContent?alt=sse: [400 Bad Request] API key not valid. Please pass a valid API key. [{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"Error","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com}}]`;
 
        const apiError = new Error(message);
@@ -345,7 +346,7 @@ describe('LobeGoogleAI', () => {
      // Arrange
      const apiError = new Error('Error message');
 
-      //
+      // Use vi.spyOn to mock the chat.completions.create method
      vi.spyOn(instance['client'].models, 'generateContentStream').mockRejectedValue(apiError);
 
      // Act
@@ -537,7 +538,7 @@ describe('LobeGoogleAI', () => {
        },
      ];
 
-      //
+      // Call the buildGoogleMessages method
      const contents = await instance['buildGoogleMessages'](messages);
 
      expect(contents).toHaveLength(1);
@@ -826,4 +827,331 @@ describe('LobeGoogleAI', () => {
      });
    });
  });
+
+  describe('createImage', () => {
+    it('should create image successfully with basic parameters', async () => {
+      // Arrange - Use real base64 image data (5x5 red pixel PNG)
+      const realBase64ImageData =
+        'iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==';
+      const mockImageResponse = {
+        generatedImages: [
+          {
+            image: {
+              imageBytes: realBase64ImageData,
+            },
+          },
+        ],
+      };
+      vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+        mockImageResponse as any,
+      );
+
+      const payload: CreateImagePayload = {
+        model: 'imagen-4.0-generate-preview-06-06',
+        params: {
+          prompt: 'A beautiful landscape with mountains and trees',
+          aspectRatio: '1:1',
+        },
+      };
+
+      // Act
+      const result = await instance.createImage(payload);
+
+      // Assert
+      expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
+        model: 'imagen-4.0-generate-preview-06-06',
+        prompt: 'A beautiful landscape with mountains and trees',
+        config: {
+          aspectRatio: '1:1',
+          numberOfImages: 1,
+        },
+      });
+      expect(result).toEqual({
+        imageUrl: `data:image/png;base64,${realBase64ImageData}`,
+      });
+    });
+
+    it('should support different aspect ratios like 16:9 for widescreen images', async () => {
+      // Arrange - Use real base64 data
+      const realBase64Data =
+        'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
+      const mockImageResponse = {
+        generatedImages: [
+          {
+            image: {
+              imageBytes: realBase64Data,
+            },
+          },
+        ],
+      };
+      vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+        mockImageResponse as any,
+      );
+
+      const payload: CreateImagePayload = {
+        model: 'imagen-4.0-ultra-generate-preview-06-06',
+        params: {
+          prompt: 'Cinematic landscape shot with dramatic lighting',
+          aspectRatio: '16:9',
+        },
+      };
+
+      // Act
+      await instance.createImage(payload);
+
+      // Assert
+      expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
+        model: 'imagen-4.0-ultra-generate-preview-06-06',
+        prompt: 'Cinematic landscape shot with dramatic lighting',
+        config: {
+          aspectRatio: '16:9',
+          numberOfImages: 1,
+        },
+      });
+    });
+
+    it('should work with only prompt when aspect ratio is not specified', async () => {
+      // Arrange
+      const realBase64Data =
+        'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
+      const mockImageResponse = {
+        generatedImages: [
+          {
+            image: {
+              imageBytes: realBase64Data,
+            },
+          },
+        ],
+      };
+      vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+        mockImageResponse as any,
+      );
+
+      const payload: CreateImagePayload = {
+        model: 'imagen-4.0-generate-preview-06-06',
+        params: {
+          prompt: 'A cute cat sitting in a garden',
+        },
+      };
+
+      // Act
+      await instance.createImage(payload);
+
+      // Assert
+      expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
+        model: 'imagen-4.0-generate-preview-06-06',
+        prompt: 'A cute cat sitting in a garden',
+        config: {
+          aspectRatio: undefined,
+          numberOfImages: 1,
+        },
+      });
+    });
+
+    describe('Error handling', () => {
+      it('should throw InvalidProviderAPIKey error when API key is invalid', async () => {
+        // Arrange - Use real Google AI error format
+        const message = `[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/imagen-4.0:generateImages: [400 Bad Request] API key not valid. Please pass a valid API key. [{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]`;
+        const apiError = new Error(message);
+        vi.spyOn(instance['client'].models, 'generateImages').mockRejectedValue(apiError);
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-4.0-generate-preview-06-06',
+          params: {
+            prompt: 'A realistic landscape photo',
+          },
+        };
+
+        // Act & Assert - Test error type rather than specific text
+        await expect(instance.createImage(payload)).rejects.toEqual(
+          expect.objectContaining({
+            errorType: invalidErrorType,
+            provider,
+          }),
+        );
+      });
+
+      it('should throw ProviderBizError for network and API errors', async () => {
+        // Arrange
+        const apiError = new Error('Network connection failed');
+        vi.spyOn(instance['client'].models, 'generateImages').mockRejectedValue(apiError);
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-4.0-generate-preview-06-06',
+          params: {
+            prompt: 'A digital art portrait',
+          },
+        };
+
+        // Act & Assert - Test error type and basic structure
+        await expect(instance.createImage(payload)).rejects.toEqual(
+          expect.objectContaining({
+            errorType: bizErrorType,
+            provider,
+            error: expect.objectContaining({
+              message: expect.any(String),
+            }),
+          }),
+        );
+      });
+
+      it('should throw error when API response is malformed - missing generatedImages', async () => {
+        // Arrange
+        const mockImageResponse = {};
+        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+          mockImageResponse as any,
+        );
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-4.0-generate-preview-06-06',
+          params: {
+            prompt: 'Abstract geometric patterns',
+          },
+        };
+
+        // Act & Assert - Test error behavior rather than specific text
+        await expect(instance.createImage(payload)).rejects.toEqual(
+          expect.objectContaining({
+            errorType: bizErrorType,
+            provider,
+          }),
+        );
+      });
+
+      it('should throw error when API response contains empty image array', async () => {
+        // Arrange
+        const mockImageResponse = {
+          generatedImages: [],
+        };
+        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+          mockImageResponse as any,
+        );
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-4.0-generate-preview-06-06',
+          params: {
+            prompt: 'Minimalist design poster',
+          },
+        };
+
+        // Act & Assert
+        await expect(instance.createImage(payload)).rejects.toEqual(
+          expect.objectContaining({
+            errorType: bizErrorType,
+            provider,
+          }),
+        );
+      });
+
+      it('should throw error when generated image lacks required data', async () => {
+        // Arrange
+        const mockImageResponse = {
+          generatedImages: [
+            {
+              image: {}, // Missing imageBytes
+            },
+          ],
+        };
+        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+          mockImageResponse as any,
+        );
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-4.0-generate-preview-06-06',
+          params: {
+            prompt: 'Watercolor painting style',
+          },
+        };
+
+        // Act & Assert
+        await expect(instance.createImage(payload)).rejects.toEqual(
+          expect.objectContaining({
+            errorType: bizErrorType,
+            provider,
+          }),
+        );
+      });
+    });
+
+    describe('Edge cases', () => {
+      it('should return first image when API returns multiple generated images', async () => {
+        // Arrange - Use two different real base64 image data
+        const firstImageData =
+          'iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==';
+        const secondImageData =
+          'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
+        const mockImageResponse = {
+          generatedImages: [
+            {
+              image: {
+                imageBytes: firstImageData,
+              },
+            },
+            {
+              image: {
+                imageBytes: secondImageData,
+              },
+            },
+          ],
+        };
+        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+          mockImageResponse as any,
+        );
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-4.0-generate-preview-06-06',
+          params: {
+            prompt: 'Generate multiple variations of a sunset',
+          },
+        };
+
+        // Act
+        const result = await instance.createImage(payload);
+
+        // Assert - Should return the first image
+        expect(result).toEqual({
+          imageUrl: `data:image/png;base64,${firstImageData}`,
+        });
+      });
+
+      it('should work with custom future Imagen model versions', async () => {
+        // Arrange
+        const realBase64Data =
+          'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
+        const mockImageResponse = {
+          generatedImages: [
+            {
+              image: {
+                imageBytes: realBase64Data,
+              },
+            },
+          ],
+        };
+        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
+          mockImageResponse as any,
+        );
+
+        const payload: CreateImagePayload = {
+          model: 'imagen-5.0-future-model',
+          params: {
+            prompt: 'Photorealistic portrait with soft lighting',
+            aspectRatio: '4:3',
+          },
+        };
+
+        // Act
+        await instance.createImage(payload);
+
+        // Assert
+        expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
+          model: 'imagen-5.0-future-model',
+          prompt: 'Photorealistic portrait with soft lighting',
+          config: {
+            aspectRatio: '4:3',
+            numberOfImages: 1,
+          },
+        });
+      });
+    });
+  });
 });
package/src/libs/model-runtime/google/index.ts
CHANGED
@@ -21,6 +21,7 @@ import {
   OpenAIChatMessage,
   UserMessageContentPart,
 } from '../types';
+import { CreateImagePayload, CreateImageResponse } from '../types/image';
 import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { StreamingResponse } from '../utils/response';
@@ -243,6 +244,52 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     }
   }
 
+  /**
+   * Generate images using Google AI Imagen API
+   * @see https://ai.google.dev/gemini-api/docs/image-generation#imagen
+   */
+  async createImage(payload: CreateImagePayload): Promise<CreateImageResponse> {
+    try {
+      const { model, params } = payload;
+
+      const response = await this.client.models.generateImages({
+        config: {
+          aspectRatio: params.aspectRatio,
+          numberOfImages: 1,
+        },
+        model,
+        prompt: params.prompt,
+      });
+
+      if (!response.generatedImages || response.generatedImages.length === 0) {
+        throw new Error('No images generated');
+      }
+
+      const generatedImage = response.generatedImages[0];
+      if (!generatedImage.image || !generatedImage.image.imageBytes) {
+        throw new Error('Invalid image data');
+      }
+
+      const { imageBytes } = generatedImage.image;
+      // 1. official doc use png as example
+      // 2. no responseType param support like openai now.
+      // I think we can just hard code png now
+      const imageUrl = `data:image/png;base64,${imageBytes}`;
+
+      return { imageUrl };
+    } catch (error) {
+      const err = error as Error;
+      console.error('Google AI image generation error:', err);
+
+      const { errorType, error: parsedError } = this.parseErrorMessage(err.message);
+      throw AgentRuntimeError.createImage({
+        error: parsedError,
+        errorType,
+        provider: this.provider,
+      });
+    }
+  }
+
   private createEnhancedStream(originalStream: any, signal: AbortSignal): ReadableStream {
     return new ReadableStream({
       async start(controller) {
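A hedged usage sketch of the new method, mirroring the tests above. The import path, the GOOGLE_API_KEY variable name, and the wrapper function are assumptions for illustration; only the createImage call itself comes from the diff.

// Sketch only — assumes LobeGoogleAI is exported from the google runtime module
// and that a valid Google AI API key is available in the environment.
import { LobeGoogleAI } from '@/libs/model-runtime/google';

const generatePoster = async () => {
  const runtime = new LobeGoogleAI({ apiKey: process.env.GOOGLE_API_KEY! });

  const { imageUrl } = await runtime.createImage({
    model: 'imagen-4.0-generate-preview-06-06',
    params: { prompt: 'A minimalist poster of a mountain at dawn', aspectRatio: '3:4' },
  });

  // Per the implementation above, imageUrl is a base64 data URL ('data:image/png;base64,...').
  return imageUrl;
};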
package/src/libs/model-runtime/types/type.ts
CHANGED
@@ -16,6 +16,12 @@ export interface ChatCompletionErrorPayload {
   provider: string;
 }
 
+export interface CreateImageErrorPayload {
+  error: object;
+  errorType: ILobeAgentRuntimeErrorType;
+  provider: string;
+}
+
 export interface CreateChatCompletionOptions {
   chatModel: OpenAI;
   payload: ChatStreamPayload;
@@ -45,6 +51,7 @@ export enum ModelProvider {
   InternLM = 'internlm',
   Jina = 'jina',
   LMStudio = 'lmstudio',
+  LobeHub = 'lobehub',
   Minimax = 'minimax',
   Mistral = 'mistral',
   ModelScope = 'modelscope',
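As a side note, the new LobeHub enum member is the plain string 'lobehub', which is why the AgentRuntime test above can exclude it with a simple string comparison. A tiny sketch (the barrel import path is an assumption, not shown in the diff):

// Sketch only — assumes ModelProvider is re-exported from '@/libs/model-runtime'.
import { ModelProvider } from '@/libs/model-runtime';

// Same result as the `.filter((i) => i !== 'lobehub')` used in ModelRuntime.test.ts above.
const providers = Object.values(ModelProvider).filter((i) => i !== ModelProvider.LobeHub);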
package/src/libs/model-runtime/utils/createError.ts
CHANGED
@@ -1,5 +1,9 @@
 import { ILobeAgentRuntimeErrorType } from '../error';
-import { AgentInitErrorPayload, ChatCompletionErrorPayload } from '../types';
+import {
+  AgentInitErrorPayload,
+  ChatCompletionErrorPayload,
+  CreateImageErrorPayload,
+} from '../types';
 
 export const AgentRuntimeError = {
   chat: (error: ChatCompletionErrorPayload): ChatCompletionErrorPayload => error,
@@ -7,5 +11,6 @@ export const AgentRuntimeError = {
     errorType: ILobeAgentRuntimeErrorType | string | number,
     error?: any,
   ): AgentInitErrorPayload => ({ error, errorType }),
+  createImage: (error: CreateImageErrorPayload): CreateImageErrorPayload => error,
   textToImage: (error: any): any => error,
 };