@lobehub/chat 1.116.4 → 1.117.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +117 -0
- package/changelog/v1.json +21 -0
- package/locales/ar/models.json +3 -0
- package/locales/bg-BG/models.json +3 -0
- package/locales/de-DE/models.json +3 -0
- package/locales/en-US/models.json +3 -0
- package/locales/es-ES/models.json +3 -0
- package/locales/fa-IR/models.json +3 -0
- package/locales/fr-FR/models.json +3 -0
- package/locales/it-IT/models.json +3 -0
- package/locales/ja-JP/models.json +3 -0
- package/locales/ko-KR/models.json +3 -0
- package/locales/nl-NL/models.json +3 -0
- package/locales/pl-PL/models.json +3 -0
- package/locales/pt-BR/models.json +3 -0
- package/locales/ru-RU/models.json +3 -0
- package/locales/tr-TR/models.json +3 -0
- package/locales/vi-VN/models.json +3 -0
- package/locales/zh-CN/models.json +3 -0
- package/locales/zh-TW/models.json +3 -0
- package/package.json +1 -2
- package/packages/const/src/image.ts +9 -0
- package/packages/database/vitest.config.mts +1 -0
- package/packages/database/vitest.config.server.mts +1 -0
- package/packages/file-loaders/package.json +1 -1
- package/packages/model-runtime/src/RouterRuntime/createRuntime.ts +11 -9
- package/packages/model-runtime/src/google/createImage.test.ts +657 -0
- package/packages/model-runtime/src/google/createImage.ts +152 -0
- package/packages/model-runtime/src/google/index.test.ts +0 -328
- package/packages/model-runtime/src/google/index.ts +3 -40
- package/packages/model-runtime/src/utils/modelParse.ts +2 -1
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/createImage.ts +239 -0
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +22 -22
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.ts +9 -116
- package/packages/model-runtime/src/utils/postProcessModelList.ts +55 -0
- package/packages/model-runtime/src/utils/streams/google-ai.test.ts +7 -7
- package/packages/model-runtime/src/utils/streams/google-ai.ts +15 -2
- package/packages/model-runtime/src/utils/streams/openai/openai.test.ts +41 -0
- package/packages/model-runtime/src/utils/streams/openai/openai.ts +38 -2
- package/packages/model-runtime/src/utils/streams/protocol.test.ts +32 -0
- package/packages/model-runtime/src/utils/streams/protocol.ts +7 -3
- package/packages/model-runtime/src/utils/usageConverter.test.ts +58 -0
- package/packages/model-runtime/src/utils/usageConverter.ts +5 -1
- package/packages/utils/vitest.config.mts +1 -0
- package/src/components/ChatItem/ChatItem.tsx +183 -0
- package/src/components/ChatItem/components/Actions.tsx +25 -0
- package/src/components/ChatItem/components/Avatar.tsx +50 -0
- package/src/components/ChatItem/components/BorderSpacing.tsx +13 -0
- package/src/components/ChatItem/components/ErrorContent.tsx +24 -0
- package/src/components/ChatItem/components/Loading.tsx +26 -0
- package/src/components/ChatItem/components/MessageContent.tsx +76 -0
- package/src/components/ChatItem/components/Title.tsx +43 -0
- package/src/components/ChatItem/index.ts +2 -0
- package/src/components/ChatItem/style.ts +208 -0
- package/src/components/ChatItem/type.ts +80 -0
- package/src/config/aiModels/google.ts +42 -22
- package/src/config/aiModels/openrouter.ts +33 -0
- package/src/config/aiModels/vertexai.ts +4 -4
- package/src/features/ChatItem/index.tsx +1 -1
- package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +6 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +38 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +13 -1
- package/src/locales/default/chat.ts +1 -0
- package/packages/model-runtime/src/UniformRuntime/index.ts +0 -117
package/packages/model-runtime/src/google/createImage.ts
@@ -0,0 +1,152 @@
+import { Content, GoogleGenAI, Part } from '@google/genai';
+import { imageUrlToBase64 } from '@lobechat/utils';
+
+import { CreateImagePayload, CreateImageResponse } from '../types/image';
+import { AgentRuntimeError } from '../utils/createError';
+import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
+import { parseDataUri } from '../utils/uriParser';
+
+/**
+ * Extract image data from generateContent response
+ */
+function extractImageFromResponse(response: any): CreateImageResponse {
+  const candidate = response.candidates?.[0];
+  if (!candidate?.content?.parts) {
+    throw new Error('No image generated');
+  }
+
+  for (const part of candidate.content.parts) {
+    if (part.inlineData?.data) {
+      const imageUrl = `data:${part.inlineData.mimeType || 'image/png'};base64,${part.inlineData.data}`;
+      return { imageUrl };
+    }
+  }
+
+  throw new Error('No image data found in response');
+}
+
+/**
+ * Generate images using traditional Imagen models with generateImages API
+ */
+async function generateByImageModel(
+  client: GoogleGenAI,
+  payload: CreateImagePayload,
+): Promise<CreateImageResponse> {
+  const { model, params } = payload;
+
+  const response = await client.models.generateImages({
+    config: {
+      aspectRatio: params.aspectRatio,
+      numberOfImages: 1,
+    },
+    model,
+    prompt: params.prompt,
+  });
+
+  if (!response.generatedImages || response.generatedImages.length === 0) {
+    throw new Error('No images generated');
+  }
+
+  const generatedImage = response.generatedImages[0];
+  if (!generatedImage.image || !generatedImage.image.imageBytes) {
+    throw new Error('Invalid image data');
+  }
+
+  const { imageBytes } = generatedImage.image;
+  // 1. official doc use png as example
+  // 2. no responseType param support like openai now.
+  // I think we can just hard code png now
+  const imageUrl = `data:image/png;base64,${imageBytes}`;
+
+  return { imageUrl };
+}
+
+/**
+ * Generate images using Gemini Chat Models with generateContent
+ */
+async function generateImageByChatModel(
+  client: GoogleGenAI,
+  payload: CreateImagePayload,
+): Promise<CreateImageResponse> {
+  const { model, params } = payload;
+  const actualModel = model.replace(':image', '');
+
+  // Build content parts
+  const parts: Part[] = [{ text: params.prompt }];
+
+  // Add image for editing if provided
+  if (params.imageUrl && params.imageUrl !== null) {
+    const { mimeType, base64, type } = parseDataUri(params.imageUrl);
+
+    if (type === 'base64') {
+      if (!base64) {
+        throw new TypeError("Image URL doesn't contain base64 data");
+      }
+
+      parts.push({
+        inlineData: {
+          data: base64,
+          mimeType: mimeType || 'image/png',
+        },
+      });
+    } else if (type === 'url') {
+      const { base64: urlBase64, mimeType: urlMimeType } = await imageUrlToBase64(params.imageUrl);
+
+      parts.push({
+        inlineData: {
+          data: urlBase64,
+          mimeType: urlMimeType,
+        },
+      });
+    } else {
+      throw new TypeError(`currently we don't support image url: ${params.imageUrl}`);
+    }
+  }
+
+  const contents: Content[] = [
+    {
+      parts,
+      role: 'user',
+    },
+  ];
+
+  const response = await client.models.generateContent({
+    config: {
+      responseModalities: ['Image'],
+    },
+    contents,
+    model: actualModel,
+  });
+
+  return extractImageFromResponse(response);
+}
+
+/**
+ * Create image using Google AI models
+ */
+export async function createGoogleImage(
+  client: GoogleGenAI,
+  provider: string,
+  payload: CreateImagePayload,
+): Promise<CreateImageResponse> {
+  try {
+    const { model } = payload;
+
+    // Handle Gemini 2.5 Flash Image models that use generateContent
+    if (model.endsWith(':image')) {
+      return await generateImageByChatModel(client, payload);
+    }
+
+    // Handle traditional Imagen models that use generateImages
+    return await generateByImageModel(client, payload);
+  } catch (error) {
+    const err = error as Error;
+
+    const { errorType, error: parsedError } = parseGoogleErrorMessage(err.message);
+    throw AgentRuntimeError.createImage({
+      error: parsedError,
+      errorType,
+      provider,
+    });
+  }
+}
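The new helper routes purely on the model id: an id ending in `:image` has the suffix stripped and is sent through `client.models.generateContent` with `responseModalities: ['Image']`, while any other id is treated as an Imagen model and sent through `client.models.generateImages`. A minimal usage sketch of that routing follows; the `GoogleGenAI` constructor options, the import path, and the `gemini-*:image` model id are illustrative assumptions, not part of this diff.

```ts
import { GoogleGenAI } from '@google/genai';

// hypothetical import path into a checkout of this package; adjust to your layout
import { createGoogleImage } from './packages/model-runtime/src/google/createImage';

const client = new GoogleGenAI({ apiKey: process.env.GOOGLE_API_KEY! });

// An Imagen id goes through client.models.generateImages and comes back as a PNG data URI.
const base = await createGoogleImage(client, 'google', {
  model: 'imagen-4.0-generate-preview-06-06',
  params: { aspectRatio: '1:1', prompt: 'A watercolor fox in the snow' },
});

// A ':image'-suffixed chat-model id is stripped back to its real id and routed through
// client.models.generateContent; passing imageUrl turns the call into an image edit.
const edited = await createGoogleImage(client, 'google', {
  model: 'gemini-2.5-flash-image-preview:image', // illustrative model id
  params: { imageUrl: base.imageUrl, prompt: 'Add a red scarf' },
});

console.log(edited.imageUrl.startsWith('data:image/')); // true
```

Either path resolves to a `data:` URI, so callers do not need to know which Google API actually produced the bytes.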
package/packages/model-runtime/src/google/index.test.ts
@@ -5,7 +5,6 @@ import OpenAI from 'openai';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { OpenAIChatMessage } from '@/libs/model-runtime';
-import { CreateImagePayload } from '@/libs/model-runtime/types/image';
 import { ChatStreamPayload } from '@/types/openai/chat';
 
 import * as debugStreamModule from '../utils/debugStream';
@@ -827,331 +826,4 @@ describe('LobeGoogleAI', () => {
       });
     });
   });
-
-  describe('createImage', () => {
-    it('should create image successfully with basic parameters', async () => {
-      // Arrange - Use real base64 image data (5x5 red pixel PNG)
-      const realBase64ImageData =
-        'iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==';
-      const mockImageResponse = {
-        generatedImages: [
-          {
-            image: {
-              imageBytes: realBase64ImageData,
-            },
-          },
-        ],
-      };
-      vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-        mockImageResponse as any,
-      );
-
-      const payload: CreateImagePayload = {
-        model: 'imagen-4.0-generate-preview-06-06',
-        params: {
-          prompt: 'A beautiful landscape with mountains and trees',
-          aspectRatio: '1:1',
-        },
-      };
-
-      // Act
-      const result = await instance.createImage(payload);
-
-      // Assert
-      expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
-        model: 'imagen-4.0-generate-preview-06-06',
-        prompt: 'A beautiful landscape with mountains and trees',
-        config: {
-          aspectRatio: '1:1',
-          numberOfImages: 1,
-        },
-      });
-      expect(result).toEqual({
-        imageUrl: `data:image/png;base64,${realBase64ImageData}`,
-      });
-    });
-
-    it('should support different aspect ratios like 16:9 for widescreen images', async () => {
-      // Arrange - Use real base64 data
-      const realBase64Data =
-        'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
-      const mockImageResponse = {
-        generatedImages: [
-          {
-            image: {
-              imageBytes: realBase64Data,
-            },
-          },
-        ],
-      };
-      vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-        mockImageResponse as any,
-      );
-
-      const payload: CreateImagePayload = {
-        model: 'imagen-4.0-ultra-generate-preview-06-06',
-        params: {
-          prompt: 'Cinematic landscape shot with dramatic lighting',
-          aspectRatio: '16:9',
-        },
-      };
-
-      // Act
-      await instance.createImage(payload);
-
-      // Assert
-      expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
-        model: 'imagen-4.0-ultra-generate-preview-06-06',
-        prompt: 'Cinematic landscape shot with dramatic lighting',
-        config: {
-          aspectRatio: '16:9',
-          numberOfImages: 1,
-        },
-      });
-    });
-
-    it('should work with only prompt when aspect ratio is not specified', async () => {
-      // Arrange
-      const realBase64Data =
-        'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
-      const mockImageResponse = {
-        generatedImages: [
-          {
-            image: {
-              imageBytes: realBase64Data,
-            },
-          },
-        ],
-      };
-      vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-        mockImageResponse as any,
-      );
-
-      const payload: CreateImagePayload = {
-        model: 'imagen-4.0-generate-preview-06-06',
-        params: {
-          prompt: 'A cute cat sitting in a garden',
-        },
-      };
-
-      // Act
-      await instance.createImage(payload);
-
-      // Assert
-      expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
-        model: 'imagen-4.0-generate-preview-06-06',
-        prompt: 'A cute cat sitting in a garden',
-        config: {
-          aspectRatio: undefined,
-          numberOfImages: 1,
-        },
-      });
-    });
-
-    describe('Error handling', () => {
-      it('should throw InvalidProviderAPIKey error when API key is invalid', async () => {
-        // Arrange - Use real Google AI error format
-        const message = `[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/imagen-4.0:generateImages: [400 Bad Request] API key not valid. Please pass a valid API key. [{"@type":"type.googleapis.com/google.rpc.ErrorInfo","reason":"API_KEY_INVALID","domain":"googleapis.com","metadata":{"service":"generativelanguage.googleapis.com"}}]`;
-        const apiError = new Error(message);
-        vi.spyOn(instance['client'].models, 'generateImages').mockRejectedValue(apiError);
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-4.0-generate-preview-06-06',
-          params: {
-            prompt: 'A realistic landscape photo',
-          },
-        };
-
-        // Act & Assert - Test error type rather than specific text
-        await expect(instance.createImage(payload)).rejects.toEqual(
-          expect.objectContaining({
-            errorType: invalidErrorType,
-            provider,
-          }),
-        );
-      });
-
-      it('should throw ProviderBizError for network and API errors', async () => {
-        // Arrange
-        const apiError = new Error('Network connection failed');
-        vi.spyOn(instance['client'].models, 'generateImages').mockRejectedValue(apiError);
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-4.0-generate-preview-06-06',
-          params: {
-            prompt: 'A digital art portrait',
-          },
-        };
-
-        // Act & Assert - Test error type and basic structure
-        await expect(instance.createImage(payload)).rejects.toEqual(
-          expect.objectContaining({
-            errorType: bizErrorType,
-            provider,
-            error: expect.objectContaining({
-              message: expect.any(String),
-            }),
-          }),
-        );
-      });
-
-      it('should throw error when API response is malformed - missing generatedImages', async () => {
-        // Arrange
-        const mockImageResponse = {};
-        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-          mockImageResponse as any,
-        );
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-4.0-generate-preview-06-06',
-          params: {
-            prompt: 'Abstract geometric patterns',
-          },
-        };
-
-        // Act & Assert - Test error behavior rather than specific text
-        await expect(instance.createImage(payload)).rejects.toEqual(
-          expect.objectContaining({
-            errorType: bizErrorType,
-            provider,
-          }),
-        );
-      });
-
-      it('should throw error when API response contains empty image array', async () => {
-        // Arrange
-        const mockImageResponse = {
-          generatedImages: [],
-        };
-        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-          mockImageResponse as any,
-        );
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-4.0-generate-preview-06-06',
-          params: {
-            prompt: 'Minimalist design poster',
-          },
-        };
-
-        // Act & Assert
-        await expect(instance.createImage(payload)).rejects.toEqual(
-          expect.objectContaining({
-            errorType: bizErrorType,
-            provider,
-          }),
-        );
-      });
-
-      it('should throw error when generated image lacks required data', async () => {
-        // Arrange
-        const mockImageResponse = {
-          generatedImages: [
-            {
-              image: {}, // Missing imageBytes
-            },
-          ],
-        };
-        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-          mockImageResponse as any,
-        );
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-4.0-generate-preview-06-06',
-          params: {
-            prompt: 'Watercolor painting style',
-          },
-        };
-
-        // Act & Assert
-        await expect(instance.createImage(payload)).rejects.toEqual(
-          expect.objectContaining({
-            errorType: bizErrorType,
-            provider,
-          }),
-        );
-      });
-    });
-
-    describe('Edge cases', () => {
-      it('should return first image when API returns multiple generated images', async () => {
-        // Arrange - Use two different real base64 image data
-        const firstImageData =
-          'iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==';
-        const secondImageData =
-          'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
-        const mockImageResponse = {
-          generatedImages: [
-            {
-              image: {
-                imageBytes: firstImageData,
-              },
-            },
-            {
-              image: {
-                imageBytes: secondImageData,
-              },
-            },
-          ],
-        };
-        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-          mockImageResponse as any,
-        );
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-4.0-generate-preview-06-06',
-          params: {
-            prompt: 'Generate multiple variations of a sunset',
-          },
-        };
-
-        // Act
-        const result = await instance.createImage(payload);
-
-        // Assert - Should return the first image
-        expect(result).toEqual({
-          imageUrl: `data:image/png;base64,${firstImageData}`,
-        });
-      });
-
-      it('should work with custom future Imagen model versions', async () => {
-        // Arrange
-        const realBase64Data =
-          'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
-        const mockImageResponse = {
-          generatedImages: [
-            {
-              image: {
-                imageBytes: realBase64Data,
-              },
-            },
-          ],
-        };
-        vi.spyOn(instance['client'].models, 'generateImages').mockResolvedValue(
-          mockImageResponse as any,
-        );
-
-        const payload: CreateImagePayload = {
-          model: 'imagen-5.0-future-model',
-          params: {
-            prompt: 'Photorealistic portrait with soft lighting',
-            aspectRatio: '4:3',
-          },
-        };
-
-        // Act
-        await instance.createImage(payload);
-
-        // Assert
-        expect(instance['client'].models.generateImages).toHaveBeenCalledWith({
-          model: 'imagen-5.0-future-model',
-          prompt: 'Photorealistic portrait with soft lighting',
-          config: {
-            aspectRatio: '4:3',
-            numberOfImages: 1,
-          },
-        });
-      });
-    });
-  });
 });
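The cases removed here follow the implementation out of index.test.ts; per the file list above, a new google/createImage.test.ts (+657) carries the coverage in 1.117.1. Below is a compressed sketch of how such tests can exercise the extracted helper directly with vitest and a hand-stubbed GoogleGenAI client; the stub shape and assertions are illustrative, not copied from the new suite.

```ts
import type { GoogleGenAI } from '@google/genai';
import { describe, expect, it, vi } from 'vitest';

import { createGoogleImage } from './createImage';

describe('createGoogleImage', () => {
  it('wraps generateImages output in a PNG data URI', async () => {
    // stub only the surface the helper touches
    const client = {
      models: {
        generateContent: vi.fn(),
        generateImages: vi.fn().mockResolvedValue({
          generatedImages: [{ image: { imageBytes: 'AAAA' } }],
        }),
      },
    } as unknown as GoogleGenAI;

    const result = await createGoogleImage(client, 'google', {
      model: 'imagen-4.0-generate-preview-06-06',
      params: { prompt: 'a sunset over the sea' },
    });

    expect(result.imageUrl).toBe('data:image/png;base64,AAAA');
    expect(client.models.generateImages).toHaveBeenCalledOnce();
  });
});
```

Testing the standalone function this way avoids instantiating LobeGoogleAI and spying on `instance['client']`, which is what the deleted block above had to do.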
package/packages/model-runtime/src/google/index.ts
@@ -27,6 +27,7 @@ import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
 import { StreamingResponse } from '../utils/response';
 import { GoogleGenerativeAIStream, VertexAIStream } from '../utils/streams';
 import { parseDataUri } from '../utils/uriParser';
+import { createGoogleImage } from './createImage';
 
 const modelsOffSafetySettings = new Set(['gemini-2.0-flash-exp']);
 
@@ -258,49 +259,11 @@ export class LobeGoogleAI implements LobeRuntimeAI {
   }
 
   /**
-   * Generate images using Google AI Imagen API
+   * Generate images using Google AI Imagen API or Gemini Chat Models
    * @see https://ai.google.dev/gemini-api/docs/image-generation#imagen
    */
   async createImage(payload: CreateImagePayload): Promise<CreateImageResponse> {
-    try {
-      const { model, params } = payload;
-
-      const response = await this.client.models.generateImages({
-        config: {
-          aspectRatio: params.aspectRatio,
-          numberOfImages: 1,
-        },
-        model,
-        prompt: params.prompt,
-      });
-
-      if (!response.generatedImages || response.generatedImages.length === 0) {
-        throw new Error('No images generated');
-      }
-
-      const generatedImage = response.generatedImages[0];
-      if (!generatedImage.image || !generatedImage.image.imageBytes) {
-        throw new Error('Invalid image data');
-      }
-
-      const { imageBytes } = generatedImage.image;
-      // 1. official doc use png as example
-      // 2. no responseType param support like openai now.
-      // I think we can just hard code png now
-      const imageUrl = `data:image/png;base64,${imageBytes}`;
-
-      return { imageUrl };
-    } catch (error) {
-      const err = error as Error;
-      console.error('Google AI image generation error:', err);
-
-      const { errorType, error: parsedError } = parseGoogleErrorMessage(err.message);
-      throw AgentRuntimeError.createImage({
-        error: parsedError,
-        errorType,
-        provider: this.provider,
-      });
-    }
+    return createGoogleImage(this.client, this.provider, payload);
   }
 
   private createEnhancedStream(originalStream: any, signal: AbortSignal): ReadableStream {
package/packages/model-runtime/src/utils/modelParse.ts
@@ -111,6 +111,7 @@ export const IMAGE_MODEL_KEYWORDS = [
   'wanxiang',
   'DESCRIBE',
   'UPSCALE',
+  '!gemini', // exclude gemini models: even if the id contains -image, they are chat models
   '-image',
   '^V3',
   '^V_2',
@@ -169,7 +170,7 @@ const findKnownModelByProvider = async (
 
   try {
     // try to dynamically import the corresponding config file
-    const moduleImport = await import(`@/config/aiModels/${provider}`);
+    const moduleImport = await import(`@/config/aiModels/${provider}.ts`);
     const providerModels = moduleImport.default;
 
     // if the import succeeds and has data, perform the lookup
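The matcher that consumes IMAGE_MODEL_KEYWORDS is not shown in this diff, so the following is only a sketch of the semantics the new '!gemini' entry implies: a '!'-prefixed keyword vetoes the image classification, so Gemini ids stay chat models even though they contain '-image', while '^'-prefixed entries remain prefix matches. The function and matching rules below are assumptions for illustration, not the package's actual implementation.

```ts
// Illustrative reimplementation of the keyword semantics, not the real modelParse logic.
const IMAGE_MODEL_KEYWORDS = ['!gemini', '-image', '^V3'];

const looksLikeImageModel = (modelId: string): boolean => {
  const id = modelId.toLowerCase();

  // a '!'-prefixed keyword vetoes the image classification outright
  const excluded = IMAGE_MODEL_KEYWORDS.some(
    (k) => k.startsWith('!') && id.includes(k.slice(1).toLowerCase()),
  );
  if (excluded) return false;

  // remaining keywords: '^' means prefix match, otherwise substring match
  return IMAGE_MODEL_KEYWORDS.some((k) => {
    if (k.startsWith('!')) return false;
    const needle = (k.startsWith('^') ? k.slice(1) : k).toLowerCase();
    return k.startsWith('^') ? id.startsWith(needle) : id.includes(needle);
  });
};

console.log(looksLikeImageModel('gemini-2.5-flash-image')); // false, excluded by '!gemini'
console.log(looksLikeImageModel('wanx-image-v1'));          // true, matches '-image'
console.log(looksLikeImageModel('V3-large'));               // true, matches '^V3' as a prefix
```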