@dataclouder/nest-vertex 0.0.35 → 0.0.37
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/controllers/vertex-gemini-chat.controller.js +1 -1
- package/models/adapter.models.d.ts +2 -1
- package/models/key-balancer.models.d.ts +2 -1
- package/models/key-balancer.models.js +1 -0
- package/nest-vertex.module.js +3 -0
- package/package.json +1 -1
- package/services/google-genai.service.d.ts +13 -0
- package/services/google-genai.service.js +53 -0
- package/services/key-balancer-api.service.d.ts +3 -3
- package/services/key-balancer-api.service.js +18 -31
- package/services/vertex-gemini-chat.service.d.ts +7 -6
- package/services/vertex-gemini-chat.service.js +16 -36
- package/services/vertex-image.service.js +4 -4
- package/services/vertex-veo-genai.service.d.ts +3 -2
- package/services/vertex-veo-genai.service.js +24 -17
package/controllers/vertex-gemini-chat.controller.js
CHANGED
@@ -27,7 +27,7 @@ let GeminiChatController = GeminiChatController_1 = class GeminiChatController {
     async generateText(generateTextDto) {
         this.logger.log(`Received request for text generation with keyType: ${generateTextDto.keyType}`);
         try {
-            const result = await this.geminiChatService.chat(generateTextDto.messages, undefined, generateTextDto
+            const result = await this.geminiChatService.chat(generateTextDto.messages, undefined, generateTextDto?.keyType);
             return result;
         }
         catch (error) {
package/models/adapter.models.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import { SynthesizeSpeechInput } from '../services/vertex-tts.service';
+import { TierType } from './key-balancer.models';
 export interface IModelAdapterMethods {
     listModels(): Promise<Record<string, string>[]>;
     generate(imageDto: any): Promise<any>;
@@ -18,7 +19,7 @@ export interface ChatLLMRequestAdapter {
     provider?: string;
     model?: IAIModel;
     returnJson?: boolean;
-    keyType?:
+    keyType?: TierType;
 }
 export declare class ChatMessageDict {
     content: string;
package/nest-vertex.module.js
CHANGED
@@ -34,6 +34,7 @@ const key_balancer_api_service_1 = require("./services/key-balancer-api.service"
 const comfyui_module_1 = require("./comfyui/comfyui.module");
 const vertex_veo_genai_service_1 = require("./services/vertex-veo-genai.service");
 const vertex_veo_video_controller_1 = require("./controllers/vertex-veo-video.controller");
+const google_genai_service_1 = require("./services/google-genai.service");
 let NestVertexModule = class NestVertexModule {
 };
 exports.NestVertexModule = NestVertexModule;
@@ -59,6 +60,7 @@ exports.NestVertexModule = NestVertexModule = __decorate([
         generated_asset_service_1.GeneratedAssetService,
         key_balancer_api_service_1.KeyBalancerApiService,
         vertex_veo_genai_service_1.VertexVeoGenaiService,
+        google_genai_service_1.GoogleGenaiService,
     ],
     exports: [
         vertex_image_video_service_1.VertexImageVideoService,
@@ -73,6 +75,7 @@ exports.NestVertexModule = NestVertexModule = __decorate([
         generated_asset_service_1.GeneratedAssetService,
         key_balancer_api_service_1.KeyBalancerApiService,
         vertex_veo_genai_service_1.VertexVeoGenaiService,
+        google_genai_service_1.GoogleGenaiService,
         comfyui_module_1.ComfyUIModule,
     ],
     controllers: [
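The three compiled hunks above register the new GoogleGenaiService as both a provider and an export of NestVertexModule. A minimal sketch of what that corresponds to in TypeScript source form (the .ts source is not published with the package, so everything beyond the GoogleGenaiService name is illustrative):

import { Module } from '@nestjs/common';
import { GoogleGenaiService } from './services/google-genai.service';

@Module({
  providers: [
    // ...existing providers (KeyBalancerApiService, VertexVeoGenaiService, ...)
    GoogleGenaiService,
  ],
  exports: [
    // ...existing exports
    GoogleGenaiService,
  ],
})
export class NestVertexModule {}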
package/package.json
CHANGED
package/services/google-genai.service.d.ts
ADDED
@@ -0,0 +1,13 @@
+import { GoogleGenAI } from '@google/genai';
+import { AvailableKeyResult, ModelType, TierType } from '../models/key-balancer.models';
+import { KeyBalancerApiService } from './key-balancer-api.service';
+export declare class GoogleGenaiService {
+    private readonly keyBalancer;
+    private readonly logger;
+    private readonly defaultApiKey;
+    constructor(keyBalancer: KeyBalancerApiService);
+    getGoogleGenAIClient(modelName: string, keyTierType?: TierType, modelType?: ModelType): Promise<{
+        client: GoogleGenAI;
+        balancedKey?: AvailableKeyResult;
+    }>;
+}
package/services/google-genai.service.js
ADDED
@@ -0,0 +1,53 @@
+"use strict";
+var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
+    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
+    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
+    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
+    return c > 3 && r && Object.defineProperty(target, key, r), r;
+};
+var __metadata = (this && this.__metadata) || function (k, v) {
+    if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
+};
+var GoogleGenaiService_1;
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GoogleGenaiService = void 0;
+const common_1 = require("@nestjs/common");
+const genai_1 = require("@google/genai");
+const nest_core_1 = require("@dataclouder/nest-core");
+const key_balancer_api_service_1 = require("./key-balancer-api.service");
+let GoogleGenaiService = GoogleGenaiService_1 = class GoogleGenaiService {
+    keyBalancer;
+    logger = new common_1.Logger(GoogleGenaiService_1.name);
+    defaultApiKey;
+    constructor(keyBalancer) {
+        this.keyBalancer = keyBalancer;
+        this.defaultApiKey = process.env.GEMINI_API_KEY || '';
+        if (!this.defaultApiKey) {
+            this.logger.warn('GEMINI_API_KEY environment variable not set. This service will not work without available keys in the balancer.');
+        }
+    }
+    async getGoogleGenAIClient(modelName, keyTierType, modelType) {
+        this.logger.warn('getGoogleGenAIClient() Getting key from redis');
+        if (process.env.KEY_BALANCER_HOST && modelType) {
+            const balancedKey = await this.keyBalancer.getKey(modelType, modelName, keyTierType);
+            if (balancedKey?.apiKey) {
+                this.logger.debug(`Using balanced key: ${balancedKey.name} ${balancedKey.id}`);
+                return { client: new genai_1.GoogleGenAI({ apiKey: balancedKey.apiKey }), balancedKey };
+            }
+        }
+        if (!this.defaultApiKey) {
+            throw new nest_core_1.AppException({
+                error_message: 'No API keys available',
+                explanation: 'All balanced keys are rate-limited or failing, and no default GEMINI_API_KEY is configured.',
+            });
+        }
+        this.logger.warn('No balanced keys available. Falling back to default API key.');
+        return { client: new genai_1.GoogleGenAI({ apiKey: this.defaultApiKey }), balancedKey: null };
+    }
+};
+exports.GoogleGenaiService = GoogleGenaiService;
+exports.GoogleGenaiService = GoogleGenaiService = GoogleGenaiService_1 = __decorate([
+    (0, common_1.Injectable)(),
+    __metadata("design:paramtypes", [key_balancer_api_service_1.KeyBalancerApiService])
+], GoogleGenaiService);
+//# sourceMappingURL=google-genai.service.js.map
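The new google-genai.service.js centralizes client creation that was previously inlined in GeminiChatService: it asks the key balancer for a key (only when KEY_BALANCER_HOST is set and a modelType is supplied), otherwise falls back to GEMINI_API_KEY, and throws an AppException only when neither source yields a key. A minimal consumer sketch in TypeScript; the import path is an assumption, since this diff only shows the compiled dist files:

import { Injectable } from '@nestjs/common';
// Assumed import path; adjust to however your app resolves the package's exports.
import { GoogleGenaiService, TierType } from '@dataclouder/nest-vertex';

@Injectable()
export class ExampleConsumer {
  constructor(private readonly googleGenai: GoogleGenaiService) {}

  async ask(prompt: string): Promise<string | undefined> {
    // modelType is optional; without it the service skips the balancer and uses GEMINI_API_KEY.
    const { client, balancedKey } = await this.googleGenai.getGoogleGenAIClient('gemini-1.5-flash-latest', TierType.TIER_1);
    console.log('key in use:', balancedKey?.id ?? 'default GEMINI_API_KEY');
    const result = await client.models.generateContent({ model: 'gemini-1.5-flash-latest', contents: prompt });
    return result.text;
  }
}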
package/services/key-balancer-api.service.d.ts
CHANGED
@@ -1,10 +1,10 @@
 import { HttpService } from '@nestjs/axios';
-import { AvailableKeyResult, ModelType } from '../models/key-balancer.models';
+import { AvailableKeyResult, ModelType, TierType } from '../models/key-balancer.models';
 export declare class KeyBalancerApiService {
     private readonly httpService;
     private readonly logger;
     constructor(httpService: HttpService);
-
-
+    getKey(modelType: ModelType, model: string, tier?: TierType): Promise<AvailableKeyResult>;
+    private getUnavailableServiceResult;
     recordFailedRequest(keyId: string, error: any, modelType: ModelType, model: string, ttlSeconds?: number): Promise<void>;
 }
package/services/key-balancer-api.service.js
CHANGED
@@ -20,46 +20,33 @@ let KeyBalancerApiService = KeyBalancerApiService_1 = class KeyBalancerApiServic
     constructor(httpService) {
         this.httpService = httpService;
     }
-    async
-
-
-
-        this.
-        const { data } = await (0, rxjs_1.firstValueFrom)(this.httpService.get(url));
-        return data;
-    }
-    catch (error) {
-        this.logger.error(`Failed to get LLM key for model ${model} from the external service.`, error.stack);
-        return {
-            id: '',
-            apiKey: '',
-            name: '',
-            type: '',
-            error: 'ServiceUnavailable',
-            errorDescription: 'Failed to fetch key from key-balancer API.',
-        };
+    async getKey(modelType, model, tier) {
+        const keyBalancerHost = process.env.KEY_BALANCER_HOST;
+        if (!keyBalancerHost) {
+            this.logger.error('KEY_BALANCER_HOST environment variable not set.');
+            return this.getUnavailableServiceResult();
         }
-    }
-    async getImageKey(model, tier) {
         try {
-            const
-            const url = `${keyBalancerHost}/api/key-balancer/redis/key/image?model=${model}&tier=${tier || ''}`;
+            const url = `${keyBalancerHost}/api/key-balancer/redis/key/${modelType.toLowerCase()}?model=${model}&tier=${tier || ''}`;
             this.logger.warn(`Request to: ${url}`);
             const { data } = await (0, rxjs_1.firstValueFrom)(this.httpService.get(url));
             return data;
         }
         catch (error) {
-            this.logger.error(`Failed to get
-            return
-            id: '',
-            apiKey: '',
-            name: '',
-            type: '',
-            error: 'ServiceUnavailable',
-            errorDescription: 'Failed to fetch key from key-balancer API.',
-        };
+            this.logger.error(`Failed to get ${modelType} key for model ${model} from the external service.`, error.stack);
+            return this.getUnavailableServiceResult();
         }
     }
+    getUnavailableServiceResult() {
+        return {
+            id: '',
+            apiKey: '',
+            name: '',
+            type: '',
+            error: 'ServiceUnavailable',
+            errorDescription: 'Failed to fetch key from key-balancer API.',
+        };
+    }
     async recordFailedRequest(keyId, error, modelType, model, ttlSeconds) {
         const keyBalancerHost = process.env.KEY_BALANCER_HOST;
         const url = `${keyBalancerHost}/api/key-balancer/redis/key/failed`;
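The change above folds the former per-type helpers (getLlmKey, getImageKey) into a single getKey(modelType, model, tier) that lowercases the ModelType into the endpoint path and returns a placeholder result instead of throwing when the balancer is unreachable. A hedged calling sketch; ModelType.IMAGE and TierType.TIER_1 appear elsewhere in this diff, while the import path and model name are just examples:

// Assumed export path for the package's public API.
import { KeyBalancerApiService, ModelType, TierType } from '@dataclouder/nest-vertex';

async function fetchImageKey(keyBalancer: KeyBalancerApiService): Promise<string | null> {
  // Resolves to GET ${KEY_BALANCER_HOST}/api/key-balancer/redis/key/<modeltype>?model=...&tier=...
  const key = await keyBalancer.getKey(ModelType.IMAGE, 'imagen-3.0-generate-002', TierType.TIER_1);
  if (key.error === 'ServiceUnavailable') {
    // An unreachable balancer (or missing KEY_BALANCER_HOST) yields this placeholder rather than an exception.
    return null;
  }
  return key.apiKey;
}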
package/services/vertex-gemini-chat.service.d.ts
CHANGED
@@ -1,19 +1,20 @@
 import { ChatMessageDict, MessageLLM, EModelQuality, DescribeImageRequestAdapter } from '../models/adapter.models';
+import { TierType } from '../models/key-balancer.models';
 import { KeyBalancerApiService } from './key-balancer-api.service';
+import { GoogleGenaiService } from './google-genai.service';
 export declare class GeminiChatService {
     private readonly keyBalancer;
+    private readonly googleGenaiService;
     private readonly logger;
     private readonly defaultModel;
-
-    constructor(keyBalancer: KeyBalancerApiService);
-    private _getGoogleGenAIClient;
+    constructor(keyBalancer: KeyBalancerApiService, googleGenaiService: GoogleGenaiService);
     private mapToGeminiRole;
     private formatMessagesToContent;
-    chat(messages: MessageLLM[], model?: string, keyType?:
-    chatStream(messages: MessageLLM[], model?: string, keyType?:
+    chat(messages: MessageLLM[], model?: string, keyType?: TierType): Promise<ChatMessageDict>;
+    chatStream(messages: MessageLLM[], model?: string, keyType?: TierType): Promise<AsyncIterable<ChatMessageDict>>;
     listModels(): Promise<Record<string, string>[]>;
     getDefaultQualityModel(quality: EModelQuality): string;
     private _extractJsonWithRecovery;
-    chatAndExtractJson(messages: MessageLLM[], model?: string, keyType?:
+    chatAndExtractJson(messages: MessageLLM[], model?: string, keyType?: TierType): Promise<any>;
     describeImageByUrl(dto: DescribeImageRequestAdapter): Promise<any>;
 }
package/services/vertex-gemini-chat.service.js
CHANGED
@@ -12,44 +12,23 @@ var GeminiChatService_1;
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.GeminiChatService = void 0;
 const common_1 = require("@nestjs/common");
-const genai_1 = require("@google/genai");
 const nest_core_1 = require("@dataclouder/nest-core");
 const gemini_models_1 = require("../models/gemini-models");
 const adapter_models_1 = require("../models/adapter.models");
 const llm_models_1 = require("../models/llm.models");
 const key_balancer_models_1 = require("../models/key-balancer.models");
 const key_balancer_api_service_1 = require("./key-balancer-api.service");
+const google_genai_service_1 = require("./google-genai.service");
 let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
     keyBalancer;
+    googleGenaiService;
     logger = new common_1.Logger(GeminiChatService_1.name);
-    defaultModel = 'gemini-
-
-    constructor(keyBalancer) {
+    defaultModel = 'gemini-1.5-flash-latest';
+    constructor(keyBalancer, googleGenaiService) {
         this.keyBalancer = keyBalancer;
-        this.
-        if (!this.defaultApiKey) {
-            this.logger.warn('GEMINI_API_KEY environment variable not set. This service will not work without available keys in the balancer.');
-        }
+        this.googleGenaiService = googleGenaiService;
         this.logger.log(`GeminiChatService initialized with model: ${this.defaultModel}`);
     }
-    async _getGoogleGenAIClient(model, keyType) {
-        this.logger.warn('_getGoogleGenAIClient() Getting key from redis');
-        if (process.env.KEY_BALANCER_HOST) {
-            const balancedKey = await this.keyBalancer.getLlmKey(model, keyType);
-            if (balancedKey?.apiKey) {
-                this.logger.debug(`Using balanced key: ${balancedKey.name} ${balancedKey.id}`);
-                return { client: new genai_1.GoogleGenAI({ apiKey: balancedKey.apiKey }), balancedKey };
-            }
-        }
-        if (!this.defaultApiKey) {
-            throw new nest_core_1.AppException({
-                error_message: 'No API keys available',
-                explanation: 'All balanced keys are rate-limited or failing, and no default GEMINI_API_KEY is configured.',
-            });
-        }
-        this.logger.warn('No balanced keys available. Falling back to default API key.');
-        return { client: new genai_1.GoogleGenAI({ apiKey: this.defaultApiKey }), balancedKey: null };
-    }
     mapToGeminiRole(role) {
         switch (role) {
             case 'assistant':
@@ -64,8 +43,8 @@ let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
     }
     formatMessagesToContent(messages) {
         return messages
-            .filter(
-            .map(
+            .filter(msg => msg.role !== 'system' && typeof msg.content === 'string')
+            .map(msg => ({
             role: this.mapToGeminiRole(msg.role),
             parts: [{ text: msg.content }],
         }));
@@ -78,11 +57,11 @@ let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
         if (!messages || messages.length === 0) {
             return { content: '', role: adapter_models_1.ChatRole.Assistant, metadata: { finishReason: 'NO_INPUT' } };
         }
-        const { client, balancedKey } = await this.
+        const { client, balancedKey } = await this.googleGenaiService.getGoogleGenAIClient(model, keyType);
         try {
             const systemMessage = messages
-                .filter(
-                .map(
+                .filter(msg => msg.role === 'system')
+                .map(msg => msg.content)
                 .join('\n');
             let lastMessage = messages[messages.length - 1];
             if (lastMessage.role !== 'user' || typeof lastMessage.content !== 'string') {
@@ -153,7 +132,7 @@ let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
             this.logger.warn('Gemini chatStream called with empty messages.');
             return (async function* () { })();
         }
-        const systemMessage = messages.find(
+        const systemMessage = messages.find(msg => msg.role === 'system')?.content;
         const lastMessage = messages[messages.length - 1];
         if (lastMessage.role !== 'user' || typeof lastMessage.content !== 'string') {
             this.logger.error('The last message for streaming must be from the user and contain string content.');
@@ -161,7 +140,7 @@ let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
         }
         const formattedContents = this.formatMessagesToContent(messages);
         const processStream = async function* () {
-            const { client, balancedKey } = await this.
+            const { client, balancedKey } = await this.googleGenaiService.getGoogleGenAIClient(model, keyType);
             try {
                 this.logger.debug(`Sending stream request to Gemini model ${model} with ${formattedContents.length} content parts and system instruction: ${!!systemMessage}`);
                 const stream = await client.models.generateContentStream({
@@ -250,7 +229,7 @@ let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
         const textModelForFix = gemini_models_1.GeminiModels.Gemini2_5Lite;
         const prompt = dto.prompt ||
             'Describe this image, return a json with {"description": "description of image in 1 paragraph", "video": "idea of video in 1 paragraph, describe movements and camera movements, animation, actions or anything that can fit in 6 seconds video "}';
-        const { client, balancedKey } = await this.
+        const { client, balancedKey } = await this.googleGenaiService.getGoogleGenAIClient(visionModelLLM, key_balancer_models_1.TierType.TIER_1);
         this.logger.log(`Fetching image from URL: ${dto.url}`);
         const imageResponse = await fetch(dto.url);
         if (!imageResponse.ok) {
@@ -286,12 +265,13 @@ let GeminiChatService = GeminiChatService_1 = class GeminiChatService {
             this.logger.error(`Error in describeImageByUrl vision call: ${error.message}`, error.stack);
             throw new Error(`Failed to get Gemini vision completion: ${error.message}`);
         }
-        return this._extractJsonWithRecovery(visionResponseText, textModelForFix
+        return this._extractJsonWithRecovery(visionResponseText, textModelForFix);
     }
 };
 exports.GeminiChatService = GeminiChatService;
 exports.GeminiChatService = GeminiChatService = GeminiChatService_1 = __decorate([
     (0, common_1.Injectable)(),
-    __metadata("design:paramtypes", [key_balancer_api_service_1.KeyBalancerApiService
+    __metadata("design:paramtypes", [key_balancer_api_service_1.KeyBalancerApiService,
+        google_genai_service_1.GoogleGenaiService])
 ], GeminiChatService);
 //# sourceMappingURL=vertex-gemini-chat.service.js.map
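With the controller and service changes above, keyType flows end-to-end as a typed TierType instead of an untyped value, and client construction is delegated to GoogleGenaiService. A short, hedged example of calling the chat API; the message literal shape and the metadata field are inferred from the role/content handling and return values visible in the diff, and the import path is assumed:

import { TierType } from '@dataclouder/nest-vertex'; // assumed export path

// geminiChatService is an injected GeminiChatService instance.
const reply = await geminiChatService.chat(
  [
    { role: 'system', content: 'Answer in one sentence.' },
    { role: 'user', content: 'What does the key balancer do?' },
  ],
  undefined,        // model: omitted, so the service default ('gemini-1.5-flash-latest') is used
  TierType.TIER_1,  // keyType is now typed as TierType
);
console.log(reply.content);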
package/services/vertex-image.service.js
CHANGED
@@ -43,7 +43,7 @@ let ImageVertexService = ImageVertexService_1 = class ImageVertexService {
             keyId = 'local';
         }
         else {
-            availableKey = await this.keyBalancerApiService.
+            availableKey = (await this.keyBalancerApiService.getKey(key_balancer_models_1.ModelType.IMAGE, model, key_balancer_models_1.TierType.TIER_1));
             if (availableKey === null) {
                 this.logger.error('KEY BALANCER IS DOWN: :::: No available API key from key balancer.');
             }
@@ -83,10 +83,10 @@ let ImageVertexService = ImageVertexService_1 = class ImageVertexService {
                 safetySettings: [{ threshold: genai_1.HarmBlockThreshold.BLOCK_NONE }],
             },
         };
-        Object.keys(imageRequest.config).forEach(
+        Object.keys(imageRequest.config).forEach(key => imageRequest.config[key] === undefined && delete imageRequest.config[key]);
         try {
             const response = await client.models.generateImages(imageRequest);
-            const generatedImages = response?.generatedImages?.map(
+            const generatedImages = response?.generatedImages?.map(generatedImage => {
                 return generatedImage.image;
             });
             console.warn(' _____ Creo que este no regresa el tipo ImageGenAdapterResponse ');
@@ -122,7 +122,7 @@ let ImageVertexService = ImageVertexService_1 = class ImageVertexService {
                 personGeneration: genai_1.PersonGeneration.ALLOW_ALL,
             },
         };
-        Object.keys(imageVertexRequest.config || {}).forEach(
+        Object.keys(imageVertexRequest.config || {}).forEach(key => imageVertexRequest?.config?.[key] === undefined && delete imageVertexRequest?.config?.[key]);
        try {
            const slicedPrompt = imageRequest.prompt.slice(0, 50);
            this.logger.log(`Sending request... -> startImageGenerationAdapter() MODEL ${imageVertexRequest.model} for prompt: ${slicedPrompt}...`);
package/services/vertex-veo-genai.service.d.ts
CHANGED
@@ -3,14 +3,15 @@ import { GeneratedAsset } from '../models/generated-asset.entity';
 import { CloudStorageService } from '@dataclouder/nest-storage';
 import { GeneratedAssetService } from './generated-asset.service';
 import { HttpService } from '@nestjs/axios';
+import { GoogleGenaiService } from './google-genai.service';
 export declare class VertexVeoGenaiService {
     private readonly generatedAssetModel;
     private readonly cloudStorageService;
     private readonly httpService;
     private readonly generatedAssetService;
+    private readonly googleGenaiService;
     private readonly logger;
-
-    constructor(generatedAssetModel: Model<GeneratedAsset>, cloudStorageService: CloudStorageService, httpService: HttpService, generatedAssetService: GeneratedAssetService);
+    constructor(generatedAssetModel: Model<GeneratedAsset>, cloudStorageService: CloudStorageService, httpService: HttpService, generatedAssetService: GeneratedAssetService, googleGenaiService: GoogleGenaiService);
     generateVideo(): Promise<any>;
     generateVideoFromAssetId(assetId: string, metadata?: any): Promise<{
         url: string | undefined;
package/services/vertex-veo-genai.service.js
CHANGED
@@ -51,39 +51,39 @@ const mongoose_1 = require("@nestjs/mongoose");
 const mongoose_2 = require("mongoose");
 const generated_asset_entity_1 = require("../models/generated-asset.entity");
 const nest_storage_1 = require("@dataclouder/nest-storage");
-const genai_1 = require("@google/genai");
 const generated_asset_service_1 = require("./generated-asset.service");
 const axios_1 = require("@nestjs/axios");
 const uuid_1 = require("uuid");
 const fs = __importStar(require("fs/promises"));
-const
-
-
-};
+const google_genai_service_1 = require("./google-genai.service");
+const gemini_models_1 = require("../models/gemini-models");
+const key_balancer_models_1 = require("../models/key-balancer.models");
 let VertexVeoGenaiService = class VertexVeoGenaiService {
     generatedAssetModel;
     cloudStorageService;
     httpService;
     generatedAssetService;
+    googleGenaiService;
     logger = new common_1.Logger('VertexVeoGenaiService');
-
-    constructor(generatedAssetModel, cloudStorageService, httpService, generatedAssetService) {
+    constructor(generatedAssetModel, cloudStorageService, httpService, generatedAssetService, googleGenaiService) {
         this.generatedAssetModel = generatedAssetModel;
         this.cloudStorageService = cloudStorageService;
         this.httpService = httpService;
         this.generatedAssetService = generatedAssetService;
-        this.
+        this.googleGenaiService = googleGenaiService;
         this.logger.log('VertexVeoGenaiService initialized. This service may be temporal, since VEO is only available in GCP vía REST API.');
     }
     async generateVideo() {
         const prompt = 'Panning wide shot of a calico kitten sleeping in the sunshine';
-        const
+        const { client, balancedKey } = await this.googleGenaiService.getGoogleGenAIClient(gemini_models_1.GeminiVideoModels.Veo3Fast, key_balancer_models_1.TierType.TIER_1, key_balancer_models_1.ModelType.VIDEO);
+        console.log('the balance key for video is ', balancedKey);
+        const imagenResponse = await client.models.generateImages({
             model: 'imagen-3.0-generate-002',
             prompt: prompt,
         });
         this.logger.log(imagenResponse);
         const videoRequest = {
-            model:
+            model: gemini_models_1.GeminiVideoModels.Veo3Fast,
             prompt: prompt,
             image: {
                 imageBytes: imagenResponse.generatedImages[0].image.imageBytes,
@@ -98,11 +98,12 @@ let VertexVeoGenaiService = class VertexVeoGenaiService {
             this.logger.log('Updating asset metadata for future retrieval:', metadata);
             await this.generatedAssetService.partialUpdate(assetId, metadata);
         }
+        console.log('Donwloading image for first frame....', genAsset.assets.firstFrame.url);
         const imageUrl = genAsset.assets.firstFrame.url;
         const response = await this.httpService.axiosRef.get(imageUrl, { responseType: 'arraybuffer' });
         const imageBufferBytes = Buffer.from(response.data, 'binary');
         const videoRequest = {
-            model:
+            model: gemini_models_1.GeminiVideoModels.Veo3Fast,
             prompt: genAsset.prompt || genAsset?.description || 'Random movement for video',
             config: {
                 aspectRatio: '9:16',
@@ -117,27 +118,32 @@ let VertexVeoGenaiService = class VertexVeoGenaiService {
         genAsset.url = videoRequestResult.url;
         console.log(videoRequestResult);
         await this.generatedAssetService.save(genAsset);
+        this.logger.log(`Generated video saved to DB: ${genAsset._id}`);
         return videoRequestResult;
     }
     async _generateVideo(videoRequest) {
-
+        console.log('Generating video...', videoRequest);
+        const { client, balancedKey } = await this.googleGenaiService.getGoogleGenAIClient(gemini_models_1.GeminiVideoModels.Veo3Fast, key_balancer_models_1.TierType.TIER_1, key_balancer_models_1.ModelType.VIDEO);
+        console.log('the balance key for video is ', balancedKey);
+        let operation = await client.models.generateVideos(videoRequest);
         const filename = `veo_video_${(0, uuid_1.v4)()}.mp4`;
         let seconds = 0;
         while (!operation.done) {
             seconds += 5;
             this.logger.log(`Waiting for video generation to complete... (${seconds}s)`);
             await new Promise(resolve => setTimeout(resolve, 5000));
-            operation = await
+            operation = await client.operations.getVideosOperation({
                 operation: operation,
             });
             console.log('result', JSON.stringify(operation));
         }
-
+        console.log('Operations is Done...', JSON.stringify(operation));
+        await client.files.download({
             file: operation.response?.generatedVideos?.[0].video || '',
             downloadPath: `${filename}`,
         });
-        this.logger.log(` ⚠️
-        await new Promise(resolve => setTimeout(resolve,
+        this.logger.log(` ⚠️ Bug de GEN AI API, No FUNCIONA ASYNC para descargar video, esperando 3.5 segundos a que descarge.....`);
+        await new Promise(resolve => setTimeout(resolve, 3500));
         const videoBuffer = await fs.readFile(filename);
         console.log('Subiendo video a storage', filename, process.env.STORAGE_BUCKET);
         const uploadResult = await this.cloudStorageService.uploadFileAndMakePublic(process.env.STORAGE_BUCKET, `generated-videos/${filename}`, videoBuffer, 'video/mp4');
@@ -155,6 +161,7 @@ exports.VertexVeoGenaiService = VertexVeoGenaiService = __decorate([
     __metadata("design:paramtypes", [mongoose_2.Model,
         nest_storage_1.CloudStorageService,
         axios_1.HttpService,
-        generated_asset_service_1.GeneratedAssetService
+        generated_asset_service_1.GeneratedAssetService,
+        google_genai_service_1.GoogleGenaiService])
 ], VertexVeoGenaiService);
 //# sourceMappingURL=vertex-veo-genai.service.js.map
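vertex-veo-genai.service.js now obtains its GoogleGenAI client from GoogleGenaiService and polls the long-running Veo operation before downloading the result. A condensed TypeScript sketch of that poll-and-download loop, using only the @google/genai calls visible in the diff; the model id string is an example stand-in for GeminiVideoModels.Veo3Fast, whose value is not shown here:

import { GoogleGenAI } from '@google/genai';

async function generateAndDownloadVideo(client: GoogleGenAI, prompt: string, downloadPath: string) {
  // Kick off the long-running Veo generation (model id is illustrative).
  let operation = await client.models.generateVideos({ model: 'veo-3.0-fast-generate-001', prompt });
  while (!operation.done) {
    // The service polls every 5 seconds until the operation reports done.
    await new Promise(resolve => setTimeout(resolve, 5000));
    operation = await client.operations.getVideosOperation({ operation });
  }
  const video = operation.response?.generatedVideos?.[0]?.video;
  if (video) {
    // The service then waits ~3.5 s before reading the file from disk, working around a download timing issue it logs.
    await client.files.download({ file: video, downloadPath });
  }
  return downloadPath;
}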