@juspay/neurolink 8.26.1 → 8.28.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,615 @@
1
+ /**
2
+ * Vertex AI Video Generation Handler
3
+ *
4
+ * Standalone module for Veo 3.1 video generation via Vertex AI.
5
+ * Generates videos from an input image and text prompt.
6
+ *
7
+ * Based on Vertex AI Veo 3.1 video generation API
8
+ *
9
+ * @module adapters/video/vertexVideoHandler
10
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/video/generate-videos
11
+ */
12
+ import { readFile } from "node:fs/promises";
13
+ import { ErrorCategory, ErrorSeverity } from "../../constants/enums.js";
14
+ import { TIMEOUTS } from "../../constants/timeouts.js";
15
+ import { NeuroLinkError, withTimeout } from "../../utils/errorHandling.js";
16
+ import { logger } from "../../utils/logger.js";
17
+ // ============================================================================
18
+ // VIDEO ERROR CODES
19
+ // ============================================================================
20
/**
 * Video generation runtime error codes.
 *
 * These are for runtime/execution errors during video generation.
 * Input validation errors (missing image, invalid options, etc.) are handled
 * by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
 *
 * Following TTS pattern (TTS_ERROR_CODES + TTSError in ttsProcessor.ts).
 *
 * Frozen so the shared registry cannot be mutated by consumers at runtime.
 */
export const VIDEO_ERROR_CODES = Object.freeze({
    /** Video generation API call failed */
    GENERATION_FAILED: "VIDEO_GENERATION_FAILED",
    /** Provider (Vertex AI) not properly configured */
    PROVIDER_NOT_CONFIGURED: "VIDEO_PROVIDER_NOT_CONFIGURED",
    /** Polling for video completion timed out */
    POLL_TIMEOUT: "VIDEO_POLL_TIMEOUT",
});
37
/**
 * Video generation error class.
 *
 * Extends NeuroLinkError so video failures carry the SDK's common error
 * metadata (code, category, severity, retriable flag, context) and flow
 * through the same handling as every other provider error.
 */
export class VideoError extends NeuroLinkError {
    constructor(options) {
        super({
            code: options.code,
            message: options.message,
            // Defaults: runtime video failures are high-severity execution
            // errors and are not retried unless the caller says otherwise.
            category: options.category ?? ErrorCategory.EXECUTION,
            severity: options.severity ?? ErrorSeverity.HIGH,
            retriable: options.retriable ?? false,
            context: options.context,
            originalError: options.originalError,
        });
        this.name = "VideoError";
    }
}
55
// ============================================================================
// CONSTANTS
// ============================================================================
/** Overall budget for video generation (3 minutes - generation typically takes 1-2 min) */
const VIDEO_GENERATION_TIMEOUT_MS = 180000;
/** Polling interval for checking long-running operation status (5 seconds) */
const POLL_INTERVAL_MS = 5000;
/** Full model resource name for Veo 3.1 (IMPORTANT: the API needs the full ID, not just "veo-3.1") */
const VEO_MODEL = "veo-3.1-generate-001";
/** Default Vertex AI location when no location env var is set */
const DEFAULT_LOCATION = "us-central1";
66
// ============================================================================
// CONFIGURATION HELPERS
// ============================================================================
/**
 * Check if Vertex AI is configured for video generation.
 *
 * Any one of these credential forms is sufficient: a key-file path
 * (GOOGLE_APPLICATION_CREDENTIALS), an inline service-account key
 * (GOOGLE_SERVICE_ACCOUNT_KEY), or a client-email/private-key pair
 * (GOOGLE_AUTH_CLIENT_EMAIL + GOOGLE_AUTH_PRIVATE_KEY).
 *
 * @returns True if Google Cloud credentials are available
 *
 * @example
 * ```typescript
 * if (!isVertexVideoConfigured()) {
 *   console.error("Set GOOGLE_APPLICATION_CREDENTIALS to enable video generation");
 * }
 * ```
 */
export function isVertexVideoConfigured() {
    const env = process.env;
    // Either standalone credential form is enough on its own.
    if (env.GOOGLE_APPLICATION_CREDENTIALS || env.GOOGLE_SERVICE_ACCOUNT_KEY) {
        return true;
    }
    // The email/key pair only counts when both halves are present.
    return Boolean(env.GOOGLE_AUTH_CLIENT_EMAIL && env.GOOGLE_AUTH_PRIVATE_KEY);
}
87
+ /**
88
+ * Get Vertex AI project configuration from environment variables or ADC credentials
89
+ *
90
+ * @returns Project ID and location for Vertex AI
91
+ * @throws VideoError if project cannot be determined
92
+ */
93
+ async function getVertexConfig() {
94
+ const location = process.env.GOOGLE_VERTEX_LOCATION ||
95
+ process.env.GOOGLE_CLOUD_LOCATION ||
96
+ DEFAULT_LOCATION;
97
+ // Try environment variables first
98
+ let project = process.env.GOOGLE_VERTEX_PROJECT ||
99
+ process.env.GOOGLE_CLOUD_PROJECT ||
100
+ process.env.GOOGLE_CLOUD_PROJECT_ID ||
101
+ process.env.VERTEX_PROJECT_ID;
102
+ // Fallback: read from ADC credentials file
103
+ if (!project && process.env.GOOGLE_APPLICATION_CREDENTIALS) {
104
+ try {
105
+ const credData = JSON.parse(await readFile(process.env.GOOGLE_APPLICATION_CREDENTIALS, "utf-8"));
106
+ project = credData.quota_project_id || credData.project_id;
107
+ }
108
+ catch (e) {
109
+ // Ignore read errors, will throw below if project still not found
110
+ logger.debug("Failed to read project from credentials file", {
111
+ error: e instanceof Error ? e.message : String(e),
112
+ });
113
+ }
114
+ }
115
+ if (!project) {
116
+ throw new VideoError({
117
+ code: VIDEO_ERROR_CODES.PROVIDER_NOT_CONFIGURED,
118
+ message: "Google Cloud project not found. Set GOOGLE_VERTEX_PROJECT or GOOGLE_CLOUD_PROJECT environment variable, or ensure ADC credentials contain project_id",
119
+ category: ErrorCategory.CONFIGURATION,
120
+ severity: ErrorSeverity.HIGH,
121
+ retriable: false,
122
+ context: {
123
+ missingVar: "GOOGLE_VERTEX_PROJECT",
124
+ feature: "video-generation",
125
+ checkedEnvVars: [
126
+ "GOOGLE_VERTEX_PROJECT",
127
+ "GOOGLE_CLOUD_PROJECT",
128
+ "GOOGLE_CLOUD_PROJECT_ID",
129
+ "VERTEX_PROJECT_ID",
130
+ ],
131
+ },
132
+ });
133
+ }
134
+ return { project, location };
135
+ }
136
/**
 * Get access token for Vertex AI authentication.
 *
 * Uses google-auth-library (transitive dependency from @google-cloud/vertexai),
 * loaded lazily at call time, to obtain an access token from the configured
 * credentials. The token fetch is bounded by TIMEOUTS.PROVIDER.AUTH_MS.
 *
 * @returns Access token string
 * @throws VideoError if authentication fails or no token is returned
 */
async function getAccessToken() {
    // Both failure modes share the same error shape; build it in one place.
    const authFailure = (message, originalError) => new VideoError({
        code: VIDEO_ERROR_CODES.PROVIDER_NOT_CONFIGURED,
        message,
        category: ErrorCategory.CONFIGURATION,
        severity: ErrorSeverity.HIGH,
        retriable: false,
        context: { provider: "vertex", feature: "video-generation" },
        originalError,
    });
    try {
        // Dynamic import: google-auth-library is a transitive dependency and
        // is only needed when video generation is actually invoked.
        const { GoogleAuth } = await import("google-auth-library");
        const auth = new GoogleAuth({
            keyFilename: process.env.GOOGLE_APPLICATION_CREDENTIALS,
            scopes: ["https://www.googleapis.com/auth/cloud-platform"],
        });
        const token = await withTimeout(auth.getAccessToken(), TIMEOUTS.PROVIDER.AUTH_MS);
        if (!token) {
            throw authFailure("Failed to obtain access token from Google Cloud credentials");
        }
        return token;
    }
    catch (error) {
        // Our own VideoError (from the !token branch) passes through untouched.
        if (error instanceof VideoError) {
            throw error;
        }
        throw authFailure(`Google Cloud authentication failed: ${error instanceof Error ? error.message : String(error)}`, error instanceof Error ? error : undefined);
    }
}
182
// ============================================================================
// IMAGE UTILITIES
// ============================================================================
/**
 * Detect MIME type from image buffer magic bytes.
 *
 * Recognizes JPEG (FF D8 FF), PNG (89 50 4E 47), and WebP (RIFF....WEBP).
 * Anything else — including buffers too short to inspect — falls back to
 * "image/jpeg" with a warning rather than failing the request.
 *
 * @param image - Image buffer to analyze
 * @returns MIME type string (defaults to "image/jpeg" if unknown)
 */
function detectMimeType(image) {
    // Need at least four bytes to compare against any signature.
    if (image.length < 4) {
        logger.warn("Image buffer too small for format detection", {
            size: image.length,
        });
        return "image/jpeg";
    }
    const [b0, b1, b2, b3] = image;
    // JPEG: FF D8 FF
    if (b0 === 0xff && b1 === 0xd8 && b2 === 0xff) {
        return "image/jpeg";
    }
    // PNG: 89 50 4E 47
    if (b0 === 0x89 && b1 === 0x50 && b2 === 0x4e && b3 === 0x47) {
        return "image/png";
    }
    // WebP: "RIFF" header followed by "WEBP" at byte offset 8.
    const hasRiffHeader = b0 === 0x52 && b1 === 0x49 && b2 === 0x46 && b3 === 0x46;
    const hasWebpTag = image.length >= 12 &&
        image[8] === 0x57 &&
        image[9] === 0x45 &&
        image[10] === 0x42 &&
        image[11] === 0x50;
    if (hasRiffHeader && hasWebpTag) {
        return "image/webp";
    }
    // Unknown signature: log what we saw and fall back.
    logger.warn("Unknown image format detected, defaulting to image/jpeg", {
        firstBytes: image.slice(0, 12).toString("hex"),
        size: image.length,
    });
    return "image/jpeg";
}
229
/**
 * Calculate video dimensions based on resolution and aspect ratio.
 *
 * The resolution picks the long/short edge pair (1080p -> 1920x1080,
 * otherwise 1280x720); the aspect ratio decides which edge is the width.
 *
 * @param resolution - Video resolution ("720p" or "1080p")
 * @param aspectRatio - Aspect ratio ("16:9" or "9:16")
 * @returns Width and height dimensions
 */
function calculateDimensions(resolution, aspectRatio) {
    const longEdge = resolution === "1080p" ? 1920 : 1280;
    const shortEdge = resolution === "1080p" ? 1080 : 720;
    // Portrait orientation swaps the edges.
    if (aspectRatio === "9:16") {
        return { width: shortEdge, height: longEdge };
    }
    return { width: longEdge, height: shortEdge };
}
247
// ============================================================================
// VIDEO GENERATION
// ============================================================================
/**
 * Generate video using Vertex AI Veo 3.1.
 *
 * Creates a video from an input image and text prompt using Google's Veo 3.1
 * model via the predictLongRunning endpoint, then polls the operation until
 * the video is ready. The video is generated with optional audio and can be
 * customized for resolution, duration, and aspect ratio.
 *
 * @param image - Input image buffer (JPEG, PNG, or WebP)
 * @param prompt - Text prompt describing desired video motion/content (max 500 chars)
 * @param options - Video output options (resolution, length, aspect ratio, audio)
 * @returns VideoGenerationResult with video buffer and metadata
 *
 * @throws {VideoError} When credentials are not configured (PROVIDER_NOT_CONFIGURED)
 * @throws {VideoError} When API returns an error (GENERATION_FAILED)
 * @throws {VideoError} When polling times out (POLL_TIMEOUT)
 *
 * @example
 * ```typescript
 * import { generateVideoWithVertex } from "@juspay/neurolink/adapters/video/vertexVideoHandler";
 * import { readFileSync, writeFileSync } from "fs";
 *
 * const image = readFileSync("./input.png");
 * const result = await generateVideoWithVertex(
 *   image,
 *   "Smooth cinematic camera movement with dramatic lighting",
 *   { resolution: "720p", length: 6, aspectRatio: "16:9", audio: true }
 * );
 *
 * writeFileSync("output.mp4", result.data);
 * ```
 */
export async function generateVideoWithVertex(image, prompt, options = {}) {
    // Fail fast before doing any work if credentials are absent.
    if (!isVertexVideoConfigured()) {
        throw new VideoError({
            code: VIDEO_ERROR_CODES.PROVIDER_NOT_CONFIGURED,
            message: "Vertex AI credentials not configured. Set GOOGLE_APPLICATION_CREDENTIALS environment variable",
            category: ErrorCategory.CONFIGURATION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: {
                provider: "vertex",
                feature: "video-generation",
                suggestion: "Set GOOGLE_APPLICATION_CREDENTIALS to the path of your service account JSON file",
            },
        });
    }
    const { project, location } = await getVertexConfig();
    const startTime = Date.now();
    // Set defaults (matching reference implementation).
    const resolution = options.resolution || "720p";
    // NOTE(review): `||` treats 0 as unset; harmless here since valid lengths are 4, 6, or 8.
    const durationSeconds = options.length || 6; // 4, 6, or 8
    const aspectRatio = options.aspectRatio || "16:9";
    const generateAudio = options.audio ?? true;
    logger.debug("Starting Vertex video generation", {
        project,
        location,
        model: VEO_MODEL,
        resolution,
        durationSeconds,
        aspectRatio,
        generateAudio,
        promptLength: prompt.length,
        imageSize: image.length,
    });
    try {
        // Encode image to base64 and detect MIME type from magic bytes.
        const imageBase64 = image.toString("base64");
        const mimeType = detectMimeType(image);
        // Get auth token (bounded by the provider auth timeout).
        const accessToken = await getAccessToken();
        // Construct API request - predictLongRunning endpoint (returns an
        // operation to poll, not the video itself).
        const endpoint = `https://${location}-aiplatform.googleapis.com/v1/projects/${project}/locations/${location}/publishers/google/models/${VEO_MODEL}:predictLongRunning`;
        // Request body structure (verified working from video.js reference).
        const requestBody = {
            instances: [
                {
                    prompt: prompt,
                    image: {
                        bytesBase64Encoded: imageBase64,
                        mimeType: mimeType,
                    },
                },
            ],
            parameters: {
                sampleCount: 1,
                durationSeconds: durationSeconds,
                aspectRatio: aspectRatio,
                resolution: resolution,
                generateAudio: generateAudio,
                resizeMode: "pad", // "pad" preserves aspect ratio, "crop" fills frame
            },
        };
        logger.debug("Sending video generation request", { endpoint });
        // Abort controller bounds only the initial HTTP request; polling has
        // its own budget derived from VIDEO_GENERATION_TIMEOUT_MS below.
        const controller = new AbortController();
        const requestTimeout = setTimeout(() => controller.abort(), 30000); // 30s request timeout
        // Start long-running operation.
        let response;
        try {
            response = await fetch(endpoint, {
                method: "POST",
                headers: {
                    Authorization: `Bearer ${accessToken}`,
                    "Content-Type": "application/json; charset=utf-8",
                },
                body: JSON.stringify(requestBody),
                signal: controller.signal,
            });
        }
        catch (error) {
            // Clear the timer before translating/rethrowing the fetch failure.
            clearTimeout(requestTimeout);
            if (error instanceof Error && error.name === "AbortError") {
                throw new VideoError({
                    code: VIDEO_ERROR_CODES.GENERATION_FAILED,
                    message: "Video generation request timed out after 30 seconds",
                    category: ErrorCategory.EXECUTION,
                    severity: ErrorSeverity.HIGH,
                    retriable: true,
                    context: { provider: "vertex", endpoint, timeout: 30000 },
                });
            }
            throw error;
        }
        clearTimeout(requestTimeout);
        if (!response.ok) {
            const errorText = await response.text();
            throw new VideoError({
                code: VIDEO_ERROR_CODES.GENERATION_FAILED,
                message: `Vertex API error: ${response.status} - ${errorText}`,
                category: ErrorCategory.EXECUTION,
                severity: ErrorSeverity.HIGH,
                retriable: response.status >= 500, // 5xx errors are retriable
                context: {
                    status: response.status,
                    error: errorText,
                    provider: "vertex",
                    endpoint,
                },
            });
        }
        const operation = await response.json();
        const operationName = operation.name;
        if (!operationName) {
            throw new VideoError({
                code: VIDEO_ERROR_CODES.GENERATION_FAILED,
                message: "Vertex API did not return an operation name",
                category: ErrorCategory.EXECUTION,
                severity: ErrorSeverity.HIGH,
                retriable: false,
                context: { response: operation, provider: "vertex" },
            });
        }
        logger.debug("Video generation operation started", { operationName });
        // Poll for completion using fetchPredictOperation endpoint. The poll
        // budget is whatever is left of the overall timeout (at least 1s).
        const remainingTime = VIDEO_GENERATION_TIMEOUT_MS - (Date.now() - startTime);
        const videoBuffer = await pollVideoOperation(operationName, accessToken, project, location, Math.max(1000, remainingTime));
        const processingTime = Date.now() - startTime;
        // Calculate dimensions based on resolution and aspect ratio.
        const dimensions = calculateDimensions(resolution, aspectRatio);
        logger.info("Video generation complete", {
            processingTime,
            videoSizeKB: Math.round(videoBuffer.length / 1024),
            dimensions,
        });
        return {
            data: videoBuffer,
            mediaType: "video/mp4",
            metadata: {
                duration: durationSeconds,
                dimensions,
                model: VEO_MODEL,
                provider: "vertex",
                aspectRatio,
                audioEnabled: generateAudio,
                processingTime,
            },
        };
    }
    catch (error) {
        // Re-throw VideoError as-is (already carries full context).
        if (error instanceof VideoError) {
            throw error;
        }
        // Wrap other errors so callers always see a VideoError from this path.
        throw new VideoError({
            code: VIDEO_ERROR_CODES.GENERATION_FAILED,
            message: `Video generation failed: ${error instanceof Error ? error.message : String(error)}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: true,
            context: { provider: "vertex" },
            originalError: error instanceof Error ? error : undefined,
        });
    }
}
446
/**
 * Extract video buffer from completed operation result.
 *
 * Checks, in order: a server-side error payload, presence of a video entry,
 * a GCS-URI-only result (unsupported), and finally the inline base64 bytes.
 *
 * @param result - Completed operation result from Vertex AI
 * @param operationName - Operation name for error context
 * @returns Video buffer
 * @throws VideoError if video data is missing or in unexpected format
 */
function extractVideoFromResult(result, operationName) {
    // A "done" operation can still carry a server-side error payload.
    if (result.error) {
        const description = result.error.message || JSON.stringify(result.error);
        throw new VideoError({
            code: VIDEO_ERROR_CODES.GENERATION_FAILED,
            message: `Video generation failed: ${description}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: { operationName, error: result.error, provider: "vertex" },
        });
    }
    // Expected success shape: result.response.videos[0]
    const video = result.response?.videos?.[0];
    if (!video) {
        throw new VideoError({
            code: VIDEO_ERROR_CODES.GENERATION_FAILED,
            message: "No video data in response from Vertex AI",
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: { operationName, response: result.response, provider: "vertex" },
        });
    }
    // The API returns either a GCS URI or inline base64; only inline is supported.
    if (video.gcsUri) {
        throw new VideoError({
            code: VIDEO_ERROR_CODES.GENERATION_FAILED,
            message: `Video stored at GCS: ${video.gcsUri}. GCS download not yet implemented.`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: {
                operationName,
                gcsUri: video.gcsUri,
                provider: "vertex",
                suggestion: "Do not set storageUri parameter to receive video as base64 inline",
            },
        });
    }
    if (video.bytesBase64Encoded) {
        return Buffer.from(video.bytesBase64Encoded, "base64");
    }
    // Entry exists but has neither a URI nor inline bytes.
    throw new VideoError({
        code: VIDEO_ERROR_CODES.GENERATION_FAILED,
        message: "No video bytes in response - unexpected response format",
        category: ErrorCategory.EXECUTION,
        severity: ErrorSeverity.HIGH,
        retriable: false,
        context: { operationName, videoData: video, provider: "vertex" },
    });
}
506
/**
 * Make a poll request to the Vertex AI fetchPredictOperation endpoint.
 *
 * The request is a POST (not a GET) with the operation name in the JSON
 * body, bounded by its own per-request abort timeout.
 *
 * @param pollEndpoint - Full URL for the poll endpoint
 * @param operationName - Operation name to poll
 * @param accessToken - Google Cloud access token
 * @param timeoutMs - Request timeout in milliseconds (default: 30000)
 * @returns Response JSON from the poll request
 * @throws VideoError on request failure
 */
async function makePollRequest(pollEndpoint, operationName, accessToken, timeoutMs = 30000) {
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    let response;
    try {
        response = await fetch(pollEndpoint, {
            method: "POST", // NOTE: POST, not GET!
            headers: {
                Authorization: `Bearer ${accessToken}`,
                "Content-Type": "application/json; charset=utf-8",
            },
            body: JSON.stringify({ operationName }), // operation name goes in the body
            signal: controller.signal,
        });
    }
    catch (error) {
        const aborted = error instanceof Error && error.name === "AbortError";
        if (!aborted) {
            throw error;
        }
        throw new VideoError({
            code: VIDEO_ERROR_CODES.GENERATION_FAILED,
            message: `Poll request timed out after ${timeoutMs}ms`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: true,
            context: { provider: "vertex", operationName, timeout: timeoutMs },
        });
    }
    finally {
        // Always release the abort timer, on success and on failure alike.
        clearTimeout(timer);
    }
    if (!response.ok) {
        const errorText = await response.text();
        throw new VideoError({
            code: VIDEO_ERROR_CODES.GENERATION_FAILED,
            message: `Failed to poll video operation: ${response.status} - ${errorText}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: response.status >= 500,
            context: {
                operationName,
                status: response.status,
                error: errorText,
                provider: "vertex",
            },
        });
    }
    return response.json();
}
564
// ============================================================================
// POLLING
// ============================================================================
/**
 * Poll Vertex AI operation until complete.
 *
 * IMPORTANT: Uses the model-scoped fetchPredictOperation endpoint (POST with
 * operationName in body), NOT the generic operations GET endpoint!
 *
 * @param operationName - Full operation name from predictLongRunning response
 * @param accessToken - Google Cloud access token
 * @param project - Google Cloud project ID
 * @param location - Vertex AI location
 * @param timeoutMs - Maximum time to wait for completion
 * @returns Video buffer when complete
 *
 * @throws {VideoError} On API error, timeout, or missing video data
 */
async function pollVideoOperation(operationName, accessToken, project, location, timeoutMs) {
    const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
    const startedAt = Date.now();
    // fetchPredictOperation is MODEL-SPECIFIC, hence the full model path.
    const pollEndpoint = `https://${location}-aiplatform.googleapis.com/v1/projects/${project}/locations/${location}/publishers/google/models/${VEO_MODEL}:fetchPredictOperation`;
    while (Date.now() - startedAt < timeoutMs) {
        const result = await makePollRequest(pollEndpoint, operationName, accessToken);
        if (result.done) {
            return extractVideoFromResult(result, operationName);
        }
        const elapsed = Date.now() - startedAt;
        logger.debug("Polling video operation...", {
            operationName,
            elapsed,
            remainingMs: timeoutMs - elapsed,
        });
        // Wait before the next poll attempt.
        await sleep(POLL_INTERVAL_MS);
    }
    // Budget exhausted without the operation completing.
    throw new VideoError({
        code: VIDEO_ERROR_CODES.POLL_TIMEOUT,
        message: `Video generation timed out after ${Math.round(timeoutMs / 1000)}s while polling for completion`,
        category: ErrorCategory.TIMEOUT,
        severity: ErrorSeverity.HIGH,
        retriable: true,
        context: {
            operationName,
            timeoutMs,
            provider: "vertex",
            suggestion: "Try again - video generation can take 1-3 minutes. Consider using a shorter duration or lower resolution.",
        },
    });
}
615
+ //# sourceMappingURL=vertexVideoHandler.js.map
@@ -131,7 +131,7 @@ export declare class SageMakerLanguageModel implements LanguageModelV1 {
131
131
  provider: string;
132
132
  specificationVersion: string;
133
133
  endpointName: string;
134
- modelType: "huggingface" | "mistral" | "llama" | "claude" | "jumpstart" | "custom" | undefined;
134
+ modelType: "huggingface" | "mistral" | "custom" | "llama" | "claude" | "jumpstart" | undefined;
135
135
  region: string;
136
136
  };
137
137
  /**
@@ -178,7 +178,7 @@ export declare class SageMakerLanguageModel implements LanguageModelV1 {
178
178
  provider: string;
179
179
  specificationVersion: string;
180
180
  endpointName: string;
181
- modelType: "huggingface" | "mistral" | "llama" | "claude" | "jumpstart" | "custom" | undefined;
181
+ modelType: "huggingface" | "mistral" | "custom" | "llama" | "claude" | "jumpstart" | undefined;
182
182
  region: string;
183
183
  };
184
184
  }