@juspay/neurolink 9.22.3 → 9.24.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/README.md +10 -13
- package/dist/adapters/video/directorPipeline.d.ts +31 -0
- package/dist/adapters/video/directorPipeline.js +516 -0
- package/dist/adapters/video/ffmpegAdapter.d.ts +78 -0
- package/dist/adapters/video/ffmpegAdapter.js +206 -0
- package/dist/adapters/video/frameExtractor.d.ts +28 -0
- package/dist/adapters/video/frameExtractor.js +143 -0
- package/dist/adapters/video/vertexVideoHandler.d.ts +25 -25
- package/dist/adapters/video/vertexVideoHandler.js +173 -42
- package/dist/adapters/video/videoMerger.d.ts +22 -0
- package/dist/adapters/video/videoMerger.js +171 -0
- package/dist/cli/factories/commandFactory.d.ts +34 -0
- package/dist/cli/factories/commandFactory.js +321 -130
- package/dist/constants/index.d.ts +1 -0
- package/dist/constants/index.js +2 -0
- package/dist/constants/videoErrors.d.ts +45 -0
- package/dist/constants/videoErrors.js +46 -0
- package/dist/core/baseProvider.js +42 -1
- package/dist/features/ppt/index.d.ts +1 -1
- package/dist/features/ppt/index.js +1 -1
- package/dist/lib/adapters/video/directorPipeline.d.ts +31 -0
- package/dist/lib/adapters/video/directorPipeline.js +517 -0
- package/dist/lib/adapters/video/ffmpegAdapter.d.ts +78 -0
- package/dist/lib/adapters/video/ffmpegAdapter.js +207 -0
- package/dist/lib/adapters/video/frameExtractor.d.ts +28 -0
- package/dist/lib/adapters/video/frameExtractor.js +144 -0
- package/dist/lib/adapters/video/vertexVideoHandler.d.ts +25 -25
- package/dist/lib/adapters/video/vertexVideoHandler.js +173 -42
- package/dist/lib/adapters/video/videoMerger.d.ts +22 -0
- package/dist/lib/adapters/video/videoMerger.js +172 -0
- package/dist/lib/constants/index.d.ts +1 -0
- package/dist/lib/constants/index.js +2 -0
- package/dist/lib/constants/videoErrors.d.ts +45 -0
- package/dist/lib/constants/videoErrors.js +47 -0
- package/dist/lib/core/baseProvider.js +42 -1
- package/dist/lib/features/ppt/index.d.ts +1 -1
- package/dist/lib/features/ppt/index.js +1 -1
- package/dist/lib/types/cli.d.ts +18 -2
- package/dist/lib/types/content.d.ts +1 -1
- package/dist/lib/types/generateTypes.d.ts +20 -3
- package/dist/lib/types/multimodal.d.ts +64 -4
- package/dist/lib/types/multimodal.js +36 -1
- package/dist/lib/utils/parameterValidation.d.ts +8 -1
- package/dist/lib/utils/parameterValidation.js +80 -1
- package/dist/types/cli.d.ts +18 -2
- package/dist/types/content.d.ts +1 -1
- package/dist/types/generateTypes.d.ts +20 -3
- package/dist/types/multimodal.d.ts +64 -4
- package/dist/types/multimodal.js +36 -1
- package/dist/utils/parameterValidation.d.ts +8 -1
- package/dist/utils/parameterValidation.js +80 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -1,3 +1,15 @@
|
|
|
1
|
+
## [9.24.0](https://github.com/juspay/neurolink/compare/v9.23.0...v9.24.0) (2026-03-14)
|
|
2
|
+
|
|
3
|
+
### Features
|
|
4
|
+
|
|
5
|
+
- **(ppt):** Implement CLI support for PPT Gen ([83e6847](https://github.com/juspay/neurolink/commit/83e684781b04562970bcd48f617d368d1c4db2ee))
|
|
6
|
+
|
|
7
|
+
## [9.23.0](https://github.com/juspay/neurolink/compare/v9.22.3...v9.23.0) (2026-03-14)
|
|
8
|
+
|
|
9
|
+
### Features
|
|
10
|
+
|
|
11
|
+
- **(video-generation):** Add support to generate longer videos by merging multiple scenes ([db9a94f](https://github.com/juspay/neurolink/commit/db9a94f6bbe8b9047831ef486f995c6c710372e8))
|
|
12
|
+
|
|
1
13
|
## [9.22.3](https://github.com/juspay/neurolink/compare/v9.22.2...v9.22.3) (2026-03-12)
|
|
2
14
|
|
|
3
15
|
### Bug Fixes
|
package/README.md
CHANGED
|
@@ -37,19 +37,15 @@ Extracted from production systems at Juspay and battle-tested at enterprise scal
|
|
|
37
37
|
|
|
38
38
|
## What's New (Q1 2026)
|
|
39
39
|
|
|
40
|
-
| Feature | Version | Description | Guide
|
|
41
|
-
| ----------------------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
42
|
-
| **
|
|
43
|
-
| **
|
|
44
|
-
| **
|
|
45
|
-
| **
|
|
46
|
-
| **
|
|
47
|
-
| **
|
|
48
|
-
| **
|
|
49
|
-
| **Title Generation Events** | v8.38.0 | Emit `conversation:titleGenerated` event when conversation title is generated. Supports custom title prompts via `NEUROLINK_TITLE_PROMPT`. | [Conversation Memory Guide](docs/conversation-memory.md) |
|
|
50
|
-
| **Video Generation with Veo** | v8.32.0 | Video generation using Veo 3.1 (`veo-3.1`). Realistic video generation with many parameter options | [Video Generation Guide](docs/features/video-generation.md) |
|
|
51
|
-
| **Image Generation with Gemini** | v8.31.0 | Native image generation using Gemini 2.0 Flash Experimental (`imagen-3.0-generate-002`). High-quality image synthesis directly from Google AI. | [Image Generation Guide](docs/image-generation-streaming.md) |
|
|
52
|
-
| **HTTP/Streamable HTTP Transport** | v8.29.0 | Connect to remote MCP servers via HTTP with authentication headers, automatic retry with exponential backoff, and configurable rate limiting. | [HTTP Transport Guide](docs/mcp-http-transport.md) |
|
|
40
|
+
| Feature | Version | Description | Guide |
|
|
41
|
+
| ----------------------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- |
|
|
42
|
+
| **External TracerProvider Support** | v8.43.0 | Integrate NeuroLink with existing OpenTelemetry instrumentation. Prevents duplicate registration conflicts. | [Observability Guide](docs/features/observability.md) |
|
|
43
|
+
| **Server Adapters** | v8.43.0 | Multi-framework HTTP server with Hono, Express, Fastify, Koa support. Full CLI for server management with foreground/background modes. | [Server Adapters Guide](docs/guides/server-adapters/index.md) |
|
|
44
|
+
| **Title Generation Events** | v8.38.0 | Emit `conversation:titleGenerated` event when conversation title is generated. Supports custom title prompts via `NEUROLINK_TITLE_PROMPT`. | [Conversation Memory Guide](docs/conversation-memory.md) |
|
|
45
|
+
| **Video Generation with Veo** | v8.32.0 | Video generation using Veo 3.1 (`veo-3.1`). Realistic video generation with many parameter options | [Video Generation Guide](docs/features/video-generation.md) |
|
|
46
|
+
| **Image Generation with Gemini** | v8.31.0 | Native image generation using Gemini 2.0 Flash Experimental (`imagen-3.0-generate-002`). High-quality image synthesis directly from Google AI. | [Image Generation Guide](docs/image-generation-streaming.md) |
|
|
47
|
+
| **RAG with generate()/stream()** | v9.2.0 | Pass `rag: { files }` to generate/stream for automatic document chunking, embedding, and AI-powered search. 10 chunking strategies, hybrid search, reranking. | [RAG Guide](docs/features/rag.md) |
|
|
48
|
+
| **HTTP/Streamable HTTP Transport** | v8.29.0 | Connect to remote MCP servers via HTTP with authentication headers, automatic retry with exponential backoff, and configurable rate limiting. | [HTTP Transport Guide](docs/mcp-http-transport.md) |
|
|
53
49
|
|
|
54
50
|
- **Memory** – Per-user condensed memory that persists across all conversations. Automatically retrieves and stores memory on each `generate()`/`stream()` call. Supports S3, Redis, and SQLite storage with LLM-powered condensation. → [Memory Guide](docs/features/memory.md)
|
|
55
51
|
- **External TracerProvider Support** – Integrate NeuroLink with applications that already have OpenTelemetry instrumentation. Supports auto-detection and manual configuration. → [Observability Guide](docs/features/observability.md)
|
|
@@ -57,6 +53,7 @@ Extracted from production systems at Juspay and battle-tested at enterprise scal
|
|
|
57
53
|
- **Title Generation Events** – Emit real-time events when conversation titles are auto-generated. Listen to `conversation:titleGenerated` for session tracking. → [Conversation Memory Guide](docs/conversation-memory.md#title-generation-events)
|
|
58
54
|
- **Custom Title Prompts** – Customize conversation title generation with `NEUROLINK_TITLE_PROMPT` environment variable. Use `${userMessage}` placeholder for dynamic prompts. → [Conversation Memory Guide](docs/conversation-memory.md#customizing-the-title-prompt)
|
|
59
55
|
- **Video Generation** – Transform images into 8-second videos with synchronized audio using Google Veo 3.1 via Vertex AI. Supports 720p/1080p resolutions, portrait/landscape aspect ratios. → [Video Generation Guide](docs/features/video-generation.md)
|
|
56
|
+
- **PPT Generation** – Create professional PowerPoint presentations from text prompts with 35 slide types (title, content, charts, timelines, dashboards, composite layouts), 5 themes, and optional AI-generated images. Works with Vertex AI, OpenAI, Anthropic, Google AI, Azure, and Bedrock. → [PPT Generation Guide](docs/features/ppt-generation.md)
|
|
60
57
|
- **Image Generation** – Generate images from text prompts using Gemini models via Vertex AI or Google AI Studio. Supports streaming mode with automatic file saving. → [Image Generation Guide](docs/image-generation-streaming.md)
|
|
61
58
|
- **RAG with generate()/stream()** – Just pass `rag: { files: ["./docs/guide.md"] }` to `generate()` or `stream()`. NeuroLink auto-chunks, embeds, and creates a search tool the AI can invoke. 10 chunking strategies, hybrid search, 5 reranker types. → [RAG Guide](docs/features/rag.md)
|
|
62
59
|
- **HTTP/Streamable HTTP Transport for MCP** – Connect to remote MCP servers via HTTP with authentication headers, retry logic, and rate limiting. → [HTTP Transport Guide](docs/mcp-http-transport.md)
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Director Mode Pipeline Orchestrator
|
|
3
|
+
*
|
|
4
|
+
* Orchestrates multi-segment video generation: parallel clip generation,
|
|
5
|
+
* parallel frame extraction + transition generation, and sequential merge.
|
|
6
|
+
*
|
|
7
|
+
* Error severity semantics:
|
|
8
|
+
* - HIGH: Fatal — clip generation / merge failures that abort the pipeline
|
|
9
|
+
* - MEDIUM: Non-fatal — transition / frame-extraction failures that degrade
|
|
10
|
+
* to a hard cut but do not abort the pipeline
|
|
11
|
+
*
|
|
12
|
+
* @module adapters/video/directorPipeline
|
|
13
|
+
*/
|
|
14
|
+
import type { DirectorModeOptions, DirectorSegment, VideoGenerationResult, VideoOutputOptions } from "../../types/multimodal.js";
|
|
15
|
+
/** Default timeout for entire Director Mode pipeline (10 minutes) */
|
|
16
|
+
export declare const DIRECTOR_PIPELINE_TIMEOUT_MS = 600000;
|
|
17
|
+
/**
|
|
18
|
+
* Execute the full Director Mode pipeline.
|
|
19
|
+
*
|
|
20
|
+
* Pipeline stages:
|
|
21
|
+
* 1. Parallel clip generation (concurrency = 2, circuit breaker after 2 failures)
|
|
22
|
+
* 2. Parallel frame extraction + transition generation
|
|
23
|
+
* 3. Sequential merge into single MP4
|
|
24
|
+
*
|
|
25
|
+
* @param segments - Array of DirectorSegment objects (2-10)
|
|
26
|
+
* @param videoOptions - Video output options (resolution, length, aspectRatio, audio)
|
|
27
|
+
* @param directorOptions - Director Mode options (transition prompts/durations)
|
|
28
|
+
* @param region - Vertex AI region override
|
|
29
|
+
* @returns VideoGenerationResult with merged video and Director metadata
|
|
30
|
+
*/
|
|
31
|
+
export declare function executeDirectorPipeline(segments: DirectorSegment[], videoOptions?: VideoOutputOptions, directorOptions?: DirectorModeOptions, region?: string): Promise<VideoGenerationResult>;
|
|
@@ -0,0 +1,516 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Director Mode Pipeline Orchestrator
|
|
3
|
+
*
|
|
4
|
+
* Orchestrates multi-segment video generation: parallel clip generation,
|
|
5
|
+
* parallel frame extraction + transition generation, and sequential merge.
|
|
6
|
+
*
|
|
7
|
+
* Error severity semantics:
|
|
8
|
+
* - HIGH: Fatal — clip generation / merge failures that abort the pipeline
|
|
9
|
+
* - MEDIUM: Non-fatal — transition / frame-extraction failures that degrade
|
|
10
|
+
* to a hard cut but do not abort the pipeline
|
|
11
|
+
*
|
|
12
|
+
* @module adapters/video/directorPipeline
|
|
13
|
+
*/
|
|
14
|
+
import pLimit from "p-limit";
|
|
15
|
+
import { ErrorCategory, ErrorSeverity } from "../../constants/enums.js";
|
|
16
|
+
import { logger } from "../../utils/logger.js";
|
|
17
|
+
import { validateImageForVideo } from "../../utils/parameterValidation.js";
|
|
18
|
+
import { VIDEO_ERROR_CODES } from "../../constants/videoErrors.js";
|
|
19
|
+
import { extractFirstFrame, extractLastFrame } from "./frameExtractor.js";
|
|
20
|
+
import { generateTransitionWithVertex, generateVideoWithVertex, VideoError, } from "./vertexVideoHandler.js";
|
|
21
|
+
import { mergeVideoBuffers } from "./videoMerger.js";
|
|
22
|
+
// ============================================================================
|
|
23
|
+
// CONSTANTS
|
|
24
|
+
// ============================================================================
|
|
25
|
+
/** Fixed concurrency for parallel Vertex API calls */
|
|
26
|
+
const CLIP_CONCURRENCY = 2;
|
|
27
|
+
/** Max consecutive clip failures before circuit-breaker trips */
|
|
28
|
+
const CIRCUIT_BREAKER_THRESHOLD = 2;
|
|
29
|
+
/** Timeout for fetching a segment image over HTTP (15 seconds) */
|
|
30
|
+
const IMAGE_IO_TIMEOUT_MS = 15_000;
|
|
31
|
+
/** Default transition prompt when none is specified */
|
|
32
|
+
const DEFAULT_TRANSITION_PROMPT = "Smooth cinematic transition between scenes";
|
|
33
|
+
/** Default timeout for entire Director Mode pipeline (10 minutes) */
|
|
34
|
+
export const DIRECTOR_PIPELINE_TIMEOUT_MS = 600_000;
|
|
35
|
+
/** Default transition duration in seconds */
|
|
36
|
+
const DEFAULT_TRANSITION_DURATION = 4;
|
|
37
|
+
// ============================================================================
|
|
38
|
+
// IMAGE LOADING
|
|
39
|
+
// ============================================================================
|
|
40
|
+
/**
 * Resolve a DirectorSegment image input to a Buffer.
 *
 * Supports Buffer, HTTP(S) URL, data URI, local file path, and
 * ImageWithAltText (whose `data` field may itself be a Buffer or any of
 * the string forms above).
 *
 * Plain strings and `ImageWithAltText.data` strings are resolved through the
 * same helper so both accept the identical set of formats — previously a
 * top-level `data:` URI string was misinterpreted as a file path and failed.
 *
 * @param {Buffer|string|{data: Buffer|string}} image - Segment image input
 * @param {number} segmentIndex - Segment index, used for error context only
 * @returns {Promise<Buffer>} Raw image bytes
 * @throws {VideoError} If the image cannot be resolved
 */
async function resolveImageToBuffer(image, segmentIndex) {
    if (Buffer.isBuffer(image)) {
        return image;
    }
    if (typeof image === "string") {
        return resolveStringImageInput(image, segmentIndex);
    }
    // ImageWithAltText — unwrap the nested `data` field
    if (typeof image === "object" && image !== null && "data" in image) {
        const imgData = image.data;
        if (Buffer.isBuffer(imgData)) {
            return imgData;
        }
        if (typeof imgData === "string") {
            return resolveStringImageInput(imgData, segmentIndex);
        }
    }
    // Anything else (number, array, object without `data`, null) is rejected
    throw new VideoError({
        code: VIDEO_ERROR_CODES.INVALID_INPUT,
        message: `Invalid image type for segment ${segmentIndex}`,
        category: ErrorCategory.EXECUTION,
        severity: ErrorSeverity.HIGH,
        retriable: false,
        context: { segmentIndex },
    });
}
/**
 * Resolve a string image input to a Buffer.
 *
 * Accepted forms, checked in order: HTTP(S) URL, base64 data URI, local
 * file path. Shared by both the plain-string and ImageWithAltText branches
 * of {@link resolveImageToBuffer}.
 *
 * @param {string} imgData - URL, data URI, or file path
 * @param {number} segmentIndex - Segment index, used for error context only
 * @returns {Promise<Buffer>} Raw image bytes
 * @throws {VideoError} If the string is not a valid URL, data URI, or file path
 */
async function resolveStringImageInput(imgData, segmentIndex) {
    // Handle HTTP(S) URLs
    if (imgData.startsWith("http://") || imgData.startsWith("https://")) {
        return fetchImageFromUrl(imgData, segmentIndex);
    }
    // Handle data URIs (e.g., "data:image/png;base64,iVBORw0KG...")
    if (imgData.startsWith("data:")) {
        const base64Match = imgData.match(/^data:[^;]+;base64,(.+)$/);
        if (base64Match && base64Match[1]) {
            return Buffer.from(base64Match[1], "base64");
        }
        // Invalid data URI format
        throw new VideoError({
            code: VIDEO_ERROR_CODES.INVALID_INPUT,
            message: `Invalid data URI format for segment ${segmentIndex}. Expected format: data:<mime>;base64,<data>`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: { segmentIndex, dataUriPrefix: imgData.substring(0, 50) },
        });
    }
    // Try as file path
    try {
        return await readImageFromDisk(imgData, segmentIndex);
    }
    catch (fileError) {
        // Not a valid file path - throw clear error instead of silently treating as base64
        throw new VideoError({
            code: VIDEO_ERROR_CODES.INVALID_INPUT,
            message: `Invalid image input for segment ${segmentIndex}: not a valid URL, file path, or data URI`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: {
                segmentIndex,
                inputType: "string",
                inputPrefix: imgData.substring(0, 50),
                fileError: fileError instanceof Error
                    ? fileError.message
                    : String(fileError),
            },
            originalError: fileError instanceof Error ? fileError : undefined,
        });
    }
}
|
|
117
|
+
/**
 * Fetch an image from an HTTP(S) URL with timeout.
 *
 * Aborts the request after IMAGE_IO_TIMEOUT_MS via an AbortController;
 * the timer is always cleared in the finally block.
 *
 * @param {string} url - HTTP(S) URL of the image
 * @param {number} segmentIndex - Segment index, used for error context only
 * @returns {Promise<Buffer>} Raw image bytes
 * @throws {VideoError} On non-OK status (retriable for 5xx) or network/abort failure
 */
async function fetchImageFromUrl(url, segmentIndex) {
    const abortCtl = new AbortController();
    const timer = setTimeout(() => abortCtl.abort(), IMAGE_IO_TIMEOUT_MS);
    try {
        const res = await fetch(url, { signal: abortCtl.signal });
        if (res.ok) {
            const bytes = await res.arrayBuffer();
            return Buffer.from(bytes);
        }
        throw new VideoError({
            code: VIDEO_ERROR_CODES.INVALID_INPUT,
            message: `Failed to fetch image for segment ${segmentIndex}: HTTP ${res.status}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            // Server-side errors may be transient; client errors are not
            retriable: res.status >= 500,
            context: { segmentIndex, url: url.substring(0, 100) },
        });
    }
    catch (err) {
        // Re-throw our own errors untouched; wrap everything else
        if (err instanceof VideoError) {
            throw err;
        }
        throw new VideoError({
            code: VIDEO_ERROR_CODES.INVALID_INPUT,
            message: `Failed to fetch image for segment ${segmentIndex}: ${err instanceof Error ? err.message : String(err)}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: true,
            context: { segmentIndex },
            originalError: err instanceof Error ? err : undefined,
        });
    }
    finally {
        clearTimeout(timer);
    }
}
|
|
153
|
+
/**
 * Read an image from the local filesystem.
 *
 * Uses a dynamic import of node:fs/promises so the module loads lazily.
 *
 * @param {string} filePath - Path to the image file
 * @param {number} segmentIndex - Segment index, used for error context only
 * @returns {Promise<Buffer>} Raw file contents
 * @throws {VideoError} If the file cannot be read (non-retriable)
 */
async function readImageFromDisk(filePath, segmentIndex) {
    const fsPromises = await import("node:fs/promises");
    try {
        return await fsPromises.readFile(filePath);
    }
    catch (err) {
        const reason = err instanceof Error ? err.message : String(err);
        throw new VideoError({
            code: VIDEO_ERROR_CODES.INVALID_INPUT,
            message: `Failed to read image file for segment ${segmentIndex}: ${reason}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: { segmentIndex, path: filePath },
            originalError: err instanceof Error ? err : undefined,
        });
    }
}
|
|
171
|
+
/**
 * Process clip completions in submission order so the consecutive-failure
 * count stays accurate even when clips finish out of order.
 *
 * Scans forward from `state.nextExpectedIndex`, stopping at the first clip
 * that is still pending. A success resets the failure streak; a failure
 * extends it and may trip the circuit breaker.
 *
 * Must be called after each clip completion (success or failure).
 *
 * @param {object} state - Shared clip-generation state (completions,
 *   nextExpectedIndex, consecutiveFailures, circuitOpen)
 */
function processOrderedCompletions(state) {
    for (;;) {
        if (state.nextExpectedIndex >= state.completions.length) {
            return;
        }
        const entry = state.completions[state.nextExpectedIndex];
        if (entry.status === "pending") {
            // Everything after this point is blocked until it completes
            return;
        }
        if (entry.status === "success") {
            // A success breaks any failure streak
            state.consecutiveFailures = 0;
        }
        else {
            state.consecutiveFailures += 1;
            if (state.consecutiveFailures >= CIRCUIT_BREAKER_THRESHOLD) {
                state.circuitOpen = true;
                logger.error(`Circuit breaker tripped after ${CIRCUIT_BREAKER_THRESHOLD} consecutive clip failures`);
            }
        }
        state.nextExpectedIndex += 1;
    }
}
|
|
201
|
+
/**
 * Generate a single clip, applying circuit-breaker logic.
 *
 * Flow: short-circuit if the breaker is already open, resolve + validate the
 * segment image, call Vertex for generation, then record the outcome in the
 * shared state. Both the success and failure paths record their completion
 * and re-run the ordered circuit-breaker scan BEFORE returning/throwing, so
 * the failure streak is evaluated in submission order — do not reorder.
 *
 * On success the clip buffer is written into `state.results[index]`; the
 * function itself returns nothing.
 *
 * @param {object} segment - DirectorSegment with `image` and `prompt`
 * @param {number} index - Zero-based segment index
 * @param {object} videoOptions - Video output options forwarded to Vertex
 * @param {string|undefined} region - Vertex AI region override
 * @param {object} state - Shared pipeline state (results, completions, breaker)
 * @throws {VideoError} On generation failure or circuit breaker trip
 */
async function generateSingleClip(segment, index, videoOptions, region, state) {
    if (state.circuitOpen) {
        // Breaker already tripped by an earlier failure streak — skip the API call
        throw new VideoError({
            code: VIDEO_ERROR_CODES.DIRECTOR_CLIP_FAILED,
            message: `Clip ${index} skipped — circuit breaker open after ${CIRCUIT_BREAKER_THRESHOLD} consecutive failures`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: true,
            context: { segmentIndex: index },
        });
    }
    const clipStart = Date.now();
    try {
        const imageBuffer = await resolveImageToBuffer(segment.image, index);
        // Validate image buffer (type, dimensions, size limits) before generation
        const imageValidation = validateImageForVideo(imageBuffer);
        if (imageValidation) {
            // validateImageForVideo returns an error object when invalid, falsy when OK
            throw new VideoError({
                code: VIDEO_ERROR_CODES.INVALID_INPUT,
                message: `Segment ${index} image validation failed: ${imageValidation.message}`,
                category: ErrorCategory.EXECUTION,
                severity: ErrorSeverity.HIGH,
                retriable: false,
                context: { segmentIndex: index, validation: imageValidation },
                originalError: imageValidation,
            });
        }
        const result = await generateVideoWithVertex(imageBuffer, segment.prompt, videoOptions, region);
        const clipResult = {
            buffer: result.data,
            processingTime: Date.now() - clipStart,
        };
        // Record success and update results array
        state.results[index] = clipResult;
        state.completions[index] = { status: "success", result: clipResult };
        // Process completions in order to update circuit breaker state
        processOrderedCompletions(state);
    }
    catch (error) {
        const errorObj = error instanceof Error ? error : new Error(String(error));
        // Record failure in completion tracking
        state.completions[index] = { status: "failure", error: errorObj };
        // Process completions in order to update circuit breaker state
        processOrderedCompletions(state);
        throw new VideoError({
            code: VIDEO_ERROR_CODES.DIRECTOR_CLIP_FAILED,
            message: `Clip ${index} generation failed: ${errorObj.message}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: true,
            context: {
                segmentIndex: index,
                consecutiveFailures: state.consecutiveFailures,
            },
            originalError: errorObj,
        });
    }
}
|
|
264
|
+
/**
 * Generate all main clips in parallel with a circuit breaker.
 *
 * The circuit breaker trips after `CIRCUIT_BREAKER_THRESHOLD` consecutive
 * failures, aborting remaining work to avoid wasted API calls.
 *
 * @param {Array<object>} segments - DirectorSegment array
 * @param {object} videoOptions - Video output options forwarded to Vertex
 * @param {string|undefined} region - Vertex AI region override
 * @returns {Promise<Array<object>>} Ordered array of clip results (indexed by segment number)
 * @throws {VideoError} On any clip failure (all clips are mandatory)
 */
async function generateClips(segments, videoOptions, region) {
    const limit = pLimit(CLIP_CONCURRENCY);
    const state = {
        consecutiveFailures: 0,
        circuitOpen: false,
        results: new Array(segments.length).fill(null),
        // One DISTINCT object per slot. `fill({ status: "pending" })` would put
        // the same shared reference in every slot — a latent aliasing bug if
        // any entry were ever mutated in place instead of replaced.
        completions: Array.from({ length: segments.length }, () => ({
            status: "pending",
        })),
        nextExpectedIndex: 0,
    };
    const clipPromises = segments.map((segment, i) => limit(() => generateSingleClip(segment, i, videoOptions, region, state)));
    // Collect results — any failure is fatal
    const settled = await Promise.allSettled(clipPromises);
    const failures = settled.filter((r) => r.status === "rejected");
    if (failures.length > 0) {
        // Surface the first rejection; the rest are summarized in the message
        const firstError = failures[0].reason instanceof Error
            ? failures[0].reason
            : new Error(String(failures[0].reason));
        throw new VideoError({
            code: VIDEO_ERROR_CODES.DIRECTOR_CLIP_FAILED,
            message: `Director Mode: ${failures.length}/${segments.length} clip(s) failed. First: ${firstError.message}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: true,
            context: {
                failedCount: failures.length,
                totalSegments: segments.length,
                circuitBreakerTripped: state.circuitOpen,
            },
            originalError: firstError,
        });
    }
    logger.info("All clips generated successfully", {
        clipCount: segments.length,
        concurrency: CLIP_CONCURRENCY,
    });
    return state.results;
}
|
|
310
|
+
/**
 * Extract boundary frames and generate transition clips in parallel.
 *
 * Transition failures are non-fatal — they degrade to a hard cut.
 * Frame extraction gets one retry before giving up on that transition.
 *
 * Each transition i bridges clip i → clip i+1 using the last frame of the
 * previous clip and the first frame of the next. Work runs through the same
 * concurrency limiter as clip generation (CLIP_CONCURRENCY).
 *
 * @param {Array<object>} clipResults - Ordered clip results (buffer per clip)
 * @param {Array<string>} transitionPrompts - Optional per-transition prompts
 * @param {Array<number>} transitionDurations - Optional per-transition durations (seconds)
 * @param {object} videoOptions - Output options (aspectRatio, resolution, audio)
 * @param {string|undefined} region - Vertex AI region override
 * @returns {Promise<Array<object>>} One entry per boundary; `buffer` is null
 *   when the transition failed and the merge should hard-cut instead
 */
async function generateTransitions(clipResults, transitionPrompts, transitionDurations, videoOptions, region) {
    const transitionCount = clipResults.length - 1;
    if (transitionCount === 0) {
        // Single clip — nothing to bridge
        return [];
    }
    const limit = pLimit(CLIP_CONCURRENCY);
    const transitionPromises = Array.from({ length: transitionCount }, (_, i) => limit(async () => {
        const transStart = Date.now();
        // Fall back to module defaults when the caller supplied no per-index value
        const transPrompt = transitionPrompts[i] ?? DEFAULT_TRANSITION_PROMPT;
        const transDuration = transitionDurations[i] ?? DEFAULT_TRANSITION_DURATION;
        try {
            // Extract boundary frames (with one retry each)
            const lastFrameOfPrev = await extractFrameWithRetry(clipResults[i].buffer, "last", i);
            const firstFrameOfNext = await extractFrameWithRetry(clipResults[i + 1].buffer, "first", i + 1);
            // Generate transition clip
            const transBuffer = await generateTransitionWithVertex(lastFrameOfPrev, firstFrameOfNext, transPrompt, {
                aspectRatio: videoOptions.aspectRatio,
                resolution: videoOptions.resolution,
                audio: videoOptions.audio,
            }, transDuration, region);
            logger.debug(`Transition ${i}→${i + 1} generated`, {
                duration: transDuration,
                size: transBuffer.length,
                elapsedMs: Date.now() - transStart,
            });
            return {
                buffer: transBuffer,
                fromSegment: i,
                toSegment: i + 1,
                duration: transDuration,
                processingTime: Date.now() - transStart,
            };
        }
        catch (error) {
            // Non-fatal — fall back to hard cut
            logger.warn(`Transition ${i}→${i + 1} failed, falling back to hard cut`, {
                error: error instanceof Error ? error.message : String(error),
            });
            // null buffer signals "no transition" to the merge phase
            return {
                buffer: null,
                fromSegment: i,
                toSegment: i + 1,
                duration: 0,
                processingTime: Date.now() - transStart,
            };
        }
    }));
    return Promise.all(transitionPromises);
}
|
|
365
|
+
/**
 * Extract a frame from a clip buffer, retrying once on failure.
 *
 * @param {Buffer} clipBuffer - Encoded video clip
 * @param {"first"|"last"} position - Which boundary frame to extract
 * @param {number} clipIndex - Clip index, used for logging only
 * @returns {Promise<Buffer>} Extracted frame
 */
async function extractFrameWithRetry(clipBuffer, position, clipIndex) {
    let extractFn;
    if (position === "first") {
        extractFn = extractFirstFrame;
    }
    else {
        extractFn = extractLastFrame;
    }
    try {
        return await extractFn(clipBuffer);
    }
    catch (initialFailure) {
        const detail = initialFailure instanceof Error
            ? initialFailure.message
            : String(initialFailure);
        logger.warn(`Frame extraction (${position}) failed for clip ${clipIndex}, retrying once`, {
            error: detail,
        });
        // Single retry — propagate on second failure
        return await extractFn(clipBuffer);
    }
}
|
|
381
|
+
// ============================================================================
|
|
382
|
+
// PHASE 3: SEQUENTIAL MERGE
|
|
383
|
+
// ============================================================================
|
|
384
|
+
/**
 * Build an interleaved buffer array (clip, transition, clip, …) and merge.
 *
 * Transitions with a null buffer (failed generation) are skipped, producing
 * a hard cut between the adjacent clips.
 *
 * @param {Array<object>} clipResults - Ordered clip results (buffer per clip)
 * @param {Array<object>} transitionResults - One entry per clip boundary
 * @returns {Promise<Buffer>} The merged MP4 buffer
 * @throws {VideoError} DIRECTOR_MERGE_FAILED if ffmpeg merging fails (non-retriable)
 */
async function mergeAllClips(clipResults, transitionResults) {
    const ordered = [];
    clipResults.forEach((clip, idx) => {
        ordered.push(clip.buffer);
        const transition = transitionResults[idx];
        // Only interleave a transition when one was generated for this boundary
        if (transition && transition.buffer) {
            ordered.push(transition.buffer);
        }
    });
    try {
        return await mergeVideoBuffers(ordered);
    }
    catch (error) {
        const successfulTransitions = transitionResults.filter((t) => t.buffer).length;
        throw new VideoError({
            code: VIDEO_ERROR_CODES.DIRECTOR_MERGE_FAILED,
            message: `Director Mode merge failed: ${error instanceof Error ? error.message : String(error)}`,
            category: ErrorCategory.EXECUTION,
            severity: ErrorSeverity.HIGH,
            retriable: false,
            context: {
                clipCount: clipResults.length,
                transitionCount: successfulTransitions,
            },
            originalError: error instanceof Error ? error : undefined,
        });
    }
}
|
|
415
|
+
// ============================================================================
// PIPELINE ORCHESTRATOR
// ============================================================================
/**
 * Run the complete Director Mode pipeline end-to-end.
 *
 * Stages:
 *   1. Generate every clip via generateClips (bounded concurrency)
 *   2. Generate inter-clip transitions in parallel (a failed transition
 *      simply becomes a hard cut — no buffer recorded for it)
 *   3. Merge clips and transitions sequentially into one MP4 buffer
 *
 * @param segments - Array of DirectorSegment objects (2-10)
 * @param videoOptions - Video output options (resolution, length, aspectRatio, audio)
 * @param directorOptions - Director Mode options (transition prompts/durations)
 * @param region - Vertex AI region override
 * @returns VideoGenerationResult with merged video and Director metadata
 */
export async function executeDirectorPipeline(segments, videoOptions = {}, directorOptions = {}, region) {
    const startedAt = Date.now();
    const segmentCount = segments.length;
    const transitionCount = segmentCount - 1;
    const clipDuration = videoOptions.length ?? 6;
    const transitionPrompts = directorOptions.transitionPrompts ?? [];
    const transitionDurations = directorOptions.transitionDurations ?? [];
    logger.info("Starting Director Mode pipeline", {
        segmentCount,
        transitionCount,
        concurrency: CLIP_CONCURRENCY,
        clipDuration,
        resolution: videoOptions.resolution ?? "720p",
    });
    // Phase 1: turn every segment into a clip.
    const clipResults = await generateClips(segments, videoOptions, region);
    logger.info("Phase 1 complete — all clips generated", {
        clipCount: clipResults.length,
        elapsedMs: Date.now() - startedAt,
    });
    // Phase 2: bridge adjacent clips; entries without a buffer are hard cuts.
    const transitionResults = await generateTransitions(clipResults, transitionPrompts, transitionDurations, videoOptions, region);
    const successfulTransitions = transitionResults.filter((t) => t.buffer).length;
    const hardCuts = transitionCount - successfulTransitions;
    if (hardCuts > 0) {
        logger.warn(`${hardCuts}/${transitionCount} transition(s) fell back to hard cut`);
    }
    logger.info("Phase 2 complete — transitions generated", {
        successful: successfulTransitions,
        hardCuts,
        elapsedMs: Date.now() - startedAt,
    });
    // Phase 3: stitch clips and transitions into one MP4.
    const mergedBuffer = await mergeAllClips(clipResults, transitionResults);
    // Assemble result metadata from what actually ran.
    const actualTransitionDurations = transitionResults.map((t) => t.duration);
    const totalDuration = segmentCount * clipDuration +
        actualTransitionDurations.reduce((sum, d) => sum + d, 0);
    const resolution = videoOptions.resolution ?? "720p";
    const aspectRatio = videoOptions.aspectRatio ?? "16:9";
    // Pixel dimensions follow from resolution tier and orientation.
    const portrait = aspectRatio === "9:16";
    const [width, height] = resolution === "1080p"
        ? (portrait ? [1080, 1920] : [1920, 1080])
        : (portrait ? [720, 1280] : [1280, 720]);
    const dimensions = { width, height };
    const processingTime = Date.now() - startedAt;
    logger.info("Director Mode pipeline complete", {
        totalDuration,
        segmentCount,
        transitionsGenerated: successfulTransitions,
        hardCuts,
        mergedSize: mergedBuffer.length,
        processingTime,
    });
    return {
        data: mergedBuffer,
        mediaType: "video/mp4",
        metadata: {
            duration: totalDuration,
            dimensions,
            model: "veo-3.1-generate-001",
            provider: "vertex",
            aspectRatio,
            audioEnabled: videoOptions.audio ?? true,
            processingTime,
            segmentCount,
            transitionCount: successfulTransitions,
            clipDuration,
            transitionDurations: actualTransitionDurations,
            segments: clipResults.map((clip, index) => ({
                index,
                duration: clipDuration,
                processingTime: clip.processingTime,
            })),
            transitions: transitionResults.map((t) => ({
                fromSegment: t.fromSegment,
                toSegment: t.toSegment,
                duration: t.duration,
                processingTime: t.processingTime,
            })),
        },
    };
}
|