mulmocast 2.0.4 → 2.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/agents/image_genai_agent.js +20 -4
- package/lib/agents/movie_genai_agent.js +105 -48
- package/package.json +1 -1
- package/scripts/test/test_genai2.json +25 -0
- package/scripts/test/test_genai2.json~ +84 -0
- package/scripts/test/test_genai_movie.json +26 -0
- package/scripts/test/test_genai_movie.json~ +22 -0
package/lib/agents/image_genai_agent.js CHANGED

@@ -22,11 +22,13 @@ export const ratio2BlankPath = (aspectRatio) => {
     }
     return blankImagePath();
 };
-const getGeminiContents = (prompt,
+const getGeminiContents = (prompt, referenceImages, aspectRatio) => {
     const contents = [{ text: prompt }];
     const images = [...(referenceImages ?? [])];
     // NOTE: There is no way to explicitly specify the aspect ratio for Gemini. This is just a hint.
-
+    if (aspectRatio) {
+        images.push(ratio2BlankPath(aspectRatio));
+    }
     images.forEach((imagePath) => {
         const imageData = fs.readFileSync(imagePath);
         const base64Image = imageData.toString("base64");
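The new aspectRatio argument is only a hint: the Gemini image API has no explicit aspect-ratio setting, so getGeminiContents appends a blank image of the requested ratio (via ratio2BlankPath from this same file) to the reference images and lets the model mirror its shape. A minimal sketch of the idea, assuming the inlineData part shape that @google/genai accepts for inline images; the fs wiring and prompt text here are illustrative:

    // Sketch only: turn a blank 16:9 image into an aspect-ratio hint for Gemini.
    import fs from "fs";

    const contents = [{ text: "a woman walking through a busy Tokyo street at night" }];
    const hintPath = ratio2BlankPath("16:9"); // blank-image helper exported by this agent file
    const base64Image = fs.readFileSync(hintPath).toString("base64");
    contents.push({ inlineData: { mimeType: "image/png", data: base64Image } });
    // contents is then sent with ai.models.generateContent({ model, contents }).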
@@ -71,11 +73,25 @@ export const imageGenAIAgent = async ({ namedInputs, params, config, }) => {
     }
     try {
         const ai = new GoogleGenAI({ apiKey });
-        if (model === "gemini-2.5-flash-image"
-        const contents = getGeminiContents(prompt,
+        if (model === "gemini-2.5-flash-image") {
+            const contents = getGeminiContents(prompt, referenceImages, aspectRatio);
             const response = await ai.models.generateContent({ model, contents });
             return geminiFlashResult(response);
         }
+        else if (model === "gemini-3-pro-image-preview") {
+            const contents = getGeminiContents(prompt, referenceImages);
+            const response = await ai.models.generateContent({
+                model,
+                contents,
+                config: {
+                    imageConfig: {
+                        // '1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9', or '21:9'.
+                        aspectRatio,
+                    },
+                },
+            });
+            return geminiFlashResult(response);
+        }
         else {
             const response = await ai.models.generateImages({
                 model,
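With gemini-3-pro-image-preview the ratio is no longer hinted through a blank reference image; it is passed explicitly via config.imageConfig, as the hunk above shows. A standalone sketch of that call, assuming an API key in GEMINI_API_KEY; the model name, the imageConfig shape, and the accepted ratio values are taken from the diff:

    // Sketch: explicit aspect ratio with the Gemini 3 Pro image preview model.
    import { GoogleGenAI } from "@google/genai";

    const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
    const response = await ai.models.generateContent({
      model: "gemini-3-pro-image-preview",
      contents: [{ text: "a woman walking through a busy Tokyo street at night" }],
      config: {
        imageConfig: {
          aspectRatio: "9:16", // one of '1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9', '21:9'
        },
      },
    });
    // The agent extracts the generated image from this response via geminiFlashResult().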
package/lib/agents/movie_genai_agent.js CHANGED

@@ -1,7 +1,7 @@
 import { readFileSync } from "fs";
 import { GraphAILogger, sleep } from "graphai";
-import { apiKeyMissingError, agentGenerationError, agentInvalidResponseError, imageAction, movieFileTarget, videoDurationTarget, hasCause, } from "../utils/error_cause.js";
 import { GoogleGenAI, PersonGeneration } from "@google/genai";
+import { apiKeyMissingError, agentGenerationError, agentInvalidResponseError, imageAction, movieFileTarget, videoDurationTarget, hasCause, } from "../utils/error_cause.js";
 import { getModelDuration, provider2MovieAgent } from "../utils/provider2agent.js";
 export const getAspectRatio = (canvasSize) => {
     if (canvasSize.width > canvasSize.height) {
@@ -14,6 +14,105 @@ export const getAspectRatio = (canvasSize) => {
         return "1:1";
     }
 };
+const pollUntilDone = async (ai, operation) => {
+    const response = { operation };
+    while (!response.operation.done) {
+        await sleep(5000);
+        response.operation = await ai.operations.getVideosOperation(response);
+    }
+    return response;
+};
+const getVideoFromResponse = (response, iteration) => {
+    const iterationInfo = iteration !== undefined ? ` in iteration ${iteration}` : "";
+    if (!response.operation.response?.generatedVideos) {
+        throw new Error(`No video${iterationInfo}: ${JSON.stringify(response.operation, null, 2)}`, {
+            cause: agentInvalidResponseError("movieGenAIAgent", imageAction, movieFileTarget),
+        });
+    }
+    const video = response.operation.response.generatedVideos[0].video;
+    if (!video) {
+        throw new Error(`No video${iterationInfo}`, {
+            cause: agentInvalidResponseError("movieGenAIAgent", imageAction, movieFileTarget),
+        });
+    }
+    return video;
+};
+const loadImageAsBase64 = (imagePath) => {
+    const buffer = readFileSync(imagePath);
+    return {
+        imageBytes: buffer.toString("base64"),
+        mimeType: "image/png",
+    };
+};
+const downloadVideo = async (ai, video, movieFile) => {
+    await ai.files.download({
+        file: video,
+        downloadPath: movieFile,
+    });
+    await sleep(5000); // HACK: Without this, the file is not ready yet.
+    return { saved: movieFile };
+};
+const createVeo31Payload = (model, prompt, aspectRatio, source) => ({
+    model,
+    prompt,
+    config: {
+        aspectRatio,
+        resolution: "720p",
+        numberOfVideos: 1,
+    },
+    ...source,
+});
+const generateExtendedVideo = async (ai, model, prompt, aspectRatio, imagePath, requestedDuration, movieFile) => {
+    const initialDuration = 8;
+    const maxExtensionDuration = 8;
+    const extensionsNeeded = Math.ceil((requestedDuration - initialDuration) / maxExtensionDuration);
+    GraphAILogger.info(`Veo 3.1 video extension: ${extensionsNeeded} extensions needed for ${requestedDuration}s target`);
+    const generateIteration = async (iteration, accumulatedDuration, previousVideo) => {
+        const isInitial = iteration === 0;
+        const remainingDuration = requestedDuration - accumulatedDuration;
+        const extensionDuration = isInitial ? initialDuration : (getModelDuration("google", model, remainingDuration) ?? maxExtensionDuration);
+        const getSource = () => {
+            if (isInitial)
+                return imagePath ? { image: loadImageAsBase64(imagePath) } : undefined;
+            return previousVideo?.uri ? { video: { uri: previousVideo.uri } } : undefined;
+        };
+        const payload = createVeo31Payload(model, prompt, aspectRatio, getSource());
+        GraphAILogger.info(isInitial ? "Generating initial 8s video..." : `Extending video: iteration ${iteration}/${extensionsNeeded} (+${extensionDuration}s)...`);
+        const operation = await ai.models.generateVideos(payload);
+        const response = await pollUntilDone(ai, operation);
+        const video = getVideoFromResponse(response, iteration);
+        const totalDuration = accumulatedDuration + extensionDuration;
+        GraphAILogger.info(`Video ${isInitial ? "generated" : "extended"}: ~${totalDuration}s total`);
+        return { video, duration: totalDuration };
+    };
+    const result = await Array.from({ length: extensionsNeeded + 1 }).reduce(async (prev, _, index) => {
+        const { video, duration } = await prev;
+        return generateIteration(index, duration, video);
+    }, Promise.resolve({ video: undefined, duration: 0 }));
+    if (!result.video) {
+        throw new Error("Failed to generate extended video", {
+            cause: agentInvalidResponseError("movieGenAIAgent", imageAction, movieFileTarget),
+        });
+    }
+    return downloadVideo(ai, result.video, movieFile);
+};
+const generateStandardVideo = async (ai, model, prompt, aspectRatio, imagePath, duration, movieFile) => {
+    const isVeo3 = model === "veo-3.0-generate-001" || model === "veo-3.1-generate-preview";
+    const payload = {
+        model,
+        prompt,
+        config: {
+            durationSeconds: isVeo3 ? undefined : duration,
+            aspectRatio,
+            personGeneration: imagePath ? undefined : PersonGeneration.ALLOW_ALL,
+        },
+        image: imagePath ? loadImageAsBase64(imagePath) : undefined,
+    };
+    const operation = await ai.models.generateVideos(payload);
+    const response = await pollUntilDone(ai, operation);
+    const video = getVideoFromResponse(response);
+    return downloadVideo(ai, video, movieFile);
+};
 export const movieGenAIAgent = async ({ namedInputs, params, config, }) => {
     const { prompt, imagePath, movieFile } = namedInputs;
     const aspectRatio = getAspectRatio(params.canvasSize);
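generateExtendedVideo chains its rounds with an async reduce instead of a loop: each callback awaits the previous promise, so the Veo 3.1 extensions run strictly one after another, each seeded with the previous clip's URI. A minimal sketch of that pattern, with the generate/poll/extract round trip replaced by a hypothetical step() for clarity:

    // Sketch of the sequential async reduce used by generateExtendedVideo.
    const step = async (iteration, acc) => {
      // stands in for generateVideos -> pollUntilDone -> getVideoFromResponse
      return { video: `clip-${iteration}`, duration: acc.duration + 8 };
    };

    const rounds = 3; // e.g. one initial clip plus two extensions
    const result = await Array.from({ length: rounds }).reduce(
      async (prev, _, index) => step(index, await prev),
      Promise.resolve({ video: undefined, duration: 0 }),
    );
    // Rounds execute in order; here result is { video: "clip-2", duration: 24 }.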
@@ -33,54 +132,12 @@ export const movieGenAIAgent = async ({ namedInputs, params, config, }) => {
         });
     }
     const ai = new GoogleGenAI({ apiKey });
-
-
-        prompt,
-        config: {
-            durationSeconds: duration,
-            aspectRatio,
-            personGeneration: undefined,
-        },
-        image: undefined,
-    };
-    if (model === "veo-3.0-generate-001" || model === "veo-3.1-generate-preview") {
-        payload.config.durationSeconds = undefined;
+    // Veo 3.1: Video extension mode for videos longer than 8s
+    if (model === "veo-3.1-generate-preview" && requestedDuration > 8 && params.canvasSize) {
+        return generateExtendedVideo(ai, model, prompt, aspectRatio, imagePath, requestedDuration, movieFile);
     }
-
-
-        const imageBytes = buffer.toString("base64");
-        payload.image = {
-            imageBytes,
-            mimeType: "image/png",
-        };
-    }
-    else {
-        payload.config.personGeneration = PersonGeneration.ALLOW_ALL;
-    }
-    const operation = await ai.models.generateVideos(payload);
-    const response = { operation };
-    // Poll the operation status until the video is ready.
-    while (!response.operation.done) {
-        await sleep(5000);
-        response.operation = await ai.operations.getVideosOperation(response);
-    }
-    if (!response.operation.response?.generatedVideos) {
-        throw new Error(`No video: ${JSON.stringify(response.operation, null, 2)}`, {
-            cause: agentInvalidResponseError("movieGenAIAgent", imageAction, movieFileTarget),
-        });
-    }
-    const video = response.operation.response.generatedVideos[0].video;
-    if (!video) {
-        throw new Error(`No video: ${JSON.stringify(response.operation, null, 2)}`, {
-            cause: agentInvalidResponseError("movieGenAIAgent", imageAction, movieFileTarget),
-        });
-    }
-    await ai.files.download({
-        file: video,
-        downloadPath: movieFile,
-    });
-    await sleep(5000); // HACK: Without this, the file is not ready yet.
-    return { saved: movieFile };
+    // Standard mode
+    return generateStandardVideo(ai, model, prompt, aspectRatio, imagePath, duration, movieFile);
     }
     catch (error) {
         GraphAILogger.info("Failed to generate movie:", error.message);
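After this refactor movieGenAIAgent only routes: a veo-3.1-generate-preview request longer than 8 seconds (with a canvasSize to derive the aspect ratio from) takes the extension path, and everything else goes through generateStandardVideo. A hedged sketch of a call that would hit the extension path; the namedInputs fields mirror the destructuring shown above, while the placement of model/duration and the config/apiKey shape are assumptions not confirmed by these hunks:

    // Sketch: a 20-second Veo 3.1 beat routed through the agent (input placement partly assumed).
    const result = await movieGenAIAgent({
      namedInputs: {
        prompt: "A butterfly flying in slow motion",
        imagePath: undefined,                 // optional first-frame image
        movieFile: "output/butterfly.mp4",
      },
      params: {
        model: "veo-3.1-generate-preview",    // assumed to arrive via params
        duration: 20,                         // > 8s, so generateExtendedVideo is used
        canvasSize: { width: 720, height: 1280 },
      },
      config: { apiKey: process.env.GEMINI_API_KEY }, // assumed config shape
    });
    // On success the agent resolves to { saved: "output/butterfly.mp4" }.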
package/package.json CHANGED

package/scripts/test/test_genai2.json ADDED

@@ -0,0 +1,25 @@
+{
+  "$mulmocast": { "version": "1.1" },
+  "imageParams": {
+    "provider": "google",
+    "style": "<style>Photo realistic.</style>"
+  },
+  "canvasSize": {
+    "width": 720,
+    "height": 1280
+  },
+  "movieParams": {
+    "provider": "google"
+  },
+  "lang": "en",
+  "beats": [
+    {
+      "id": "gemini_3_pro_image_preview",
+      "text": "image generated by gemini-3-pro-image-preview",
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "imageParams": {
+        "model": "gemini-3-pro-image-preview"
+      }
+    }
+  ]
+}
package/scripts/test/test_genai2.json~ ADDED

@@ -0,0 +1,84 @@
+{
+  "$mulmocast": { "version": "1.1" },
+  "imageParams": {
+    "provider": "google",
+    "style": "<style>Photo realistic.</style>"
+  },
+  "movieParams": {
+    "provider": "google"
+  },
+  "lang": "en",
+  "beats": [
+    {
+      "id": "gemini_3_pro_image_preview",
+      "text": "image generated by gemini-3-pro-image-preview",
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "imageParams": {
+        "model": "gemini-3-pro-image-preview"
+      }
+    },
+    {
+      "id": "gemini_2_5_flash_image",
+      "text": "image generated by gemini-2.5-flash-image",
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "imageParams": {
+        "model": "gemini-2.5-flash-image"
+      }
+    },
+    {
+      "id": "imagen_4",
+      "text": "image generated by imagen-4",
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses"
+    },
+    {
+      "id": "imagen_4_ultra",
+      "text": "image generated by imagen-4",
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "imageParams": {
+        "model": "imagen-4.0-ultra-generate-preview-06-06"
+      }
+    },
+    {
+      "id": "genai_veo2",
+      "text": "movie generated by veo2",
+      "duration": 5,
+      "moviePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses"
+    },
+    {
+      "id": "genai_veo2_image",
+      "text": "movie generated by veo2 with image",
+      "duration": 5,
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "moviePrompt": "a woman takes a selfie with her phone"
+    },
+    {
+      "id": "genai_veo3",
+      "text": "movie generated by veo3",
+      "moviePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "movieParams": {
+        "model": "veo-3.0-generate-001"
+      }
+    },
+    {
+      "id": "genai_veo3_1",
+      "text": "movie generated by veo3_1",
+      "moviePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "movieParams": {
+        "model": "veo-3.1-generate-preview"
+      }
+    },
+    {
+      "id": "genai_veo3_image",
+      "text": "movie generated by veo3",
+      "duration": 5,
+      "imagePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "imageParams": {
+        "model": "gemini-2.5-flash-image"
+      },
+      "moviePrompt": "a woman is walking through a busy Tokyo street at night, she is wearing dark sunglasses",
+      "movieParams": {
+        "model": "veo-3.0-generate-001"
+      }
+    }
+  ]
+}
package/scripts/test/test_genai_movie.json ADDED

@@ -0,0 +1,26 @@
+{
+  "$mulmocast": { "version": "1.1" },
+  "imageParams": {
+    "provider": "google",
+    "style": "<style>Photo realistic.</style>"
+  },
+  "canvasSize": {
+    "width": 720,
+    "height": 1280
+  },
+  "lang": "en",
+  "movieParams": {
+    "provider": "google",
+    "model": "veo-3.1-generate-preview"
+  },
+  "beats": [
+    {
+      "moviePrompt": "A butterfly flying in slow motion",
+      "duration": 20
+    },
+    {
+      "moviePrompt": "Running Tiger",
+      "duration": 25
+    }
+  ]
+}
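Both beats in this manuscript exceed Veo 3.1's 8-second clips, so they exercise the new extension path: generateExtendedVideo plans Math.ceil((duration - 8) / 8) extensions on top of the initial 8-second clip. A quick check of that arithmetic for the durations above:

    // extensionsNeeded for the beats in test_genai_movie.json, using the constants from generateExtendedVideo.
    const extensionsNeeded = (requestedDuration) => Math.ceil((requestedDuration - 8) / 8);
    console.log(extensionsNeeded(20)); // 2 -> 1 initial clip + 2 extensions of up to 8s each
    console.log(extensionsNeeded(25)); // 3 -> 1 initial clip + 3 extensions of up to 8s each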
package/scripts/test/test_genai_movie.json~ ADDED

@@ -0,0 +1,22 @@
+{
+  "$mulmocast": { "version": "1.1" },
+  "imageParams": {
+    "provider": "google",
+    "style": "<style>Photo realistic.</style>"
+  },
+  "canvasSize": {
+    "width": 720,
+    "height": 1280
+  },
+  "lang": "en",
+  "movieParams": {
+    "provider": "google",
+    "model": "veo-3.1-generate-preview"
+  },
+  "beats": [
+    {
+      "moviePrompt": "A butterfly flying in slow motion",
+      "duration": 20
+    }
+  ]
+}