mulmocast 1.2.2 → 1.2.3
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- package/assets/templates/ani.json +8 -3
- package/assets/templates/html.json +0 -1
- package/lib/actions/audio.js +1 -0
- package/lib/actions/captions.js +2 -2
- package/lib/actions/image_agents.js +3 -3
- package/lib/actions/images.js +5 -0
- package/lib/actions/translate.js +2 -2
- package/lib/data/bgmAssets.d.ts +18 -0
- package/lib/data/bgmAssets.js +101 -0
- package/lib/data/index.d.ts +1 -0
- package/lib/data/index.js +1 -0
- package/lib/data/promptTemplates.d.ts +13 -74
- package/lib/data/promptTemplates.js +7 -110
- package/lib/data/scriptTemplates.d.ts +1 -1
- package/lib/data/scriptTemplates.js +1 -0
- package/lib/data/templateDataSet.d.ts +0 -2
- package/lib/data/templateDataSet.js +1 -9
- package/lib/methods/mulmo_studio_context.d.ts +1 -1
- package/lib/methods/mulmo_studio_context.js +9 -8
- package/lib/types/schema.d.ts +45 -45
- package/lib/types/schema.js +9 -9
- package/lib/types/type.d.ts +1 -1
- package/lib/utils/filters.js +3 -3
- package/package.json +1 -1
- package/scripts/templates/image_prompt_only_template.json +1 -0
- package/assets/templates/ghibli_image_only.json +0 -28
- package/assets/templates/ghibli_shorts.json +0 -33
package/assets/templates/ani.json CHANGED
@@ -1,5 +1,5 @@
 {
-"title": "Presentation
+"title": "Presentation by Ani",
 "description": "Template for presentation with Ani.",
 "systemPrompt": "言葉づかいは思いっきりツンデレにして。Another AI will generate comic for each beat based on the image prompt of that beat. You don't need to specify the style of the image, just describe the scene. Mention the reference in one of beats, if it exists. Use the JSON below as a template. Create appropriate amount of beats, and make sure the beats are coherent and flow well.",
 "presentationStyle": {
@@ -16,7 +16,13 @@
 "speakers": {
 "Presenter": {
 "voiceId": "shimmer",
-"speechOptions": { "instruction": "Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl." }
+"speechOptions": { "instruction": "Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl." },
+"lang": {
+"ja": {
+"provider": "nijivoice",
+"voiceId": "9d9ed276-49ee-443a-bc19-26e6136d05f0"
+}
+}
 }
 }
 },
@@ -26,7 +32,6 @@
 "url": "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3"
 }
 },
-"lang": "en",
 "canvasSize": {
 "width": 1024,
 "height": 1536
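The net effect of the ani.json changes is that the template keeps its default OpenAI voice but gains a per-language override, so Japanese output is synthesized with a nijivoice voice, while the hard-coded "lang": "en" default is dropped. A minimal sketch of the resulting speaker shape (simplified, illustrative types; not the package's actual type definitions):

```typescript
// Illustrative only: simplified stand-in types for a speechParams.speakers
// entry, mirroring the values added to ani.json above.
type LangVoiceOverride = { provider: string; voiceId: string };

type SpeakerConfig = {
  voiceId: string;
  speechOptions?: { instruction?: string; speed?: number };
  // Optional per-language overrides, keyed by language code (e.g. "ja").
  lang?: Record<string, LangVoiceOverride>;
};

const presenter: SpeakerConfig = {
  voiceId: "shimmer",
  speechOptions: { instruction: "Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl." },
  lang: {
    ja: { provider: "nijivoice", voiceId: "9d9ed276-49ee-443a-bc19-26e6136d05f0" },
  },
};
```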
package/lib/actions/audio.js CHANGED
package/lib/actions/captions.js CHANGED
@@ -26,7 +26,7 @@ const graph_data = {
 agent: async (namedInputs) => {
 const { beat, context, index } = namedInputs;
 try {
-MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, true);
+MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, beat.id, true);
 const captionParams = mulmoCaptionParamsSchema.parse({ ...context.studio.script.captionParams, ...beat.captionParams });
 const canvasSize = MulmoPresentationStyleMethods.getCanvasSize(context.presentationStyle);
 const imagePath = getCaptionImagePath(context, index);
@@ -46,7 +46,7 @@ const graph_data = {
 return imagePath;
 }
 finally {
-MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, false);
+MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, beat.id, false);
 }
 },
 inputs: {
package/lib/actions/image_agents.js CHANGED
@@ -64,13 +64,13 @@ export const imagePluginAgent = async (namedInputs) => {
 const imagePath = getBeatPngImagePath(context, index);
 const plugin = MulmoBeatMethods.getPlugin(beat);
 try {
-MulmoStudioContextMethods.setBeatSessionState(context, "image", index, true);
+MulmoStudioContextMethods.setBeatSessionState(context, "image", index, beat.id, true);
 const processorParams = { beat, context, imagePath, ...htmlStyle(context, beat) };
 await plugin.process(processorParams);
-MulmoStudioContextMethods.setBeatSessionState(context, "image", index, false);
+MulmoStudioContextMethods.setBeatSessionState(context, "image", index, beat.id, false);
 }
 catch (error) {
-MulmoStudioContextMethods.setBeatSessionState(context, "image", index, false);
+MulmoStudioContextMethods.setBeatSessionState(context, "image", index, beat.id, false);
 throw error;
 }
 };
package/lib/actions/images.js CHANGED
@@ -88,6 +88,7 @@ const beat_graph_data = {
 force: [":context.force", ":forceImage"],
 file: ":preprocessor.htmlPath",
 index: ":__mapIndex",
+id: ":beat.id",
 mulmoContext: ":context",
 sessionType: "html",
 },
@@ -130,6 +131,7 @@ const beat_graph_data = {
 force: [":context.force", ":forceImage"],
 file: ":preprocessor.imagePath",
 index: ":__mapIndex",
+id: ":beat.id",
 mulmoContext: ":context",
 sessionType: "image",
 },
@@ -155,6 +157,7 @@ const beat_graph_data = {
 force: [":context.force", ":forceMovie"],
 file: ":preprocessor.movieFile",
 index: ":__mapIndex",
+id: ":beat.id",
 sessionType: "movie",
 mulmoContext: ":context",
 },
@@ -214,6 +217,7 @@ const beat_graph_data = {
 force: [":context.force"],
 file: ":preprocessor.soundEffectFile",
 index: ":__mapIndex",
+id: ":beat.id",
 sessionType: "soundEffect",
 mulmoContext: ":context",
 },
@@ -237,6 +241,7 @@ const beat_graph_data = {
 force: [":context.force"],
 file: ":preprocessor.lipSyncFile",
 index: ":__mapIndex",
+id: ":beat.id",
 sessionType: "lipSync",
 mulmoContext: ":context",
 },
package/lib/actions/translate.js CHANGED
@@ -184,11 +184,11 @@ const localizedTextCacheAgentFilter = async (context, next) => {
 return { text: multiLingual.multiLingualTexts[targetLang].text };
 }
 try {
-MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, true);
+MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, beat.id, true);
 return await next(context);
 }
 finally {
-MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, false);
+MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, beat.id, false);
 }
 };
 const agentFilters = [
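Across captions.js, image_agents.js, and translate.js the change is the same: every call to MulmoStudioContextMethods.setBeatSessionState now passes the beat's id between the index and the boolean flag, and images.js feeds id: ":beat.id" into the cache inputs for the same reason. A minimal sketch of the call pattern, assuming a local stand-in for the context methods (runBeatTask is an illustrative helper, not a mulmocast export):

```typescript
// Sketch only: a stand-in interface mirroring the updated declaration in
// lib/methods/mulmo_studio_context.d.ts. The real implementation lives in the
// package; this wrapper just shows the try/finally shape used by the agents.
type BeatSessionType =
  | "audio" | "image" | "movie" | "multiLingual" | "caption"
  | "html" | "imageReference" | "soundEffect" | "lipSync";

interface StudioContextMethods {
  setBeatSessionState(context: unknown, sessionType: BeatSessionType, index: number, id: string | undefined, value: boolean): void;
}

async function runBeatTask<T>(
  methods: StudioContextMethods,
  context: unknown,
  sessionType: BeatSessionType,
  index: number,
  beat: { id?: string },
  work: () => Promise<T>,
): Promise<T> {
  try {
    // Flag the beat as in-session; beat.id may be undefined, in which case the
    // context method falls back to an index-based key.
    methods.setBeatSessionState(context, sessionType, index, beat.id, true);
    return await work();
  } finally {
    methods.setBeatSessionState(context, sessionType, index, beat.id, false);
  }
}
```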
package/lib/data/bgmAssets.d.ts ADDED
@@ -0,0 +1,18 @@
+type BgmAsset = {
+name: string;
+title: string;
+url: string;
+suno_url: string;
+date: string;
+duration: string;
+account: string;
+original_license: string;
+prompt: string;
+model: string;
+};
+export type BgmAssets = {
+license: string;
+bgms: BgmAsset[];
+};
+export declare const bgmAssets: BgmAssets;
+export {};
package/lib/data/bgmAssets.js ADDED
@@ -0,0 +1,101 @@
+export const bgmAssets = {
+license: "Free to distribute as the BMG of media generated by MulmoCast, including commercial use.",
+bgms: [
+{
+name: "story001.mp3",
+title: "Whispered Melody",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story001.mp3",
+suno_url: "https://suno.com/s/v6zer50aQJu8Y0qA",
+date: "2025-06-17",
+duration: "03:17",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "instrumental, smooth, piano",
+model: "v4.5 beta",
+},
+{
+name: "story002.mp3",
+title: "Rise and Shine",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story002.mp3",
+suno_url: "https://suno.com/s/mJnvyu3UXnkdAPfQ",
+date: "2025-06-17",
+duration: "04:04",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "techno, inspiring, piano",
+model: "v4.5 beta",
+},
+{
+name: "story003.mp3",
+title: "Chasing the Sunset",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story003.mp3",
+suno_url: "https://suno.com/s/2zGjMQ9vURJbaMZA",
+date: "2025-06-17",
+duration: "02:49",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "piano, inspiring, sunset",
+model: "v4.5 beta",
+},
+{
+name: "story004.mp3",
+title: "Whispering Keys",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story004.mp3",
+suno_url: "https://suno.com/s/0SFoBRsBWsncw6tu",
+date: "2025-06-17",
+duration: "04:00",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "Piano, classical, ambient",
+model: "v4",
+},
+{
+name: "story005.mp3",
+title: "Whisper of Ivory",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story005.mp3",
+suno_url: "https://suno.com/s/0SFoBRsBWsncw6tu",
+date: "2025-06-17",
+duration: "04:00",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "Piano solo, classical, ambient",
+model: "v4",
+},
+{
+name: "theme001.mp3",
+title: "Rise of the Flame",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/theme001.mp3",
+suno_url: "https://suno.com/s/WhYOf8oJYhBgSKET",
+date: "2025-06-20",
+duration: "03:23",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "Olympic Theme, classical, emotional",
+model: "v4",
+},
+{
+name: "olympic001.mp3",
+title: "Olympic-style Theme Music",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/olympic001.mp3",
+suno_url: "https://suno.com/s/32wpnmCrkFVvkTSQ",
+date: "2025-07-17",
+duration: "02:54",
+account: "@snakajima",
+original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+prompt: "Epic orchestral fanfare in the style of John Williams' Olympic Fanfare and Theme. Bright brass fanfare, soaring strings, powerful percussion, and heroic French horn melodies. Triumphant and majestic mood, suitable for an opening ceremony or national celebration. Emphasize dynamic builds, rich harmonies, and cinematic grandeur.",
+model: "v4.5+",
+},
+{
+name: "morning001.mp3",
+title: "Morning Dance",
+url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3",
+suno_url: "https://suno.com/s/9MTkutZYqxeyBlwK",
+date: "2025-07-17",
+duration: "03:52",
+account: "@snakajima",
+original_license: "morning, piano solo, Japanese name, sexy",
+prompt: "morning, piano solo, Japanese name, sexy",
+model: "v4.5+",
+},
+],
+};
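The new bgmAssets module is a static catalog of the bundled background-music tracks (raw URL, Suno source, license note, prompt, and model). A minimal, self-contained sketch of how such a catalog could be queried; the BgmAsset/BgmAssets shapes mirror bgmAssets.d.ts above, while bgmUrlByName and the way the catalog is imported are assumptions for illustration, not package APIs:

```typescript
// Shapes copied from the new lib/data/bgmAssets.d.ts; the lookup helper below
// is a hypothetical convenience, not something the package is known to export.
type BgmAsset = {
  name: string;
  title: string;
  url: string;
  suno_url: string;
  date: string;
  duration: string;
  account: string;
  original_license: string;
  prompt: string;
  model: string;
};
type BgmAssets = { license: string; bgms: BgmAsset[] };

// Resolve the raw mp3 URL for a catalog entry by file name.
function bgmUrlByName(catalog: BgmAssets, name: string): string | undefined {
  return catalog.bgms.find((bgm) => bgm.name === name)?.url;
}

// Example: the ani.json template's audioParams.bgm points at morning001.mp3,
// which is one of the catalog entries above:
// bgmUrlByName(bgmAssets, "morning001.mp3")
//   -> "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3"
```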
package/lib/data/index.d.ts CHANGED
package/lib/data/index.js CHANGED
package/lib/data/promptTemplates.d.ts CHANGED
@@ -50,8 +50,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -113,13 +113,17 @@ export declare const promptTemplates: ({
 speechParams: {
 speakers: {
 Presenter: {
+lang: {
+ja: {
+provider: string;
+voiceId: string;
+};
+};
 speechOptions: {
 instruction: string;
-speed?: undefined;
 };
 voiceId: string;
 displayName?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -183,8 +187,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -236,8 +240,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -300,73 +304,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
-};
-Announcer?: undefined;
-Student?: undefined;
-Teacher?: undefined;
-};
-};
-};
-scriptName: string;
-systemPrompt: string;
-title: string;
-} | {
-description: string;
-filename: string;
-presentationStyle: {
-$mulmocast: {
-credit: string;
-version: string;
-};
-audioParams: {
-audioVolume: number;
-bgmVolume: number;
-closingPadding: number;
-introPadding: number;
-outroPadding: number;
-padding: number;
-suppressSpeech: boolean;
-bgm?: undefined;
-};
-canvasSize: {
-height: number;
-width: number;
-};
-imageParams: {
-images: {
-presenter: {
-source: {
-kind: string;
-url: string;
-};
-type: string;
-};
-girl?: undefined;
-ani?: undefined;
-optimus?: undefined;
-};
-style: string;
-provider?: undefined;
-};
-movieParams: {
-provider: string;
-model?: undefined;
-};
-soundEffectParams: {
-provider: string;
-};
-speechParams: {
-speakers: {
-Presenter: {
-provider: string;
-speechOptions: {
-speed: number;
-instruction?: undefined;
-};
-voiceId: string;
-displayName?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -435,8 +374,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -552,8 +491,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
@@ -608,8 +547,8 @@ export declare const promptTemplates: ({
 en: string;
 };
 voiceId: string;
+lang?: undefined;
 speechOptions?: undefined;
-provider?: undefined;
 };
 Announcer?: undefined;
 Student?: undefined;
package/lib/data/promptTemplates.js CHANGED
@@ -100,6 +100,12 @@ export const promptTemplates = [
 speechParams: {
 speakers: {
 Presenter: {
+lang: {
+ja: {
+provider: "nijivoice",
+voiceId: "9d9ed276-49ee-443a-bc19-26e6136d05f0",
+},
+},
 speechOptions: {
 instruction: "Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl.",
 },
@@ -110,7 +116,7 @@ export const promptTemplates = [
 },
 scriptName: "image_prompts_template.json",
 systemPrompt: "言葉づかいは思いっきりツンデレにして。Another AI will generate comic for each beat based on the image prompt of that beat. You don't need to specify the style of the image, just describe the scene. Mention the reference in one of beats, if it exists. Use the JSON below as a template. Create appropriate amount of beats, and make sure the beats are coherent and flow well.",
-title: "Presentation
+title: "Presentation by Ani",
 },
 {
 description: "Template for business presentation.",
@@ -370,115 +376,6 @@ export const promptTemplates = [
 systemPrompt: "Another AI will generate comic strips for each beat based on the text description of that beat. Mention the reference in one of beats, if it exists. Use the JSON below as a template.",
 title: "Ghibli comic style",
 },
-{
-description: "Template for Ghibli-style image-only comic presentation.",
-filename: "ghibli_image_only",
-presentationStyle: {
-$mulmocast: {
-credit: "closing",
-version: "1.1",
-},
-audioParams: {
-audioVolume: 1,
-bgmVolume: 0.2,
-closingPadding: 0.8,
-introPadding: 1,
-outroPadding: 1,
-padding: 0.3,
-suppressSpeech: false,
-},
-canvasSize: {
-height: 1024,
-width: 1536,
-},
-imageParams: {
-images: {
-presenter: {
-source: {
-kind: "url",
-url: "https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.png",
-},
-type: "image",
-},
-},
-style: "<style>Ghibli style</style>",
-},
-movieParams: {
-provider: "replicate",
-},
-soundEffectParams: {
-provider: "replicate",
-},
-speechParams: {
-speakers: {
-Presenter: {
-displayName: {
-en: "Presenter",
-},
-voiceId: "shimmer",
-},
-},
-},
-},
-scriptName: "image_prompt_only_template.json",
-systemPrompt: "Another AI will generate an image for each beat based on the text description of that beat. Use the JSON below as a template.",
-title: "Ghibli comic image-only",
-},
-{
-description: "Template for Ghibli-style comic presentation.",
-filename: "ghibli_shorts",
-presentationStyle: {
-$mulmocast: {
-credit: "closing",
-version: "1.1",
-},
-audioParams: {
-audioVolume: 1,
-bgmVolume: 0.2,
-closingPadding: 0.8,
-introPadding: 1,
-outroPadding: 1,
-padding: 0.3,
-suppressSpeech: false,
-},
-canvasSize: {
-height: 1536,
-width: 1024,
-},
-imageParams: {
-images: {
-presenter: {
-source: {
-kind: "url",
-url: "https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.jpg",
-},
-type: "image",
-},
-},
-style: "<style>Ghibli style</style>",
-},
-movieParams: {
-provider: "replicate",
-},
-soundEffectParams: {
-provider: "replicate",
-},
-speechParams: {
-speakers: {
-Presenter: {
-provider: "nijivoice",
-speechOptions: {
-speed: 1.5,
-},
-voiceId: "3708ad43-cace-486c-a4ca-8fe41186e20c",
-},
-},
-},
-},
-scriptName: "image_prompts_template.json",
-systemPrompt: "This script is for YouTube shorts. Another AI will generate comic strips for each beat based on the text description of that beat. Mention the reference in one of beats, if it exists. Use the JSON below as a template.",
-title: "Ghibli style for YouTube Shorts",
-},
 {
 description: "Template for Ghost in the shell style comic presentation.",
 filename: "ghost_comic",
package/lib/data/templateDataSet.js CHANGED
@@ -5,7 +5,7 @@ export const templateDataSet = {
 "```",
 ani: "言葉づかいは思いっきりツンデレにして。Another AI will generate comic for each beat based on the image prompt of that beat. You don't need to specify the style of the image, just describe the scene. Mention the reference in one of beats, if it exists. Use the JSON below as a template. Create appropriate amount of beats, and make sure the beats are coherent and flow well.\n" +
 "```JSON\n" +
-`{"$mulmocast":{"version":"1.1","credit":"closing"},"title":"[TITLE: Brief, engaging title for the topic]","lang":"en","references":[{"url":"[SOURCE_URL: URL of the source material]","title":"[SOURCE_TITLE: Title of the referenced article, or paper]","type":"[SOURCE_TYPE: article, paper]"}],"beats":[{"text":"[OPENING_BEAT: Introduce the topic with a hook. Reference the source material and set up why this topic matters. Usually 2-3 sentences that grab attention and provide context.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[MAIN_CONCEPT: Define or explain the core concept/idea. This should be the central focus of your narrative. Keep it clear and accessible.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_1: Additional context, examples, or elaboration that helps illustrate the main concept. This could include how it works, why it's important, or real-world applications.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_2: Continue with more examples, deeper explanation, or different aspects of the topic if needed.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[ADDITIONAL_BEATS: Add more beats as necessary to fully explore the topic. Complex topics may require 6-10+ beats to cover adequately. Each beat should advance the narrative or provide valuable information.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[CONCLUSION/IMPACT: Wrap up with the significance, implications, or key takeaway. Help the audience understand why this matters to them.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"}],"movieParams":{"provider":"replicate","model":"bytedance/seedance-1-lite"},"speechParams":{"provider":"openai","speakers":{"Presenter":{"voiceId":"shimmer","speechOptions":{"instruction":"Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl."}}}},"audioParams":{"bgm":{"kind":"url","url":"https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3"}},"canvasSize":{"width":1024,"height":1536},"imageParams":{"style":"<style>A highly polished 2D digital illustration in anime and manga style, featuring clean linework, soft shading, vivid colors, and expressive facial detailing. The composition emphasizes clarity and visual impact with a minimalistic background and a strong character focus. The lighting is even and bright, giving the image a crisp and energetic feel, reminiscent of high-quality character art used in Japanese visual novels or mobile games.</style>","images":{"ani":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ani.png"}}}}}\n` +
+`{"$mulmocast":{"version":"1.1","credit":"closing"},"title":"[TITLE: Brief, engaging title for the topic]","lang":"en","references":[{"url":"[SOURCE_URL: URL of the source material]","title":"[SOURCE_TITLE: Title of the referenced article, or paper]","type":"[SOURCE_TYPE: article, paper]"}],"beats":[{"text":"[OPENING_BEAT: Introduce the topic with a hook. Reference the source material and set up why this topic matters. Usually 2-3 sentences that grab attention and provide context.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[MAIN_CONCEPT: Define or explain the core concept/idea. This should be the central focus of your narrative. Keep it clear and accessible.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_1: Additional context, examples, or elaboration that helps illustrate the main concept. This could include how it works, why it's important, or real-world applications.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_2: Continue with more examples, deeper explanation, or different aspects of the topic if needed.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[ADDITIONAL_BEATS: Add more beats as necessary to fully explore the topic. Complex topics may require 6-10+ beats to cover adequately. Each beat should advance the narrative or provide valuable information.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[CONCLUSION/IMPACT: Wrap up with the significance, implications, or key takeaway. Help the audience understand why this matters to them.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"}],"movieParams":{"provider":"replicate","model":"bytedance/seedance-1-lite"},"speechParams":{"provider":"openai","speakers":{"Presenter":{"voiceId":"shimmer","speechOptions":{"instruction":"Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl."},"lang":{"ja":{"provider":"nijivoice","voiceId":"9d9ed276-49ee-443a-bc19-26e6136d05f0"}}}}},"audioParams":{"bgm":{"kind":"url","url":"https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3"}},"canvasSize":{"width":1024,"height":1536},"imageParams":{"style":"<style>A highly polished 2D digital illustration in anime and manga style, featuring clean linework, soft shading, vivid colors, and expressive facial detailing. The composition emphasizes clarity and visual impact with a minimalistic background and a strong character focus. The lighting is even and bright, giving the image a crisp and energetic feel, reminiscent of high-quality character art used in Japanese visual novels or mobile games.</style>","images":{"ani":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ani.png"}}}}}\n` +
 "```",
 business: "Use textSlides, markdown, mermaid, or chart to show slides. Extract image links in the article (from <img> tag) to reuse them in the presentation. Mention the reference in one of beats, if it exists. Use the JSON below as a template. chartData is the data for Chart.js\n" +
 "```JSON\n" +
@@ -35,14 +35,6 @@ export const templateDataSet = {
 "```JSON\n" +
 `{"$mulmocast":{"version":"1.1","credit":"closing"},"title":"[TITLE: Brief, engaging title for the topic]","lang":"en","references":[{"url":"[SOURCE_URL: URL of the source material]","title":"[SOURCE_TITLE: Title of the referenced article, or paper]","type":"[SOURCE_TYPE: article, paper]"}],"beats":[{"text":"[OPENING_BEAT: Introduce the topic with a hook. Reference the source material and set up why this topic matters. Usually 2-3 sentences that grab attention and provide context.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[MAIN_CONCEPT: Define or explain the core concept/idea. This should be the central focus of your narrative. Keep it clear and accessible.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_1: Additional context, examples, or elaboration that helps illustrate the main concept. This could include how it works, why it's important, or real-world applications.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_2: Continue with more examples, deeper explanation, or different aspects of the topic if needed.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[ADDITIONAL_BEATS: Add more beats as necessary to fully explore the topic. Complex topics may require 6-10+ beats to cover adequately. Each beat should advance the narrative or provide valuable information.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[CONCLUSION/IMPACT: Wrap up with the significance, implications, or key takeaway. Help the audience understand why this matters to them.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"}],"canvasSize":{"width":1536,"height":1024},"imageParams":{"style":"<style>Ghibli style</style>","images":{"presenter":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.png"}}}}}\n` +
 "```",
-ghibli_image_only: "Another AI will generate an image for each beat based on the text description of that beat. Use the JSON below as a template.\n" +
-"```JSON\n" +
-'{"$mulmocast":{"version":"1.1","credit":"closing"},"title":"[TITLE: Brief, engaging title for the topic]","beats":[{"imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"}],"canvasSize":{"width":1536,"height":1024},"imageParams":{"style":"<style>Ghibli style</style>","images":{"presenter":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.png"}}}}}\n' +
-"```",
-ghibli_shorts: "This script is for YouTube shorts. Another AI will generate comic strips for each beat based on the text description of that beat. Mention the reference in one of beats, if it exists. Use the JSON below as a template.\n" +
-"```JSON\n" +
-`{"$mulmocast":{"version":"1.1","credit":"closing"},"title":"[TITLE: Brief, engaging title for the topic]","lang":"en","references":[{"url":"[SOURCE_URL: URL of the source material]","title":"[SOURCE_TITLE: Title of the referenced article, or paper]","type":"[SOURCE_TYPE: article, paper]"}],"beats":[{"text":"[OPENING_BEAT: Introduce the topic with a hook. Reference the source material and set up why this topic matters. Usually 2-3 sentences that grab attention and provide context.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[MAIN_CONCEPT: Define or explain the core concept/idea. This should be the central focus of your narrative. Keep it clear and accessible.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_1: Additional context, examples, or elaboration that helps illustrate the main concept. This could include how it works, why it's important, or real-world applications.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_2: Continue with more examples, deeper explanation, or different aspects of the topic if needed.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[ADDITIONAL_BEATS: Add more beats as necessary to fully explore the topic. Complex topics may require 6-10+ beats to cover adequately. Each beat should advance the narrative or provide valuable information.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[CONCLUSION/IMPACT: Wrap up with the significance, implications, or key takeaway. Help the audience understand why this matters to them.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"}],"canvasSize":{"width":1024,"height":1536},"speechParams":{"speakers":{"Presenter":{"provider":"nijivoice","voiceId":"3708ad43-cace-486c-a4ca-8fe41186e20c","speechOptions":{"speed":1.5}}}},"imageParams":{"style":"<style>Ghibli style</style>","images":{"presenter":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.jpg"}}}}}\n` +
-"```",
 ghost_comic: "Another AI will generate images for each beat based on the image prompt of that beat. Mention the reference in one of beats, if it exists. Use the JSON below as a template.\n" +
 "```JSON\n" +
 `{"$mulmocast":{"version":"1.1","credit":"closing"},"title":"[TITLE: Brief, engaging title for the topic]","lang":"en","references":[{"url":"[SOURCE_URL: URL of the source material]","title":"[SOURCE_TITLE: Title of the referenced article, or paper]","type":"[SOURCE_TYPE: article, paper]"}],"beats":[{"text":"[OPENING_BEAT: Introduce the topic with a hook. Reference the source material and set up why this topic matters. Usually 2-3 sentences that grab attention and provide context.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[MAIN_CONCEPT: Define or explain the core concept/idea. This should be the central focus of your narrative. Keep it clear and accessible.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_1: Additional context, examples, or elaboration that helps illustrate the main concept. This could include how it works, why it's important, or real-world applications.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[SUPPORTING_DETAIL_2: Continue with more examples, deeper explanation, or different aspects of the topic if needed.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[ADDITIONAL_BEATS: Add more beats as necessary to fully explore the topic. Complex topics may require 6-10+ beats to cover adequately. Each beat should advance the narrative or provide valuable information.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"},{"text":"[CONCLUSION/IMPACT: Wrap up with the significance, implications, or key takeaway. Help the audience understand why this matters to them.]","imagePrompt":"[IMAGE_PROMPT: A prompt for the image to be generated for this beat.]"}],"canvasSize":{"width":1536,"height":1024},"imageParams":{"style":"<style>Ghost in the shell aesthetic.</style>","images":{"presenter":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghost_presenter.png"}},"optimus":{"type":"image","source":{"kind":"url","url":"https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/optimus.png"}}}}}\n` +
package/lib/methods/mulmo_studio_context.d.ts CHANGED
@@ -10,7 +10,7 @@ export declare const MulmoStudioContextMethods: {
 getFileName(context: MulmoStudioContext): string;
 getCaption(context: MulmoStudioContext): string | undefined;
 setSessionState(context: MulmoStudioContext, sessionType: SessionType, value: boolean): void;
-setBeatSessionState(context: MulmoStudioContext, sessionType: BeatSessionType, index: number, value: boolean): void;
+setBeatSessionState(context: MulmoStudioContext, sessionType: BeatSessionType, index: number, id: string | undefined, value: boolean): void;
 needTranslate(context: MulmoStudioContext, includeCaption?: boolean): boolean | "" | undefined;
 getIntroPadding(context: MulmoStudioContext): number;
 };
package/lib/methods/mulmo_studio_context.js CHANGED
@@ -15,12 +15,12 @@ const notifyStateChange = (context, sessionType) => {
 callback({ kind: "session", sessionType, inSession });
 }
 };
-const notifyBeatStateChange = (context, sessionType,
-const inSession = context.sessionState.inBeatSession[sessionType][
+const notifyBeatStateChange = (context, sessionType, id) => {
+const inSession = context.sessionState.inBeatSession[sessionType][id] ?? false;
 const prefix = inSession ? "{" : " }";
-GraphAILogger.info(`${prefix} ${sessionType} ${
+GraphAILogger.info(`${prefix} ${sessionType} ${id}`);
 for (const callback of sessionProgressCallbacks) {
-callback({ kind: "beat", sessionType,
+callback({ kind: "beat", sessionType, id, inSession });
 }
 };
 export const MulmoStudioContextMethods = {
@@ -50,18 +50,19 @@
 context.sessionState.inSession[sessionType] = value;
 notifyStateChange(context, sessionType);
 },
-setBeatSessionState(context, sessionType, index, value) {
+setBeatSessionState(context, sessionType, index, id, value) {
+const key = id ?? `__index__${index}`;
 if (value) {
 if (!context.sessionState.inBeatSession[sessionType]) {
 context.sessionState.inBeatSession[sessionType] = {};
 }
-context.sessionState.inBeatSession[sessionType][
+context.sessionState.inBeatSession[sessionType][key] = true;
 }
 else {
 // NOTE: Setting to false causes the parse error in rebuildStudio in preprocess.ts
-delete context.sessionState.inBeatSession[sessionType][
+delete context.sessionState.inBeatSession[sessionType][key];
 }
-notifyBeatStateChange(context, sessionType,
+notifyBeatStateChange(context, sessionType, key);
 },
 needTranslate(context, includeCaption = false) {
 // context.studio.script.lang = defaultLang, context.lang = targetLanguage.
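Taken together, mulmo_studio_context.js and the schema changes below switch sessionState.inBeatSession from index-keyed to string-keyed records: a beat with an id is tracked under that id, and a beat without one falls back to a synthetic __index__ key. A minimal sketch of that keying logic, assuming a plain Record<string, boolean> bucket as in the updated schema:

```typescript
// Sketch of the keying scheme used by setBeatSessionState; simplified to a
// bare record rather than the full MulmoStudioContext.
type InBeatSession = Record<string, boolean>;

function beatSessionKey(index: number, id: string | undefined): string {
  // Beats with an explicit id are tracked by that id; otherwise fall back to a
  // synthetic index-based key, as in the updated setBeatSessionState.
  return id ?? `__index__${index}`;
}

function setBeatFlag(bucket: InBeatSession, index: number, id: string | undefined, value: boolean): void {
  const key = beatSessionKey(index, id);
  if (value) {
    bucket[key] = true;
  } else {
    // Deleting the key (rather than storing false) keeps the record sparse,
    // mirroring the NOTE about rebuildStudio in the original source.
    delete bucket[key];
  }
}
```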
package/lib/types/schema.d.ts CHANGED
@@ -5919,35 +5919,35 @@ export declare const mulmoSessionStateSchema: z.ZodObject<{
 caption: boolean;
 }>;
 inBeatSession: z.ZodObject<{
-audio: z.ZodRecord<z.
-image: z.ZodRecord<z.
-movie: z.ZodRecord<z.
-multiLingual: z.ZodRecord<z.
-caption: z.ZodRecord<z.
-html: z.ZodRecord<z.
-imageReference: z.ZodRecord<z.
-soundEffect: z.ZodRecord<z.
-lipSync: z.ZodRecord<z.
+audio: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+image: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+movie: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+multiLingual: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+caption: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+html: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+imageReference: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+soundEffect: z.ZodRecord<z.ZodString, z.ZodBoolean>;
+lipSync: z.ZodRecord<z.ZodString, z.ZodBoolean>;
 }, "strip", z.ZodTypeAny, {
-image: Record<
-audio: Record<
-movie: Record<
-html: Record<
-multiLingual: Record<
-caption: Record<
-imageReference: Record<
-soundEffect: Record<
-lipSync: Record<
+image: Record<string, boolean>;
+audio: Record<string, boolean>;
+movie: Record<string, boolean>;
+html: Record<string, boolean>;
+multiLingual: Record<string, boolean>;
+caption: Record<string, boolean>;
+imageReference: Record<string, boolean>;
+soundEffect: Record<string, boolean>;
+lipSync: Record<string, boolean>;
 }, {
-image: Record<
-audio: Record<
-movie: Record<
-html: Record<
-multiLingual: Record<
-caption: Record<
-imageReference: Record<
-soundEffect: Record<
-lipSync: Record<
+image: Record<string, boolean>;
+audio: Record<string, boolean>;
+movie: Record<string, boolean>;
+html: Record<string, boolean>;
+multiLingual: Record<string, boolean>;
+caption: Record<string, boolean>;
+imageReference: Record<string, boolean>;
+soundEffect: Record<string, boolean>;
+lipSync: Record<string, boolean>;
 }>;
 }, "strip", z.ZodTypeAny, {
 inSession: {
@@ -5959,15 +5959,15 @@ export declare const mulmoSessionStateSchema: z.ZodObject<{
 caption: boolean;
 };
 inBeatSession: {
-image: Record<
-audio: Record<
-movie: Record<
-html: Record<
-multiLingual: Record<
-caption: Record<
-imageReference: Record<
-soundEffect: Record<
-lipSync: Record<
+image: Record<string, boolean>;
+audio: Record<string, boolean>;
+movie: Record<string, boolean>;
+html: Record<string, boolean>;
+multiLingual: Record<string, boolean>;
+caption: Record<string, boolean>;
+imageReference: Record<string, boolean>;
+soundEffect: Record<string, boolean>;
+lipSync: Record<string, boolean>;
 };
 }, {
 inSession: {
@@ -5979,15 +5979,15 @@ export declare const mulmoSessionStateSchema: z.ZodObject<{
 caption: boolean;
 };
 inBeatSession: {
-image: Record<
-audio: Record<
-movie: Record<
-html: Record<
-multiLingual: Record<
-caption: Record<
-imageReference: Record<
-soundEffect: Record<
-lipSync: Record<
+image: Record<string, boolean>;
+audio: Record<string, boolean>;
+movie: Record<string, boolean>;
+html: Record<string, boolean>;
+multiLingual: Record<string, boolean>;
+caption: Record<string, boolean>;
+imageReference: Record<string, boolean>;
+soundEffect: Record<string, boolean>;
+lipSync: Record<string, boolean>;
 };
 }>;
 export declare const mulmoStudioSchema: z.ZodObject<{
package/lib/types/schema.js CHANGED
@@ -417,15 +417,15 @@ export const mulmoSessionStateSchema = z.object({
 pdf: z.boolean(),
 }),
 inBeatSession: z.object({
-audio: z.record(z.
-image: z.record(z.
-movie: z.record(z.
-multiLingual: z.record(z.
-caption: z.record(z.
-html: z.record(z.
-imageReference: z.record(z.
-soundEffect: z.record(z.
-lipSync: z.record(z.
+audio: z.record(z.string(), z.boolean()),
+image: z.record(z.string(), z.boolean()),
+movie: z.record(z.string(), z.boolean()),
+multiLingual: z.record(z.string(), z.boolean()),
+caption: z.record(z.string(), z.boolean()),
+html: z.record(z.string(), z.boolean()),
+imageReference: z.record(z.string(), z.boolean()),
+soundEffect: z.record(z.string(), z.boolean()),
+lipSync: z.record(z.string(), z.boolean()),
 }),
 });
 export const mulmoStudioSchema = z
package/lib/types/type.d.ts CHANGED
package/lib/utils/filters.js CHANGED
@@ -15,7 +15,7 @@ export const nijovoiceTextAgentFilter = async (context, next) => {
 return next(context);
 };
 export const fileCacheAgentFilter = async (context, next) => {
-const { force, file, index, mulmoContext, sessionType } = context.namedInputs.cache;
+const { force, file, index, mulmoContext, sessionType, id } = context.namedInputs.cache;
 const shouldUseCache = async () => {
 if (force && force.some((element) => element)) {
 return false;
@@ -33,7 +33,7 @@ export const fileCacheAgentFilter = async (context, next) => {
 return true;
 }
 try {
-MulmoStudioContextMethods.setBeatSessionState(mulmoContext, sessionType, index, true);
+MulmoStudioContextMethods.setBeatSessionState(mulmoContext, sessionType, index, id, true);
 const output = (await next(context)) || undefined;
 const { buffer, text, saved } = output ?? {};
 if (saved) {
@@ -56,7 +56,7 @@ export const fileCacheAgentFilter = async (context, next) => {
 return false;
 }
 finally {
-MulmoStudioContextMethods.setBeatSessionState(mulmoContext, sessionType, index, false);
+MulmoStudioContextMethods.setBeatSessionState(mulmoContext, sessionType, index, id, false);
 }
 };
 export const browserlessCacheGenerator = (cacheDir) => {
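fileCacheAgentFilter now destructures id from namedInputs.cache (fed by the id: ":beat.id" entries added in images.js) and forwards it into the same notification path, so progress events for beats carry a string key rather than a bare index: { kind: "beat", sessionType, id, inSession }. A sketch of a listener shape under that assumption; the payload shapes are taken from notifyStateChange/notifyBeatStateChange above, while how a callback is actually registered with the package is not shown in this diff and is left out here:

```typescript
// Payload shapes mirror notifyStateChange / notifyBeatStateChange; the
// listener itself is illustrative and not part of the package API.
type SessionProgressEvent =
  | { kind: "session"; sessionType: string; inSession: boolean }
  | { kind: "beat"; sessionType: string; id: string; inSession: boolean };

const onProgress = (event: SessionProgressEvent): void => {
  if (event.kind === "beat") {
    // `id` is either the beat's own id or the `__index__<n>` fallback key.
    console.log(`${event.inSession ? "begin" : "end"} ${event.sessionType} ${event.id}`);
  } else {
    console.log(`${event.inSession ? "begin" : "end"} ${event.sessionType}`);
  }
};
```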
package/package.json CHANGED
package/assets/templates/ghibli_image_only.json DELETED
@@ -1,28 +0,0 @@
-{
-"title": "Ghibli comic image-only",
-"description": "Template for Ghibli-style image-only comic presentation.",
-"systemPrompt": "Another AI will generate an image for each beat based on the text description of that beat. Use the JSON below as a template.",
-"presentationStyle": {
-"$mulmocast": {
-"version": "1.1",
-"credit": "closing"
-},
-"canvasSize": {
-"width": 1536,
-"height": 1024
-},
-"imageParams": {
-"style": "<style>Ghibli style</style>",
-"images": {
-"presenter": {
-"type": "image",
-"source": {
-"kind": "url",
-"url": "https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.png"
-}
-}
-}
-}
-},
-"scriptName": "image_prompt_only_template.json"
-}
package/assets/templates/ghibli_shorts.json DELETED
@@ -1,33 +0,0 @@
-{
-"title": "Ghibli style for YouTube Shorts",
-"description": "Template for Ghibli-style comic presentation.",
-"systemPrompt": "This script is for YouTube shorts. Another AI will generate comic strips for each beat based on the text description of that beat. Mention the reference in one of beats, if it exists. Use the JSON below as a template.",
-"presentationStyle": {
-"$mulmocast": {
-"version": "1.1",
-"credit": "closing"
-},
-"canvasSize": {
-"width": 1024,
-"height": 1536
-},
-"speechParams": {
-"speakers": {
-"Presenter": { "provider": "nijivoice", "voiceId": "3708ad43-cace-486c-a4ca-8fe41186e20c", "speechOptions": { "speed": 1.5 } }
-}
-},
-"imageParams": {
-"style": "<style>Ghibli style</style>",
-"images": {
-"presenter": {
-"type": "image",
-"source": {
-"kind": "url",
-"url": "https://raw.githubusercontent.com/receptron/mulmocast-media/refs/heads/main/characters/ghibli_presenter.jpg"
-}
-}
-}
-}
-},
-"scriptName": "image_prompts_template.json"
-}