mulmocast 1.2.2 → 1.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/assets/templates/ani.json +8 -3
  2. package/assets/templates/html.json +0 -1
  3. package/lib/actions/audio.js +1 -0
  4. package/lib/actions/captions.js +2 -2
  5. package/lib/actions/image_agents.js +3 -3
  6. package/lib/actions/images.js +5 -0
  7. package/lib/actions/translate.js +2 -2
  8. package/lib/agents/image_genai_agent.js +1 -1
  9. package/lib/agents/image_openai_agent.js +3 -0
  10. package/lib/agents/lipsync_replicate_agent.js +1 -1
  11. package/lib/agents/movie_genai_agent.js +1 -1
  12. package/lib/agents/movie_replicate_agent.js +1 -1
  13. package/lib/agents/sound_effect_replicate_agent.js +1 -1
  14. package/lib/agents/tts_elevenlabs_agent.js +1 -1
  15. package/lib/agents/tts_nijivoice_agent.js +10 -6
  16. package/lib/agents/tts_openai_agent.js +3 -0
  17. package/lib/data/bgmAssets.d.ts +18 -0
  18. package/lib/data/bgmAssets.js +101 -0
  19. package/lib/data/index.d.ts +1 -0
  20. package/lib/data/index.js +1 -0
  21. package/lib/data/promptTemplates.d.ts +13 -74
  22. package/lib/data/promptTemplates.js +7 -110
  23. package/lib/data/scriptTemplates.d.ts +1 -1
  24. package/lib/data/scriptTemplates.js +1 -0
  25. package/lib/data/templateDataSet.d.ts +0 -2
  26. package/lib/data/templateDataSet.js +1 -9
  27. package/lib/methods/mulmo_studio_context.d.ts +1 -1
  28. package/lib/methods/mulmo_studio_context.js +9 -8
  29. package/lib/types/schema.d.ts +45 -45
  30. package/lib/types/schema.js +9 -9
  31. package/lib/types/type.d.ts +1 -1
  32. package/lib/utils/filters.js +3 -3
  33. package/lib/utils/provider2agent.d.ts +7 -2
  34. package/lib/utils/provider2agent.js +21 -2
  35. package/package.json +11 -11
  36. package/scripts/templates/image_prompt_only_template.json +1 -0
  37. package/assets/templates/ghibli_image_only.json +0 -28
  38. package/assets/templates/ghibli_shorts.json +0 -33
  39. package/scripts/test/test_hello_caption.json~ +0 -21
  40. package/scripts/test/test_hello_image.json~ +0 -18
package/assets/templates/ani.json CHANGED
@@ -1,5 +1,5 @@
  {
- "title": "Presentation with Ani",
+ "title": "Presentation by Ani",
  "description": "Template for presentation with Ani.",
  "systemPrompt": "言葉づかいは思いっきりツンデレにして。Another AI will generate comic for each beat based on the image prompt of that beat. You don't need to specify the style of the image, just describe the scene. Mention the reference in one of beats, if it exists. Use the JSON below as a template. Create appropriate amount of beats, and make sure the beats are coherent and flow well.",
  "presentationStyle": {
@@ -16,7 +16,13 @@
  "speakers": {
  "Presenter": {
  "voiceId": "shimmer",
- "speechOptions": { "instruction": "Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl." }
+ "speechOptions": { "instruction": "Speak in a slightly high-pitched, curt tone with sudden flustered shifts—like a tsundere anime girl." },
+ "lang": {
+ "ja": {
+ "provider": "nijivoice",
+ "voiceId": "9d9ed276-49ee-443a-bc19-26e6136d05f0"
+ }
+ }
  }
  }
  },
@@ -26,7 +32,6 @@
  "url": "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3"
  }
  },
- "lang": "en",
  "canvasSize": {
  "width": 1024,
  "height": 1536
package/assets/templates/html.json CHANGED
@@ -7,7 +7,6 @@
  "version": "1.1",
  "credit": "closing"
  },
- "lang": "en",
  "canvasSize": {
  "width": 1536,
  "height": 1024
package/lib/actions/audio.js CHANGED
@@ -88,6 +88,7 @@ const graph_tts = {
  force: [":context.force"],
  file: ":preprocessor.audioPath",
  index: ":__mapIndex",
+ id: ":beat.id",
  mulmoContext: ":context",
  sessionType: "audio",
  },
package/lib/actions/captions.js CHANGED
@@ -26,7 +26,7 @@ const graph_data = {
  agent: async (namedInputs) => {
  const { beat, context, index } = namedInputs;
  try {
- MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, true);
+ MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, beat.id, true);
  const captionParams = mulmoCaptionParamsSchema.parse({ ...context.studio.script.captionParams, ...beat.captionParams });
  const canvasSize = MulmoPresentationStyleMethods.getCanvasSize(context.presentationStyle);
  const imagePath = getCaptionImagePath(context, index);
@@ -46,7 +46,7 @@ const graph_data = {
  return imagePath;
  }
  finally {
- MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, false);
+ MulmoStudioContextMethods.setBeatSessionState(context, "caption", index, beat.id, false);
  }
  },
  inputs: {
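Each setBeatSessionState call in this release gains the beat's id as a new argument between the beat index and the boolean state; the same change appears in image_agents.js, images.js, and translate.js below, and mulmo_studio_context.d.ts changes by +1 -1 in the file list. A sketch of the signature as inferred from these call sites; the parameter names are assumptions, not copied from the package:

// Hypothetical signature inferred from the updated call sites above.
type SessionType = "audio" | "image" | "html" | "movie" | "caption" | "multiLingual" | "soundEffect" | "lipSync";

declare function setBeatSessionState(
  context: unknown,        // MulmoStudioContext in the actual code
  sessionType: SessionType,
  index: number,           // beat index within the script
  id: string | undefined,  // newly added: the beat's id, if any
  state: boolean,          // true when the session starts, false when it ends
): void;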
package/lib/actions/image_agents.js CHANGED
@@ -64,13 +64,13 @@ export const imagePluginAgent = async (namedInputs) => {
  const imagePath = getBeatPngImagePath(context, index);
  const plugin = MulmoBeatMethods.getPlugin(beat);
  try {
- MulmoStudioContextMethods.setBeatSessionState(context, "image", index, true);
+ MulmoStudioContextMethods.setBeatSessionState(context, "image", index, beat.id, true);
  const processorParams = { beat, context, imagePath, ...htmlStyle(context, beat) };
  await plugin.process(processorParams);
- MulmoStudioContextMethods.setBeatSessionState(context, "image", index, false);
+ MulmoStudioContextMethods.setBeatSessionState(context, "image", index, beat.id, false);
  }
  catch (error) {
- MulmoStudioContextMethods.setBeatSessionState(context, "image", index, false);
+ MulmoStudioContextMethods.setBeatSessionState(context, "image", index, beat.id, false);
  throw error;
  }
  };
package/lib/actions/images.js CHANGED
@@ -88,6 +88,7 @@ const beat_graph_data = {
  force: [":context.force", ":forceImage"],
  file: ":preprocessor.htmlPath",
  index: ":__mapIndex",
+ id: ":beat.id",
  mulmoContext: ":context",
  sessionType: "html",
  },
@@ -130,6 +131,7 @@ const beat_graph_data = {
  force: [":context.force", ":forceImage"],
  file: ":preprocessor.imagePath",
  index: ":__mapIndex",
+ id: ":beat.id",
  mulmoContext: ":context",
  sessionType: "image",
  },
@@ -155,6 +157,7 @@ const beat_graph_data = {
  force: [":context.force", ":forceMovie"],
  file: ":preprocessor.movieFile",
  index: ":__mapIndex",
+ id: ":beat.id",
  sessionType: "movie",
  mulmoContext: ":context",
  },
@@ -214,6 +217,7 @@ const beat_graph_data = {
  force: [":context.force"],
  file: ":preprocessor.soundEffectFile",
  index: ":__mapIndex",
+ id: ":beat.id",
  sessionType: "soundEffect",
  mulmoContext: ":context",
  },
@@ -237,6 +241,7 @@ const beat_graph_data = {
  force: [":context.force"],
  file: ":preprocessor.lipSyncFile",
  index: ":__mapIndex",
+ id: ":beat.id",
  sessionType: "lipSync",
  mulmoContext: ":context",
  },
package/lib/actions/translate.js CHANGED
@@ -184,11 +184,11 @@ const localizedTextCacheAgentFilter = async (context, next) => {
  return { text: multiLingual.multiLingualTexts[targetLang].text };
  }
  try {
- MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, true);
+ MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, beat.id, true);
  return await next(context);
  }
  finally {
- MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, false);
+ MulmoStudioContextMethods.setBeatSessionState(mulmoContext, "multiLingual", beatIndex, beat.id, false);
  }
  };
  const agentFilters = [
package/lib/agents/image_genai_agent.js CHANGED
@@ -8,7 +8,7 @@ export const imageGenAIAgent = async ({ namedInputs, params, config, }) => {
  const model = params.model ?? provider2ImageAgent["google"].defaultModel;
  const apiKey = config?.apiKey;
  if (!apiKey) {
- throw new Error("API key is required for Google GenAI agent");
+ throw new Error("Google GenAI API key is required (GEMINI_API_KEY)");
  }
  try {
  const ai = new GoogleGenAI({ apiKey });
package/lib/agents/image_openai_agent.js CHANGED
@@ -8,6 +8,9 @@ export const imageOpenaiAgent = async ({ namedInputs, params, config, }) => {
  const { prompt, referenceImages } = namedInputs;
  const { moderation, canvasSize, quality } = params;
  const { apiKey, baseURL } = { ...config };
+ if (!apiKey) {
+ throw new Error("OpenAI API key is required (OPENAI_API_KEY)");
+ }
  const model = params.model ?? provider2ImageAgent["openai"].defaultModel;
  const openai = new OpenAI({ apiKey, baseURL });
  const size = (() => {
package/lib/agents/lipsync_replicate_agent.js CHANGED
@@ -7,7 +7,7 @@ export const lipSyncReplicateAgent = async ({ namedInputs, params, config, }) =>
  const apiKey = config?.apiKey;
  const model = params.model ?? provider2LipSyncAgent.replicate.defaultModel;
  if (!apiKey) {
- throw new Error("REPLICATE_API_TOKEN environment variable is required");
+ throw new Error("Replicate API key is required (REPLICATE_API_TOKEN)");
  }
  const replicate = new Replicate({
  auth: apiKey,
package/lib/agents/movie_genai_agent.js CHANGED
@@ -19,7 +19,7 @@ export const movieGenAIAgent = async ({ namedInputs, params, config, }) => {
  const duration = params.duration ?? 8;
  const apiKey = config?.apiKey;
  if (!apiKey) {
- throw new Error("API key is required for Google GenAI agent");
+ throw new Error("Google GenAI API key is required (GEMINI_API_KEY)");
  }
  try {
  const ai = new GoogleGenAI({ apiKey });
package/lib/agents/movie_replicate_agent.js CHANGED
@@ -86,7 +86,7 @@ export const movieReplicateAgent = async ({ namedInputs, params, config, }) => {
  }
  const apiKey = config?.apiKey;
  if (!apiKey) {
- throw new Error("REPLICATE_API_TOKEN environment variable is required");
+ throw new Error("Replicate API key is required (REPLICATE_API_TOKEN)");
  }
  try {
  const buffer = await generateMovie(model, apiKey, prompt, imagePath, aspectRatio, duration);
package/lib/agents/sound_effect_replicate_agent.js CHANGED
@@ -7,7 +7,7 @@ export const soundEffectReplicateAgent = async ({ namedInputs, params, config })
  const apiKey = config?.apiKey;
  const model = params.model ?? provider2SoundEffectAgent.replicate.defaultModel;
  if (!apiKey) {
- throw new Error("REPLICATE_API_TOKEN environment variable is required");
+ throw new Error("Replicate API key is required (REPLICATE_API_TOKEN)");
  }
  const replicate = new Replicate({
  auth: apiKey,
package/lib/agents/tts_elevenlabs_agent.js CHANGED
@@ -5,7 +5,7 @@ export const ttsElevenlabsAgent = async ({ namedInputs, params, config, }) => {
  const { voice, model, stability, similarityBoost, suppressError } = params;
  const apiKey = config?.apiKey;
  if (!apiKey) {
- throw new Error("ELEVENLABS_API_KEY environment variable is required");
+ throw new Error("ElevenLabs API key is required (ELEVENLABS_API_KEY)");
  }
  if (!voice) {
  throw new Error("ELEVENLABS Voice ID is required");
package/lib/agents/tts_nijivoice_agent.js CHANGED
@@ -1,15 +1,19 @@
- import { GraphAILogger, assert } from "graphai";
+ import { GraphAILogger } from "graphai";
+ /*
  const errorMessage = [
- "TTS NijiVoice: No API key. ",
- "You have the following options:",
- "1. Obtain an API key from Niji Voice (https://platform.nijivoice.com/) and set it as the NIJIVOICE_API_KEY environment variable.",
- '2. Use OpenAI\'s TTS instead of Niji Voice by changing speechParams.provider from "nijivoice" to "openai".',
+ "TTS NijiVoice: No API key. ",
+ "You have the following options:",
+ "1. Obtain an API key from Niji Voice (https://platform.nijivoice.com/) and set it as the NIJIVOICE_API_KEY environment variable.",
+ '2. Use OpenAI\'s TTS instead of Niji Voice by changing speechParams.provider from "nijivoice" to "openai".',
  ].join("\n");
+ */
  export const ttsNijivoiceAgent = async ({ params, namedInputs, config, }) => {
  const { suppressError, voice, speed, speed_global } = params;
  const { apiKey } = config ?? {};
  const { text } = namedInputs;
- assert(!!apiKey, errorMessage);
+ if (!apiKey) {
+ throw new Error("NijiVoice API key is required (NIJIVOICE_API_KEY)");
+ }
  const url = `https://api.nijivoice.com/api/platform/v1/voice-actors/${voice}/generate-voice`;
  const options = {
  method: "POST",
package/lib/agents/tts_openai_agent.js CHANGED
@@ -5,6 +5,9 @@ export const ttsOpenaiAgent = async ({ namedInputs, params, config, }) => {
  const { text } = namedInputs;
  const { model, voice, suppressError, instructions } = params;
  const { apiKey, baseURL } = config ?? {};
+ if (!apiKey) {
+ throw new Error("OpenAI API key is required (OPENAI_API_KEY)");
+ }
  const openai = new OpenAI({ apiKey, baseURL });
  try {
  const tts_options = {
package/lib/data/bgmAssets.d.ts ADDED
@@ -0,0 +1,18 @@
+ type BgmAsset = {
+ name: string;
+ title: string;
+ url: string;
+ suno_url: string;
+ date: string;
+ duration: string;
+ account: string;
+ original_license: string;
+ prompt: string;
+ model: string;
+ };
+ export type BgmAssets = {
+ license: string;
+ bgms: BgmAsset[];
+ };
+ export declare const bgmAssets: BgmAssets;
+ export {};
package/lib/data/bgmAssets.js ADDED
@@ -0,0 +1,101 @@
+ export const bgmAssets = {
+ license: "Free to distribute as the BMG of media generated by MulmoCast, including commercial use.",
+ bgms: [
+ {
+ name: "story001.mp3",
+ title: "Whispered Melody",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story001.mp3",
+ suno_url: "https://suno.com/s/v6zer50aQJu8Y0qA",
+ date: "2025-06-17",
+ duration: "03:17",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "instrumental, smooth, piano",
+ model: "v4.5 beta",
+ },
+ {
+ name: "story002.mp3",
+ title: "Rise and Shine",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story002.mp3",
+ suno_url: "https://suno.com/s/mJnvyu3UXnkdAPfQ",
+ date: "2025-06-17",
+ duration: "04:04",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "techno, inspiring, piano",
+ model: "v4.5 beta",
+ },
+ {
+ name: "story003.mp3",
+ title: "Chasing the Sunset",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story003.mp3",
+ suno_url: "https://suno.com/s/2zGjMQ9vURJbaMZA",
+ date: "2025-06-17",
+ duration: "02:49",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "piano, inspiring, sunset",
+ model: "v4.5 beta",
+ },
+ {
+ name: "story004.mp3",
+ title: "Whispering Keys",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story004.mp3",
+ suno_url: "https://suno.com/s/0SFoBRsBWsncw6tu",
+ date: "2025-06-17",
+ duration: "04:00",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "Piano, classical, ambient",
+ model: "v4",
+ },
+ {
+ name: "story005.mp3",
+ title: "Whisper of Ivory",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story005.mp3",
+ suno_url: "https://suno.com/s/0SFoBRsBWsncw6tu",
+ date: "2025-06-17",
+ duration: "04:00",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "Piano solo, classical, ambient",
+ model: "v4",
+ },
+ {
+ name: "theme001.mp3",
+ title: "Rise of the Flame",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/theme001.mp3",
+ suno_url: "https://suno.com/s/WhYOf8oJYhBgSKET",
+ date: "2025-06-20",
+ duration: "03:23",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "Olympic Theme, classical, emotional",
+ model: "v4",
+ },
+ {
+ name: "olympic001.mp3",
+ title: "Olympic-style Theme Music",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/olympic001.mp3",
+ suno_url: "https://suno.com/s/32wpnmCrkFVvkTSQ",
+ date: "2025-07-17",
+ duration: "02:54",
+ account: "@snakajima",
+ original_license: "Generated by Suno with commercial use rights (PRO Plan)",
+ prompt: "Epic orchestral fanfare in the style of John Williams' Olympic Fanfare and Theme. Bright brass fanfare, soaring strings, powerful percussion, and heroic French horn melodies. Triumphant and majestic mood, suitable for an opening ceremony or national celebration. Emphasize dynamic builds, rich harmonies, and cinematic grandeur.",
+ model: "v4.5+",
+ },
+ {
+ name: "morning001.mp3",
+ title: "Morning Dance",
+ url: "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/morning001.mp3",
+ suno_url: "https://suno.com/s/9MTkutZYqxeyBlwK",
+ date: "2025-07-17",
+ duration: "03:52",
+ account: "@snakajima",
+ original_license: "morning, piano solo, Japanese name, sexy",
+ prompt: "morning, piano solo, Japanese name, sexy",
+ model: "v4.5+",
+ },
+ ],
+ };
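Since lib/data/index.js now re-exports this module (see the index.js change below), the bundled BGM catalog should be importable alongside the existing data exports. A minimal usage sketch; the import specifier is an assumption (it may be the package root or a deep path), and nothing beyond the fields declared in bgmAssets.d.ts is assumed:

// Hypothetical usage sketch; the import path is not confirmed by this diff.
import { bgmAssets } from "mulmocast";

// Pick a bundled BGM by file name and use its URL, e.g. as the script's audioParams.bgm source.
const bgm = bgmAssets.bgms.find((b) => b.name === "story001.mp3");
if (bgm) {
  console.log(`${bgm.title} (${bgm.duration}) -> ${bgm.url}`);
}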
package/lib/data/index.d.ts CHANGED
@@ -1,3 +1,4 @@
  export * from "./promptTemplates.js";
  export * from "./scriptTemplates.js";
  export * from "./templateDataSet.js";
+ export * from "./bgmAssets.js";
package/lib/data/index.js CHANGED
@@ -1,3 +1,4 @@
  export * from "./promptTemplates.js";
  export * from "./scriptTemplates.js";
  export * from "./templateDataSet.js";
+ export * from "./bgmAssets.js";
package/lib/data/promptTemplates.d.ts CHANGED
@@ -50,8 +50,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -113,13 +113,17 @@ export declare const promptTemplates: ({
  speechParams: {
  speakers: {
  Presenter: {
+ lang: {
+ ja: {
+ provider: string;
+ voiceId: string;
+ };
+ };
  speechOptions: {
  instruction: string;
- speed?: undefined;
  };
  voiceId: string;
  displayName?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -183,8 +187,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -236,8 +240,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -300,73 +304,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
- };
- Announcer?: undefined;
- Student?: undefined;
- Teacher?: undefined;
- };
- };
- };
- scriptName: string;
- systemPrompt: string;
- title: string;
- } | {
- description: string;
- filename: string;
- presentationStyle: {
- $mulmocast: {
- credit: string;
- version: string;
- };
- audioParams: {
- audioVolume: number;
- bgmVolume: number;
- closingPadding: number;
- introPadding: number;
- outroPadding: number;
- padding: number;
- suppressSpeech: boolean;
- bgm?: undefined;
- };
- canvasSize: {
- height: number;
- width: number;
- };
- imageParams: {
- images: {
- presenter: {
- source: {
- kind: string;
- url: string;
- };
- type: string;
- };
- girl?: undefined;
- ani?: undefined;
- optimus?: undefined;
- };
- style: string;
- provider?: undefined;
- };
- movieParams: {
- provider: string;
- model?: undefined;
- };
- soundEffectParams: {
- provider: string;
- };
- speechParams: {
- speakers: {
- Presenter: {
- provider: string;
- speechOptions: {
- speed: number;
- instruction?: undefined;
- };
- voiceId: string;
- displayName?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -435,8 +374,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -552,8 +491,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;
@@ -608,8 +547,8 @@ export declare const promptTemplates: ({
  en: string;
  };
  voiceId: string;
+ lang?: undefined;
  speechOptions?: undefined;
- provider?: undefined;
  };
  Announcer?: undefined;
  Student?: undefined;