mulmocast 0.1.2 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/assets/templates/characters.json +16 -0
- package/assets/templates/html.json +6 -0
- package/lib/actions/audio.js +8 -6
- package/lib/actions/image_agents.d.ts +121 -0
- package/lib/actions/image_agents.js +56 -0
- package/lib/actions/image_references.d.ts +9 -0
- package/lib/actions/image_references.js +79 -0
- package/lib/actions/images.d.ts +9 -109
- package/lib/actions/images.js +68 -184
- package/lib/actions/index.d.ts +2 -0
- package/lib/actions/index.js +2 -0
- package/lib/actions/movie.js +3 -1
- package/lib/actions/pdf.js +5 -2
- package/lib/agents/image_google_agent.d.ts +2 -15
- package/lib/agents/image_google_agent.js +3 -3
- package/lib/agents/image_openai_agent.d.ts +2 -17
- package/lib/agents/image_openai_agent.js +7 -7
- package/lib/agents/movie_google_agent.d.ts +2 -17
- package/lib/agents/movie_google_agent.js +7 -7
- package/lib/agents/movie_replicate_agent.d.ts +2 -16
- package/lib/agents/movie_replicate_agent.js +3 -3
- package/lib/agents/tts_google_agent.d.ts +9 -1
- package/lib/agents/tts_google_agent.js +2 -2
- package/lib/agents/tts_nijivoice_agent.js +1 -1
- package/lib/agents/tts_openai_agent.d.ts +13 -1
- package/lib/agents/tts_openai_agent.js +2 -2
- package/lib/cli/helpers.js +7 -7
- package/lib/methods/index.d.ts +1 -0
- package/lib/methods/index.js +1 -0
- package/lib/methods/mulmo_beat.d.ts +6 -0
- package/lib/methods/mulmo_beat.js +21 -0
- package/lib/methods/mulmo_presentation_style.d.ts +2 -0
- package/lib/methods/mulmo_presentation_style.js +24 -0
- package/lib/methods/mulmo_studio_context.js +3 -0
- package/lib/tools/story_to_script.js +2 -2
- package/lib/types/agent.d.ts +55 -0
- package/lib/types/agent.js +3 -0
- package/lib/types/schema.d.ts +317 -74
- package/lib/types/schema.js +9 -2
- package/lib/types/type.d.ts +3 -2
- package/lib/utils/context.d.ts +12 -2
- package/lib/utils/context.js +1 -0
- package/lib/utils/ffmpeg_utils.d.ts +1 -1
- package/lib/utils/ffmpeg_utils.js +1 -1
- package/lib/utils/file.js +4 -4
- package/lib/utils/filters.js +3 -4
- package/lib/utils/markdown.js +1 -1
- package/lib/utils/preprocess.d.ts +8 -2
- package/lib/utils/string.js +5 -5
- package/lib/utils/utils.d.ts +8 -1
- package/lib/utils/utils.js +51 -36
- package/package.json +7 -6
- package/scripts/templates/html.json +42 -0
- package/scripts/templates/image_refs.json +35 -0
package/lib/types/schema.js
CHANGED
@@ -153,7 +153,13 @@ const mulmoMidiMediaSchema = z
   .strict();
 export const mulmoAudioAssetSchema = z.union([mulmoAudioMediaSchema, mulmoMidiMediaSchema]);
 const imageIdSchema = z.string();
-export const
+export const mulmoImagePromptMediaSchema = z
+  .object({
+    type: z.literal("imagePrompt"),
+    prompt: z.string(),
+  })
+  .strict();
+export const mulmoImageParamsImagesSchema = z.record(imageIdSchema, z.union([mulmoImageMediaSchema, mulmoImagePromptMediaSchema]));
 export const mulmoFillOptionSchema = z
   .object({
     style: z.enum(["aspectFit", "aspectFill"]).default("aspectFit"),
@@ -329,7 +335,7 @@ export const mulmoReferenceSchema = z.object({
   url: URLStringSchema,
   title: z.string().optional(),
   description: z.string().optional(),
-  type: z.enum(["article", "paper", "image", "video", "audio"]).default("article"),
+  type: z.union([z.enum(["article", "paper", "image", "video", "audio"]), z.string()]).default("article"),
 });
 export const mulmoScriptSchema = mulmoPresentationStyleSchema
   .extend({
@@ -378,6 +384,7 @@ export const mulmoSessionStateSchema = z.object({
     multiLingual: z.record(z.number().int(), z.boolean()),
     caption: z.record(z.number().int(), z.boolean()),
     html: z.record(z.number().int(), z.boolean()),
+    imageReference: z.record(z.number().int(), z.boolean()),
   }),
 });
 export const mulmoStudioSchema = z
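
The new imagePrompt media type lets an imageParams.images entry describe a character by prompt instead of pointing at an existing image file or URL. A minimal sketch of what the widened union now accepts; the schema below restates the definition from the hunk above for illustration rather than importing it from the package:

import { z } from "zod";

// Restated from the diff above, for illustration only.
const mulmoImagePromptMediaSchema = z
  .object({
    type: z.literal("imagePrompt"),
    prompt: z.string(),
  })
  .strict();

// An entry that mulmoImageParamsImagesSchema now accepts alongside plain image media.
const hero = mulmoImagePromptMediaSchema.parse({
  type: "imagePrompt",
  prompt: "A cheerful robot wearing a chef hat, watercolor style",
});
console.log(hero.prompt);
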
package/lib/types/type.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { langSchema, localizedTextSchema, mulmoBeatSchema, mulmoScriptSchema, mulmoStudioSchema, mulmoStudioBeatSchema, mulmoStoryboardSchema, mulmoStoryboardSceneSchema, mulmoStudioMultiLingualSchema, mulmoStudioMultiLingualDataSchema, speakerDictionarySchema, mulmoImageParamsSchema, mulmoImageParamsImagesSchema, mulmoFillOptionSchema, mulmoMovieParamsSchema, mulmoSpeechParamsSchema, textSlideParamsSchema, speechOptionsSchema, speakerDataSchema, mulmoCanvasDimensionSchema, mulmoScriptTemplateSchema, mulmoScriptTemplateFileSchema, text2ImageProviderSchema, text2HtmlImageProviderSchema, text2MovieProviderSchema, text2SpeechProviderSchema, mulmoPresentationStyleSchema, multiLingualTextsSchema, mulmoMermaidMediaSchema, mulmoTextSlideMediaSchema, mulmoMarkdownMediaSchema, mulmoImageMediaSchema, mulmoChartMediaSchema, mediaSourceSchema, mulmoSessionStateSchema, mulmoOpenAIImageModelSchema, mulmoGoogleImageModelSchema, mulmoGoogleMovieModelSchema, mulmoReplicateMovieModelSchema } from "./schema.js";
+import { langSchema, localizedTextSchema, mulmoBeatSchema, mulmoScriptSchema, mulmoStudioSchema, mulmoStudioBeatSchema, mulmoStoryboardSchema, mulmoStoryboardSceneSchema, mulmoStudioMultiLingualSchema, mulmoStudioMultiLingualDataSchema, speakerDictionarySchema, mulmoImageParamsSchema, mulmoImageParamsImagesSchema, mulmoFillOptionSchema, mulmoMovieParamsSchema, mulmoSpeechParamsSchema, textSlideParamsSchema, speechOptionsSchema, speakerDataSchema, mulmoCanvasDimensionSchema, mulmoScriptTemplateSchema, mulmoScriptTemplateFileSchema, text2ImageProviderSchema, text2HtmlImageProviderSchema, text2MovieProviderSchema, text2SpeechProviderSchema, mulmoPresentationStyleSchema, multiLingualTextsSchema, mulmoMermaidMediaSchema, mulmoTextSlideMediaSchema, mulmoMarkdownMediaSchema, mulmoImageMediaSchema, mulmoChartMediaSchema, mediaSourceSchema, mulmoSessionStateSchema, mulmoOpenAIImageModelSchema, mulmoGoogleImageModelSchema, mulmoGoogleMovieModelSchema, mulmoReplicateMovieModelSchema, mulmoImagePromptMediaSchema } from "./schema.js";
 import { pdf_modes, pdf_sizes, storyToScriptGenerateMode } from "../utils/const.js";
 import { LLM } from "../utils/utils.js";
 import { z } from "zod";
@@ -35,6 +35,7 @@ export type MulmoOpenAIImageModel = z.infer<typeof mulmoOpenAIImageModelSchema>;
 export type MulmoGoogleImageModel = z.infer<typeof mulmoGoogleImageModelSchema>;
 export type MulmoGoogleMovieModel = z.infer<typeof mulmoGoogleMovieModelSchema>;
 export type MulmoReplicateMovieModel = z.infer<typeof mulmoReplicateMovieModelSchema>;
+export type MulmoImagePromptMedia = z.infer<typeof mulmoImagePromptMediaSchema>;
 export type MulmoTextSlideMedia = z.infer<typeof mulmoTextSlideMediaSchema>;
 export type MulmoMarkdownMedia = z.infer<typeof mulmoMarkdownMediaSchema>;
 export type MulmoImageMedia = z.infer<typeof mulmoImageMediaSchema>;
@@ -90,7 +91,7 @@ export type Text2HtmlAgentInfo = {
 export type BeatMediaType = "movie" | "image";
 export type StoryToScriptGenerateMode = (typeof storyToScriptGenerateMode)[keyof typeof storyToScriptGenerateMode];
 export type SessionType = "audio" | "image" | "video" | "multiLingual" | "caption" | "pdf";
-export type BeatSessionType = "audio" | "image" | "multiLingual" | "caption" | "movie" | "html";
+export type BeatSessionType = "audio" | "image" | "multiLingual" | "caption" | "movie" | "html" | "imageReference";
 export type SessionProgressEvent = {
   kind: "session";
   sessionType: SessionType;
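
BeatSessionType gains an "imageReference" member, so per-beat progress can be reported for reference-image generation separately from regular image generation. A small self-contained sketch of a consumer branching on it; the union is copied from the hunk above and reportBeatProgress is a hypothetical helper, not a package export:

type BeatSessionType = "audio" | "image" | "multiLingual" | "caption" | "movie" | "html" | "imageReference";

// Hypothetical consumer of per-beat progress events.
const reportBeatProgress = (sessionType: BeatSessionType, index: number, done: boolean) => {
  const label = sessionType === "imageReference" ? "reference image" : sessionType;
  console.log(`beat ${index}: ${label} ${done ? "completed" : "started"}`);
};

reportBeatProgress("imageReference", 0, true);
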
package/lib/utils/context.d.ts
CHANGED
@@ -193,6 +193,7 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 type: "midi";
 source: string;
 } | undefined;
+imagePrompt?: string | undefined;
 description?: string | undefined;
 imageParams?: {
 provider: "openai" | "google";
@@ -214,6 +215,9 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 path: string;
 kind: "path";
 };
+} | {
+type: "imagePrompt";
+prompt: string;
 }> | undefined;
 } | undefined;
 audioParams?: {
@@ -236,7 +240,6 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 lang?: string | undefined;
 } | undefined;
 imageNames?: string[] | undefined;
-imagePrompt?: string | undefined;
 moviePrompt?: string | undefined;
 htmlPrompt?: {
 prompt: string;
@@ -268,6 +271,9 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 path: string;
 kind: "path";
 };
+} | {
+type: "imagePrompt";
+prompt: string;
 }> | undefined;
 } | undefined;
 movieParams?: {
@@ -293,7 +299,7 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 lang?: string | undefined;
 } | undefined;
 references?: {
-type:
+type: string;
 url: string;
 title?: string | undefined;
 description?: string | undefined;
@@ -322,6 +328,7 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 multiLingual: {};
 caption: {};
 html: {};
+imageReference: {};
 };
 };
 presentationStyle: {
@@ -387,6 +394,9 @@ export declare const initializeContextFromFiles: (files: FileObject, raiseError:
 path: string;
 kind: "path";
 };
+} | {
+type: "imagePrompt";
+prompt: string;
 }> | undefined;
 } | undefined;
 movieParams?: {
package/lib/utils/ffmpeg_utils.d.ts
CHANGED
@@ -12,4 +12,4 @@ export declare const FfmpegContextPushFormattedAudio: (context: FfmpegContext, s
 export declare const FfmpegContextInputFormattedAudio: (context: FfmpegContext, input: string, duration?: number | undefined, inputOptions?: string[]) => string;
 export declare const FfmpegContextGenerateOutput: (context: FfmpegContext, output: string, options?: string[]) => Promise<number>;
 export declare const ffmpegGetMediaDuration: (filePath: string) => Promise<number>;
-export declare const extractImageFromMovie: (movieFile: string, imagePath: string) => Promise<
+export declare const extractImageFromMovie: (movieFile: string, imagePath: string) => Promise<object>;

package/lib/utils/ffmpeg_utils.js
CHANGED
@@ -77,7 +77,7 @@ export const extractImageFromMovie = (movieFile, imagePath) => {
     ffmpeg(movieFile)
       .outputOptions(["-frames:v 1"])
       .output(imagePath)
-      .on("end", () => resolve())
+      .on("end", () => resolve({}))
      .on("error", (err) => reject(err))
       .run();
   });
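
extractImageFromMovie now resolves with an empty object rather than void, which is what allows the declared return type to become Promise<object>. A minimal sketch of the same wrapper pattern, assuming fluent-ffmpeg is installed; extractFirstFrame is a hypothetical stand-in for the package helper:

import ffmpeg from "fluent-ffmpeg";

const extractFirstFrame = (movieFile: string, imagePath: string): Promise<object> =>
  new Promise((resolve, reject) => {
    ffmpeg(movieFile)
      .outputOptions(["-frames:v 1"]) // keep only the first video frame
      .output(imagePath)
      .on("end", () => resolve({})) // resolve with an object so Promise<object> holds
      .on("error", (err) => reject(err))
      .run();
  });
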
package/lib/utils/file.js
CHANGED
@@ -29,9 +29,9 @@ export function readMulmoScriptFile(arg2, errorMessage) {
       fileName: parsedPath.name,
     };
   }
-  catch (
+  catch (error) {
     if (errorMessage) {
-      GraphAILogger.info("read file format is broken.");
+      GraphAILogger.info("read file format is broken.", error);
     }
     return null;
   }
@@ -159,8 +159,8 @@ export const readTemplatePrompt = (templateName) => {
   const template = JSON.parse(templateData);
   const script = (() => {
     if (template.scriptName) {
-      const
-      return { ...
+      const scriptData = readScriptTemplateFile(template.scriptName);
+      return { ...scriptData, ...(template.presentationStyle ?? {}) };
     }
     return undefined;
   })();
package/lib/utils/filters.js
CHANGED
@@ -7,17 +7,16 @@ import { writingMessage } from "./file.js";
 import { text2hash } from "./utils.js";
 import { MulmoStudioContextMethods } from "../methods/mulmo_studio_context.js";
 export const fileCacheAgentFilter = async (context, next) => {
-  const {
-  const { file, force, mulmoContext, index, sessionType } = namedInputs;
+  const { force, file, index, mulmoContext, sessionType } = context.namedInputs.cache;
   const shouldUseCache = async () => {
-    if (force) {
+    if (force && force.some((element) => element)) {
       return false;
     }
     try {
       await fsPromise.access(file);
       return true;
     }
-    catch
+    catch {
       return false;
     }
   };
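
The force flag consumed by fileCacheAgentFilter is now an array of booleans, and the cache is bypassed when any entry is truthy. A self-contained sketch of that check under Node; shouldUseCache here is a standalone illustration of the logic rather than the package's internal closure:

import fsPromise from "node:fs/promises";

const shouldUseCache = async (file: string, force?: boolean[]): Promise<boolean> => {
  if (force && force.some((element) => element)) {
    return false; // any truthy entry forces regeneration
  }
  try {
    await fsPromise.access(file); // the cached artifact already exists
    return true;
  } catch {
    return false;
  }
};
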
package/lib/utils/markdown.js
CHANGED
@@ -18,7 +18,7 @@ export const renderHTMLToImage = async (html, outputPath, width, height, isMerma
     }, { timeout: 20000 });
   }
   // Step 3: Capture screenshot of the page (which contains the Markdown-rendered HTML)
-  await page.screenshot({ path: outputPath, omitBackground
+  await page.screenshot({ path: outputPath, omitBackground });
   await browser.close();
 };
 export const renderMarkdownToImage = async (markdown, style, outputPath, width, height) => {
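
renderHTMLToImage's screenshot call passes omitBackground through to Puppeteer so transparent backgrounds survive the capture. A minimal sketch of the same flow, assuming puppeteer is installed; renderToImage is a hypothetical stand-in for the package helper:

import puppeteer from "puppeteer";

const renderToImage = async (html: string, outputPath: string, omitBackground: boolean) => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.setContent(html, { waitUntil: "networkidle0" });
  await page.screenshot({ path: outputPath, omitBackground }); // transparent background when omitBackground is true
  await browser.close();
};
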
package/lib/utils/preprocess.d.ts
CHANGED
@@ -188,6 +188,7 @@ export declare const createOrUpdateStudioData: (_mulmoScript: MulmoScript, curre
 type: "midi";
 source: string;
 } | undefined;
+imagePrompt?: string | undefined;
 description?: string | undefined;
 imageParams?: {
 provider: "openai" | "google";
@@ -209,6 +210,9 @@ export declare const createOrUpdateStudioData: (_mulmoScript: MulmoScript, curre
 path: string;
 kind: "path";
 };
+} | {
+type: "imagePrompt";
+prompt: string;
 }> | undefined;
 } | undefined;
 audioParams?: {
@@ -231,7 +235,6 @@ export declare const createOrUpdateStudioData: (_mulmoScript: MulmoScript, curre
 lang?: string | undefined;
 } | undefined;
 imageNames?: string[] | undefined;
-imagePrompt?: string | undefined;
 moviePrompt?: string | undefined;
 htmlPrompt?: {
 prompt: string;
@@ -263,6 +266,9 @@ export declare const createOrUpdateStudioData: (_mulmoScript: MulmoScript, curre
 path: string;
 kind: "path";
 };
+} | {
+type: "imagePrompt";
+prompt: string;
 }> | undefined;
 } | undefined;
 movieParams?: {
@@ -288,7 +294,7 @@ export declare const createOrUpdateStudioData: (_mulmoScript: MulmoScript, curre
 lang?: string | undefined;
 } | undefined;
 references?: {
-type:
+type: string;
 url: string;
 title?: string | undefined;
 description?: string | undefined;
package/lib/utils/string.js
CHANGED
@@ -20,18 +20,18 @@ export const recursiveSplitJa = (text) => {
   const delimiters = ["。", "?", "!", "、"];
   return delimiters
     .reduce((textData, delimiter) => {
-      return textData.map((
+      return textData.map((textInner) => splitIntoSentencesJa(textInner, delimiter, 7)).flat(1);
     }, [text])
     .flat(1);
 };
 export function replacePairsJa(str, replacements) {
-  replacements.
+  return replacements.reduce((tmp, current) => {
+    const { from, to } = current;
     // Escape any special regex characters in the 'from' string.
     const escapedFrom = from.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
     const regex = new RegExp(escapedFrom, "g");
-
-  });
-  return str;
+    return tmp.replace(regex, to);
+  }, str);
 }
 export const replacementsJa = [
   { from: "Anthropic", to: "アンスロピック" },
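
replacePairsJa now threads the string through a reduce, applying each { from, to } pair in turn with a regex-escaped search term. A self-contained sketch of the same pattern; replacePairs is an illustrative standalone function, not the package export:

const replacePairs = (str: string, replacements: { from: string; to: string }[]): string =>
  replacements.reduce((tmp, { from, to }) => {
    // Escape any special regex characters in the 'from' string before building the pattern.
    const escapedFrom = from.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    return tmp.replace(new RegExp(escapedFrom, "g"), to);
  }, str);

console.log(replacePairs("Anthropic ships a new model", [{ from: "Anthropic", to: "アンスロピック" }]));
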
package/lib/utils/utils.d.ts
CHANGED
@@ -18,5 +18,12 @@ export declare const text2hash: (input: string) => string;
 export declare const localizedText: (beat: MulmoBeat, multiLingualData?: MulmoStudioMultiLingualData, lang?: string) => string;
 export declare const sleep: (milliseconds: number) => Promise<unknown>;
 export declare function userAssert(condition: boolean, message: string): asserts condition;
-export declare const settings2GraphAIConfig: (settings?: Record<string, string>) => ConfigDataDictionary<DefaultConfigData>;
+export declare const settings2GraphAIConfig: (settings?: Record<string, string>, env?: Record<string, string | undefined>) => ConfigDataDictionary<DefaultConfigData>;
 export declare const getExtention: (contentType: string | null, url: string) => string;
+type Primitive = string | number | boolean | symbol | bigint;
+type CleanableValue = Primitive | null | undefined | CleanableObject | CleanableValue[];
+type CleanableObject = {
+    [key: string]: CleanableValue;
+};
+export declare const deepClean: <T extends CleanableValue>(input: T) => T | undefined;
+export {};
package/lib/utils/utils.js
CHANGED
@@ -60,47 +60,42 @@ export function userAssert(condition, message) {
     throw new Error(message);
   }
 }
-export const settings2GraphAIConfig = (settings) => {
-  const
-  }
-  if (settings.ELEVENLABS_API_KEY) {
-    config.ttsElevenlabsAgent = {
-      apiKey: settings.ELEVENLABS_API_KEY,
-    };
-  }
+export const settings2GraphAIConfig = (settings, env) => {
+  const getKey = (prefix, key) => {
+    return settings?.[`${prefix}_${key}`] ?? settings?.[key] ?? env?.[`${prefix}_${key}`] ?? env?.[key];
+  };
+  const config = {
+    openAIAgent: {
+      apiKey: getKey("LLM", "OPENAI_API_KEY"),
+      baseURL: getKey("LLM", "OPENAI_BASE_URL"),
+    },
+    ttsOpenaiAgent: {
+      apiKey: getKey("TTS", "OPENAI_API_KEY"),
+      baseURL: getKey("TTS", "OPENAI_BASE_URL"),
+    },
+    imageOpenaiAgent: {
+      apiKey: getKey("IMAGE", "OPENAI_API_KEY"),
+      baseURL: getKey("IMAGE", "OPENAI_BASE_URL"),
+    },
+    anthropicAgent: {
+      apiKey: getKey("LLM", "ANTHROPIC_API_TOKEN"),
+    },
+    movieReplicateAgent: {
+      apiKey: getKey("MOVIE", "REPLICATE_API_TOKEN"),
+    },
+    ttsNijivoiceAgent: {
+      apiKey: getKey("TTS", "NIJIVOICE_API_KEY"),
+    },
+    ttsElevenlabsAgent: {
+      apiKey: getKey("TTS", "ELEVENLABS_API_KEY"),
+    },
     // TODO
     // browserlessAgent
     // ttsGoogleAgent
     // geminiAgent, groqAgent for tool
     // TAVILY_API_KEY ( for deep research)
-  }
-  return config;
+  };
+  return deepClean(config) ?? {};
 };
 export const getExtention = (contentType, url) => {
   if (contentType?.includes("jpeg") || contentType?.includes("jpg")) {
@@ -116,3 +111,23 @@ export const getExtention = (contentType, url) => {
   }
   return "png"; // default
 };
+export const deepClean = (input) => {
+  if (input === null || input === undefined || input === "") {
+    return undefined;
+  }
+  if (Array.isArray(input)) {
+    const cleanedArray = input.map(deepClean).filter((v) => v !== undefined);
+    return cleanedArray.length > 0 ? cleanedArray : undefined;
+  }
+  if (typeof input === "object") {
+    const result = {};
+    for (const [key, value] of Object.entries(input)) {
+      const cleaned = deepClean(value);
+      if (cleaned !== undefined) {
+        result[key] = cleaned;
+      }
+    }
+    return Object.keys(result).length > 0 ? result : undefined;
+  }
+  return input;
+};
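
deepClean is the new helper behind settings2GraphAIConfig: it recursively drops empty strings, null and undefined values, and any arrays or objects that end up empty, so agents without credentials vanish from the generated GraphAI config. A TypeScript transliteration of the implementation above with a usage line; the explicit types are added here only for illustration:

type CleanableValue = string | number | boolean | null | undefined | CleanableValue[] | { [key: string]: CleanableValue };

const deepClean = (input: CleanableValue): CleanableValue | undefined => {
  if (input === null || input === undefined || input === "") {
    return undefined; // empty scalars are pruned
  }
  if (Array.isArray(input)) {
    const cleanedArray = input.map(deepClean).filter((v) => v !== undefined) as CleanableValue[];
    return cleanedArray.length > 0 ? cleanedArray : undefined; // drop arrays that became empty
  }
  if (typeof input === "object") {
    const result: { [key: string]: CleanableValue } = {};
    for (const [key, value] of Object.entries(input)) {
      const cleaned = deepClean(value);
      if (cleaned !== undefined) {
        result[key] = cleaned;
      }
    }
    return Object.keys(result).length > 0 ? result : undefined; // drop objects that became empty
  }
  return input;
};

console.log(deepClean({ openAIAgent: { apiKey: undefined }, anthropicAgent: { apiKey: "sk-placeholder" } }));
// -> { anthropicAgent: { apiKey: "sk-placeholder" } }
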
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "mulmocast",
-  "version": "0.1.2",
+  "version": "0.1.3",
   "description": "",
   "type": "module",
   "main": "lib/index.js",
@@ -59,7 +59,7 @@
   },
   "homepage": "https://github.com/receptron/mulmocast-cli#readme",
   "dependencies": {
-    "@google-cloud/text-to-speech": "^6.
+    "@google-cloud/text-to-speech": "^6.2.0",
     "@graphai/anthropic_agent": "^2.0.5",
     "@graphai/browserless_agent": "^2.0.1",
     "@graphai/gemini_agent": "^2.0.0",
@@ -69,18 +69,18 @@
     "@graphai/stream_agent_filter": "^2.0.2",
     "@graphai/vanilla": "^2.0.5",
     "@graphai/vanilla_node_agents": "^2.0.1",
-    "@modelcontextprotocol/sdk": "^1.
+    "@modelcontextprotocol/sdk": "^1.15.1",
     "@tavily/core": "^0.5.9",
     "canvas": "^3.1.2",
     "clipboardy": "^4.0.0",
-    "dotenv": "^17.
+    "dotenv": "^17.2.0",
     "fluent-ffmpeg": "^2.1.3",
     "google-auth-library": "^9.15.1",
     "graphai": "^2.0.12",
     "inquirer": "^12.7.0",
     "marked": "^16.0.0",
     "ora": "^8.2.0",
-    "puppeteer": "^24.12.
+    "puppeteer": "^24.12.1",
     "replicate": "^1.0.1",
     "yaml": "^2.8.0",
     "yargs": "^18.0.0",
@@ -93,9 +93,10 @@
     "@receptron/test_utils": "^2.0.0",
     "@types/fluent-ffmpeg": "^2.1.26",
     "@types/yargs": "^17.0.33",
-    "eslint": "^9.
+    "eslint": "^9.31.0",
     "eslint-config-prettier": "^10.1.5",
     "eslint-plugin-prettier": "^5.5.1",
+    "eslint-plugin-sonarjs": "^3.0.4",
     "prettier": "^3.6.2",
     "ts-node": "^10.9.2",
     "tsx": "^4.20.3",
package/scripts/templates/html.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "$mulmocast": {
+    "version": "1.0",
+    "credit": "closing"
+  },
+  "references": [
+    {
+      "url": "https://www.somegreatwebsite.com/article/123",
+      "title": "Title of the article we are referencing",
+      "type": "[TYPE OF ARTICLE: article, paper, image, video, audio]"
+    }
+  ],
+  "title": "[TITLE: Brief, engaging title for the topic]",
+  "htmlImageParams": {
+    "provider": "anthropic",
+    "model": "claude-3-7-sonnet-20250219"
+  },
+  "lang": "en",
+  "beats": [
+    {
+      "text": "[NARRATION: Narration for the beat.]",
+      "htmlPrompt": {
+        "prompt": "[PROMPT to create appropriate HTML page for the beat.]"
+      }
+    },
+    {
+      "text": "[NARRATION: Narration for the beat.]",
+      "htmlPrompt": {
+        "prompt": "[PROMPT to create appropriate HTML page for the beat with the data.]",
+        "data": {
+          "description": "DATA TO BE PRESENTED IN THIS BEAT (in any format)]",
+          "net_income": {
+            "Q2 FY2024": 320,
+            "Q3 FY2024": 333,
+            "Q4 FY2024": 350
+          },
+          "unit": "USD (Million)"
+        }
+      }
+    }
+  ]
+}
package/scripts/templates/image_refs.json
ADDED
@@ -0,0 +1,35 @@
+{
+  "$mulmocast": {
+    "version": "1.0"
+  },
+  "title": "[TITLE OF THE PRESENTAITON OR STORY]",
+  "imageParams": {
+    "images": {
+      "[CHARACTER_ID_1]": {
+        "type": "imagePrompt",
+        "prompt": "[IMAGE PROMPT FOR THIS CHARACTER]"
+      },
+      "[CHARACTER_ID_2]": {
+        "type": "imagePrompt",
+        "prompt": "[IMAGE PROMPT FOR THIS CHARACTER]"
+      }
+    }
+  },
+  "beats": [
+    {
+      "text": "[NARRATION FOR THIS BEAT]",
+      "imagePrompt": "[IMAGE PROMPT FOR THIS BEAT (with both characters)]",
+      "imageNames": ["[CHARACTER_ID_1]", "[CHARACTER_ID_2]"]
+    },
+    {
+      "text": "[NARRATION FOR THIS BEAT]",
+      "imagePrompt": "[IMAGE PROMPT FOR THIS BEAT (only character 1)]",
+      "imageNames": ["[CHARACTER_ID_1]"]
+    },
+    {
+      "text": "[NARRATION FOR THIS BEAT]",
+      "imagePrompt": "[IMAGE PROMPT FOR THIS BEAT (no character)]",
+      "imageNames": []
+    }
+  ]
+}