mulmocast 0.0.15 → 0.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/assets/templates/text_and_image.json +6 -0
  2. package/assets/templates/text_only.json +6 -0
  3. package/lib/actions/audio.d.ts +3 -1
  4. package/lib/actions/audio.js +82 -44
  5. package/lib/actions/captions.js +1 -1
  6. package/lib/actions/images.d.ts +4 -0
  7. package/lib/actions/images.js +40 -21
  8. package/lib/actions/movie.js +19 -19
  9. package/lib/actions/pdf.js +2 -2
  10. package/lib/actions/translate.js +1 -1
  11. package/lib/agents/add_bgm_agent.js +3 -3
  12. package/lib/agents/combine_audio_files_agent.js +1 -1
  13. package/lib/agents/index.d.ts +2 -1
  14. package/lib/agents/index.js +2 -1
  15. package/lib/agents/tavily_agent.d.ts +15 -0
  16. package/lib/agents/tavily_agent.js +130 -0
  17. package/lib/cli/commands/audio/builder.d.ts +2 -0
  18. package/lib/cli/commands/image/builder.d.ts +2 -0
  19. package/lib/cli/commands/movie/builder.d.ts +2 -0
  20. package/lib/cli/commands/pdf/builder.d.ts +2 -0
  21. package/lib/cli/commands/translate/builder.d.ts +2 -0
  22. package/lib/cli/common.d.ts +2 -0
  23. package/lib/cli/common.js +6 -0
  24. package/lib/cli/helpers.d.ts +5 -1
  25. package/lib/cli/helpers.js +18 -2
  26. package/lib/methods/index.d.ts +1 -1
  27. package/lib/methods/index.js +1 -1
  28. package/lib/methods/mulmo_presentation_style.d.ts +14 -0
  29. package/lib/methods/mulmo_presentation_style.js +70 -0
  30. package/lib/methods/mulmo_studio_context.d.ts +14 -0
  31. package/lib/methods/mulmo_studio_context.js +20 -2
  32. package/lib/tools/deep_research.d.ts +2 -0
  33. package/lib/tools/deep_research.js +265 -0
  34. package/lib/types/schema.d.ts +31 -0
  35. package/lib/types/schema.js +1 -1
  36. package/lib/types/type.d.ts +3 -1
  37. package/lib/utils/ffmpeg_utils.d.ts +1 -0
  38. package/lib/utils/ffmpeg_utils.js +10 -0
  39. package/lib/utils/file.d.ts +1 -3
  40. package/lib/utils/file.js +4 -11
  41. package/lib/utils/preprocess.js +1 -0
  42. package/lib/utils/prompt.d.ts +3 -0
  43. package/lib/utils/prompt.js +52 -0
  44. package/package.json +4 -3
  45. package/assets/music/StarsBeyondEx.mp3 +0 -0

package/lib/tools/deep_research.js ADDED
@@ -0,0 +1,265 @@
+ import "dotenv/config";
+ import { GraphAILogger, GraphAI } from "graphai";
+ import { textInputAgent } from "@graphai/input_agents";
+ import { consoleStreamDataAgentFilter } from "@graphai/stream_agent_filter/node";
+ import { openAIAgent } from "@graphai/openai_agent";
+ import * as agents from "@graphai/vanilla";
+ import tavilySearchAgent from "../agents/tavily_agent.js";
+ import { cliLoadingPlugin } from "../utils/plugins.js";
+ import { searchQueryPrompt, reflectionPrompt, finalAnswerPrompt } from "../utils/prompt.js";
+ const vanillaAgents = agents.default ?? agents;
+ const agentHeader = "\x1b[34m● \x1b[0m\x1b[1mAgent\x1b[0m:\x1b[0m";
+ const graphData = {
+   version: 0.5,
+   nodes: {
+     maxRetries: {
+       value: 0,
+     },
+     userInput: {
+       agent: "textInputAgent",
+       params: {
+         message: "You:",
+         required: true,
+       },
+     },
+     startMessage: {
+       agent: "consoleAgent",
+       inputs: {
+         text: `\n${agentHeader} It takes a few minutes to gather resources, analyze data, and create a report.`,
+         userInput: ":userInput.text",
+       },
+     },
+     deepResearch: {
+       agent: "nestedAgent",
+       inputs: {
+         userInput: ":userInput.text",
+         maxRetries: ":maxRetries",
+         startMessage: ":startMessage",
+       },
+       graph: {
+         loop: {
+           while: ":continue",
+         },
+         nodes: {
+           searchResults: {
+             value: [],
+             update: ":reducer.array",
+           },
+           followUpQueries: {
+             value: [],
+             update: ":reflectionAgent.follow_up_queries",
+           },
+           counter: {
+             value: 0,
+             update: ":counter.add(1)",
+           },
+           searchQueryAgent: {
+             agent: "openAIAgent",
+             inputs: {
+               model: "gpt-4o-mini",
+               system: "You are a professional research assistant. Based on the user's inquiry, return the search query to be used for the search engine.",
+               prompt: searchQueryPrompt("${:userInput}", "${:followUpQueries.join(,)}"),
+             },
+             params: {
+               tool_choice: "auto",
+               tools: [
+                 {
+                   type: "function",
+                   function: {
+                     name: "search_query",
+                     description: "Return the search queries to be used for the search engine.",
+                     parameters: {
+                       type: "object",
+                       properties: {
+                         queries: {
+                           type: "array",
+                           items: {
+                             type: "string",
+                             description: "A search query to be used for the search engine.",
+                           },
+                           description: "An array of search queries to be used for the search engine.",
+                         },
+                         research_topic: {
+                           type: "string",
+                           description: "The topic of the research. This is used to filter the search results.",
+                         },
+                         analysis_plan: {
+                           type: "string",
+                           description: "A detailed plan for analyzing the research topic, including main areas to investigate, key factors, and specific aspects that need deeper investigation",
+                         },
+                       },
+                       required: ["queries", "research_topic", "analysis_plan"],
+                     },
+                   },
+                 },
+               ],
+             },
+             output: {
+               queries: ".tool.arguments.queries",
+               research_topic: ".tool.arguments.research_topic",
+               analysis_plan: ".tool.arguments.analysis_plan",
+             },
+           },
+           logSearchQuery: {
+             agent: "consoleAgent",
+             inputs: {
+               text: "\n" + agentHeader + " ${:searchQueryAgent.analysis_plan}",
+             },
+           },
+           mapSearchAgent: {
+             agent: "mapAgent",
+             inputs: {
+               rows: ":searchQueryAgent.queries",
+             },
+             params: {
+               compositeResult: true,
+             },
+             graph: {
+               nodes: {
+                 tavilySearchAgent: {
+                   agent: "tavilySearchAgent",
+                   inputs: {
+                     query: ":row",
+                   },
+                   params: {
+                     max_results: 3,
+                   },
+                 },
+                 result: {
+                   agent: "copyAgent",
+                   inputs: {
+                     results: ":tavilySearchAgent.results",
+                   },
+                   params: {
+                     namedKey: "results",
+                   },
+                   isResult: true,
+                 },
+                 logSearchStatus: {
+                   agent: ({ result }) => {
+                     GraphAILogger.info(result.map((r) => `- [${r.title}](${r.url})`).join("\n"));
+                   },
+                   inputs: {
+                     result: ":result",
+                   },
+                 },
+               },
+             },
+           },
+           extractResults: {
+             agent: "copyAgent",
+             inputs: {
+               results: ":mapSearchAgent.result.flat()",
+             },
+             params: {
+               namedKey: "results",
+             },
+           },
+           reflectionAgent: {
+             agent: "openAIAgent",
+             inputs: {
+               model: "gpt-4o-mini",
+               system: "You are a professional research assistant. Based on the user's inquiry and the search results, return the sufficiency of information, knowledge gaps, and follow-up queries as a function call.",
+               prompt: reflectionPrompt("${:searchQueryAgent.research_topic}", "${:reducer.array.toJSON()}"),
+             },
+             params: {
+               tool_choice: "auto",
+               tools: [
+                 {
+                   type: "function",
+                   function: {
+                     name: "research_sufficiency",
+                     description: "Return whether the information is sufficient, any knowledge gaps, and follow-up queries for the user's inquiry.",
+                     parameters: {
+                       type: "object",
+                       properties: {
+                         is_sufficient: {
+                           type: "boolean",
+                           description: "Whether the information is sufficient",
+                         },
+                         knowledge_gap: {
+                           type: "string",
+                           description: "Summary of missing knowledge or information",
+                         },
+                         follow_up_queries: {
+                           type: "array",
+                           items: {
+                             type: "string",
+                             description: "Additional questions to investigate (up to 3 maximum)",
+                           },
+                         },
+                       },
+                       required: ["is_sufficient", "knowledge_gap", "follow_up_queries"],
+                     },
+                   },
+                 },
+               ],
+             },
+             output: {
+               is_sufficient: ".tool.arguments.is_sufficient",
+               knowledge_gap: ".tool.arguments.knowledge_gap",
+               follow_up_queries: ".tool.arguments.follow_up_queries",
+             },
+           },
+           reducer: {
+             agent: "pushAgent",
+             inputs: {
+               array: ":searchResults",
+               items: ":extractResults",
+             },
+           },
+           continue: {
+             agent: ({ is_sufficient, knowledge_gap, counter, maxRetries }) => {
+               if (is_sufficient || counter >= maxRetries - 1) {
+                 GraphAILogger.info(`\n${agentHeader} All necessary information has been gathered. Preparing comprehensive report.`);
+                 return false;
+               }
+               GraphAILogger.info(`\n${agentHeader} ${knowledge_gap}`);
+               return true;
+             },
+             inputs: {
+               is_sufficient: ":reflectionAgent.is_sufficient",
+               knowledge_gap: ":reflectionAgent.knowledge_gap",
+               counter: ":counter",
+               maxRetries: ":maxRetries",
+             },
+           },
+           finalAnswer: {
+             agent: "openAIAgent",
+             unless: ":continue",
+             inputs: {
+               model: "gpt-4o-mini",
+               system: "You are a professional research assistant. Based on the user's inquiry and the search results, return the final answer.",
+               prompt: finalAnswerPrompt("${:userInput}", "${:searchResults.toJSON()}", "${:searchQueryAgent.research_topic}"),
+             },
+             isResult: true,
+           },
+         },
+       },
+     },
+     writeResult: {
+       agent: "consoleAgent",
+       inputs: {
+         text: "\n------Answer------\n\n${:deepResearch.finalAnswer.text}\n",
+       },
+     },
+   },
+ };
+ export const deepResearch = async () => {
+   const agentFilters = [
+     {
+       name: "consoleStreamDataAgentFilter",
+       agent: consoleStreamDataAgentFilter,
+       nodeIds: ["chatAgent"],
+     },
+   ];
+   const graph = new GraphAI(graphData, { ...vanillaAgents, openAIAgent, textInputAgent, tavilySearchAgent }, { agentFilters });
+   graph.injectValue("maxRetries", 3);
+   graph.registerCallback(cliLoadingPlugin({ nodeId: "searchQueryAgent", message: "Generating search queries..." }));
+   graph.registerCallback(cliLoadingPlugin({ nodeId: "reflectionAgent", message: "Analyzing search results..." }));
+   graph.registerCallback(cliLoadingPlugin({ nodeId: "tavilySearchAgent", message: "Searching..." }));
+   graph.registerCallback(cliLoadingPlugin({ nodeId: "finalAnswer", message: "Generating final answer..." }));
+   GraphAILogger.info(`${agentHeader} What would you like to know?\n`);
+   await graph.run();
+ };
+ deepResearch();
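
Note: deep_research.js loads credentials via dotenv/config; presumably OPENAI_API_KEY (for openAIAgent) and TAVILY_API_KEY (for the Tavily search agent) must be set in the environment or .env. With the deep_research script added to package.json below, the tool can be launched from a checkout as:

    npm run deep_research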

package/lib/types/schema.d.ts CHANGED
@@ -49,6 +49,37 @@ export declare const speechOptionsSchema: z.ZodObject<{
      instruction?: string | undefined;
  }>;
  export declare const text2SpeechProviderSchema: z.ZodDefault<z.ZodUnion<[z.ZodLiteral<"openai">, z.ZodLiteral<"nijivoice">, z.ZodLiteral<"google">, z.ZodLiteral<"elevenlabs">]>>;
+ export declare const speakerDataSchema: z.ZodObject<{
+     displayName: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
+     voiceId: z.ZodString;
+     speechOptions: z.ZodOptional<z.ZodObject<{
+         speed: z.ZodOptional<z.ZodNumber>;
+         instruction: z.ZodOptional<z.ZodString>;
+     }, "strict", z.ZodTypeAny, {
+         speed?: number | undefined;
+         instruction?: string | undefined;
+     }, {
+         speed?: number | undefined;
+         instruction?: string | undefined;
+     }>>;
+     provider: z.ZodOptional<z.ZodDefault<z.ZodUnion<[z.ZodLiteral<"openai">, z.ZodLiteral<"nijivoice">, z.ZodLiteral<"google">, z.ZodLiteral<"elevenlabs">]>>>;
+ }, "strict", z.ZodTypeAny, {
+     voiceId: string;
+     displayName?: Record<string, string> | undefined;
+     speechOptions?: {
+         speed?: number | undefined;
+         instruction?: string | undefined;
+     } | undefined;
+     provider?: "openai" | "nijivoice" | "google" | "elevenlabs" | undefined;
+ }, {
+     voiceId: string;
+     displayName?: Record<string, string> | undefined;
+     speechOptions?: {
+         speed?: number | undefined;
+         instruction?: string | undefined;
+     } | undefined;
+     provider?: "openai" | "nijivoice" | "google" | "elevenlabs" | undefined;
+ }>;
  export declare const speakerDictionarySchema: z.ZodRecord<z.ZodString, z.ZodObject<{
      displayName: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
      voiceId: z.ZodString;

package/lib/types/schema.js CHANGED
@@ -21,7 +21,7 @@ export const speechOptionsSchema = z
      .strict();
  const speakerIdSchema = z.string();
  export const text2SpeechProviderSchema = z.union([z.literal("openai"), z.literal("nijivoice"), z.literal("google"), z.literal("elevenlabs")]).default("openai");
- const speakerDataSchema = z
+ export const speakerDataSchema = z
      .object({
      displayName: z.record(langSchema, z.string()).optional(),
      voiceId: z.string(),
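
With speakerDataSchema now exported, a single speaker entry can be validated directly. A minimal sketch; the deep import path is an assumption based on the published lib layout, not a documented entry point:

    import { speakerDataSchema } from "mulmocast/lib/types/schema.js"; // import path assumed

    // voiceId is the only required field per the schema above.
    const speaker = speakerDataSchema.parse({
      voiceId: "shimmer",
      provider: "openai",
      speechOptions: { speed: 1.0 },
    });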

package/lib/types/type.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { langSchema, localizedTextSchema, mulmoBeatSchema, mulmoScriptSchema, mulmoStudioSchema, mulmoStudioBeatSchema, mulmoStoryboardSchema, mulmoStoryboardSceneSchema, mulmoStudioMultiLingualSchema, mulmoStudioMultiLingualDataSchema, speakerDictionarySchema, mulmoImageParamsSchema, mulmoMovieParamsSchema, mulmoSpeechParamsSchema, textSlideParamsSchema, speechOptionsSchema, mulmoCanvasDimensionSchema, mulmoScriptTemplateSchema, mulmoScriptTemplateFileSchema, text2ImageProviderSchema, text2MovieProviderSchema, text2SpeechProviderSchema, mulmoPresentationStyleSchema, multiLingualTextsSchema, mulmoMermaidMediaSchema, mulmoTextSlideMediaSchema, mulmoMarkdownMediaSchema, mulmoImageMediaSchema, mulmoChartMediaSchema, mediaSourceSchema, mulmoSessionStateSchema } from "./schema.js";
+ import { langSchema, localizedTextSchema, mulmoBeatSchema, mulmoScriptSchema, mulmoStudioSchema, mulmoStudioBeatSchema, mulmoStoryboardSchema, mulmoStoryboardSceneSchema, mulmoStudioMultiLingualSchema, mulmoStudioMultiLingualDataSchema, speakerDictionarySchema, mulmoImageParamsSchema, mulmoMovieParamsSchema, mulmoSpeechParamsSchema, textSlideParamsSchema, speechOptionsSchema, speakerDataSchema, mulmoCanvasDimensionSchema, mulmoScriptTemplateSchema, mulmoScriptTemplateFileSchema, text2ImageProviderSchema, text2MovieProviderSchema, text2SpeechProviderSchema, mulmoPresentationStyleSchema, multiLingualTextsSchema, mulmoMermaidMediaSchema, mulmoTextSlideMediaSchema, mulmoMarkdownMediaSchema, mulmoImageMediaSchema, mulmoChartMediaSchema, mediaSourceSchema, mulmoSessionStateSchema } from "./schema.js";
  import { pdf_modes, pdf_sizes, storyToScriptGenerateMode } from "../utils/const.js";
  import { LLM } from "../utils/utils.js";
  import { z } from "zod";
@@ -7,6 +7,7 @@ export type MulmoBeat = z.infer<typeof mulmoBeatSchema>;
  export type SpeakerDictonary = z.infer<typeof speakerDictionarySchema>;
  export type MulmoSpeechParams = z.infer<typeof mulmoSpeechParamsSchema>;
  export type SpeechOptions = z.infer<typeof speechOptionsSchema>;
+ export type SpeakerData = z.infer<typeof speakerDataSchema>;
  export type MulmoImageParams = z.infer<typeof mulmoImageParamsSchema>;
  export type TextSlideParams = z.infer<typeof textSlideParamsSchema>;
  export type Text2ImageProvider = z.infer<typeof text2ImageProviderSchema>;
@@ -49,6 +50,7 @@ export type MulmoStudioContext = {
      force: boolean;
      caption?: string;
      sessionState: MulmoSessionState;
+     presentationStyle: MulmoPresentationStyle;
  };
  export type ScriptingParams = {
      urls: string[];

package/lib/utils/ffmpeg_utils.d.ts CHANGED
@@ -10,3 +10,4 @@ export declare const FfmpegContextPushFormattedAudio: (context: FfmpegContext, s
  export declare const FfmpegContextInputFormattedAudio: (context: FfmpegContext, input: string, duration?: number | undefined) => string;
  export declare const FfmpegContextGenerateOutput: (context: FfmpegContext, output: string, options?: string[]) => Promise<number>;
  export declare const ffmpegGetMediaDuration: (filePath: string) => Promise<number>;
+ export declare const extractImageFromMovie: (movieFile: string, imagePath: string) => Promise<void>;

package/lib/utils/ffmpeg_utils.js CHANGED
@@ -61,3 +61,13 @@ export const ffmpegGetMediaDuration = (filePath) => {
          });
      });
  };
+ export const extractImageFromMovie = (movieFile, imagePath) => {
+     return new Promise((resolve, reject) => {
+         ffmpeg(movieFile)
+             .outputOptions(["-frames:v 1"])
+             .output(imagePath)
+             .on("end", () => resolve())
+             .on("error", (err) => reject(err))
+             .run();
+     });
+ };
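
extractImageFromMovie wraps what appears to be a fluent-ffmpeg pipeline (-frames:v 1) to pull a single frame out of a video. A usage sketch with illustrative file names:

    import { extractImageFromMovie } from "./ffmpeg_utils.js";

    // Writes one video frame of beat1.mov to beat1.png.
    await extractImageFromMovie("beat1.mov", "beat1.png");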

package/lib/utils/file.d.ts CHANGED
@@ -19,9 +19,7 @@ export declare const fetchMulmoScriptFile: (url: string) => Promise<{
  }>;
  export declare const getOutputStudioFilePath: (outDirPath: string, fileName: string) => string;
  export declare const resolveDirPath: (dirPath: string, studioFileName: string) => string;
- export declare const getAudioSegmentDirPath: (audioDirPath: string, studioFileName: string) => string;
- export declare const getAudioSegmentFilePath: (audioDirPath: string, studioFileName: string, fileName: string) => string;
- export declare const getAudioCombinedFilePath: (audioDirPath: string, fileName: string, lang?: string) => string;
+ export declare const getAudioFilePath: (audioDirPath: string, dirName: string, fileName: string, lang?: string) => string;
  export declare const getAudioArtifactFilePath: (outDirPath: string, fileName: string) => string;
  export declare const getOutputVideoFilePath: (outDirPath: string, fileName: string, lang?: string, caption?: string) => string;
  export declare const getOutputPdfFilePath: (outDirPath: string, fileName: string, pdfMode: PDFMode, lang?: string) => string;
package/lib/utils/file.js CHANGED
@@ -59,18 +59,11 @@ export const getOutputStudioFilePath = (outDirPath, fileName) => {
  export const resolveDirPath = (dirPath, studioFileName) => {
      return path.resolve(dirPath, studioFileName);
  };
- // TODO: probably better to just use resolveDirPath instead.
- export const getAudioSegmentDirPath = (audioDirPath, studioFileName) => {
-     return path.resolve(audioDirPath, studioFileName);
- };
- export const getAudioSegmentFilePath = (audioDirPath, studioFileName, fileName) => {
-     return path.resolve(getAudioSegmentDirPath(audioDirPath, studioFileName), fileName + ".mp3");
- };
- export const getAudioCombinedFilePath = (audioDirPath, fileName, lang) => {
+ export const getAudioFilePath = (audioDirPath, dirName, fileName, lang) => {
      if (lang) {
-         return path.resolve(audioDirPath, fileName, `${fileName}_${lang}.mp3`);
+         return path.resolve(audioDirPath, dirName, `${fileName}_${lang}.mp3`);
      }
-     return path.resolve(audioDirPath, fileName, fileName + ".mp3");
+     return path.resolve(audioDirPath, dirName, fileName + ".mp3");
  };
  export const getAudioArtifactFilePath = (outDirPath, fileName) => {
      return path.resolve(outDirPath, fileName + ".mp3");
@@ -98,7 +91,7 @@ export const mkdir = (dirPath) => {
  // export const silentPath = path.resolve(npmRoot, "./assets/audio/silent300.mp3");
  // export const silentLastPath = path.resolve(npmRoot, "./assets/audio/silent800.mp3");
  export const silent60secPath = () => path.resolve(npmRoot, "./assets/audio/silent60sec.mp3");
- export const defaultBGMPath = () => path.resolve(npmRoot, "./assets/music/StarsBeyondEx.mp3");
+ export const defaultBGMPath = () => "https://github.com/receptron/mulmocast-media/raw/refs/heads/main/bgms/story002.mp3";
  export const getHTMLFile = (filename) => {
      const htmlPath = path.resolve(npmRoot, `./assets/html/${filename}.html`);
      return fs.readFileSync(htmlPath, "utf-8");
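
The three audio path helpers collapse into a single getAudioFilePath that takes the directory name explicitly, and the default BGM is no longer bundled: defaultBGMPath now returns a remote URL (the MP3 is also dropped from the published files list in package.json below). Mapping the old calls onto the new helper, per the implementations above:

    // 0.0.15
    getAudioSegmentFilePath(audioDirPath, studioFileName, fileName);
    getAudioCombinedFilePath(audioDirPath, fileName, lang);
    // 0.0.16: the directory segment is now an explicit argument
    getAudioFilePath(audioDirPath, studioFileName, fileName);
    getAudioFilePath(audioDirPath, fileName, fileName, lang);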

package/lib/utils/preprocess.js CHANGED
@@ -39,6 +39,7 @@ const mulmoCredit = (speaker) => {
  export const createOrUpdateStudioData = (_mulmoScript, currentStudio, fileName) => {
      const mulmoScript = _mulmoScript.__test_invalid__ ? _mulmoScript : mulmoScriptSchema.parse(_mulmoScript); // validate and insert default value
      const studio = rebuildStudio(currentStudio, mulmoScript, fileName);
+     // TODO: Move this code out of this function later
      // Addition cloing credit
      if (mulmoScript.$mulmocast.credit === "closing") {
          mulmoScript.beats.push(mulmoCredit(mulmoScript.beats[0].speaker)); // First speaker

package/lib/utils/prompt.d.ts CHANGED
@@ -14,3 +14,6 @@ export declare const sceneToBeatsPrompt: ({ sampleBeats, beatsPerScene, allScene
  }) => string;
  export declare const storyToScriptInfoPrompt: (scriptWithoutBeats: Omit<MulmoScript, "beats">, story: MulmoStoryboard) => string;
  export declare const storyToScriptPrompt: (script: MulmoScript, beatsPerScene: number, story: MulmoStoryboard) => string;
+ export declare const searchQueryPrompt: (inquiry: string, followUpQueries: string) => string;
+ export declare const reflectionPrompt: (researchTopic: string, searchResults: string) => string;
+ export declare const finalAnswerPrompt: (userInput: string, searchResults: string, researchTopic: string) => string;

package/lib/utils/prompt.js CHANGED
@@ -72,3 +72,55 @@ Only include keys that exist in the sample script.
  Do not add any keys that are not present in the sample script.
  Please provide your response as valid JSON within \`\`\`json code blocks for clarity.`.trim();
  };
+ export const searchQueryPrompt = (inquiry, followUpQueries) => {
+     return `
+ You are a professional research assistant specialized in generating sophisticated and diverse web search queries.
+ Create queries for advanced automated web research tools that can analyze complex results, follow links, and integrate information.
+
+ Instructions:
+ - Ensure collection of the latest information (current date: ${new Date().toLocaleDateString()})
+ - Always prioritize a single search query, add additional ones only when the original question requires multiple aspects
+ - Each query should focus on a specific aspect of the original question
+ - Do not generate more than 3 queries
+ - Generate diverse queries when the topic is broad
+ - Do not generate multiple similar queries, one is sufficient
+ - If follow-up queries exist, prioritize them over the user's inquiry
+
+ User's inquiry: ${inquiry}
+ Follow-up queries: ${followUpQueries}
+ `;
+ };
+ export const reflectionPrompt = (researchTopic, searchResults) => {
+     return `
+ You are a professional research assistant analyzing summaries related to "${researchTopic}".
+
+ Instructions:
+ - Identify knowledge gaps and areas requiring deeper exploration, then generate follow-up queries
+ - If the provided summary is sufficient to answer the user's question, do not generate follow-up queries
+ - When knowledge gaps exist, generate follow-up queries that help deepen understanding
+ - Focus on technical details, implementation specifications, and emerging trends that are not fully covered
+
+ Requirements:
+ - Follow-up queries should be self-contained and include necessary context for web search
+
+ Search results: ${searchResults}
+ `;
+ };
+ export const finalAnswerPrompt = (userInput, searchResults, researchTopic) => {
+     const currentDate = new Date().toLocaleDateString();
+     return `
+ You are a professional research assistant. Generate a high-quality answer based on the following information.
+
+ Instructions:
+ - Utilize all provided information to create a logical and well-structured response
+ - Include article information (URL and title) as citations in your output when referencing search results
+ - Provide detailed technical specifications and implementation details where possible
+ - Reflect the latest information and trends
+ - Ensure the response is comprehensive and accurate
+
+ User's Question: ${userInput}
+ Search Results: ${searchResults}
+ Research Topic: ${researchTopic}
+ Current Date: ${currentDate}
+ `;
+ };
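
These prompt builders simply interpolate their arguments into instruction text. The deep_research graph above passes GraphAI placeholder strings such as "${:userInput}", but plain values work the same way. A small sketch with illustrative inputs:

    import { searchQueryPrompt, reflectionPrompt } from "./prompt.js";

    const p1 = searchQueryPrompt("What is MulmoCast?", ""); // no follow-up queries yet
    const p2 = reflectionPrompt("MulmoCast", JSON.stringify([{ title: "Example", url: "https://example.com" }]));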
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "mulmocast",
-     "version": "0.0.15",
+     "version": "0.0.16",
      "description": "",
      "type": "module",
      "main": "lib/index.js",
@@ -10,7 +10,6 @@
      "files": [
          "./lib",
          "./scripts/templates",
-         "./assets/music/StarsBeyondEx.mp3",
          "./assets/audio/silent60sec.mp3",
          "./assets/html/",
          "./assets/templates/"
@@ -38,7 +37,8 @@
          "schema": "npx tsx ./src/cli/bin.ts tool schema",
          "story_to_script": "npx tsx ./src/cli/bin.ts tool story_to_script",
          "latest": "yarn upgrade-interactive --latest",
-         "format": "prettier --write '{src,scripts,assets/templates,draft,ideason,scripts_mag2,proto,test,graphai,output,docs/scripts}/**/*.{ts,json,yaml}'"
+         "format": "prettier --write '{src,scripts,assets/templates,assets/styles,draft,ideason,scripts_mag2,proto,test,graphai,output,docs/scripts}/**/*.{ts,json,yaml}'",
+         "deep_research": "npx tsx ./src/tools/deep_research.ts"
      },
      "repository": "git+ssh://git@github.com/receptron/mulmocast-cli.git",
      "author": "snakajima",
@@ -58,6 +58,7 @@
          "@graphai/stream_agent_filter": "^2.0.2",
          "@graphai/vanilla": "^2.0.4",
          "@graphai/vanilla_node_agents": "^2.0.1",
+         "@tavily/core": "^0.5.7",
          "canvas": "^3.1.0",
          "clipboardy": "^4.0.0",
          "dotenv": "^16.4.7",

package/assets/music/StarsBeyondEx.mp3 DELETED
Binary file