mulmocast-preprocessor 0.1.2 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +100 -1
  2. package/lib/cli/commands/process.js +1 -1
  3. package/lib/cli/commands/profiles.js +1 -1
  4. package/lib/cli/commands/query.d.ts +16 -0
  5. package/lib/cli/commands/query.js +119 -0
  6. package/lib/cli/commands/summarize.js +2 -36
  7. package/lib/cli/index.js +67 -0
  8. package/lib/cli/utils.d.ts +9 -0
  9. package/lib/cli/utils.js +35 -0
  10. package/lib/core/ai/command/query/index.d.ts +8 -0
  11. package/lib/core/ai/command/query/index.js +33 -0
  12. package/lib/core/ai/command/query/interactive.d.ts +22 -0
  13. package/lib/core/ai/command/query/interactive.js +44 -0
  14. package/lib/core/ai/command/query/prompts.d.ts +26 -0
  15. package/lib/core/ai/command/query/prompts.js +88 -0
  16. package/lib/core/ai/command/summarize/index.d.ts +8 -0
  17. package/lib/core/ai/command/summarize/index.js +33 -0
  18. package/lib/core/ai/command/summarize/prompts.d.ts +18 -0
  19. package/lib/core/ai/command/summarize/prompts.js +50 -0
  20. package/lib/core/ai/llm.d.ts +61 -0
  21. package/lib/core/ai/llm.js +191 -0
  22. package/lib/core/llm/index.d.ts +45 -0
  23. package/lib/core/llm/index.js +144 -0
  24. package/lib/core/preprocessing/filter.d.ts +14 -0
  25. package/lib/core/preprocessing/filter.js +30 -0
  26. package/lib/core/preprocessing/process.d.ts +7 -0
  27. package/lib/core/preprocessing/process.js +12 -0
  28. package/lib/core/preprocessing/profiles.d.ts +5 -0
  29. package/lib/core/preprocessing/profiles.js +38 -0
  30. package/lib/core/preprocessing/variant.d.ts +6 -0
  31. package/lib/core/preprocessing/variant.js +26 -0
  32. package/lib/core/query/index.d.ts +8 -0
  33. package/lib/core/query/index.js +33 -0
  34. package/lib/core/query/prompts.d.ts +14 -0
  35. package/lib/core/query/prompts.js +59 -0
  36. package/lib/core/summarize/index.js +3 -84
  37. package/lib/core/summarize/prompts.js +1 -18
  38. package/lib/index.d.ts +9 -5
  39. package/lib/index.js +10 -7
  40. package/lib/types/query.d.ts +45 -0
  41. package/lib/types/query.js +21 -0
  42. package/package.json +1 -1
@@ -0,0 +1,88 @@
1
+ import { getLanguageName, buildScriptContent } from "../../llm.js";
2
/**
 * Default system prompt for query
 */
export const DEFAULT_SYSTEM_PROMPT = `You are answering questions based on the content provided.
- Answer based ONLY on the information in the provided content
- If the answer cannot be found in the content, say so clearly
- Be concise and direct in your answers
- Do not make up information that is not in the content`;
/**
 * Resolve the system prompt for a query.
 *
 * Precedence: an explicit `options.systemPrompt` wins; otherwise the default
 * prompt is used, optionally extended with a target-language instruction.
 */
export const getSystemPrompt = (options) => {
    // A caller-supplied prompt overrides everything else.
    if (options.systemPrompt) {
        return options.systemPrompt;
    }
    // No language requested: the default prompt is used verbatim.
    if (!options.lang) {
        return DEFAULT_SYSTEM_PROMPT;
    }
    const langName = getLanguageName(options.lang);
    return `${DEFAULT_SYSTEM_PROMPT}\n- IMPORTANT: Write the answer in ${langName}`;
};
25
/**
 * Build user prompt from script and question
 *
 * Layout: rendered script content, a `---` separator, then the question and
 * an "Answer:" cue for the model.
 */
export const buildUserPrompt = (script, question) => {
    // Shared rendering of title, language, and per-section beats.
    const content = buildScriptContent(script);
    return [content, "---", "", `Question: ${question}`, "", "Answer:"].join("\n");
};
39
/**
 * Default system prompt for interactive query
 */
export const DEFAULT_INTERACTIVE_SYSTEM_PROMPT = `You are answering questions based on the content provided.
- Answer based ONLY on the information in the provided content
- If the answer cannot be found in the content, say so clearly
- Be concise and direct in your answers
- Do not make up information that is not in the content
- You may reference previous conversation when answering follow-up questions`;
/**
 * Resolve the system prompt for interactive mode.
 *
 * Precedence mirrors getSystemPrompt: explicit override first, then the
 * interactive default, optionally extended with a language instruction.
 */
export const getInteractiveSystemPrompt = (options) => {
    // A caller-supplied prompt overrides everything else.
    if (options.systemPrompt) {
        return options.systemPrompt;
    }
    if (!options.lang) {
        return DEFAULT_INTERACTIVE_SYSTEM_PROMPT;
    }
    const langName = getLanguageName(options.lang);
    return `${DEFAULT_INTERACTIVE_SYSTEM_PROMPT}\n- IMPORTANT: Write the answer in ${langName}`;
};
62
/**
 * Build user prompt with conversation history for interactive mode
 *
 * Renders the script content, replays prior Q/A turns (so the model can
 * resolve follow-up references), then poses the current question.
 */
export const buildInteractiveUserPrompt = (script, question, history) => {
    const lines = [buildScriptContent(script), "---", ""];
    // Add conversation history if exists
    if (history.length > 0) {
        lines.push("Previous conversation:");
        for (const msg of history) {
            const prefix = msg.role === "user" ? "Q" : "A";
            lines.push(`${prefix}: ${msg.content}`);
        }
        lines.push("");
    }
    lines.push(`Current question: ${question}`, "", "Answer:");
    return lines.join("\n");
};
@@ -0,0 +1,8 @@
1
+ import type { ExtendedScript } from "../../../../types/index.js";
2
+ import type { SummarizeOptions, SummarizeResult } from "../../../../types/summarize.js";
3
+ /**
4
+ * Main summarize function - generates a summary of the entire script
5
+ */
6
+ export declare const summarizeScript: (script: ExtendedScript, options?: Partial<SummarizeOptions>) => Promise<SummarizeResult>;
7
+ export type { SummarizeOptions, SummarizeResult, LLMProvider, SummarizeFormat } from "../../../../types/summarize.js";
8
+ export { summarizeOptionsSchema, llmProviderSchema, summarizeFormatSchema } from "../../../../types/summarize.js";
@@ -0,0 +1,33 @@
1
+ import { summarizeOptionsSchema } from "../../../../types/summarize.js";
2
+ import { executeLLM, filterScript } from "../../llm.js";
3
+ import { buildUserPrompt, getSystemPrompt } from "./prompts.js";
4
/**
 * Main summarize function - generates a summary of the entire script
 *
 * Validates the options, narrows the script to the requested section/tags,
 * and asks the configured LLM provider for a summary. Returns a placeholder
 * result without calling the LLM when filtering leaves no beats.
 */
export const summarizeScript = async (script, options = {}) => {
    // Validate and apply defaults
    const validatedOptions = summarizeOptionsSchema.parse(options);
    // Filter script if section/tags specified
    const filteredScript = filterScript(script, validatedOptions);
    const scriptTitle = script.title || "Untitled";
    if (filteredScript.beats.length === 0) {
        return {
            summary: "No content to summarize.",
            format: validatedOptions.format,
            scriptTitle,
            beatCount: 0,
        };
    }
    // Build prompts
    const systemPrompt = getSystemPrompt(validatedOptions);
    const userPrompt = buildUserPrompt(filteredScript, validatedOptions);
    // Execute LLM. Use the "Untitled" fallback in the verbose message so an
    // unnamed script does not log `Summarizing script "undefined"`.
    const summary = await executeLLM(systemPrompt, userPrompt, validatedOptions, `Summarizing script "${scriptTitle}" with ${validatedOptions.provider}... Beats: ${filteredScript.beats.length}, Format: ${validatedOptions.format}`);
    return {
        summary,
        format: validatedOptions.format,
        scriptTitle,
        beatCount: filteredScript.beats.length,
    };
};
33
+ export { summarizeOptionsSchema, llmProviderSchema, summarizeFormatSchema } from "../../../../types/summarize.js";
@@ -0,0 +1,18 @@
1
+ import type { SummarizeOptions } from "../../../../types/summarize.js";
2
+ import type { ExtendedScript } from "../../../../types/index.js";
3
+ /**
4
+ * Default system prompt for text summary
5
+ */
6
+ export declare const DEFAULT_SYSTEM_PROMPT_TEXT = "You are creating a summary based on the content provided.\n- Extract and explain the actual information and knowledge from the content\n- Do NOT describe what the presentation/script is about (avoid phrases like \"this presentation explains...\" or \"the script describes...\")\n- Write as if you are directly explaining the topic to the reader\n- Be concise and informative\n- Output plain text only";
7
+ /**
8
+ * Default system prompt for markdown summary
9
+ */
10
+ export declare const DEFAULT_SYSTEM_PROMPT_MARKDOWN = "You are creating a summary based on the content provided.\n- Extract and explain the actual information and knowledge from the content\n- Do NOT describe what the presentation/script is about (avoid phrases like \"this presentation explains...\" or \"the script describes...\")\n- Write as if you are directly explaining the topic to the reader\n- Use markdown formatting (headers, bullet points, etc.)\n- Include a title, key points, and conclusion\n- Output well-formatted markdown";
11
+ /**
12
+ * Build user prompt from entire script
13
+ */
14
+ export declare const buildUserPrompt: (script: ExtendedScript, options: SummarizeOptions) => string;
15
+ /**
16
+ * Get system prompt based on format and language
17
+ */
18
+ export declare const getSystemPrompt: (options: SummarizeOptions) => string;
@@ -0,0 +1,50 @@
1
+ import { getLanguageName, buildScriptContent } from "../../llm.js";
2
/**
 * Default system prompt for text summary
 */
export const DEFAULT_SYSTEM_PROMPT_TEXT = `You are creating a summary based on the content provided.
- Extract and explain the actual information and knowledge from the content
- Do NOT describe what the presentation/script is about (avoid phrases like "this presentation explains..." or "the script describes...")
- Write as if you are directly explaining the topic to the reader
- Be concise and informative
- Output plain text only`;
/**
 * Default system prompt for markdown summary
 */
export const DEFAULT_SYSTEM_PROMPT_MARKDOWN = `You are creating a summary based on the content provided.
- Extract and explain the actual information and knowledge from the content
- Do NOT describe what the presentation/script is about (avoid phrases like "this presentation explains..." or "the script describes...")
- Write as if you are directly explaining the topic to the reader
- Use markdown formatting (headers, bullet points, etc.)
- Include a title, key points, and conclusion
- Output well-formatted markdown`;
/**
 * Build user prompt from entire script
 *
 * Appends an optional target-length hint after the rendered script content,
 * then a closing instruction line.
 */
export const buildUserPrompt = (script, options) => {
    const lines = [buildScriptContent(script)];
    // Add target length if specified
    if (options.targetLengthChars) {
        lines.push(`Target summary length: approximately ${options.targetLengthChars} characters`);
    }
    lines.push("", "Based on the above content, explain the topic directly to the reader:");
    return lines.join("\n");
};
/**
 * Get system prompt based on format and language
 *
 * Precedence: explicit `options.systemPrompt`, then the format-appropriate
 * default, optionally extended with a target-language instruction.
 */
export const getSystemPrompt = (options) => {
    // A caller-supplied prompt overrides everything else.
    if (options.systemPrompt) {
        return options.systemPrompt;
    }
    const basePrompt = options.format === "markdown" ? DEFAULT_SYSTEM_PROMPT_MARKDOWN : DEFAULT_SYSTEM_PROMPT_TEXT;
    if (!options.lang) {
        return basePrompt;
    }
    return `${basePrompt}\n- IMPORTANT: Write the output in ${getLanguageName(options.lang)}`;
};
@@ -0,0 +1,61 @@
1
+ import type { ExtendedScript } from "../../types/index.js";
2
+ import type { LLMProvider } from "../../types/summarize.js";
3
+ /**
4
+ * Base options for LLM operations
5
+ */
6
+ export interface BaseLLMOptions {
7
+ provider: LLMProvider;
8
+ model?: string;
9
+ temperature?: number;
10
+ maxTokens?: number;
11
+ lang?: string;
12
+ systemPrompt?: string;
13
+ verbose?: boolean;
14
+ section?: string;
15
+ tags?: string[];
16
+ }
17
+ /**
18
+ * Provider configuration
19
+ */
20
+ export interface ProviderConfig {
21
+ agentName: string;
22
+ defaultModel: string;
23
+ keyName: string;
24
+ maxTokens?: number;
25
+ }
26
+ /**
27
+ * Get provider configuration
28
+ */
29
+ export declare const getProviderConfig: (provider: LLMProvider) => ProviderConfig;
30
+ /**
31
+ * Get API key for provider
32
+ */
33
+ export declare const getProviderApiKey: (provider: LLMProvider) => string | undefined;
34
+ /**
35
+ * Filter script based on options (section, tags)
36
+ */
37
+ export declare const filterScript: (script: ExtendedScript, options: BaseLLMOptions) => ExtendedScript;
38
+ /**
39
+ * Get language name from code
40
+ */
41
+ export declare const getLanguageName: (langCode: string) => string;
42
+ /**
43
+ * Build script content for user prompt (common part)
44
+ */
45
+ export declare const buildScriptContent: (script: ExtendedScript) => string;
46
+ /**
47
+ * Command execution result
48
+ */
49
+ export interface CommandResult {
50
+ text: string;
51
+ scriptTitle: string;
52
+ beatCount: number;
53
+ }
54
+ /**
55
+ * Execute a command (summarize, query, etc.) with common logic
56
+ */
57
+ export declare const executeCommand: <T extends BaseLLMOptions>(script: ExtendedScript, options: T, getSystemPrompt: (opts: T) => string, buildUserPrompt: (script: ExtendedScript) => string, verboseMessage: string) => Promise<CommandResult | null>;
58
+ /**
59
+ * Execute LLM call with GraphAI
60
+ */
61
+ export declare const executeLLM: (systemPrompt: string, userPrompt: string, options: BaseLLMOptions, verboseMessage?: string) => Promise<string>;
@@ -0,0 +1,191 @@
1
+ import dotenv from "dotenv";
2
+ import { GraphAI, GraphAILogger } from "graphai";
3
+ import * as vanillaAgents from "@graphai/vanilla";
4
+ import { openAIAgent } from "@graphai/openai_agent";
5
+ import { anthropicAgent } from "@graphai/anthropic_agent";
6
+ import { groqAgent } from "@graphai/groq_agent";
7
+ import { geminiAgent } from "@graphai/gemini_agent";
8
+ import { filterBySection, filterByTags } from "../preprocessing/filter.js";
9
+ dotenv.config({ quiet: true });
10
+ const agents = vanillaAgents.default ?? vanillaAgents;
11
// Single source of truth mapping each LLM provider to its GraphAI agent
// name, default model, API-key environment variable, and token ceiling.
const provider2Agent = {
    openai: {
        agentName: "openAIAgent",
        defaultModel: "gpt-4o-mini",
        keyName: "OPENAI_API_KEY",
        maxTokens: 4096,
    },
    anthropic: {
        agentName: "anthropicAgent",
        defaultModel: "claude-sonnet-4-20250514",
        keyName: "ANTHROPIC_API_KEY",
        maxTokens: 4096,
    },
    groq: {
        agentName: "groqAgent",
        defaultModel: "llama-3.1-8b-instant",
        keyName: "GROQ_API_KEY",
        maxTokens: 4096,
    },
    gemini: {
        agentName: "geminiAgent",
        defaultModel: "gemini-2.0-flash",
        keyName: "GEMINI_API_KEY",
        maxTokens: 4096,
    },
};
/**
 * Get provider configuration
 */
export const getProviderConfig = (provider) => provider2Agent[provider];
/**
 * Get API key for provider
 *
 * Reads the provider's configured environment variable; undefined when unset.
 */
export const getProviderApiKey = (provider) => {
    const { keyName } = provider2Agent[provider];
    return process.env[keyName];
};
50
/**
 * Create GraphAI graph for LLM call
 *
 * The graph exposes five injectable value nodes (systemPrompt, userPrompt,
 * model, temperature, maxTokens), feeds them into the named provider agent,
 * and copies the agent's `text` output into the `result` node.
 */
const createLLMGraph = (agentName) => {
    const injectable = { systemPrompt: {}, userPrompt: {}, model: {}, temperature: {}, maxTokens: {} };
    return {
        version: 0.5,
        nodes: {
            ...injectable,
            llmCall: {
                agent: agentName,
                inputs: {
                    system: ":systemPrompt",
                    prompt: ":userPrompt",
                    model: ":model",
                    temperature: ":temperature",
                    max_tokens: ":maxTokens",
                },
            },
            result: {
                isResult: true,
                agent: "copyAgent",
                inputs: { text: ":llmCall.text" },
            },
        },
    };
};
80
+ /**
81
+ * Filter script based on options (section, tags)
82
+ */
83
+ export const filterScript = (script, options) => {
84
+ const afterSection = options.section ? filterBySection(script, options.section) : script;
85
+ const afterTags = options.tags && options.tags.length > 0 ? filterByTags(afterSection, options.tags) : afterSection;
86
+ return afterTags;
87
+ };
88
// Common ISO-639-1 codes mapped to English language names.
const LANGUAGE_NAMES = {
    ja: "Japanese",
    en: "English",
    zh: "Chinese",
    ko: "Korean",
    fr: "French",
    de: "German",
    es: "Spanish",
    it: "Italian",
    pt: "Portuguese",
    ru: "Russian",
};
/**
 * Get language name from code
 *
 * Unknown codes are passed through unchanged so prompts still read sensibly.
 */
export const getLanguageName = (langCode) => LANGUAGE_NAMES[langCode] || langCode;
106
+ /**
107
+ * Build script content for user prompt (common part)
108
+ */
109
+ export const buildScriptContent = (script) => {
110
+ const parts = [];
111
+ // Add script metadata
112
+ parts.push(`# Script: ${script.title}`);
113
+ parts.push(`Language: ${script.lang}`);
114
+ parts.push("");
115
+ // Collect all text from beats grouped by section
116
+ const sections = new Map();
117
+ script.beats.forEach((beat, index) => {
118
+ const text = beat.text || "";
119
+ if (!text.trim())
120
+ return;
121
+ const section = beat.meta?.section || "main";
122
+ if (!sections.has(section)) {
123
+ sections.set(section, []);
124
+ }
125
+ sections.get(section).push(`[${index}] ${text}`);
126
+ });
127
+ // Output by section
128
+ sections.forEach((texts, section) => {
129
+ parts.push(`## Section: ${section}`);
130
+ texts.forEach((t) => parts.push(t));
131
+ parts.push("");
132
+ });
133
+ return parts.join("\n");
134
+ };
135
/**
 * Execute a command (summarize, query, etc.) with common logic
 *
 * Filters the script, builds both prompts, and runs the LLM. Returns null
 * when filtering leaves no beats (callers decide how to report that).
 */
export const executeCommand = async (script, options, getSystemPrompt, buildUserPrompt, verboseMessage) => {
    const scriptTitle = script.title || "Untitled";
    const filteredScript = filterScript(script, options);
    const beatCount = filteredScript.beats.length;
    if (beatCount === 0) {
        return null;
    }
    const text = await executeLLM(getSystemPrompt(options), buildUserPrompt(filteredScript), options, verboseMessage);
    return { text, scriptTitle, beatCount };
};
153
/**
 * Execute LLM call with GraphAI
 *
 * Builds a one-shot GraphAI graph for the configured provider, injects the
 * prompts and model parameters, runs it, and returns the generated text
 * ("" when the agent produced no text).
 *
 * @throws Error when the provider's API key environment variable is unset.
 */
export const executeLLM = async (systemPrompt, userPrompt, options, verboseMessage) => {
    const providerConfig = getProviderConfig(options.provider);
    const apiKey = getProviderApiKey(options.provider);
    if (!apiKey) {
        throw new Error(`API key not found for provider "${options.provider}". Please set the ${providerConfig.keyName} environment variable.`);
    }
    // Build GraphAI config. provider2Agent is the single source of truth for
    // key names, so hand the already-validated key to the active agent only
    // instead of re-reading every provider's environment variable here.
    const config = {
        [providerConfig.agentName]: { apiKey },
    };
    // Create GraphAI instance
    const graph = new GraphAI(createLLMGraph(providerConfig.agentName), {
        ...agents,
        openAIAgent,
        anthropicAgent,
        groqAgent,
        geminiAgent,
    }, { config });
    if (options.verbose && verboseMessage) {
        GraphAILogger.info(verboseMessage);
    }
    // Inject values
    graph.injectValue("systemPrompt", systemPrompt);
    graph.injectValue("userPrompt", userPrompt);
    graph.injectValue("model", options.model ?? providerConfig.defaultModel);
    graph.injectValue("temperature", options.temperature ?? 0.7);
    graph.injectValue("maxTokens", options.maxTokens ?? providerConfig.maxTokens ?? 2048);
    // Run graph
    const graphResult = await graph.run();
    // Extract text from result node
    const resultNode = graphResult.result;
    return resultNode?.text || "";
};
@@ -0,0 +1,45 @@
1
+ import type { ExtendedScript } from "../../types/index.js";
2
+ import type { LLMProvider } from "../../types/summarize.js";
3
+ /**
4
+ * Base options for LLM operations
5
+ */
6
+ export interface BaseLLMOptions {
7
+ provider: LLMProvider;
8
+ model?: string;
9
+ temperature?: number;
10
+ maxTokens?: number;
11
+ lang?: string;
12
+ systemPrompt?: string;
13
+ verbose?: boolean;
14
+ section?: string;
15
+ tags?: string[];
16
+ }
17
+ /**
18
+ * Provider configuration
19
+ */
20
+ export interface ProviderConfig {
21
+ agentName: string;
22
+ defaultModel: string;
23
+ keyName: string;
24
+ maxTokens?: number;
25
+ }
26
+ /**
27
+ * Get provider configuration
28
+ */
29
+ export declare const getProviderConfig: (provider: LLMProvider) => ProviderConfig;
30
+ /**
31
+ * Get API key for provider
32
+ */
33
+ export declare const getProviderApiKey: (provider: LLMProvider) => string | undefined;
34
+ /**
35
+ * Filter script based on options (section, tags)
36
+ */
37
+ export declare const filterScript: (script: ExtendedScript, options: BaseLLMOptions) => ExtendedScript;
38
+ /**
39
+ * Get language name from code
40
+ */
41
+ export declare const getLanguageName: (langCode: string) => string;
42
+ /**
43
+ * Execute LLM call with GraphAI
44
+ */
45
+ export declare const executeLLM: (systemPrompt: string, userPrompt: string, options: BaseLLMOptions, verboseMessage?: string) => Promise<string>;
@@ -0,0 +1,144 @@
1
+ import dotenv from "dotenv";
2
+ import { GraphAI, GraphAILogger } from "graphai";
3
+ import * as vanillaAgents from "@graphai/vanilla";
4
+ import { openAIAgent } from "@graphai/openai_agent";
5
+ import { anthropicAgent } from "@graphai/anthropic_agent";
6
+ import { groqAgent } from "@graphai/groq_agent";
7
+ import { geminiAgent } from "@graphai/gemini_agent";
8
+ import { filterBySection, filterByTags } from "../filter.js";
9
+ dotenv.config({ quiet: true });
10
+ const agents = vanillaAgents.default ?? vanillaAgents;
11
// Single source of truth mapping each LLM provider to its GraphAI agent
// name, default model, API-key environment variable, and token ceiling.
const provider2Agent = {
    openai: {
        agentName: "openAIAgent",
        defaultModel: "gpt-4o-mini",
        keyName: "OPENAI_API_KEY",
        maxTokens: 4096,
    },
    anthropic: {
        agentName: "anthropicAgent",
        defaultModel: "claude-sonnet-4-20250514",
        keyName: "ANTHROPIC_API_KEY",
        maxTokens: 4096,
    },
    groq: {
        agentName: "groqAgent",
        defaultModel: "llama-3.1-8b-instant",
        keyName: "GROQ_API_KEY",
        maxTokens: 4096,
    },
    gemini: {
        agentName: "geminiAgent",
        defaultModel: "gemini-2.0-flash",
        keyName: "GEMINI_API_KEY",
        maxTokens: 4096,
    },
};
/**
 * Get provider configuration
 */
export const getProviderConfig = (provider) => provider2Agent[provider];
/**
 * Get API key for provider
 *
 * Reads the provider's configured environment variable; undefined when unset.
 */
export const getProviderApiKey = (provider) => {
    const { keyName } = provider2Agent[provider];
    return process.env[keyName];
};
50
/**
 * Create GraphAI graph for LLM call
 *
 * The graph exposes five injectable value nodes (systemPrompt, userPrompt,
 * model, temperature, maxTokens), feeds them into the named provider agent,
 * and copies the agent's `text` output into the `result` node.
 */
const createLLMGraph = (agentName) => {
    const injectable = { systemPrompt: {}, userPrompt: {}, model: {}, temperature: {}, maxTokens: {} };
    return {
        version: 0.5,
        nodes: {
            ...injectable,
            llmCall: {
                agent: agentName,
                inputs: {
                    system: ":systemPrompt",
                    prompt: ":userPrompt",
                    model: ":model",
                    temperature: ":temperature",
                    max_tokens: ":maxTokens",
                },
            },
            result: {
                isResult: true,
                agent: "copyAgent",
                inputs: { text: ":llmCall.text" },
            },
        },
    };
};
80
+ /**
81
+ * Filter script based on options (section, tags)
82
+ */
83
+ export const filterScript = (script, options) => {
84
+ const afterSection = options.section ? filterBySection(script, options.section) : script;
85
+ const afterTags = options.tags && options.tags.length > 0 ? filterByTags(afterSection, options.tags) : afterSection;
86
+ return afterTags;
87
+ };
88
// Common ISO-639-1 codes mapped to English language names.
const LANGUAGE_NAMES = {
    ja: "Japanese",
    en: "English",
    zh: "Chinese",
    ko: "Korean",
    fr: "French",
    de: "German",
    es: "Spanish",
    it: "Italian",
    pt: "Portuguese",
    ru: "Russian",
};
/**
 * Get language name from code
 *
 * Unknown codes are passed through unchanged so prompts still read sensibly.
 */
export const getLanguageName = (langCode) => LANGUAGE_NAMES[langCode] || langCode;
106
/**
 * Execute LLM call with GraphAI
 *
 * Builds a one-shot GraphAI graph for the configured provider, injects the
 * prompts and model parameters, runs it, and returns the generated text
 * ("" when the agent produced no text).
 *
 * @throws Error when the provider's API key environment variable is unset.
 */
export const executeLLM = async (systemPrompt, userPrompt, options, verboseMessage) => {
    const providerConfig = getProviderConfig(options.provider);
    const apiKey = getProviderApiKey(options.provider);
    if (!apiKey) {
        throw new Error(`API key not found for provider "${options.provider}". Please set the ${providerConfig.keyName} environment variable.`);
    }
    // Build GraphAI config. provider2Agent is the single source of truth for
    // key names, so hand the already-validated key to the active agent only
    // instead of re-reading every provider's environment variable here.
    const config = {
        [providerConfig.agentName]: { apiKey },
    };
    // Create GraphAI instance
    const graph = new GraphAI(createLLMGraph(providerConfig.agentName), {
        ...agents,
        openAIAgent,
        anthropicAgent,
        groqAgent,
        geminiAgent,
    }, { config });
    if (options.verbose && verboseMessage) {
        GraphAILogger.info(verboseMessage);
    }
    // Inject values
    graph.injectValue("systemPrompt", systemPrompt);
    graph.injectValue("userPrompt", userPrompt);
    graph.injectValue("model", options.model ?? providerConfig.defaultModel);
    graph.injectValue("temperature", options.temperature ?? 0.7);
    graph.injectValue("maxTokens", options.maxTokens ?? providerConfig.maxTokens ?? 2048);
    // Run graph
    const graphResult = await graph.run();
    // Extract text from result node
    const resultNode = graphResult.result;
    return resultNode?.text || "";
};
@@ -0,0 +1,14 @@
1
+ import type { MulmoScript } from "mulmocast";
2
+ import type { ExtendedScript } from "../../types/index.js";
3
+ /**
4
+ * Filter beats by section (preserves meta for chaining)
5
+ */
6
+ export declare const filterBySection: (script: ExtendedScript, section: string) => ExtendedScript;
7
+ /**
8
+ * Filter beats by tags (preserves meta for chaining)
9
+ */
10
+ export declare const filterByTags: (script: ExtendedScript, tags: string[]) => ExtendedScript;
11
+ /**
12
+ * Strip variants and meta fields, converting to standard MulmoScript
13
+ */
14
+ export declare const stripExtendedFields: (script: ExtendedScript) => MulmoScript;