mulmocast-preprocessor 0.1.2 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +100 -1
  2. package/lib/cli/commands/process.js +1 -1
  3. package/lib/cli/commands/profiles.js +1 -1
  4. package/lib/cli/commands/query.d.ts +16 -0
  5. package/lib/cli/commands/query.js +119 -0
  6. package/lib/cli/commands/summarize.js +2 -36
  7. package/lib/cli/index.js +67 -0
  8. package/lib/cli/utils.d.ts +9 -0
  9. package/lib/cli/utils.js +35 -0
  10. package/lib/core/ai/command/query/index.d.ts +8 -0
  11. package/lib/core/ai/command/query/index.js +33 -0
  12. package/lib/core/ai/command/query/interactive.d.ts +22 -0
  13. package/lib/core/ai/command/query/interactive.js +44 -0
  14. package/lib/core/ai/command/query/prompts.d.ts +26 -0
  15. package/lib/core/ai/command/query/prompts.js +88 -0
  16. package/lib/core/ai/command/summarize/index.d.ts +8 -0
  17. package/lib/core/ai/command/summarize/index.js +33 -0
  18. package/lib/core/ai/command/summarize/prompts.d.ts +18 -0
  19. package/lib/core/ai/command/summarize/prompts.js +50 -0
  20. package/lib/core/ai/llm.d.ts +61 -0
  21. package/lib/core/ai/llm.js +191 -0
  22. package/lib/core/llm/index.d.ts +45 -0
  23. package/lib/core/llm/index.js +144 -0
  24. package/lib/core/preprocessing/filter.d.ts +14 -0
  25. package/lib/core/preprocessing/filter.js +30 -0
  26. package/lib/core/preprocessing/process.d.ts +7 -0
  27. package/lib/core/preprocessing/process.js +12 -0
  28. package/lib/core/preprocessing/profiles.d.ts +5 -0
  29. package/lib/core/preprocessing/profiles.js +38 -0
  30. package/lib/core/preprocessing/variant.d.ts +6 -0
  31. package/lib/core/preprocessing/variant.js +26 -0
  32. package/lib/core/query/index.d.ts +8 -0
  33. package/lib/core/query/index.js +33 -0
  34. package/lib/core/query/prompts.d.ts +14 -0
  35. package/lib/core/query/prompts.js +59 -0
  36. package/lib/core/summarize/index.js +3 -84
  37. package/lib/core/summarize/prompts.js +1 -18
  38. package/lib/index.d.ts +9 -5
  39. package/lib/index.js +10 -7
  40. package/lib/types/query.d.ts +45 -0
  41. package/lib/types/query.js +21 -0
  42. package/package.json +1 -1
package/README.md CHANGED
@@ -14,6 +14,8 @@ npm install mulmocast-preprocessor
14
14
  - **Section filtering**: Extract beats by section
15
15
  - **Tag filtering**: Extract beats by tags
16
16
  - **Profile listing**: List available profiles with beat counts
17
+ - **AI Summarization**: Generate summaries using LLM (OpenAI, Anthropic, Groq, Gemini)
18
+ - **AI Query**: Ask questions about script content with interactive mode
17
19
  - **CLI tool**: Command-line interface for processing scripts
18
20
 
19
21
  ## CLI Usage
@@ -36,9 +38,23 @@ mulmocast-preprocessor script.json --profile summary --section chapter1
36
38
 
37
39
  # List available profiles
38
40
  mulmocast-preprocessor profiles script.json
41
+
42
+ # Summarize script content
43
+ mulmocast-preprocessor summarize script.json
44
+ mulmocast-preprocessor summarize script.json --format markdown
45
+ mulmocast-preprocessor summarize script.json -l ja # Output in Japanese
46
+ mulmocast-preprocessor summarize https://example.com/script.json # From URL
47
+
48
+ # Query script content
49
+ mulmocast-preprocessor query script.json "What is the main topic?"
50
+ mulmocast-preprocessor query script.json "登場人物は?" -l ja
51
+
52
+ # Interactive query mode
53
+ mulmocast-preprocessor query script.json -i
54
+ mulmocast-preprocessor query script.json # Omit question for interactive mode
39
55
  ```
40
56
 
41
- ### CLI Options
57
+ ### CLI Options (process command)
42
58
 
43
59
  | Option | Alias | Description |
44
60
  |--------|-------|-------------|
@@ -49,6 +65,41 @@ mulmocast-preprocessor profiles script.json
49
65
  | `--help` | `-h` | Show help |
50
66
  | `--version` | `-v` | Show version |
51
67
 
68
+ ### CLI Options (summarize command)
69
+
70
+ | Option | Alias | Description |
71
+ |--------|-------|-------------|
72
+ | `--provider` | | LLM provider: openai, anthropic, groq, gemini (default: openai) |
73
+ | `--model` | `-m` | Model name |
74
+ | `--format` | `-f` | Output format: text, markdown (default: text) |
75
+ | `--lang` | `-l` | Output language (e.g., ja, en, zh) |
76
+ | `--target-length` | | Target summary length in characters |
77
+ | `--system-prompt` | | Custom system prompt |
78
+ | `--verbose` | | Show detailed progress |
79
+ | `--section` | `-s` | Filter by section name |
80
+ | `--tags` | `-t` | Filter by tags (comma-separated) |
81
+
82
+ ### CLI Options (query command)
83
+
84
+ | Option | Alias | Description |
85
+ |--------|-------|-------------|
86
+ | `--interactive` | `-i` | Start interactive query mode |
87
+ | `--provider` | | LLM provider: openai, anthropic, groq, gemini (default: openai) |
88
+ | `--model` | `-m` | Model name |
89
+ | `--lang` | `-l` | Output language (e.g., ja, en, zh) |
90
+ | `--system-prompt` | | Custom system prompt |
91
+ | `--verbose` | | Show detailed progress |
92
+ | `--section` | `-s` | Filter by section name |
93
+ | `--tags` | `-t` | Filter by tags (comma-separated) |
94
+
95
+ ### Interactive Query Commands
96
+
97
+ | Command | Description |
98
+ |---------|-------------|
99
+ | `/clear` | Clear conversation history |
100
+ | `/history` | Show conversation history |
101
+ | `/exit` | Exit interactive mode |
102
+
52
103
  ## Programmatic Usage
53
104
 
54
105
  ### Basic Example
@@ -132,6 +183,54 @@ Filter beats by section.
132
183
 
133
184
  Filter beats by tags (extracts beats that have any of the specified tags).
134
185
 
186
+ ### `summarizeScript(script, options)`
187
+
188
+ Generate a summary of the script content using LLM.
189
+
190
+ **Parameters:**
191
+ - `script: ExtendedScript` - Input script
192
+ - `options: SummarizeOptions` - Summarization options
193
+ - `provider?: LLMProvider` - LLM provider (default: "openai")
194
+ - `model?: string` - Model name
195
+ - `format?: "text" | "markdown"` - Output format
196
+ - `lang?: string` - Output language code
197
+ - `targetLengthChars?: number` - Target length
198
+ - `systemPrompt?: string` - Custom system prompt
199
+
200
+ **Returns:** `Promise<SummarizeResult>` - Summary result with text and metadata
201
+
202
+ ### `queryScript(script, question, options)`
203
+
204
+ Ask a question about the script content.
205
+
206
+ **Parameters:**
207
+ - `script: ExtendedScript` - Input script
208
+ - `question: string` - Question to ask
209
+ - `options: QueryOptions` - Query options (same as summarize)
210
+
211
+ **Returns:** `Promise<QueryResult>` - Answer with question and metadata
212
+
213
+ ### `createInteractiveSession(script, options)`
214
+
215
+ Create an interactive query session for follow-up questions.
216
+
217
+ **Parameters:**
218
+ - `script: ExtendedScript` - Input script
219
+ - `options: QueryOptions` - Query options
220
+
221
+ **Returns:** Session object with `sendInteractiveQuery()` method
222
+
223
+ ## Environment Variables
224
+
225
+ For AI features (summarize, query), set the API key for your LLM provider:
226
+
227
+ | Provider | Environment Variable |
228
+ |----------|---------------------|
229
+ | OpenAI | `OPENAI_API_KEY` |
230
+ | Anthropic | `ANTHROPIC_API_KEY` |
231
+ | Groq | `GROQ_API_KEY` |
232
+ | Gemini | `GEMINI_API_KEY` |
233
+
135
234
  ## Extended Schema
136
235
 
137
236
  ### ExtendedBeat
@@ -1,6 +1,6 @@
1
1
  import { readFileSync, writeFileSync } from "fs";
2
2
  import { GraphAILogger } from "graphai";
3
- import { processScript } from "../../core/process.js";
3
+ import { processScript } from "../../core/preprocessing/process.js";
4
4
  /**
5
5
  * Process script with profile and output result
6
6
  */
@@ -1,6 +1,6 @@
1
1
  import { readFileSync } from "fs";
2
2
  import { GraphAILogger } from "graphai";
3
- import { listProfiles } from "../../core/profiles.js";
3
+ import { listProfiles } from "../../core/preprocessing/profiles.js";
4
4
  /**
5
5
  * List available profiles in script
6
6
  */
@@ -0,0 +1,16 @@
1
+ import type { LLMProvider } from "../../types/summarize.js";
2
+ interface QueryCommandOptions {
3
+ provider?: LLMProvider;
4
+ model?: string;
5
+ lang?: string;
6
+ systemPrompt?: string;
7
+ verbose?: boolean;
8
+ section?: string;
9
+ tags?: string[];
10
+ interactive?: boolean;
11
+ }
12
+ /**
13
+ * Query command handler - outputs answer to stdout
14
+ */
15
+ export declare const queryCommand: (scriptPath: string, question: string | undefined, options: QueryCommandOptions) => Promise<void>;
16
+ export {};
@@ -0,0 +1,119 @@
1
+ import { createInterface } from "node:readline";
2
+ import { GraphAILogger } from "graphai";
3
+ import { queryScript } from "../../core/ai/command/query/index.js";
4
+ import { createInteractiveSession, sendInteractiveQuery, clearHistory } from "../../core/ai/command/query/interactive.js";
5
+ import { loadScript } from "../utils.js";
6
+ /**
7
+ * Query command handler - outputs answer to stdout
8
+ */
9
+ export const queryCommand = async (scriptPath, question, options) => {
10
+ try {
11
+ const script = await loadScript(scriptPath);
12
+ // Interactive mode
13
+ if (options.interactive || question === undefined) {
14
+ await runInteractiveMode(scriptPath, script, options);
15
+ return;
16
+ }
17
+ // Single query mode
18
+ const result = await queryScript(script, question, {
19
+ provider: options.provider ?? "openai",
20
+ model: options.model,
21
+ lang: options.lang,
22
+ systemPrompt: options.systemPrompt,
23
+ verbose: options.verbose ?? false,
24
+ section: options.section,
25
+ tags: options.tags,
26
+ });
27
+ // Output answer to stdout
28
+ process.stdout.write(result.answer + "\n");
29
+ }
30
+ catch (error) {
31
+ if (error instanceof Error) {
32
+ GraphAILogger.error(`Error: ${error.message}`);
33
+ }
34
+ else {
35
+ GraphAILogger.error("Unknown error occurred");
36
+ }
37
+ process.exit(1);
38
+ }
39
+ };
40
+ /**
41
+ * Run interactive query mode
42
+ */
43
+ const runInteractiveMode = async (scriptPath, script, options) => {
44
+ const { session, filteredScript, validatedOptions } = createInteractiveSession(script, {
45
+ provider: options.provider ?? "openai",
46
+ model: options.model,
47
+ lang: options.lang,
48
+ systemPrompt: options.systemPrompt,
49
+ verbose: options.verbose ?? false,
50
+ section: options.section,
51
+ tags: options.tags,
52
+ });
53
+ if (filteredScript.beats.length === 0) {
54
+ GraphAILogger.error("No content available to query.");
55
+ process.exit(1);
56
+ }
57
+ const rl = createInterface({
58
+ input: process.stdin,
59
+ output: process.stdout,
60
+ });
61
+ GraphAILogger.info(`Interactive query mode for "${session.scriptTitle}" (${session.beatCount} beats)`);
62
+ GraphAILogger.info("Commands: /clear (clear history), /history (show history), /exit or Ctrl+C (quit)");
63
+ GraphAILogger.info("");
64
+ const prompt = () => {
65
+ rl.question("You: ", async (input) => {
66
+ const trimmedInput = input.trim();
67
+ if (!trimmedInput) {
68
+ prompt();
69
+ return;
70
+ }
71
+ // Handle commands
72
+ if (trimmedInput === "/exit" || trimmedInput === "/quit") {
73
+ GraphAILogger.info("Goodbye!");
74
+ rl.close();
75
+ return;
76
+ }
77
+ if (trimmedInput === "/clear") {
78
+ clearHistory(session);
79
+ GraphAILogger.info("Conversation history cleared.\n");
80
+ prompt();
81
+ return;
82
+ }
83
+ if (trimmedInput === "/history") {
84
+ if (session.history.length === 0) {
85
+ GraphAILogger.info("No conversation history.\n");
86
+ }
87
+ else {
88
+ GraphAILogger.info("Conversation history:");
89
+ session.history.forEach((msg) => {
90
+ const prefix = msg.role === "user" ? "Q" : "A";
91
+ GraphAILogger.info(`${prefix}: ${msg.content}`);
92
+ });
93
+ GraphAILogger.info("");
94
+ }
95
+ prompt();
96
+ return;
97
+ }
98
+ // Send query
99
+ try {
100
+ const answer = await sendInteractiveQuery(filteredScript, trimmedInput, session, validatedOptions);
101
+ GraphAILogger.info(`\nAssistant: ${answer}\n`);
102
+ }
103
+ catch (error) {
104
+ if (error instanceof Error) {
105
+ GraphAILogger.error(`Error: ${error.message}\n`);
106
+ }
107
+ else {
108
+ GraphAILogger.error("Unknown error occurred\n");
109
+ }
110
+ }
111
+ prompt();
112
+ });
113
+ };
114
+ // Handle Ctrl+C
115
+ rl.on("close", () => {
116
+ process.exit(0);
117
+ });
118
+ prompt();
119
+ };
@@ -1,40 +1,6 @@
1
- import { readFileSync } from "fs";
2
1
  import { GraphAILogger } from "graphai";
3
- import { summarizeScript } from "../../core/summarize/index.js";
4
- /**
5
- * Check if input is a URL
6
- */
7
- const isUrl = (input) => {
8
- return input.startsWith("http://") || input.startsWith("https://");
9
- };
10
- /**
11
- * Fetch JSON from URL with timeout
12
- */
13
- const fetchJson = async (url) => {
14
- const controller = new AbortController();
15
- const timeout_ms = 30000;
16
- const timeoutId = setTimeout(() => controller.abort(), timeout_ms);
17
- try {
18
- const response = await fetch(url, { signal: controller.signal });
19
- if (!response.ok) {
20
- throw new Error(`HTTP error: ${response.status} ${response.statusText}`);
21
- }
22
- return (await response.json());
23
- }
24
- finally {
25
- clearTimeout(timeoutId);
26
- }
27
- };
28
- /**
29
- * Load script from file path or URL
30
- */
31
- const loadScript = async (input) => {
32
- if (isUrl(input)) {
33
- return fetchJson(input);
34
- }
35
- const content = readFileSync(input, "utf-8");
36
- return JSON.parse(content);
37
- };
2
+ import { summarizeScript } from "../../core/ai/command/summarize/index.js";
3
+ import { loadScript } from "../utils.js";
38
4
  /**
39
5
  * Summarize command handler - outputs summary to stdout
40
6
  */
package/lib/cli/index.js CHANGED
@@ -4,6 +4,7 @@ import { hideBin } from "yargs/helpers";
4
4
  import { processCommand } from "./commands/process.js";
5
5
  import { profilesCommand } from "./commands/profiles.js";
6
6
  import { summarizeCommand } from "./commands/summarize.js";
7
+ import { queryCommand } from "./commands/query.js";
7
8
  yargs(hideBin(process.argv))
8
9
  .command("$0 <script>", "Process MulmoScript with profile", (builder) => builder
9
10
  .positional("script", {
@@ -109,6 +110,68 @@ yargs(hideBin(process.argv))
109
110
  section: argv.section,
110
111
  tags,
111
112
  });
113
+ })
114
+ .command("query <script> [question]", "Ask a question about the script content", (builder) => builder
115
+ .positional("script", {
116
+ describe: "Path or URL to MulmoScript JSON file",
117
+ type: "string",
118
+ demandOption: true,
119
+ })
120
+ .positional("question", {
121
+ describe: "Question to ask about the script (omit for interactive mode)",
122
+ type: "string",
123
+ })
124
+ .option("interactive", {
125
+ alias: "i",
126
+ describe: "Start interactive query mode",
127
+ type: "boolean",
128
+ default: false,
129
+ })
130
+ .option("provider", {
131
+ describe: "LLM provider (openai, anthropic, groq, gemini)",
132
+ type: "string",
133
+ default: "openai",
134
+ })
135
+ .option("model", {
136
+ alias: "m",
137
+ describe: "Model name",
138
+ type: "string",
139
+ })
140
+ .option("lang", {
141
+ alias: "l",
142
+ describe: "Output language (e.g., ja, en, zh)",
143
+ type: "string",
144
+ })
145
+ .option("system-prompt", {
146
+ describe: "Custom system prompt",
147
+ type: "string",
148
+ })
149
+ .option("verbose", {
150
+ describe: "Show detailed progress",
151
+ type: "boolean",
152
+ default: false,
153
+ })
154
+ .option("section", {
155
+ alias: "s",
156
+ describe: "Filter by section name",
157
+ type: "string",
158
+ })
159
+ .option("tags", {
160
+ alias: "t",
161
+ describe: "Filter by tags (comma-separated)",
162
+ type: "string",
163
+ }), (argv) => {
164
+ const tags = argv.tags ? argv.tags.split(",").map((t) => t.trim()) : undefined;
165
+ queryCommand(argv.script, argv.question, {
166
+ provider: argv.provider,
167
+ model: argv.model,
168
+ lang: argv.lang,
169
+ systemPrompt: argv.systemPrompt,
170
+ verbose: argv.verbose,
171
+ section: argv.section,
172
+ tags,
173
+ interactive: argv.interactive,
174
+ });
112
175
  })
113
176
  .example("$0 script.json --profile summary -o summary.json", "Apply summary profile and save to file")
114
177
  .example("$0 script.json -p teaser", "Apply teaser profile and output to stdout")
@@ -120,6 +183,10 @@ yargs(hideBin(process.argv))
120
183
  .example("$0 summarize script.json --format markdown", "Generate markdown summary")
121
184
  .example("$0 summarize script.json -l ja", "Output summary in Japanese")
122
185
  .example("$0 summarize https://example.com/script.json", "Summarize from URL")
186
+ .example('$0 query script.json "What is the main topic?"', "Ask a question about the script")
187
+ .example('$0 query script.json "登場人物は?" -l ja', "Query in Japanese")
188
+ .example("$0 query script.json -i", "Start interactive query mode")
189
+ .example("$0 query script.json", "Interactive mode (question omitted)")
123
190
  .help()
124
191
  .alias("h", "help")
125
192
  .version()
@@ -0,0 +1,9 @@
1
+ import type { ExtendedScript } from "../types/index.js";
2
+ /**
3
+ * Check if input is a URL
4
+ */
5
+ export declare const isUrl: (input: string) => boolean;
6
+ /**
7
+ * Load script from file path or URL
8
+ */
9
+ export declare const loadScript: (input: string) => Promise<ExtendedScript>;
@@ -0,0 +1,35 @@
1
+ import { readFileSync } from "fs";
2
+ /**
3
+ * Check if input is a URL
4
+ */
5
+ export const isUrl = (input) => {
6
+ return input.startsWith("http://") || input.startsWith("https://");
7
+ };
8
+ /**
9
+ * Fetch JSON from URL with timeout
10
+ */
11
+ const fetchJson = async (url) => {
12
+ const controller = new AbortController();
13
+ const timeout_ms = 30000;
14
+ const timeoutId = setTimeout(() => controller.abort(), timeout_ms);
15
+ try {
16
+ const response = await fetch(url, { signal: controller.signal });
17
+ if (!response.ok) {
18
+ throw new Error(`HTTP error: ${response.status} ${response.statusText}`);
19
+ }
20
+ return (await response.json());
21
+ }
22
+ finally {
23
+ clearTimeout(timeoutId);
24
+ }
25
+ };
26
+ /**
27
+ * Load script from file path or URL
28
+ */
29
+ export const loadScript = async (input) => {
30
+ if (isUrl(input)) {
31
+ return fetchJson(input);
32
+ }
33
+ const content = readFileSync(input, "utf-8");
34
+ return JSON.parse(content);
35
+ };
@@ -0,0 +1,8 @@
1
+ import type { ExtendedScript } from "../../../../types/index.js";
2
+ import type { QueryOptions, QueryResult } from "../../../../types/query.js";
3
+ /**
4
+ * Main query function - answers a question based on script content
5
+ */
6
+ export declare const queryScript: (script: ExtendedScript, question: string, options?: Partial<QueryOptions>) => Promise<QueryResult>;
7
+ export type { QueryOptions, QueryResult } from "../../../../types/query.js";
8
+ export { queryOptionsSchema } from "../../../../types/query.js";
@@ -0,0 +1,33 @@
1
+ import { queryOptionsSchema } from "../../../../types/query.js";
2
+ import { executeLLM, filterScript } from "../../llm.js";
3
+ import { buildUserPrompt, getSystemPrompt } from "./prompts.js";
4
+ /**
5
+ * Main query function - answers a question based on script content
6
+ */
7
+ export const queryScript = async (script, question, options = {}) => {
8
+ // Validate and apply defaults
9
+ const validatedOptions = queryOptionsSchema.parse(options);
10
+ // Filter script if section/tags specified
11
+ const filteredScript = filterScript(script, validatedOptions);
12
+ const scriptTitle = script.title || "Untitled";
13
+ if (filteredScript.beats.length === 0) {
14
+ return {
15
+ answer: "No content available to answer the question.",
16
+ question,
17
+ scriptTitle,
18
+ beatCount: 0,
19
+ };
20
+ }
21
+ // Build prompts
22
+ const systemPrompt = getSystemPrompt(validatedOptions);
23
+ const userPrompt = buildUserPrompt(filteredScript, question);
24
+ // Execute LLM
25
+ const answer = await executeLLM(systemPrompt, userPrompt, validatedOptions, `Querying script "${script.title}" with ${validatedOptions.provider}... Beats: ${filteredScript.beats.length}, Question: ${question}`);
26
+ return {
27
+ answer,
28
+ question,
29
+ scriptTitle,
30
+ beatCount: filteredScript.beats.length,
31
+ };
32
+ };
33
+ export { queryOptionsSchema } from "../../../../types/query.js";
@@ -0,0 +1,22 @@
1
+ import type { ExtendedScript } from "../../../../types/index.js";
2
+ import type { QueryOptions, InteractiveQuerySession, ConversationMessage } from "../../../../types/query.js";
3
+ /**
4
+ * Create an interactive query session
5
+ */
6
+ export declare const createInteractiveSession: (script: ExtendedScript, options?: Partial<QueryOptions>) => {
7
+ session: InteractiveQuerySession;
8
+ filteredScript: ExtendedScript;
9
+ validatedOptions: QueryOptions;
10
+ };
11
+ /**
12
+ * Send a question in an interactive session
13
+ */
14
+ export declare const sendInteractiveQuery: (filteredScript: ExtendedScript, question: string, session: InteractiveQuerySession, options: QueryOptions) => Promise<string>;
15
+ /**
16
+ * Clear conversation history
17
+ */
18
+ export declare const clearHistory: (session: InteractiveQuerySession) => void;
19
+ /**
20
+ * Get conversation history
21
+ */
22
+ export declare const getHistory: (session: InteractiveQuerySession) => ConversationMessage[];
@@ -0,0 +1,44 @@
1
+ import { queryOptionsSchema } from "../../../../types/query.js";
2
+ import { executeLLM, filterScript } from "../../llm.js";
3
+ import { buildInteractiveUserPrompt, getInteractiveSystemPrompt } from "./prompts.js";
4
+ /**
5
+ * Create an interactive query session
6
+ */
7
+ export const createInteractiveSession = (script, options = {}) => {
8
+ const validatedOptions = queryOptionsSchema.parse(options);
9
+ const filteredScript = filterScript(script, validatedOptions);
10
+ const scriptTitle = script.title || "Untitled";
11
+ const session = {
12
+ scriptTitle,
13
+ beatCount: filteredScript.beats.length,
14
+ history: [],
15
+ };
16
+ return { session, filteredScript, validatedOptions };
17
+ };
18
+ /**
19
+ * Send a question in an interactive session
20
+ */
21
+ export const sendInteractiveQuery = async (filteredScript, question, session, options) => {
22
+ if (filteredScript.beats.length === 0) {
23
+ return "No content available to answer the question.";
24
+ }
25
+ const systemPrompt = getInteractiveSystemPrompt(options);
26
+ const userPrompt = buildInteractiveUserPrompt(filteredScript, question, session.history);
27
+ const answer = await executeLLM(systemPrompt, userPrompt, options, options.verbose ? `Interactive query: ${question}` : undefined);
28
+ // Add to history
29
+ session.history.push({ role: "user", content: question });
30
+ session.history.push({ role: "assistant", content: answer });
31
+ return answer;
32
+ };
33
+ /**
34
+ * Clear conversation history
35
+ */
36
+ export const clearHistory = (session) => {
37
+ session.history = [];
38
+ };
39
+ /**
40
+ * Get conversation history
41
+ */
42
+ export const getHistory = (session) => {
43
+ return [...session.history];
44
+ };
@@ -0,0 +1,26 @@
1
+ import type { QueryOptions, ConversationMessage } from "../../../../types/query.js";
2
+ import type { ExtendedScript } from "../../../../types/index.js";
3
+ /**
4
+ * Default system prompt for query
5
+ */
6
+ export declare const DEFAULT_SYSTEM_PROMPT = "You are answering questions based on the content provided.\n- Answer based ONLY on the information in the provided content\n- If the answer cannot be found in the content, say so clearly\n- Be concise and direct in your answers\n- Do not make up information that is not in the content";
7
+ /**
8
+ * Get system prompt based on options
9
+ */
10
+ export declare const getSystemPrompt: (options: QueryOptions) => string;
11
+ /**
12
+ * Build user prompt from script and question
13
+ */
14
+ export declare const buildUserPrompt: (script: ExtendedScript, question: string) => string;
15
+ /**
16
+ * Default system prompt for interactive query
17
+ */
18
+ export declare const DEFAULT_INTERACTIVE_SYSTEM_PROMPT = "You are answering questions based on the content provided.\n- Answer based ONLY on the information in the provided content\n- If the answer cannot be found in the content, say so clearly\n- Be concise and direct in your answers\n- Do not make up information that is not in the content\n- You may reference previous conversation when answering follow-up questions";
19
+ /**
20
+ * Get system prompt for interactive mode
21
+ */
22
+ export declare const getInteractiveSystemPrompt: (options: QueryOptions) => string;
23
+ /**
24
+ * Build user prompt with conversation history for interactive mode
25
+ */
26
+ export declare const buildInteractiveUserPrompt: (script: ExtendedScript, question: string, history: ConversationMessage[]) => string;