samuraizer 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/README.md +206 -0
  2. package/dist/checks/ffmpeg.js +9 -0
  3. package/dist/checks/ffprobe.js +9 -0
  4. package/dist/checks/ollama.js +12 -0
  5. package/dist/checks/whisper.js +9 -0
  6. package/dist/cli/commands/process.js +138 -0
  7. package/dist/cli/index.js +175 -0
  8. package/dist/cli/process-command.js +21 -0
  9. package/dist/config/defaults.js +7 -0
  10. package/dist/config/init.js +21 -0
  11. package/dist/config/load.js +57 -0
  12. package/dist/config/paths.js +20 -0
  13. package/dist/config/schema.js +12 -0
  14. package/dist/config/template.js +9 -0
  15. package/dist/config/types.js +1 -0
  16. package/dist/dev/run-tool.js +38 -0
  17. package/dist/dev/runProcessMeeting.js +21 -0
  18. package/dist/infra/ffmpeg/run-ffmpeg.js +29 -0
  19. package/dist/infra/ollama/ollama-client.js +42 -0
  20. package/dist/infra/whisper/run-whisper-cli.js +40 -0
  21. package/dist/lib/ollama.js +51 -0
  22. package/dist/lib/run-command.js +8 -0
  23. package/dist/orchestrators/process-meeting.js +196 -0
  24. package/dist/pipeline/analysis/action-items/generate.js +117 -0
  25. package/dist/pipeline/analysis/action-items/types.js +1 -0
  26. package/dist/pipeline/analysis/decisions/generate.js +101 -0
  27. package/dist/pipeline/analysis/decisions/types.js +1 -0
  28. package/dist/pipeline/analysis/summary/generate.js +24 -0
  29. package/dist/pipeline/analysis/summary/types.js +1 -0
  30. package/dist/pipeline/audio/normalize.js +11 -0
  31. package/dist/pipeline/audio/probe.js +37 -0
  32. package/dist/pipeline/audio/validate-input.js +46 -0
  33. package/dist/pipeline/output/paths.js +19 -0
  34. package/dist/pipeline/output/prepare.js +22 -0
  35. package/dist/pipeline/output/save.js +16 -0
  36. package/dist/pipeline/report/generate.js +54 -0
  37. package/dist/pipeline/transcription/transcribe.js +36 -0
  38. package/dist/pipeline/transcription/types.js +1 -0
  39. package/dist/shared/tool-definition.js +5 -0
  40. package/dist/shared/tool-registry.js +12 -0
  41. package/dist/tools/analysis/action-item-types.js +1 -0
  42. package/dist/tools/analysis/extract-action-items-tool.js +94 -0
  43. package/dist/tools/analysis/extract-decisions-tool.js +84 -0
  44. package/dist/tools/analysis/summarize-transcription-tool.js +44 -0
  45. package/dist/tools/input/normalize-audio-tool.js +24 -0
  46. package/dist/tools/transcription/transcribe-audio-tool.js +43 -0
  47. package/package.json +42 -0
package/README.md ADDED
@@ -0,0 +1,206 @@
1
+ # Samuraizer
2
+
3
+ Local-first CLI for meeting transcription and analysis.
4
+
5
+ Samuraizer processes audio recordings and generates:
6
+ - transcript
7
+ - summary
8
+ - action items
9
+ - decisions
10
+ - report
11
+
12
+ All processing is done locally using tools like Whisper and Ollama
13
+
14
+
15
+ ## ✨ Features
16
+
17
+ - 🎙 Transcribe audio recordings (Whisper)
18
+ - 🧠 Generate summaries (local LLM via Ollama)
19
+ - ✅ Extract action items
20
+ - 📌 Extract decisions
21
+ - ⚡ Resume processing (skip already processed steps)
22
+ - 🔧 Simple CLI + config system
23
+ - 🔒 Local-first (no cloud required)
24
+
25
+
26
+ ## 📦 Installation
27
+
28
+ ```bash
29
+ npm install -g samuraizer
30
+ ```
31
+
32
+ ## ⚙️ Prerequisites
33
+
34
+ Make sure you have installed:
35
+ ```bash
36
+ Node.js >= 20
37
+ ffmpeg
38
+ whisper-cli (whisper.cpp)
39
+ Ollama
40
+ Start Ollama
41
+ ollama serve
42
+ ollama pull qwen2.5:14b
43
+ ```
44
+
45
+ ## 🚀 Quick Start
46
+ ```bash
47
+ samuraizer init
48
+ samuraizer process meeting.m4a
49
+ ```
50
+
51
+ ## ⚙️ Configuration
52
+
53
+ Samuraizer uses a global JSON config file.
54
+
55
+ ### Initialize config
56
+ ```bash
57
+ samuraizer init
58
+ ```
59
+
60
+ ### Config location
61
+ * **macOS**: `~/Library/Application Support/samuraizer/config.json`
62
+ * **Linux**: `~/.config/samuraizer/config.json`
63
+ * **Windows**: `%AppData%/samuraizer/config.json`
64
+
65
+ ### View config
66
+ ```bash
67
+ samuraizer config get
68
+ ```
69
+ ### Example config
70
+ ```json
71
+ {
72
+ "model": "qwen2.5:14b",
73
+ "ollamaBaseUrl": "http://127.0.0.1:11434",
74
+ "whisperCommand": "whisper-cli",
75
+ "ffmpegCommand": "ffmpeg",
76
+ "ffprobeCommand": "ffprobe"
77
+ }
78
+ ```
79
+
80
+ ### 🧩 Config fields
81
+
82
+ - **model** — LLM model used for analysis (summary, action items, decisions)
83
+ - **ollamaBaseUrl** — URL where Ollama is running
84
+ - **whisperCommand** — Command used to run Whisper
85
+ - **ffmpegCommand** — Command used for audio processing
86
+ - **ffprobeCommand** — Command used for audio inspection
87
+
88
+
89
+ ## 🧪 Commands
90
+
91
+ ### Show help
92
+
93
+ ```bash
94
+ samuraizer --help
95
+ ```
96
+
97
+ ### Show version
98
+
99
+ ```bash
100
+ samuraizer --version
101
+ ```
102
+
103
+ ## 🚀 Full pipeline
104
+
105
+ ### Process an audio recording:
106
+ ```bash
107
+ samuraizer process meeting.m4a
108
+ ```
109
+
110
+ ### Show detailed metadata after processing:
111
+ ```bash
112
+ samuraizer process meeting.m4a --verbose
113
+ ```
114
+
115
+ ### Recompute all steps even if outputs already exist:
116
+ ```bash
117
+ samuraizer process meeting.m4a --force
118
+ ```
119
+
120
+ ### You can combine flags:
121
+ ```bash
122
+ samuraizer process meeting.m4a --verbose --force
123
+ ```
124
+
125
+ ## 🎛 Individual commands
126
+
127
+ ### Normalize audio to Whisper-compatible WAV:
128
+ ```bash
129
+ samuraizer normalize input.m4a output.wav
130
+ ```
131
+
132
+ ### Summarize a transcript file:
133
+ ```bash
134
+ samuraizer summarize transcript.txt
135
+ ```
136
+
137
+ ### Extract action items from a transcript file:
138
+ ```bash
139
+ samuraizer actions transcript.txt
140
+ ```
141
+
142
+ ### Extract decisions from a transcript file:
143
+ ```bash
144
+ samuraizer decisions transcript.txt
145
+ ```
146
+
147
+
148
+ ## ⚙️ Configuration commands
149
+
150
+ ### Create the default config file:
151
+ ```bash
152
+ samuraizer init
153
+ ```
154
+
155
+ ### Print config file path:
156
+ ```bash
157
+ samuraizer config path
158
+ ```
159
+
160
+ ### Print resolved config as JSON:
161
+ ```bash
162
+ samuraizer config get
163
+ ```
164
+
165
+
166
+ ### 📂 Output
167
+ ```text
168
+ output/<file-name>/
169
+ transcript.txt
170
+ summary.txt
171
+ action-items.json
172
+ decisions.json
173
+ report.txt
174
+ ```
175
+
176
+
177
+ ### 🔁 Resume Behavior
178
+
179
+ Samuraizer skips already processed steps.
180
+
181
+ Use `--force` to rebuild everything.
182
+
183
+
184
+ ### ⚠️ Common Issues
185
+ #### Ollama not running
186
+ ```bash
187
+ ollama serve
188
+ ```
189
+
190
+ #### ffmpeg not found
191
+
192
+ macOS:
193
+ ```bash
194
+ brew install ffmpeg
195
+ ```
196
+
197
+ Linux:
198
+ ```bash
199
+ apt install ffmpeg
200
+ ```
201
+
202
+ ### 📄 License
203
+
204
+ ISC
205
+
206
+ ---
@@ -0,0 +1,9 @@
1
+ import { runCommand } from "../lib/run-command.js";
2
/**
 * Verify that ffmpeg is runnable by invoking `<command> -version`.
 *
 * @param {string} command - ffmpeg executable name or path to probe.
 * @throws {Error} If the command cannot be executed (not installed / not in PATH).
 */
export async function ensureFfmpeg(command) {
    try {
        await runCommand(command, ["-version"]);
    }
    catch (err) {
        // Preserve the underlying failure for debugging instead of discarding it.
        throw new Error(`ffmpeg is not installed or not in PATH (tried '${command}'). Please install ffmpeg.`, { cause: err });
    }
}
@@ -0,0 +1,9 @@
1
+ import { runCommand } from "../lib/run-command.js";
2
/**
 * Verify that ffprobe is runnable by invoking `<command> -version`.
 *
 * @param {string} command - ffprobe executable name or path to probe.
 * @throws {Error} If the command cannot be executed (not installed / not in PATH).
 */
export async function ensureFfprobe(command) {
    try {
        await runCommand(command, ["-version"]);
    }
    catch (err) {
        // Preserve the underlying failure for debugging instead of discarding it.
        throw new Error(`ffprobe is not installed or not in PATH (tried '${command}'). Please install ffprobe.`, { cause: err });
    }
}
@@ -0,0 +1,12 @@
1
/**
 * Verify that an Ollama server is reachable and responding.
 *
 * Performs a GET against the lightweight `/api/ps` endpoint.
 *
 * @param {string} baseUrl - Base URL of the Ollama server, e.g. http://127.0.0.1:11434.
 * @throws {Error} If the server is unreachable (with the network error as `cause`)
 *                 or responds with a non-2xx status.
 */
export async function ensureOllama(baseUrl) {
    let response;
    try {
        response = await fetch(`${baseUrl}/api/ps`, { method: "GET" });
    }
    catch (err) {
        // Network-level failure: server down, connection refused, bad host, etc.
        throw new Error(`Ollama is not reachable at ${baseUrl}. Make sure ollama is running.`, { cause: err });
    }
    if (!response.ok) {
        throw new Error(`Ollama API check failed with ${response.status} ${response.statusText}.`);
    }
}
@@ -0,0 +1,9 @@
1
+ import { runCommand } from "../lib/run-command.js";
2
/**
 * Verify that whisper-cli is runnable by invoking `<command> -h`.
 *
 * @param {string} command - whisper-cli executable name or path to probe.
 * @throws {Error} If the command cannot be executed (not installed / not in PATH).
 */
export async function ensureWhisperCli(command) {
    try {
        await runCommand(command, ["-h"]);
    }
    catch (err) {
        // Preserve the underlying failure for debugging instead of discarding it.
        throw new Error(`whisper-cli is not installed or not in PATH (tried '${command}'). Please install whisper-cli.`, { cause: err });
    }
}
@@ -0,0 +1,138 @@
1
+ import { ensureFfmpeg } from "../../checks/ffmpeg.js";
2
+ import { ensureFfprobe } from "../../checks/ffprobe.js";
3
+ import { ensureWhisperCli } from "../../checks/whisper.js";
4
+ import { generateActionItemsWithOllama } from "../../pipeline/analysis/action-items/generate.js";
5
+ import { generateDecisionsWithOllama } from "../../pipeline/analysis/decisions/generate.js";
6
+ import { generateSummaryWithOllama } from "../../pipeline/analysis/summary/generate.js";
7
+ import { normalizeAudio } from "../../pipeline/audio/normalize.js";
8
+ import { probeAudio } from "../../pipeline/audio/probe.js";
9
+ import { validateInputFile } from "../../pipeline/audio/validate-input.js";
10
+ import { saveActionItems, saveDecisions, saveMeta, saveSummary, saveTranscript } from "../../pipeline/output/save.js";
11
+ import { prepareOutput } from "../../pipeline/output/prepare.js";
12
+ import { generateReport } from "../../pipeline/report/generate.js";
13
+ import { transcribeWithWhisper } from "../../pipeline/transcription/transcribe.js";
14
// Defaults sourced from the environment; the newer CLI entry point
// (dist/cli/index.js) reads these from the config system instead.
const DEFAULT_WHISPER_MODEL_PATH = process.env.WHISPER_MODEL_PATH ?? "";
const DEFAULT_OLLAMA_MODEL = process.env.OLLAMA_MODEL ?? "qwen2.5:14b";
// External tool commands. BUGFIX: ensureFfmpeg/ensureFfprobe/ensureWhisperCli
// require a command argument (they run `<command> -version` / `<command> -h`);
// previously they were called with no argument, so the checks probed
// `undefined` and always failed even when the tools were installed.
const DEFAULT_FFMPEG_COMMAND = process.env.FFMPEG_COMMAND ?? "ffmpeg";
const DEFAULT_FFPROBE_COMMAND = process.env.FFPROBE_COMMAND ?? "ffprobe";
const DEFAULT_WHISPER_COMMAND = process.env.WHISPER_COMMAND ?? "whisper-cli";
/**
 * Register the `process` command on the given commander program.
 *
 * Runs the full pipeline: validate input -> check tools -> normalize audio ->
 * transcribe -> summarize -> extract action items -> extract decisions ->
 * generate report, updating run metadata after each stage and persisting it
 * at the end.
 *
 * @param {import("commander").Command} program - CLI program to extend.
 */
export function registerProcessCommand(program) {
    program
        .command("process")
        .description("Process a recording file")
        .argument("<file>", "Path to the recording file")
        .option("-v, --verbose", "Enable verbose logging")
        .action(async (file, options) => {
        try {
            if (!DEFAULT_WHISPER_MODEL_PATH) {
                throw new Error("WHISPER_MODEL_PATH is not set. Please export WHISPER_MODEL_PATH=/absolute/path/to/model.bin");
            }
            const validatedFile = await validateInputFile(file);
            const preparedOutput = await prepareOutput(validatedFile);
            console.log("samuraizer started");
            console.log(`Input: ${validatedFile.resolvedPath}`);
            console.log(`Output dir: ${preparedOutput.paths.runDir}`);
            // Fail fast if any required external tool is missing.
            console.log("Checking ffmpeg...");
            await ensureFfmpeg(DEFAULT_FFMPEG_COMMAND);
            console.log("Checking ffprobe...");
            await ensureFfprobe(DEFAULT_FFPROBE_COMMAND);
            console.log("Checking whisper-cli...");
            await ensureWhisperCli(DEFAULT_WHISPER_COMMAND);
            console.log("Probing input audio...");
            preparedOutput.meta.input.audioMetadata = await probeAudio(validatedFile.resolvedPath);
            console.log("Normalizing audio...");
            await normalizeAudio({
                inputPath: validatedFile.resolvedPath,
                outputPath: preparedOutput.paths.normalizedAudioPath,
            });
            preparedOutput.meta.output.normalizedAudioPath = preparedOutput.paths.normalizedAudioPath;
            preparedOutput.meta.status = "audio_normalized";
            console.log("Probing normalized audio...");
            preparedOutput.meta.output.normalizedAudioMetadata = await probeAudio(preparedOutput.paths.normalizedAudioPath);
            preparedOutput.meta.status = "audio_probed";
            console.log("Transcribing audio...");
            const transcript = await transcribeWithWhisper({
                audioPath: preparedOutput.paths.normalizedAudioPath,
                // NOTE(review): runDir is passed as outputTextPath — presumably
                // whisper derives the file name itself; confirm against
                // transcribeWithWhisper.
                outputTextPath: preparedOutput.paths.runDir,
                modelPath: DEFAULT_WHISPER_MODEL_PATH,
            });
            await saveTranscript(preparedOutput.paths.transcriptJsonPath, transcript);
            preparedOutput.meta.output.transcriptTextPath = preparedOutput.paths.transcriptTextPath;
            preparedOutput.meta.output.transcriptJsonPath = preparedOutput.paths.transcriptJsonPath;
            preparedOutput.meta.transcription = {
                engine: "whisper.cpp",
                modelPath: DEFAULT_WHISPER_MODEL_PATH,
                textLength: transcript.text.length,
            };
            preparedOutput.meta.status = "transcribed";
            console.log("Generating summary...");
            const summary = await generateSummaryWithOllama({
                transcript: transcript.text,
                model: DEFAULT_OLLAMA_MODEL,
                summaryTextPath: preparedOutput.paths.summaryTextPath,
                sourceTranscriptPath: preparedOutput.paths.transcriptTextPath,
            });
            await saveSummary(preparedOutput.paths.summaryJsonPath, summary);
            preparedOutput.meta.output.summaryTextPath = preparedOutput.paths.summaryTextPath;
            preparedOutput.meta.output.summaryJsonPath = preparedOutput.paths.summaryJsonPath;
            preparedOutput.meta.summary = { model: DEFAULT_OLLAMA_MODEL, textLength: summary.summary.length };
            preparedOutput.meta.status = "summarized";
            console.log("Extracting action items...");
            const actionItems = await generateActionItemsWithOllama({
                transcript: transcript.text,
                model: DEFAULT_OLLAMA_MODEL,
                actionItemsTextPath: preparedOutput.paths.actionItemsTextPath,
                sourceTranscriptPath: preparedOutput.paths.transcriptTextPath,
            });
            await saveActionItems(preparedOutput.paths.actionItemsJsonPath, actionItems);
            preparedOutput.meta.output.actionItemsTextPath = preparedOutput.paths.actionItemsTextPath;
            preparedOutput.meta.output.actionItemsJsonPath = preparedOutput.paths.actionItemsJsonPath;
            preparedOutput.meta.actionItems = { model: DEFAULT_OLLAMA_MODEL, count: actionItems.items.length };
            preparedOutput.meta.status = "action_items_extracted";
            console.log("Extracting decisions...");
            const decisions = await generateDecisionsWithOllama({
                transcript: transcript.text,
                model: DEFAULT_OLLAMA_MODEL,
                decisionsTextPath: preparedOutput.paths.decisionsTextPath,
                sourceTranscriptPath: preparedOutput.paths.transcriptTextPath,
            });
            await saveDecisions(preparedOutput.paths.decisionsJsonPath, decisions);
            preparedOutput.meta.output.decisionsTextPath = preparedOutput.paths.decisionsTextPath;
            preparedOutput.meta.output.decisionsJsonPath = preparedOutput.paths.decisionsJsonPath;
            preparedOutput.meta.decisions = { model: DEFAULT_OLLAMA_MODEL, count: decisions.items.length };
            preparedOutput.meta.status = "decisions_extracted";
            console.log("Generating report...");
            await generateReport({
                reportPath: preparedOutput.paths.reportMarkdownPath,
                meetingTitle: validatedFile.baseName,
                sourceFileName: validatedFile.fileName,
                transcript,
                summary,
                actionItems,
                decisions,
            });
            preparedOutput.meta.output.reportMarkdownPath = preparedOutput.paths.reportMarkdownPath;
            preparedOutput.meta.report = { generated: true };
            preparedOutput.meta.status = "report_generated";
            // Persist the accumulated metadata for the run.
            await saveMeta(preparedOutput.paths, preparedOutput.meta);
            console.log("\nDone.");
            console.log(`Normalized audio: ${preparedOutput.paths.normalizedAudioPath}`);
            console.log(`Transcript: ${preparedOutput.paths.transcriptTextPath}`);
            console.log(`Summary: ${preparedOutput.paths.summaryTextPath}`);
            console.log(`Action items: ${preparedOutput.paths.actionItemsTextPath}`);
            console.log(`Decisions: ${preparedOutput.paths.decisionsTextPath}`);
            console.log(`Report: ${preparedOutput.paths.reportMarkdownPath}`);
            console.log(`Meta: ${preparedOutput.paths.metaFilePath}`);
            if (options.verbose) {
                console.log("\n--- Verbose ---");
                console.log("Transcript preview:", transcript.text.slice(0, 500));
                console.log("Summary preview:", summary.summary.slice(0, 500));
                console.log("Action items:", actionItems.items.slice(0, 5));
                console.log("Decisions:", decisions.items.slice(0, 5));
                console.log("Meta:", JSON.stringify(preparedOutput.meta, null, 2));
            }
        }
        catch (error) {
            const message = error instanceof Error ? error.message : "Unknown error";
            console.error(`Error: ${message}`);
            process.exitCode = 1;
        }
    });
}
@@ -0,0 +1,175 @@
1
#!/usr/bin/env node
// Samuraizer CLI entry point. Wires all user-facing commands to the config
// loader, the full pipeline orchestrator, and the individual tools from the
// tool registry. Every command prints errors and sets process.exitCode = 1
// instead of throwing, so commander's help output stays clean.
import { readFile } from "node:fs/promises";
import { Command } from "commander";
import { initConfig } from "../config/init.js";
import { loadConfig } from "../config/load.js";
import { getConfigFilePath } from "../config/paths.js";
import { processMeeting } from "../orchestrators/process-meeting.js";
import { runTool } from "../shared/tool-definition.js";
import { tools } from "../shared/tool-registry.js";
const program = new Command();
program
    .name("samuraizer")
    .description("Transform meeting recordings into structured knowledge")
    .version("0.0.1");
// `process` — run the full pipeline (normalize -> transcribe -> summarize ->
// action items -> decisions -> report) on a single recording.
program
    .command("process")
    .description("Run the full pipeline on an audio recording")
    .argument("<input>", "Path to the audio recording")
    .option("-v, --verbose", "Show detailed output after processing")
    .option("-f, --force", "Recompute all steps even if outputs already exist")
    .action(async (input, options) => {
    try {
        const config = await loadConfig();
        console.log(`Input: ${input}`);
        console.log(`Model: ${config.model}`);
        console.log(`Ollama URL: ${config.ollamaBaseUrl}`);
        const result = await processMeeting({
            inputPath: input,
            outputRootDir: config.outputDir,
            model: config.model,
            ollamaBaseUrl: config.ollamaBaseUrl,
            whisperCommand: config.whisperCommand,
            whisperModelPath: config.whisperModelPath,
            language: config.language,
            ffmpegCommand: config.ffmpegCommand,
            ffprobeCommand: config.ffprobeCommand,
            force: options.force,
        });
        console.log("\nDone.");
        console.log(`Output dir: ${result.paths.runDir}`);
        console.log(`Transcript: ${result.paths.transcriptTextPath}`);
        console.log(`Summary: ${result.paths.summaryTextPath}`);
        console.log(`Action items: ${result.paths.actionItemsTextPath}`);
        console.log(`Decisions: ${result.paths.decisionsTextPath}`);
        console.log(`Report: ${result.paths.reportMarkdownPath}`);
        console.log(`Meta: ${result.paths.metaFilePath}`);
        if (options.verbose) {
            console.log("\nMeta:", JSON.stringify(result.meta, null, 2));
        }
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
// `normalize` — standalone audio normalization to Whisper-compatible WAV.
program
    .command("normalize")
    .description("Normalize audio to Whisper-compatible WAV format")
    .argument("<input>", "Input audio file")
    .argument("<output>", "Output WAV file path")
    .action(async (input, output) => {
    try {
        const config = await loadConfig();
        const result = await runTool(tools.normalize_audio, {
            inputPath: input,
            outputPath: output,
            ffmpegCommand: config.ffmpegCommand,
        });
        console.log(`Normalized: ${result.normalizedAudioPath}`);
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
// `summarize` — run only the summarization tool on an existing transcript.
program
    .command("summarize")
    .description("Summarize a transcript file")
    .argument("<file>", "Path to the transcript text file")
    .action(async (file) => {
    try {
        const config = await loadConfig();
        const text = await readFile(file, "utf-8");
        const result = await runTool(tools.summarize_transcript, {
            transcriptText: text,
            model: config.model,
            ollamaBaseUrl: config.ollamaBaseUrl,
        });
        console.log(result.summary);
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
// `actions` — run only action-item extraction; prints the raw JSON result.
program
    .command("actions")
    .description("Extract action items from a transcript file")
    .argument("<file>", "Path to the transcript text file")
    .action(async (file) => {
    try {
        const config = await loadConfig();
        const text = await readFile(file, "utf-8");
        const result = await runTool(tools.extract_action_items, {
            transcriptText: text,
            model: config.model,
            ollamaBaseUrl: config.ollamaBaseUrl,
        });
        console.log(JSON.stringify(result, null, 2));
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
// `decisions` — run only decision extraction; prints the raw JSON result.
program
    .command("decisions")
    .description("Extract decisions from a transcript file")
    .argument("<file>", "Path to the transcript text file")
    .action(async (file) => {
    try {
        const config = await loadConfig();
        const text = await readFile(file, "utf-8");
        const result = await runTool(tools.extract_decisions, {
            transcriptText: text,
            model: config.model,
            ollamaBaseUrl: config.ollamaBaseUrl,
        });
        console.log(JSON.stringify(result, null, 2));
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
// `init` — create the config file with defaults; never overwrites.
program
    .command("init")
    .description("Create the Samuraizer config file with default values")
    .action(async () => {
    try {
        const result = await initConfig();
        if (result.created) {
            console.log(`Created config at: ${result.path}`);
        }
        else {
            console.log(`Config already exists at: ${result.path}`);
            console.log("No changes were made. Edit the file manually to update values.");
        }
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
// `config` — read-only inspection subcommands (`path`, `get`).
const configCommand = program.command("config").description("Inspect Samuraizer configuration");
configCommand
    .command("path")
    .description("Print the absolute path to the config file")
    .action(() => {
    console.log(getConfigFilePath());
});
configCommand
    .command("get")
    .description("Print the resolved configuration (file + env) as JSON")
    .action(async () => {
    try {
        const config = await loadConfig();
        console.log(JSON.stringify(config, null, 2));
    }
    catch (error) {
        console.error(`Error: ${error instanceof Error ? error.message : String(error)}`);
        process.exitCode = 1;
    }
});
program.parse(process.argv);
@@ -0,0 +1,21 @@
1
+ import { processMeeting } from "../orchestrators/process-meeting.js";
2
/**
 * Legacy env-driven entry point for the full pipeline (the newer CLI reads
 * these values from the config system instead).
 *
 * @param {string} inputPath - Path to the recording to process.
 * @throws {Error} If WHISPER_MODEL_PATH is not set.
 */
export async function processCommand(inputPath) {
    const whisperModelPath = process.env.WHISPER_MODEL_PATH;
    const summaryModel = process.env.SUMMARY_MODEL ?? "qwen2.5:14b";
    // BUGFIX: this previously read only the misspelled SAMMURAIZER_OUTPUT_DIR,
    // so the SAMURAIZER_OUTPUT_DIR name used everywhere else in the project
    // (see config/load.js) was silently ignored. Prefer the correct spelling;
    // keep the misspelled variable as a fallback for backward compatibility.
    const outputRootDir = process.env.SAMURAIZER_OUTPUT_DIR ??
        process.env.SAMMURAIZER_OUTPUT_DIR ??
        "./output";
    // `||` is intentional: an empty TRANSCRIPT_LANGUAGE falls back to "auto".
    const language = process.env.TRANSCRIPT_LANGUAGE || "auto";
    if (!whisperModelPath) {
        throw new Error("WHISPER_MODEL_PATH is not set.");
    }
    const result = await processMeeting({
        inputPath,
        outputRootDir,
        whisperModelPath,
        summaryModel,
        language,
    });
    console.log("samuraizer finished successfully");
    console.log(`Output directory: ${result.outputDir}`);
    console.log(`Transcript path: ${result.transcriptPath}`);
    console.log(`Summary path: ${result.summaryPath}`);
}
@@ -0,0 +1,7 @@
1
// Built-in configuration defaults; used as the base before file and
// environment overrides are applied.
export const defaultConfig = {
    // LLM used for summary / action-item / decision generation via Ollama.
    model: "qwen2.5:14b",
    // Base URL of the local Ollama server.
    ollamaBaseUrl: "http://127.0.0.1:11434",
    // External tool commands; each must be resolvable on PATH.
    whisperCommand: "whisper-cli",
    ffmpegCommand: "ffmpeg",
    ffprobeCommand: "ffprobe",
};
@@ -0,0 +1,21 @@
1
+ import { access, mkdir, writeFile } from "node:fs/promises";
2
+ import { getConfigDir, getConfigFilePath } from "./paths.js";
3
+ import { configTemplate } from "./template.js";
4
// Resolve to true when `filePath` exists and is accessible, false otherwise.
async function fileExists(filePath) {
    return access(filePath).then(
        () => true,
        () => false,
    );
}
13
+ export async function initConfig() {
14
+ const filePath = getConfigFilePath();
15
+ if (await fileExists(filePath)) {
16
+ return { path: filePath, created: false };
17
+ }
18
+ await mkdir(getConfigDir(), { recursive: true });
19
+ await writeFile(filePath, `${JSON.stringify(configTemplate, null, 2)}\n`, "utf-8");
20
+ return { path: filePath, created: true };
21
+ }
@@ -0,0 +1,57 @@
1
+ import { access, readFile } from "node:fs/promises";
2
+ import { getConfigFilePath } from "./paths.js";
3
+ import { configSchema, partialConfigSchema } from "./schema.js";
4
// Resolve to true when `filePath` exists and is accessible, false otherwise.
async function fileExists(filePath) {
    return access(filePath).then(
        () => true,
        () => false,
    );
}
13
/**
 * Read the JSON config file at `filePath` and validate it against the
 * partial schema (all fields optional; env overrides may fill the rest).
 *
 * @throws {Error} When the file is not valid JSON or fails schema validation.
 */
async function readConfigFile(filePath) {
    const raw = await readFile(filePath, "utf-8");
    let parsed;
    try {
        parsed = JSON.parse(raw);
    }
    catch {
        throw new Error(`Invalid config file at ${filePath}. Please fix the JSON or run 'samuraizer init' to recreate it.`);
    }
    const validation = partialConfigSchema.safeParse(parsed);
    if (validation.success) {
        return validation.data;
    }
    throw new Error(`Config file at ${filePath} is invalid: ${validation.error.message}`);
}
28
/**
 * Collect configuration overrides from SAMURAIZER_* environment variables.
 * Only variables that are set to a non-empty value are included.
 */
function readEnvOverrides() {
    const envToConfigKey = [
        ["SAMURAIZER_MODEL", "model"],
        ["SAMURAIZER_OLLAMA_BASE_URL", "ollamaBaseUrl"],
        ["SAMURAIZER_WHISPER_COMMAND", "whisperCommand"],
        ["SAMURAIZER_WHISPER_MODEL_PATH", "whisperModelPath"],
        ["SAMURAIZER_LANGUAGE", "language"],
        ["SAMURAIZER_FFMPEG_COMMAND", "ffmpegCommand"],
        ["SAMURAIZER_FFPROBE_COMMAND", "ffprobeCommand"],
        ["SAMURAIZER_OUTPUT_DIR", "outputDir"],
    ];
    const overrides = {};
    for (const [envName, configKey] of envToConfigKey) {
        const value = process.env[envName];
        if (value) {
            overrides[configKey] = value;
        }
    }
    return overrides;
}
48
/**
 * Load the resolved configuration: values from the config file merged with
 * environment overrides (env wins), then validated against the full schema.
 *
 * @throws {Error} When the config file is missing or fails validation.
 */
export async function loadConfig() {
    const filePath = getConfigFilePath();
    const exists = await fileExists(filePath);
    if (!exists) {
        throw new Error(`The config file is not created, please run 'samuraizer init' to create it. Expected at: ${filePath}`);
    }
    const fromFile = await readConfigFile(filePath);
    const fromEnv = readEnvOverrides();
    // Env overrides take precedence over file values.
    return configSchema.parse({ ...fromFile, ...fromEnv });
}