@voidwire/llm-summarize 3.0.0 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/cli.ts +21 -1
  2. package/index.ts +59 -12
  3. package/package.json +5 -5
package/cli.ts CHANGED
@@ -29,7 +29,12 @@
  * 2 - Client error (missing args, invalid config)
  */
 
-import { summarize, loadConfig, type SummarizeOptions } from "./index";
+import {
+  summarize,
+  loadConfig,
+  type SummarizeOptions,
+  type SummarizeMode,
+} from "./index";
 
 /**
  * Read text from stdin
@@ -60,11 +65,16 @@ Usage: llm-summarize [options] <text>
   llm-summarize --stdin
 
 Options:
+  --mode <mode>      Summarization mode: quick or insights (default: insights)
   --model <name>     Override model from config
   --max-tokens <n>   Max output tokens (default: from config or 1024)
   --stdin            Read text from stdin
   -h, --help         Show this help
 
+Modes:
+  quick    - Fast one-liner summary (for user prompts)
+  insights - Full SessionInsights extraction (for responses)
+
 Config file: ~/.config/llm/config.toml
   [llm]
   provider = "ollama"
@@ -127,6 +137,7 @@ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {
 
   let modelOverride: string | undefined;
   let maxTokensOverride: number | undefined;
+  let modeOverride: SummarizeMode | undefined;
   let useStdin = false;
   let text = "";
 
@@ -137,6 +148,14 @@ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {
       modelOverride = args[++i];
     } else if (arg === "--max-tokens" && i + 1 < args.length) {
       maxTokensOverride = parseInt(args[++i], 10);
+    } else if (arg === "--mode" && i + 1 < args.length) {
+      const mode = args[++i];
+      if (mode === "quick" || mode === "insights") {
+        modeOverride = mode;
+      } else {
+        console.error(`Invalid mode: ${mode}. Use 'quick' or 'insights'.`);
+        process.exit(2);
+      }
     } else if (arg === "--stdin") {
       useStdin = true;
     } else if (!arg.startsWith("-")) {
@@ -154,6 +173,7 @@ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {
     options: {
       model: modelOverride,
       maxTokens: maxTokensOverride,
+      mode: modeOverride,
     },
   };
 }
package/index.ts CHANGED
@@ -45,36 +45,71 @@ export interface LLMConfig {
 export interface SummarizeOptions {
   model?: string;
   maxTokens?: number;
+  mode?: "quick" | "insights";
+  /** User name to include in summary (e.g., "Rudy") */
+  userName?: string;
 }
 
 export type ProviderType = "anthropic" | "openai" | "ollama";
+export type SummarizeMode = "quick" | "insights";
 
 // ============================================================================
-// System Prompt
+// System Prompts
 // ============================================================================
 
-const SYSTEM_PROMPT = `You are an experienced engineering manager reviewing session transcripts to extract actionable team insights.
+/**
+ * Build quick mode prompt with optional user name
+ */
+function buildQuickPrompt(userName?: string): string {
+  const nameInstruction = userName ? `Start with "${userName}".` : "";
+
+  return `Summarize what the user is asking or doing in one sentence.
+${nameInstruction}
+Output JSON only: {"summary": "One sentence summary"}`;
+}
+
+/**
+ * Build insights mode prompt with optional user name
+ */
+function buildInsightsPrompt(userName?: string): string {
+  const nameInstruction = userName
+    ? `Start the summary with "${userName}".`
+    : "";
 
-Analyze the development session conversation and extract structured observations.
+  return `You are an experienced engineering manager reviewing session transcripts to extract actionable insights.
+
+Analyze the development session and extract structured observations.
 
 <output_schema>
 {
   "summary": "One sentence: what was accomplished or decided",
   "decisions": ["Specific decision and its reasoning"],
   "patterns_used": ["Development pattern or approach observed"],
-  "preferences_expressed": ["User preference revealed through actions or statements"],
-  "problems_solved": ["Problem that was addressed and how"],
-  "tools_heavy": ["Tool used repeatedly or in notable ways"]
+  "preferences_expressed": ["Preference revealed through actions - DO NOT include user name"],
+  "problems_solved": ["Problem addressed and how - DO NOT include user name"],
+  "tools_heavy": ["Tool used repeatedly or notably"]
 }
 </output_schema>
 
 <rules>
+- ${nameInstruction || "Write summary in third person."}
 - Include a field ONLY when the conversation provides clear evidence
 - Extract specifics: "Chose SQLite over Postgres for single-user simplicity" not "Made a database decision"
 - Omit empty arrays entirely
+- IMPORTANT: Only use user name in the summary field, nowhere else
 </rules>
 
 Output valid JSON only. No markdown code blocks, no explanation.`;
+}
+
+/**
+ * Get prompt for the specified mode
+ */
+function getPromptForMode(mode: SummarizeMode, userName?: string): string {
+  return mode === "quick"
+    ? buildQuickPrompt(userName)
+    : buildInsightsPrompt(userName);
+}
 
 // ============================================================================
 // Response Parsing
@@ -258,6 +293,7 @@ async function callAnthropic(
   model: string,
   maxTokens: number,
   apiKey: string,
+  systemPrompt: string,
   apiBase?: string,
 ): Promise<SummarizeResult> {
   const endpoint = apiBase || "https://api.anthropic.com/v1/messages";
@@ -274,7 +310,7 @@
       model,
       max_tokens: maxTokens,
       temperature: 0.3,
-      system: SYSTEM_PROMPT,
+      system: systemPrompt,
       messages: [
         {
           role: "user",
@@ -321,6 +357,7 @@ async function callOpenAI(
   model: string,
   maxTokens: number,
   apiKey: string,
+  systemPrompt: string,
   apiBase?: string,
 ): Promise<SummarizeResult> {
   const endpoint = apiBase || "https://api.openai.com/v1/chat/completions";
@@ -339,7 +376,7 @@
       messages: [
         {
           role: "system",
-          content: SYSTEM_PROMPT,
+          content: systemPrompt,
         },
         {
           role: "user",
@@ -386,6 +423,7 @@ async function callOllama(
   model: string,
   maxTokens: number,
   apiBase: string,
+  systemPrompt: string,
 ): Promise<SummarizeResult> {
   const endpoint = `${apiBase}/api/chat`;
 
@@ -400,7 +438,7 @@
       messages: [
         {
           role: "system",
-          content: SYSTEM_PROMPT,
+          content: systemPrompt,
         },
         {
           role: "user",
@@ -453,8 +491,12 @@
  *
  * @param text - Text to summarize
  * @param config - LLM configuration (from loadConfig())
- * @param options - Optional overrides for model and maxTokens
- * @returns SummarizeResult with summary or error
+ * @param options - Optional overrides for model, maxTokens, and mode
+ * @returns SummarizeResult with insights or error
+ *
+ * Modes:
+ * - "quick": Fast one-liner summary (for user prompts)
+ * - "insights": Full SessionInsights extraction (for responses, default)
  */
 export async function summarize(
   text: string,
@@ -465,6 +507,9 @@ export async function summarize(
   const model = options?.model || config.model;
   const maxTokens = options?.maxTokens || config.maxTokens;
   const apiKey = config.apiKey;
+  const mode: SummarizeMode = options?.mode || "insights";
+  const userName = options?.userName;
+  const systemPrompt = getPromptForMode(mode, userName);
 
   // Validate config
   if (!provider) {
@@ -493,6 +538,7 @@ export async function summarize(
       model,
       maxTokens,
       apiKey!,
+      systemPrompt,
       config.apiBase || undefined,
     );
   } else if (provider === "openai") {
@@ -501,6 +547,7 @@
       model,
       maxTokens,
       apiKey!,
+      systemPrompt,
       config.apiBase || undefined,
     );
   } else if (provider === "ollama") {
@@ -509,7 +556,7 @@
       error: `No api_base configured for ollama. Set api_base in ~/.config/llm/config.toml`,
     };
   }
-    return callOllama(text, model, maxTokens, config.apiBase);
+    return callOllama(text, model, maxTokens, config.apiBase, systemPrompt);
   } else {
     return {
       error: `Unknown provider: ${provider}. Supported: anthropic, openai, ollama`,
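Library callers opt into the new behavior through SummarizeOptions. A minimal usage sketch, assuming the exports shown in this diff (summarize, loadConfig, and the error field on SummarizeResult); the input strings are hypothetical, and loadConfig is awaited defensively since its signature is not visible here:

import { summarize, loadConfig } from "@voidwire/llm-summarize";

const config = await loadConfig();

// quick mode: one-sentence summary of a user prompt; with userName set, the
// prompt instructs the model to begin the sentence with "Rudy".
const quick = await summarize("How should I shard the events table?", config, {
  mode: "quick",
  userName: "Rudy",
});

// insights mode (the default): full structured extraction per <output_schema>,
// with the user name confined to the summary field.
const transcript = "hypothetical full session transcript";
const insights = await summarize(transcript, config, { maxTokens: 2048 });
if (insights.error) {
  console.error(insights.error);
}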
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@voidwire/llm-summarize",
-  "version": "3.0.0",
+  "version": "3.2.0",
   "description": "Structured session insight extraction for knowledge systems",
   "type": "module",
   "main": "./index.ts",
@@ -18,6 +18,9 @@
     "README.md",
     "LICENSE"
   ],
+  "scripts": {
+    "test": "bun test"
+  },
   "keywords": [
     "llm",
     "summarize",
@@ -39,8 +42,5 @@
   },
   "engines": {
     "bun": ">=1.0.0"
-  },
-  "scripts": {
-    "test": "bun test"
   }
-}
+}