@voidwire/llm-summarize 3.3.0 → 3.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.ts +86 -5
  2. package/package.json +1 -1
package/index.ts CHANGED
@@ -18,8 +18,20 @@ import { join } from "path";
18
18
  // Types
19
19
  // ============================================================================
20
20
 
21
+ export interface Extraction {
22
+ term: string;
23
+ type: "project" | "topic" | "tool" | "person";
24
+ confidence: "high" | "medium";
25
+ }
26
+
21
27
  export interface SessionInsights {
22
28
  summary: string;
29
+ // Quick mode extraction fields
30
+ should_search?: boolean;
31
+ extractions?: Extraction[];
32
+ // Insights mode fields
33
+ current_focus?: string;
34
+ next_steps?: string[];
23
35
  decisions?: string[];
24
36
  patterns_used?: string[];
25
37
  preferences_expressed?: string[];
@@ -29,6 +41,7 @@ export interface SessionInsights {
29
41
 
30
42
  export interface SummarizeResult {
31
43
  insights?: SessionInsights;
44
+ rawText?: string;
32
45
  error?: string;
33
46
  model?: string;
34
47
  tokens_used?: number;
@@ -48,6 +61,8 @@ export interface SummarizeOptions {
48
61
  mode?: "quick" | "insights";
49
62
  /** User name to include in summary (e.g., "Rudy") */
50
63
  userName?: string;
64
+ /** Override the system prompt (bypasses mode-based prompt selection) */
65
+ systemPrompt?: string;
51
66
  }
52
67
 
53
68
  export type ProviderType = "anthropic" | "openai" | "ollama";
@@ -59,13 +74,71 @@ export type SummarizeMode = "quick" | "insights";
59
74
 
60
75
  /**
61
76
  * Build quick mode prompt with optional user name
77
+ * Now includes context extraction for knowledge retrieval
62
78
  */
63
79
  function buildQuickPrompt(userName?: string): string {
64
- const nameInstruction = userName ? `Start with "${userName}".` : "";
80
+ const name = userName || "User";
81
+
82
+ return `You are a context classifier for knowledge retrieval. Analyze conversation context to determine what prior knowledge would be valuable.
83
+
84
+ Input format:
85
+ Project: <project name>
86
+ Previous Assistant: <last assistant message>
87
+ User Prompt: <current user message>
88
+
89
+ Produce JSON with:
90
+ 1. summary: Brief description (1-2 sentences) of what the user is doing/asking. Start with "${name}".
91
+ 2. should_search: Whether to search the knowledge base
92
+ 3. extractions: Terms worth searching for
93
+
94
+ should_search = true when:
95
+ - References past work, decisions, discussions
96
+ - Mentions project, tool, or person by name
97
+ - Asks "what was...", "how did we...", "remember when..."
98
+ - Technical domain benefits from prior learnings
99
+
100
+ should_search = false when:
101
+ - Greetings, acknowledgments ("ready", "thanks", "ok")
102
+ - Simple commands ("run tests", "commit this")
103
+ - Continuation signals ("yes", "do it", "go ahead")
104
+
105
+ Extraction types:
106
+ - project: Named codebase, repo, system (sable, lore, momentum)
107
+ - topic: Domain, concept, technical area (hooks, authentication, Tier 2)
108
+ - tool: Library, CLI, framework (llm-summarize, SQLite, Bun)
109
+ - person: Named individual
110
+
111
+ Confidence:
112
+ - high: Explicitly stated
113
+ - medium: Strongly implied
114
+
115
+ Skip generic words. Only extract terms that yield useful knowledge results.
116
+
117
+ <example>
118
+ Project: sable
119
+ Previous Assistant: I'll update the UserPromptSubmit hook to call llm-summarize.
120
+ User Prompt: What does Lore return for project queries?
121
+
122
+ {"summary": "${name} is asking about Lore's return format for project queries", "should_search": true, "extractions": [{"term": "Lore", "type": "project", "confidence": "high"}, {"term": "project queries", "type": "topic", "confidence": "high"}]}
123
+ </example>
124
+
125
+ <example>
126
+ Project: sable
127
+ Previous Assistant: The extraction prompt is ready. Should I add it?
128
+ User Prompt: yes do it
129
+
130
+ {"summary": "${name} is confirming to proceed with the extraction prompt", "should_search": false, "extractions": []}
131
+ </example>
132
+
133
+ <example>
134
+ Project: sable
135
+ Previous Assistant: Starting new session.
136
+ User Prompt: What was the issue we hit with the stop hook last time?
137
+
138
+ {"summary": "${name} is asking about a previous issue with the stop hook", "should_search": true, "extractions": [{"term": "stop hook", "type": "topic", "confidence": "high"}, {"term": "sable", "type": "project", "confidence": "medium"}]}
139
+ </example>
65
140
 
66
- return `Summarize what the user is asking or doing in one sentence.
67
- ${nameInstruction}
68
- Output JSON only: {"summary": "One sentence summary"}`;
141
+ Output valid JSON only. No markdown, no explanation.`;
69
142
  }
70
143
 
71
144
  /**
@@ -84,6 +157,8 @@ Your job: extract what's worth remembering for future sessions.
84
157
  <output_schema>
85
158
  {
86
159
  "summary": "One sentence capturing what was accomplished and how",
160
+ "current_focus": "What work is actively in progress — the specific task, feature, or thread (omit if session was exploratory with no clear focus)",
161
+ "next_steps": ["What should happen when work resumes — concrete actions, not vague intentions"],
87
162
  "decisions": ["Decision made with reasoning and trade-offs considered"],
88
163
  "patterns_used": ["Development pattern or approach, with context on why it was chosen"],
89
164
  "preferences_expressed": ["Preference revealed through direction or feedback"],
@@ -353,12 +428,14 @@ async function callAnthropic(
353
428
 
354
429
  if (!insights) {
355
430
  return {
431
+ rawText: content,
356
432
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
357
433
  };
358
434
  }
359
435
 
360
436
  return {
361
437
  insights,
438
+ rawText: content,
362
439
  model,
363
440
  tokens_used: result.usage?.output_tokens,
364
441
  };
@@ -419,12 +496,14 @@ async function callOpenAI(
419
496
 
420
497
  if (!insights) {
421
498
  return {
499
+ rawText: content,
422
500
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
423
501
  };
424
502
  }
425
503
 
426
504
  return {
427
505
  insights,
506
+ rawText: content,
428
507
  model,
429
508
  tokens_used: result.usage?.completion_tokens,
430
509
  };
@@ -486,12 +565,14 @@ async function callOllama(
486
565
 
487
566
  if (!insights) {
488
567
  return {
568
+ rawText: content,
489
569
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
490
570
  };
491
571
  }
492
572
 
493
573
  return {
494
574
  insights,
575
+ rawText: content,
495
576
  model,
496
577
  tokens_used: result.eval_count,
497
578
  };
@@ -529,7 +610,7 @@ export async function summarize(
529
610
  const apiKey = config.apiKey;
530
611
  const mode: SummarizeMode = options?.mode || "insights";
531
612
  const userName = options?.userName;
532
- const systemPrompt = getPromptForMode(mode, userName);
613
+ const systemPrompt = options?.systemPrompt || getPromptForMode(mode, userName);
533
614
 
534
615
  // Validate config
535
616
  if (!provider) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@voidwire/llm-summarize",
3
- "version": "3.3.0",
3
+ "version": "3.5.0",
4
4
  "description": "Structured session insight extraction for knowledge systems",
5
5
  "type": "module",
6
6
  "main": "./index.ts",