@voidwire/llm-summarize 3.4.0 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/index.ts +54 -44
  2. package/package.json +5 -5
package/index.ts CHANGED
@@ -30,15 +30,17 @@ export interface SessionInsights {
30
30
  should_search?: boolean;
31
31
  extractions?: Extraction[];
32
32
  // Insights mode fields
33
+ current_focus?: string;
34
+ next_steps?: string[];
33
35
  decisions?: string[];
34
36
  patterns_used?: string[];
35
37
  preferences_expressed?: string[];
36
38
  problems_solved?: string[];
37
- tools_heavy?: string[];
38
39
  }
39
40
 
40
41
  export interface SummarizeResult {
41
42
  insights?: SessionInsights;
43
+ rawText?: string;
42
44
  error?: string;
43
45
  model?: string;
44
46
  tokens_used?: number;
@@ -58,6 +60,8 @@ export interface SummarizeOptions {
58
60
  mode?: "quick" | "insights";
59
61
  /** User name to include in summary (e.g., "Rudy") */
60
62
  userName?: string;
63
+ /** Override the system prompt (bypasses mode-based prompt selection) */
64
+ systemPrompt?: string;
61
65
  }
62
66
 
63
67
  export type ProviderType = "anthropic" | "openai" | "ollama";
@@ -141,53 +145,52 @@ Output valid JSON only. No markdown, no explanation.`;
141
145
  * Note: userName param kept for API compatibility but not used in insights mode
142
146
  */
143
147
  function buildInsightsPrompt(_userName?: string): string {
144
- return `You are a senior engineering manager extracting reusable insights from development sessions.
148
+ return `You are an engineering knowledge extractor. Given a development session transcript, extract reusable insights as structured JSON.
145
149
 
146
- You receive transcripts with clear role markers:
147
- - "User Asked:" = the human directing work (requests, approves, provides context)
148
- - "Assistant Response:" = the AI executing work (implements, builds, debugs, explains)
150
+ Transcripts use role markers:
151
+ - "User Asked:" = the human (directs, decides, provides context)
152
+ - "Assistant Response:" = the AI (implements, builds, debugs)
149
153
 
150
- Your job: extract what's worth remembering for future sessions.
154
+ <output_format>
155
+ Return a JSON object with these fields. Include a field ONLY when the transcript provides clear evidence. Omit empty arrays entirely.
151
156
 
152
- <output_schema>
153
157
  {
154
- "summary": "One sentence capturing what was accomplished and how",
155
- "decisions": ["Decision made with reasoning and trade-offs considered"],
156
- "patterns_used": ["Development pattern or approach, with context on why it was chosen"],
157
- "preferences_expressed": ["Preference revealed through direction or feedback"],
158
- "problems_solved": ["Problem encountered and the specific solution applied"],
159
- "tools_heavy": ["Tool used repeatedly or for critical work"]
158
+ "summary": "One sentence: what was accomplished and the key outcome",
159
+ "current_focus": "The specific task, feature, or problem actively being worked on (omit if exploratory)",
160
+ "next_steps": ["Concrete action to take when work resumes — name the actual task"],
161
+ "decisions": ["Decision made — rationale and what alternatives were considered"],
162
+ "patterns_used": ["Technique or approach applied — why it was chosen over alternatives"],
163
+ "preferences_expressed": ["User preference revealed through direction, correction, or explicit statement"],
164
+ "problems_solved": ["Problem encountered — root cause identified and specific fix applied"]
160
165
  }
161
- </output_schema>
162
-
163
- <attribution_rules>
164
- - User actions: requested, approved, directed, provided, chose, preferred
165
- - Assistant actions: implemented, built, debugged, refactored, created, fixed
166
- - Never say "User implemented" or "User built" — users direct, assistants execute
167
- </attribution_rules>
168
-
169
- <quality_guidance>
170
- Extract specifics with context, not bare facts:
171
-
172
- SPARSE (avoid):
173
- - "Made a database decision"
174
- - "Fixed a bug"
175
- - "Used TypeScript"
176
-
177
- RICH (prefer):
178
- "Chose SQLite over Postgres for single-user CLI tool — avoids server dependency"
179
- - "Fixed race condition in webhook handler by adding mutex lock — was causing duplicate events"
180
- - "Used Zod for runtime validation at API boundary — catches malformed input before it hits business logic"
181
- </quality_guidance>
182
-
183
- <rules>
184
- - Include a field ONLY when the transcript provides clear evidence
185
- - Omit empty arrays entirely
186
- - Capture the "why" when present — reasoning is more valuable than the decision alone
187
- - Technical specifics (library names, patterns, trade-offs) make insights reusable
188
- </rules>
189
-
190
- Output valid JSON only. No markdown code blocks, no explanation.`;
166
+ </output_format>
167
+
168
+ <quality_rules>
169
+ Every value MUST be a complete sentence with context. Never output bare nouns, short phrases, or sentence fragments.
170
+
171
+ BAD (will be rejected):
172
+ - "SQLite"
173
+ - "detached worker"
174
+ - "Fixed bug"
175
+ - "Continue working"
176
+
177
+ GOOD (specific, contextual, reusable):
178
+ - "Chose SQLite over Postgres for single-user CLI — no server dependency needed"
179
+ - "Used detached worker pattern to avoid blocking the stop hook during LLM calls"
180
+ - "Fixed state file writing to wrong directory — was using read-only data path instead of persistent home"
181
+ - "Wire up the webhook endpoint to the event processor and verify with integration test"
182
+
183
+ For next_steps specifically: never say "Continue from current position" or "Resume work" — name the actual task to be done.
184
+ </quality_rules>
185
+
186
+ <attribution>
187
+ Users direct and decide. Assistants implement and execute.
188
+ - User: requested, approved, directed, chose, preferred, corrected
189
+ - Assistant: implemented, built, debugged, refactored, created, fixed
190
+ - Never say "User implemented" or "User built"
191
+ </attribution>
192
+
193
+ Output valid JSON only. No markdown, no code blocks, no explanation.`;
191
194
  }
192
195
 
193
196
  /**
@@ -421,12 +424,14 @@ async function callAnthropic(
421
424
 
422
425
  if (!insights) {
423
426
  return {
427
+ rawText: content,
424
428
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
425
429
  };
426
430
  }
427
431
 
428
432
  return {
429
433
  insights,
434
+ rawText: content,
430
435
  model,
431
436
  tokens_used: result.usage?.output_tokens,
432
437
  };
@@ -487,12 +492,14 @@ async function callOpenAI(
487
492
 
488
493
  if (!insights) {
489
494
  return {
495
+ rawText: content,
490
496
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
491
497
  };
492
498
  }
493
499
 
494
500
  return {
495
501
  insights,
502
+ rawText: content,
496
503
  model,
497
504
  tokens_used: result.usage?.completion_tokens,
498
505
  };
@@ -554,12 +561,14 @@ async function callOllama(
554
561
 
555
562
  if (!insights) {
556
563
  return {
564
+ rawText: content,
557
565
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
558
566
  };
559
567
  }
560
568
 
561
569
  return {
562
570
  insights,
571
+ rawText: content,
563
572
  model,
564
573
  tokens_used: result.eval_count,
565
574
  };
@@ -597,7 +606,8 @@ export async function summarize(
597
606
  const apiKey = config.apiKey;
598
607
  const mode: SummarizeMode = options?.mode || "insights";
599
608
  const userName = options?.userName;
600
- const systemPrompt = getPromptForMode(mode, userName);
609
+ const systemPrompt =
610
+ options?.systemPrompt || getPromptForMode(mode, userName);
601
611
 
602
612
  // Validate config
603
613
  if (!provider) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@voidwire/llm-summarize",
3
- "version": "3.4.0",
3
+ "version": "3.6.0",
4
4
  "description": "Structured session insight extraction for knowledge systems",
5
5
  "type": "module",
6
6
  "main": "./index.ts",
@@ -18,9 +18,6 @@
18
18
  "README.md",
19
19
  "LICENSE"
20
20
  ],
21
- "scripts": {
22
- "test": "bun test"
23
- },
24
21
  "keywords": [
25
22
  "llm",
26
23
  "summarize",
@@ -42,5 +39,8 @@
42
39
  },
43
40
  "engines": {
44
41
  "bun": ">=1.0.0"
42
+ },
43
+ "scripts": {
44
+ "test": "bun test"
45
45
  }
46
- }
46
+ }