@voidwire/llm-summarize 3.4.0 → 3.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.ts +14 -1
  2. package/package.json +5 -5
package/index.ts CHANGED
@@ -30,6 +30,8 @@ export interface SessionInsights {
30
30
  should_search?: boolean;
31
31
  extractions?: Extraction[];
32
32
  // Insights mode fields
33
+ current_focus?: string;
34
+ next_steps?: string[];
33
35
  decisions?: string[];
34
36
  patterns_used?: string[];
35
37
  preferences_expressed?: string[];
@@ -39,6 +41,7 @@ export interface SessionInsights {
39
41
 
40
42
  export interface SummarizeResult {
41
43
  insights?: SessionInsights;
44
+ rawText?: string;
42
45
  error?: string;
43
46
  model?: string;
44
47
  tokens_used?: number;
@@ -58,6 +61,8 @@ export interface SummarizeOptions {
58
61
  mode?: "quick" | "insights";
59
62
  /** User name to include in summary (e.g., "Rudy") */
60
63
  userName?: string;
64
+ /** Override the system prompt (bypasses mode-based prompt selection) */
65
+ systemPrompt?: string;
61
66
  }
62
67
 
63
68
  export type ProviderType = "anthropic" | "openai" | "ollama";
@@ -152,6 +157,8 @@ Your job: extract what's worth remembering for future sessions.
152
157
  <output_schema>
153
158
  {
154
159
  "summary": "One sentence capturing what was accomplished and how",
160
+ "current_focus": "What work is actively in progress — the specific task, feature, or thread (omit if session was exploratory with no clear focus)",
161
+ "next_steps": ["What should happen when work resumes — concrete actions, not vague intentions"],
155
162
  "decisions": ["Decision made with reasoning and trade-offs considered"],
156
163
  "patterns_used": ["Development pattern or approach, with context on why it was chosen"],
157
164
  "preferences_expressed": ["Preference revealed through direction or feedback"],
@@ -421,12 +428,14 @@ async function callAnthropic(
421
428
 
422
429
  if (!insights) {
423
430
  return {
431
+ rawText: content,
424
432
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
425
433
  };
426
434
  }
427
435
 
428
436
  return {
429
437
  insights,
438
+ rawText: content,
430
439
  model,
431
440
  tokens_used: result.usage?.output_tokens,
432
441
  };
@@ -487,12 +496,14 @@ async function callOpenAI(
487
496
 
488
497
  if (!insights) {
489
498
  return {
499
+ rawText: content,
490
500
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
491
501
  };
492
502
  }
493
503
 
494
504
  return {
495
505
  insights,
506
+ rawText: content,
496
507
  model,
497
508
  tokens_used: result.usage?.completion_tokens,
498
509
  };
@@ -554,12 +565,14 @@ async function callOllama(
554
565
 
555
566
  if (!insights) {
556
567
  return {
568
+ rawText: content,
557
569
  error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
558
570
  };
559
571
  }
560
572
 
561
573
  return {
562
574
  insights,
575
+ rawText: content,
563
576
  model,
564
577
  tokens_used: result.eval_count,
565
578
  };
@@ -597,7 +610,7 @@ export async function summarize(
597
610
  const apiKey = config.apiKey;
598
611
  const mode: SummarizeMode = options?.mode || "insights";
599
612
  const userName = options?.userName;
600
- const systemPrompt = getPromptForMode(mode, userName);
613
+ const systemPrompt = options?.systemPrompt || getPromptForMode(mode, userName);
601
614
 
602
615
  // Validate config
603
616
  if (!provider) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@voidwire/llm-summarize",
3
- "version": "3.4.0",
3
+ "version": "3.5.0",
4
4
  "description": "Structured session insight extraction for knowledge systems",
5
5
  "type": "module",
6
6
  "main": "./index.ts",
@@ -18,9 +18,6 @@
18
18
  "README.md",
19
19
  "LICENSE"
20
20
  ],
21
- "scripts": {
22
- "test": "bun test"
23
- },
24
21
  "keywords": [
25
22
  "llm",
26
23
  "summarize",
@@ -42,5 +39,8 @@
42
39
  },
43
40
  "engines": {
44
41
  "bun": ">=1.0.0"
42
+ },
43
+ "scripts": {
44
+ "test": "bun test"
45
45
  }
46
- }
46
+ }