@voidwire/llm-summarize 3.0.0 → 3.1.0
This diff shows the published contents of two package versions as they appear in their public registry, and is provided for informational purposes only.
- package/cli.ts +21 -1
- package/index.ts +40 -8
- package/package.json +5 -5
package/cli.ts
CHANGED

@@ -29,7 +29,12 @@
  * 2 - Client error (missing args, invalid config)
  */

-import { summarize, loadConfig, type SummarizeOptions } from "./index";
+import {
+  summarize,
+  loadConfig,
+  type SummarizeOptions,
+  type SummarizeMode,
+} from "./index";

 /**
  * Read text from stdin
@@ -60,11 +65,16 @@ Usage: llm-summarize [options] <text>
   llm-summarize --stdin

 Options:
+  --mode <mode>      Summarization mode: quick or insights (default: insights)
   --model <name>     Override model from config
   --max-tokens <n>   Max output tokens (default: from config or 1024)
   --stdin            Read text from stdin
   -h, --help         Show this help

+Modes:
+  quick    - Fast one-liner summary (for user prompts)
+  insights - Full SessionInsights extraction (for responses)
+
 Config file: ~/.config/llm/config.toml
   [llm]
   provider = "ollama"
@@ -127,6 +137,7 @@ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {

   let modelOverride: string | undefined;
   let maxTokensOverride: number | undefined;
+  let modeOverride: SummarizeMode | undefined;
   let useStdin = false;
   let text = "";

@@ -137,6 +148,14 @@ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {
       modelOverride = args[++i];
     } else if (arg === "--max-tokens" && i + 1 < args.length) {
       maxTokensOverride = parseInt(args[++i], 10);
+    } else if (arg === "--mode" && i + 1 < args.length) {
+      const mode = args[++i];
+      if (mode === "quick" || mode === "insights") {
+        modeOverride = mode;
+      } else {
+        console.error(`Invalid mode: ${mode}. Use 'quick' or 'insights'.`);
+        process.exit(2);
+      }
     } else if (arg === "--stdin") {
       useStdin = true;
     } else if (!arg.startsWith("-")) {
@@ -154,6 +173,7 @@ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {
     options: {
       model: modelOverride,
       maxTokens: maxTokensOverride,
+      mode: modeOverride,
     },
   };
 }
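The CLI change is self-contained: `--mode` is parsed and validated in `parseArgs` before any provider is called, and an invalid value exits with code 2, the client-error code documented at the top of the file. For library consumers, a minimal sketch of the equivalent programmatic call follows (the sample text is hypothetical, and `loadConfig()` is awaited defensively because this diff does not show whether it is sync or async):

import { summarize, loadConfig, type SummarizeMode } from "@voidwire/llm-summarize";

// Equivalent of: llm-summarize --mode quick "Rudy asked about caching embeddings"
const config = await loadConfig();      // reads ~/.config/llm/config.toml
const mode: SummarizeMode = "quick";    // omit the option to get the "insights" default

const result = await summarize("Rudy asked about caching embeddings", config, { mode });

if (result.error) {
  console.error(result.error);          // e.g. no provider or api_base configured
} else {
  console.log(result);                  // quick mode outputs {"summary": "..."}
}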
package/index.ts
CHANGED

@@ -45,15 +45,28 @@ export interface LLMConfig {
 export interface SummarizeOptions {
   model?: string;
   maxTokens?: number;
+  mode?: "quick" | "insights";
 }

 export type ProviderType = "anthropic" | "openai" | "ollama";
+export type SummarizeMode = "quick" | "insights";

 // ============================================================================
-// System
+// System Prompts
 // ============================================================================

-
+/**
+ * Quick mode: Fast one-liner summary for user prompts
+ */
+const QUICK_PROMPT = `Summarize what the user is asking or doing in one sentence.
+Use the user's name from the context in your summary (e.g., "Rudy asked about...").
+
+Output JSON only: {"summary": "One sentence summary"}`;
+
+/**
+ * Insights mode: Full SessionInsights extraction for responses
+ */
+const INSIGHTS_PROMPT = `You are an experienced engineering manager reviewing session transcripts to extract actionable team insights.

 Analyze the development session conversation and extract structured observations.

@@ -69,6 +82,7 @@ Analyze the development session conversation and extract structured observations
 </output_schema>

 <rules>
+- Use the user's name from the context in the summary field (e.g., "Rudy implemented...")
 - Include a field ONLY when the conversation provides clear evidence
 - Extract specifics: "Chose SQLite over Postgres for single-user simplicity" not "Made a database decision"
 - Omit empty arrays entirely
@@ -76,6 +90,13 @@ Analyze the development session conversation and extract structured observations

 Output valid JSON only. No markdown code blocks, no explanation.`;

+/**
+ * Get prompt for the specified mode
+ */
+function getPromptForMode(mode: SummarizeMode): string {
+  return mode === "quick" ? QUICK_PROMPT : INSIGHTS_PROMPT;
+}
+
 // ============================================================================
 // Response Parsing
 // ============================================================================
@@ -258,6 +279,7 @@ async function callAnthropic(
   model: string,
   maxTokens: number,
   apiKey: string,
+  systemPrompt: string,
   apiBase?: string,
 ): Promise<SummarizeResult> {
   const endpoint = apiBase || "https://api.anthropic.com/v1/messages";
@@ -274,7 +296,7 @@
       model,
       max_tokens: maxTokens,
       temperature: 0.3,
-      system:
+      system: systemPrompt,
       messages: [
         {
           role: "user",
@@ -321,6 +343,7 @@ async function callOpenAI(
   model: string,
   maxTokens: number,
   apiKey: string,
+  systemPrompt: string,
   apiBase?: string,
 ): Promise<SummarizeResult> {
   const endpoint = apiBase || "https://api.openai.com/v1/chat/completions";
@@ -339,7 +362,7 @@
       messages: [
         {
           role: "system",
-          content:
+          content: systemPrompt,
         },
         {
           role: "user",
@@ -386,6 +409,7 @@ async function callOllama(
   model: string,
   maxTokens: number,
   apiBase: string,
+  systemPrompt: string,
 ): Promise<SummarizeResult> {
   const endpoint = `${apiBase}/api/chat`;

@@ -400,7 +424,7 @@
       messages: [
         {
           role: "system",
-          content:
+          content: systemPrompt,
         },
         {
           role: "user",
@@ -453,8 +477,12 @@
  *
  * @param text - Text to summarize
  * @param config - LLM configuration (from loadConfig())
- * @param options - Optional overrides for model and maxTokens
- * @returns SummarizeResult with
+ * @param options - Optional overrides for model, maxTokens, and mode
+ * @returns SummarizeResult with insights or error
+ *
+ * Modes:
+ * - "quick": Fast one-liner summary (for user prompts)
+ * - "insights": Full SessionInsights extraction (for responses, default)
  */
 export async function summarize(
   text: string,
@@ -465,6 +493,8 @@ export async function summarize(
   const model = options?.model || config.model;
   const maxTokens = options?.maxTokens || config.maxTokens;
   const apiKey = config.apiKey;
+  const mode: SummarizeMode = options?.mode || "insights";
+  const systemPrompt = getPromptForMode(mode);

   // Validate config
   if (!provider) {
@@ -493,6 +523,7 @@
       model,
       maxTokens,
       apiKey!,
+      systemPrompt,
       config.apiBase || undefined,
     );
   } else if (provider === "openai") {
@@ -501,6 +532,7 @@
       model,
       maxTokens,
       apiKey!,
+      systemPrompt,
       config.apiBase || undefined,
     );
   } else if (provider === "ollama") {
@@ -509,7 +541,7 @@
         error: `No api_base configured for ollama. Set api_base in ~/.config/llm/config.toml`,
       };
     }
-    return callOllama(text, model, maxTokens, config.apiBase);
+    return callOllama(text, model, maxTokens, config.apiBase, systemPrompt);
   } else {
     return {
       error: `Unknown provider: ${provider}. Supported: anthropic, openai, ollama`,
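Inside the library the new option is a single seam: summarize() resolves a SummarizeMode (defaulting to "insights"), maps it to a prompt with getPromptForMode(), and threads the resulting string through callAnthropic, callOpenAI, and callOllama as an explicit systemPrompt parameter rather than a shared module-level constant. A condensed usage sketch against a hypothetical Ollama setup (the config fields mirror the config.* reads visible above; the model name and input text are placeholders):

import { summarize, type SummarizeMode } from "@voidwire/llm-summarize";

// Placeholder config in the shape summarize() consumes (provider, model,
// maxTokens, apiKey, apiBase); in real use this comes from loadConfig().
const config = {
  provider: "ollama",
  model: "llama3.1",                  // placeholder model name
  maxTokens: 1024,
  apiBase: "http://localhost:11434",  // required: the ollama path errors without it
};

for (const mode of ["quick", "insights"] as SummarizeMode[]) {
  const result = await summarize("Rudy refactored the session cache", config, { mode });
  // "quick" returns {"summary": "..."}; "insights" returns the SessionInsights
  // JSON described by the INSIGHTS_PROMPT <output_schema>.
  console.log(mode, result);
}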
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@voidwire/llm-summarize",
-  "version": "3.0.0",
+  "version": "3.1.0",
   "description": "Structured session insight extraction for knowledge systems",
   "type": "module",
   "main": "./index.ts",
@@ -18,6 +18,9 @@
     "README.md",
     "LICENSE"
   ],
+  "scripts": {
+    "test": "bun test"
+  },
   "keywords": [
     "llm",
     "summarize",
@@ -39,8 +42,5 @@
   },
   "engines": {
     "bun": ">=1.0.0"
-  },
-  "scripts": {
-    "test": "bun test"
   }
-}
+}
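The manifest changes are a version bump to 3.1.0 and a move of the existing scripts block ahead of keywords; bun test remains the only script, and the trailing -}/+} pair is likely just a newline-at-end-of-file fix. A hypothetical test that bun test would discover, exercising the error-object contract suggested by the index.ts hunks above (file name and assertion are illustrative, not shipped with the package):

// mode.test.ts: hypothetical; `bun test` discovers any *.test.ts file
import { expect, test } from "bun:test";
import { summarize } from "./index";

test("summarize surfaces config problems as { error } instead of throwing", async () => {
  // An empty config has no provider; the validation block shown in the diff
  // runs before any network call, so this should resolve with an error field.
  const result = await summarize("hello", {} as any, { mode: "quick" });
  expect(result.error).toBeDefined();
});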