@voidwire/llm-summarize 3.7.0 → 3.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli.ts +11 -48
- package/index.ts +47 -383
- package/package.json +4 -1
package/cli.ts
CHANGED
|
@@ -2,27 +2,15 @@
|
|
|
2
2
|
/**
|
|
3
3
|
* llm-summarize CLI
|
|
4
4
|
*
|
|
5
|
-
*
|
|
6
|
-
* -
|
|
7
|
-
*
|
|
8
|
-
* - Deterministic JSON output for tooling integration
|
|
9
|
-
* - Config-driven - no hardcoded defaults
|
|
5
|
+
* Structured session insight extraction for knowledge systems.
|
|
6
|
+
* Uses @voidwire/llm-core for LLM transport — services configured
|
|
7
|
+
* via ~/.config/llm-core/services.toml, API keys via apiconf.
|
|
10
8
|
*
|
|
11
9
|
* Usage:
|
|
12
10
|
* llm-summarize <text>
|
|
13
11
|
* llm-summarize --stdin
|
|
14
12
|
* echo "text" | llm-summarize --stdin
|
|
15
13
|
*
|
|
16
|
-
* Config: ~/.config/llm/config.toml
|
|
17
|
-
* [llm]
|
|
18
|
-
* provider = "ollama"
|
|
19
|
-
* model = "Qwen2.5:3b"
|
|
20
|
-
* api_base = "https://ollama.example.com"
|
|
21
|
-
* max_tokens = 1024
|
|
22
|
-
*
|
|
23
|
-
* Secrets: ~/.config/llm/.env
|
|
24
|
-
* ANTHROPIC_API_KEY=sk-ant-...
|
|
25
|
-
*
|
|
26
14
|
* Exit codes:
|
|
27
15
|
* 0 - Success
|
|
28
16
|
* 1 - API error (rate limit, auth, network)
|
|
@@ -56,18 +44,13 @@ function printUsage(): void {
|
|
|
56
44
|
console.error(`
|
|
57
45
|
llm-summarize - Extract structured insights from session transcripts
|
|
58
46
|
|
|
59
|
-
Philosophy:
|
|
60
|
-
Structured session insight extraction for knowledge systems.
|
|
61
|
-
Config-driven - specify exact provider/model.
|
|
62
|
-
JSON output for tooling integration.
|
|
63
|
-
|
|
64
47
|
Usage: llm-summarize [options] <text>
|
|
65
48
|
llm-summarize --stdin
|
|
66
49
|
|
|
67
50
|
Options:
|
|
68
51
|
--mode <mode> Summarization mode: quick or insights (default: insights)
|
|
69
|
-
--model <name> Override model
|
|
70
|
-
--max-tokens <n> Max output tokens (default: 1024)
|
|
52
|
+
--model <name> Override model (default: claude-3-5-haiku-20241022)
|
|
53
|
+
--max-tokens <n> Max output tokens (default: 1024)
|
|
71
54
|
--stdin Read text from stdin
|
|
72
55
|
-h, --help Show this help
|
|
73
56
|
|
|
@@ -75,26 +58,10 @@ Modes:
|
|
|
75
58
|
quick - Fast one-liner summary (for user prompts)
|
|
76
59
|
insights - Full SessionInsights extraction (for responses)
|
|
77
60
|
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
api_base = "https://ollama.example.com"
|
|
83
|
-
max_tokens = 1024
|
|
84
|
-
|
|
85
|
-
Secrets file: ~/.config/llm/.env
|
|
86
|
-
ANTHROPIC_API_KEY=sk-ant-...
|
|
87
|
-
OPENAI_API_KEY=sk-...
|
|
88
|
-
|
|
89
|
-
Environment overrides:
|
|
90
|
-
LLM_PROVIDER Override provider
|
|
91
|
-
LLM_MODEL Override model
|
|
92
|
-
LLM_API_KEY Override API key
|
|
93
|
-
|
|
94
|
-
Supported providers:
|
|
95
|
-
anthropic - Claude models (claude-3-5-haiku-latest, claude-sonnet-4-20250514)
|
|
96
|
-
openai - GPT models (gpt-4.1-mini, gpt-4o)
|
|
97
|
-
ollama - Local models (Qwen2.5:3b, llama3.2:3b, etc.) - no API key needed
|
|
61
|
+
Configuration:
|
|
62
|
+
LLM transport is handled by @voidwire/llm-core.
|
|
63
|
+
Services: ~/.config/llm-core/services.toml
|
|
64
|
+
API keys: managed via apiconf (see @voidwire/apiconf)
|
|
98
65
|
|
|
99
66
|
Output format:
|
|
100
67
|
{
|
|
@@ -103,18 +70,14 @@ Output format:
|
|
|
103
70
|
"decisions": ["Specific decisions with reasoning"],
|
|
104
71
|
"patterns_used": ["Development patterns observed"],
|
|
105
72
|
"preferences_expressed": ["User preferences revealed"],
|
|
106
|
-
"problems_solved": ["Problems addressed and how"]
|
|
107
|
-
"tools_heavy": ["Tools used notably"]
|
|
73
|
+
"problems_solved": ["Problems addressed and how"]
|
|
108
74
|
},
|
|
109
|
-
"model": "
|
|
75
|
+
"model": "claude-3-5-haiku-20241022",
|
|
110
76
|
"tokens_used": 150
|
|
111
77
|
}
|
|
112
78
|
|
|
113
79
|
Examples:
|
|
114
|
-
# Extract insights from session transcript
|
|
115
80
|
cat session.txt | llm-summarize --stdin
|
|
116
|
-
|
|
117
|
-
# From clipboard
|
|
118
81
|
pbpaste | llm-summarize --stdin
|
|
119
82
|
`);
|
|
120
83
|
}
|
package/index.ts
CHANGED
|
@@ -11,8 +11,7 @@
|
|
|
11
11
|
* // result.insights.summary, result.insights.decisions, etc.
|
|
12
12
|
*/
|
|
13
13
|
|
|
14
|
-
import {
|
|
15
|
-
import { join } from "path";
|
|
14
|
+
import { complete } from "@voidwire/llm-core";
|
|
16
15
|
|
|
17
16
|
// ============================================================================
|
|
18
17
|
// Types
|
|
@@ -46,12 +45,10 @@ export interface SummarizeResult {
|
|
|
46
45
|
tokens_used?: number;
|
|
47
46
|
}
|
|
48
47
|
|
|
49
|
-
export interface LLMConfig {
|
|
50
|
-
|
|
51
|
-
model
|
|
52
|
-
|
|
53
|
-
apiBase: string | null;
|
|
54
|
-
maxTokens: number;
|
|
48
|
+
export interface SummarizeConfig {
|
|
49
|
+
service?: string; // Named service from services.toml (optional, uses default_service)
|
|
50
|
+
model?: string; // Model override — falls back to service default_model if omitted
|
|
51
|
+
maxTokens: number; // Max output tokens
|
|
55
52
|
}
|
|
56
53
|
|
|
57
54
|
export interface SummarizeOptions {
|
|
@@ -64,7 +61,6 @@ export interface SummarizeOptions {
|
|
|
64
61
|
systemPrompt?: string;
|
|
65
62
|
}
|
|
66
63
|
|
|
67
|
-
export type ProviderType = "anthropic" | "openai" | "ollama";
|
|
68
64
|
export type SummarizeMode = "quick" | "insights";
|
|
69
65
|
|
|
70
66
|
// ============================================================================
|
|
@@ -259,329 +255,21 @@ function extractJson(raw: string): SessionInsights | null {
|
|
|
259
255
|
// ============================================================================
|
|
260
256
|
|
|
261
257
|
/**
|
|
262
|
-
* Load
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
const content = readFileSync(envPath, "utf-8");
|
|
273
|
-
for (const line of content.split("\n")) {
|
|
274
|
-
const trimmed = line.trim();
|
|
275
|
-
if (!trimmed || trimmed.startsWith("#")) continue;
|
|
276
|
-
|
|
277
|
-
const eqIdx = trimmed.indexOf("=");
|
|
278
|
-
if (eqIdx === -1) continue;
|
|
279
|
-
|
|
280
|
-
const key = trimmed.slice(0, eqIdx).trim();
|
|
281
|
-
let value = trimmed.slice(eqIdx + 1).trim();
|
|
282
|
-
|
|
283
|
-
// Remove quotes if present
|
|
284
|
-
if (
|
|
285
|
-
(value.startsWith('"') && value.endsWith('"')) ||
|
|
286
|
-
(value.startsWith("'") && value.endsWith("'"))
|
|
287
|
-
) {
|
|
288
|
-
value = value.slice(1, -1);
|
|
289
|
-
}
|
|
290
|
-
|
|
291
|
-
env[key] = value;
|
|
292
|
-
}
|
|
293
|
-
} catch {
|
|
294
|
-
// Ignore parse errors
|
|
295
|
-
}
|
|
296
|
-
|
|
297
|
-
return env;
|
|
298
|
-
}
|
|
299
|
-
|
|
300
|
-
/**
|
|
301
|
-
* Resolve env: references in config values
|
|
302
|
-
*/
|
|
303
|
-
function resolveEnvRef(
|
|
304
|
-
value: string,
|
|
305
|
-
envVars: Record<string, string>,
|
|
306
|
-
): string | null {
|
|
307
|
-
if (value.startsWith("env:")) {
|
|
308
|
-
const varName = value.slice(4);
|
|
309
|
-
return envVars[varName] || process.env[varName] || null;
|
|
310
|
-
}
|
|
311
|
-
return value;
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
/**
|
|
315
|
-
* Load configuration from config.toml with env file support
|
|
316
|
-
* Config: ~/.config/llm/config.toml
|
|
317
|
-
* Secrets: ~/.config/llm/.env
|
|
258
|
+
* Load configuration for llm-summarize.
|
|
259
|
+
*
|
|
260
|
+
* Service and model are resolved by llm-core from services.toml.
|
|
261
|
+
* Override service/model here only when llm-summarize needs to
|
|
262
|
+
* differ from the default_service and its default_model.
|
|
263
|
+
*
|
|
264
|
+
* To configure:
|
|
265
|
+
* 1. Set up services: ~/.config/llm-core/services.toml (with default_model per service)
|
|
266
|
+
* 2. Set up API keys: ~/.config/apiconf/config.toml (for cloud services)
|
|
267
|
+
* 3. Optionally override service/model/maxTokens via SummarizeOptions
|
|
318
268
|
*/
|
|
319
|
-
export function loadConfig(): LLMConfig {
|
|
320
|
-
|
|
321
|
-
const configPath = join(configDir, "config.toml");
|
|
322
|
-
const envPath = join(configDir, ".env");
|
|
323
|
-
|
|
324
|
-
// Load .env file first
|
|
325
|
-
const envVars = loadEnvFile(envPath);
|
|
326
|
-
|
|
327
|
-
// No defaults - config required
|
|
328
|
-
const config: LLMConfig = {
|
|
329
|
-
provider: null,
|
|
330
|
-
model: null,
|
|
331
|
-
apiKey: null,
|
|
332
|
-
apiBase: null,
|
|
269
|
+
export function loadConfig(): SummarizeConfig {
|
|
270
|
+
return {
|
|
333
271
|
maxTokens: 1024,
|
|
334
272
|
};
|
|
335
|
-
|
|
336
|
-
if (!existsSync(configPath)) {
|
|
337
|
-
return config;
|
|
338
|
-
}
|
|
339
|
-
|
|
340
|
-
try {
|
|
341
|
-
const content = readFileSync(configPath, "utf-8");
|
|
342
|
-
|
|
343
|
-
// Parse [llm] section
|
|
344
|
-
const providerMatch = content.match(/^\s*provider\s*=\s*"([^"]+)"/m);
|
|
345
|
-
if (providerMatch) {
|
|
346
|
-
config.provider = providerMatch[1];
|
|
347
|
-
}
|
|
348
|
-
|
|
349
|
-
const modelMatch = content.match(/^\s*model\s*=\s*"([^"]+)"/m);
|
|
350
|
-
if (modelMatch) {
|
|
351
|
-
config.model = modelMatch[1];
|
|
352
|
-
}
|
|
353
|
-
|
|
354
|
-
const apiKeyMatch = content.match(/^\s*api_key\s*=\s*"([^"]+)"/m);
|
|
355
|
-
if (apiKeyMatch) {
|
|
356
|
-
config.apiKey = resolveEnvRef(apiKeyMatch[1], envVars);
|
|
357
|
-
}
|
|
358
|
-
|
|
359
|
-
const apiBaseMatch = content.match(/^\s*api_base\s*=\s*"([^"]+)"/m);
|
|
360
|
-
if (apiBaseMatch) {
|
|
361
|
-
config.apiBase = apiBaseMatch[1];
|
|
362
|
-
}
|
|
363
|
-
|
|
364
|
-
const maxTokensMatch = content.match(/^\s*max_tokens\s*=\s*(\d+)/m);
|
|
365
|
-
if (maxTokensMatch) {
|
|
366
|
-
config.maxTokens = parseInt(maxTokensMatch[1], 10);
|
|
367
|
-
}
|
|
368
|
-
} catch {
|
|
369
|
-
// Ignore parse errors
|
|
370
|
-
}
|
|
371
|
-
|
|
372
|
-
// Environment variables override config
|
|
373
|
-
if (process.env.LLM_PROVIDER) config.provider = process.env.LLM_PROVIDER;
|
|
374
|
-
if (process.env.LLM_MODEL) config.model = process.env.LLM_MODEL;
|
|
375
|
-
if (process.env.LLM_API_KEY) config.apiKey = process.env.LLM_API_KEY;
|
|
376
|
-
|
|
377
|
-
return config;
|
|
378
|
-
}
|
|
379
|
-
|
|
380
|
-
// ============================================================================
|
|
381
|
-
// Provider Implementations
|
|
382
|
-
// ============================================================================
|
|
383
|
-
|
|
384
|
-
/**
|
|
385
|
-
* Call Anthropic API
|
|
386
|
-
*/
|
|
387
|
-
async function callAnthropic(
|
|
388
|
-
text: string,
|
|
389
|
-
model: string,
|
|
390
|
-
maxTokens: number,
|
|
391
|
-
apiKey: string,
|
|
392
|
-
systemPrompt: string,
|
|
393
|
-
apiBase?: string,
|
|
394
|
-
): Promise<SummarizeResult> {
|
|
395
|
-
const endpoint = apiBase || "https://api.anthropic.com/v1/messages";
|
|
396
|
-
|
|
397
|
-
try {
|
|
398
|
-
const response = await fetch(endpoint, {
|
|
399
|
-
method: "POST",
|
|
400
|
-
headers: {
|
|
401
|
-
"x-api-key": apiKey,
|
|
402
|
-
"anthropic-version": "2023-06-01",
|
|
403
|
-
"content-type": "application/json",
|
|
404
|
-
},
|
|
405
|
-
body: JSON.stringify({
|
|
406
|
-
model,
|
|
407
|
-
max_tokens: maxTokens,
|
|
408
|
-
temperature: 0.3,
|
|
409
|
-
system: systemPrompt,
|
|
410
|
-
messages: [
|
|
411
|
-
{
|
|
412
|
-
role: "user",
|
|
413
|
-
content: text,
|
|
414
|
-
},
|
|
415
|
-
],
|
|
416
|
-
}),
|
|
417
|
-
});
|
|
418
|
-
|
|
419
|
-
if (!response.ok) {
|
|
420
|
-
const errorText = await response.text();
|
|
421
|
-
return {
|
|
422
|
-
error: `Anthropic API error: ${response.status} ${errorText}`,
|
|
423
|
-
};
|
|
424
|
-
}
|
|
425
|
-
|
|
426
|
-
const result = await response.json();
|
|
427
|
-
const content = result.content?.[0]?.text || "";
|
|
428
|
-
const insights = extractJson(content);
|
|
429
|
-
|
|
430
|
-
if (!insights) {
|
|
431
|
-
return {
|
|
432
|
-
rawText: content,
|
|
433
|
-
error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
|
|
434
|
-
};
|
|
435
|
-
}
|
|
436
|
-
|
|
437
|
-
return {
|
|
438
|
-
insights,
|
|
439
|
-
rawText: content,
|
|
440
|
-
model,
|
|
441
|
-
tokens_used: result.usage?.output_tokens,
|
|
442
|
-
};
|
|
443
|
-
} catch (error) {
|
|
444
|
-
return {
|
|
445
|
-
error: `Anthropic request failed: ${String(error)}`,
|
|
446
|
-
};
|
|
447
|
-
}
|
|
448
|
-
}
|
|
449
|
-
|
|
450
|
-
/**
|
|
451
|
-
* Call OpenAI API
|
|
452
|
-
*/
|
|
453
|
-
async function callOpenAI(
|
|
454
|
-
text: string,
|
|
455
|
-
model: string,
|
|
456
|
-
maxTokens: number,
|
|
457
|
-
apiKey: string,
|
|
458
|
-
systemPrompt: string,
|
|
459
|
-
apiBase?: string,
|
|
460
|
-
): Promise<SummarizeResult> {
|
|
461
|
-
const endpoint = apiBase || "https://api.openai.com/v1/chat/completions";
|
|
462
|
-
|
|
463
|
-
try {
|
|
464
|
-
const response = await fetch(endpoint, {
|
|
465
|
-
method: "POST",
|
|
466
|
-
headers: {
|
|
467
|
-
Authorization: `Bearer ${apiKey}`,
|
|
468
|
-
"Content-Type": "application/json",
|
|
469
|
-
},
|
|
470
|
-
body: JSON.stringify({
|
|
471
|
-
model,
|
|
472
|
-
max_tokens: maxTokens,
|
|
473
|
-
temperature: 0.3,
|
|
474
|
-
messages: [
|
|
475
|
-
{
|
|
476
|
-
role: "system",
|
|
477
|
-
content: systemPrompt,
|
|
478
|
-
},
|
|
479
|
-
{
|
|
480
|
-
role: "user",
|
|
481
|
-
content: text,
|
|
482
|
-
},
|
|
483
|
-
],
|
|
484
|
-
}),
|
|
485
|
-
});
|
|
486
|
-
|
|
487
|
-
if (!response.ok) {
|
|
488
|
-
const errorText = await response.text();
|
|
489
|
-
return {
|
|
490
|
-
error: `OpenAI API error: ${response.status} ${errorText}`,
|
|
491
|
-
};
|
|
492
|
-
}
|
|
493
|
-
|
|
494
|
-
const result = await response.json();
|
|
495
|
-
const content = result.choices?.[0]?.message?.content || "";
|
|
496
|
-
const insights = extractJson(content);
|
|
497
|
-
|
|
498
|
-
if (!insights) {
|
|
499
|
-
return {
|
|
500
|
-
rawText: content,
|
|
501
|
-
error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
|
|
502
|
-
};
|
|
503
|
-
}
|
|
504
|
-
|
|
505
|
-
return {
|
|
506
|
-
insights,
|
|
507
|
-
rawText: content,
|
|
508
|
-
model,
|
|
509
|
-
tokens_used: result.usage?.completion_tokens,
|
|
510
|
-
};
|
|
511
|
-
} catch (error) {
|
|
512
|
-
return {
|
|
513
|
-
error: `OpenAI request failed: ${String(error)}`,
|
|
514
|
-
};
|
|
515
|
-
}
|
|
516
|
-
}
|
|
517
|
-
|
|
518
|
-
/**
|
|
519
|
-
* Call Ollama API (chat endpoint for system prompt support)
|
|
520
|
-
*/
|
|
521
|
-
async function callOllama(
|
|
522
|
-
text: string,
|
|
523
|
-
model: string,
|
|
524
|
-
maxTokens: number,
|
|
525
|
-
apiBase: string,
|
|
526
|
-
systemPrompt: string,
|
|
527
|
-
): Promise<SummarizeResult> {
|
|
528
|
-
const endpoint = `${apiBase}/api/chat`;
|
|
529
|
-
|
|
530
|
-
try {
|
|
531
|
-
const response = await fetch(endpoint, {
|
|
532
|
-
method: "POST",
|
|
533
|
-
headers: {
|
|
534
|
-
"Content-Type": "application/json",
|
|
535
|
-
},
|
|
536
|
-
body: JSON.stringify({
|
|
537
|
-
model,
|
|
538
|
-
messages: [
|
|
539
|
-
{
|
|
540
|
-
role: "system",
|
|
541
|
-
content: systemPrompt,
|
|
542
|
-
},
|
|
543
|
-
{
|
|
544
|
-
role: "user",
|
|
545
|
-
content: text,
|
|
546
|
-
},
|
|
547
|
-
],
|
|
548
|
-
stream: false,
|
|
549
|
-
options: {
|
|
550
|
-
num_predict: maxTokens,
|
|
551
|
-
temperature: 0.3,
|
|
552
|
-
},
|
|
553
|
-
}),
|
|
554
|
-
});
|
|
555
|
-
|
|
556
|
-
if (!response.ok) {
|
|
557
|
-
const errorText = await response.text();
|
|
558
|
-
return {
|
|
559
|
-
error: `Ollama API error: ${response.status} ${errorText}`,
|
|
560
|
-
};
|
|
561
|
-
}
|
|
562
|
-
|
|
563
|
-
const result = await response.json();
|
|
564
|
-
const content = result.message?.content || "";
|
|
565
|
-
const insights = extractJson(content);
|
|
566
|
-
|
|
567
|
-
if (!insights) {
|
|
568
|
-
return {
|
|
569
|
-
rawText: content,
|
|
570
|
-
error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
|
|
571
|
-
};
|
|
572
|
-
}
|
|
573
|
-
|
|
574
|
-
return {
|
|
575
|
-
insights,
|
|
576
|
-
rawText: content,
|
|
577
|
-
model,
|
|
578
|
-
tokens_used: result.eval_count,
|
|
579
|
-
};
|
|
580
|
-
} catch (error) {
|
|
581
|
-
return {
|
|
582
|
-
error: `Ollama request failed: ${String(error)}`,
|
|
583
|
-
};
|
|
584
|
-
}
|
|
585
273
|
}
|
|
586
274
|
|
|
587
275
|
// ============================================================================
|
|
@@ -592,7 +280,7 @@ async function callOllama(
|
|
|
592
280
|
* Summarize text using configured LLM
|
|
593
281
|
*
|
|
594
282
|
* @param text - Text to summarize
|
|
595
|
-
* @param config -
|
|
283
|
+
* @param config - Summarize configuration (from loadConfig())
|
|
596
284
|
* @param options - Optional overrides for model, maxTokens, and mode
|
|
597
285
|
* @returns SummarizeResult with insights or error
|
|
598
286
|
*
|
|
@@ -602,67 +290,43 @@ async function callOllama(
|
|
|
602
290
|
*/
|
|
603
291
|
export async function summarize(
|
|
604
292
|
text: string,
|
|
605
|
-
config: LLMConfig,
|
|
293
|
+
config: SummarizeConfig,
|
|
606
294
|
options?: SummarizeOptions,
|
|
607
295
|
): Promise<SummarizeResult> {
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
if (!provider) {
|
|
619
|
-
return {
|
|
620
|
-
error: `No provider configured. Set provider in ~/.config/llm/config.toml`,
|
|
621
|
-
};
|
|
622
|
-
}
|
|
623
|
-
|
|
624
|
-
if (!model) {
|
|
625
|
-
return {
|
|
626
|
-
error: `No model configured. Set model in ~/.config/llm/config.toml`,
|
|
627
|
-
};
|
|
628
|
-
}
|
|
629
|
-
|
|
630
|
-
// API key required for cloud providers
|
|
631
|
-
if (!apiKey && provider !== "ollama") {
|
|
632
|
-
return {
|
|
633
|
-
error: `No API key configured. Set api_key = "env:VAR_NAME" in ~/.config/llm/config.toml`,
|
|
634
|
-
};
|
|
635
|
-
}
|
|
636
|
-
|
|
637
|
-
// Call appropriate provider
|
|
638
|
-
if (provider === "anthropic") {
|
|
639
|
-
return callAnthropic(
|
|
640
|
-
text,
|
|
641
|
-
model,
|
|
642
|
-
maxTokens,
|
|
643
|
-
apiKey!,
|
|
644
|
-
systemPrompt,
|
|
645
|
-
config.apiBase || undefined,
|
|
646
|
-
);
|
|
647
|
-
} else if (provider === "openai") {
|
|
648
|
-
return callOpenAI(
|
|
649
|
-
text,
|
|
296
|
+
try {
|
|
297
|
+
const mode: SummarizeMode = options?.mode || "insights";
|
|
298
|
+
const userName = options?.userName;
|
|
299
|
+
const systemPrompt =
|
|
300
|
+
options?.systemPrompt || getPromptForMode(mode, userName);
|
|
301
|
+
// Model resolution: options.model > config.model > service default_model (in llm-core)
|
|
302
|
+
const model = options?.model || config.model;
|
|
303
|
+
|
|
304
|
+
const result = await complete({
|
|
305
|
+
service: config.service,
|
|
650
306
|
model,
|
|
651
|
-
|
|
652
|
-
apiKey!,
|
|
307
|
+
prompt: text,
|
|
653
308
|
systemPrompt,
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
309
|
+
maxTokens: options?.maxTokens || config.maxTokens,
|
|
310
|
+
temperature: 0.3,
|
|
311
|
+
});
|
|
312
|
+
|
|
313
|
+
const insights = extractJson(result.text);
|
|
314
|
+
|
|
315
|
+
if (!insights) {
|
|
658
316
|
return {
|
|
659
|
-
error:
|
|
317
|
+
error: "Failed to parse insights from response",
|
|
318
|
+
rawText: result.text,
|
|
660
319
|
};
|
|
661
320
|
}
|
|
662
|
-
|
|
663
|
-
} else {
|
|
321
|
+
|
|
664
322
|
return {
|
|
665
|
-
|
|
323
|
+
insights,
|
|
324
|
+
rawText: result.text,
|
|
325
|
+
model: result.model,
|
|
326
|
+
tokens_used: result.tokens.output,
|
|
666
327
|
};
|
|
328
|
+
} catch (err) {
|
|
329
|
+
const error = err instanceof Error ? err.message : String(err);
|
|
330
|
+
return { error };
|
|
667
331
|
}
|
|
668
332
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@voidwire/llm-summarize",
|
|
3
|
-
"version": "3.7.0",
|
|
3
|
+
"version": "3.9.0",
|
|
4
4
|
"description": "Structured session insight extraction for knowledge systems",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "./index.ts",
|
|
@@ -40,6 +40,9 @@
|
|
|
40
40
|
"engines": {
|
|
41
41
|
"bun": ">=1.0.0"
|
|
42
42
|
},
|
|
43
|
+
"dependencies": {
|
|
44
|
+
"@voidwire/llm-core": "0.2.0"
|
|
45
|
+
},
|
|
43
46
|
"scripts": {
|
|
44
47
|
"test": "bun test"
|
|
45
48
|
}
|