@voidwire/llm-summarize 3.6.0 → 3.8.0

Files changed (3)
  1. package/cli.ts +11 -48
  2. package/index.ts +105 -428
  3. package/package.json +4 -1
package/cli.ts CHANGED
@@ -2,27 +2,15 @@
 /**
  * llm-summarize CLI
  *
- * Philosophy:
- * - Structured session insight extraction for knowledge systems
- * - Multi-provider support (Anthropic, OpenAI, Ollama)
- * - Deterministic JSON output for tooling integration
- * - Config-driven - no hardcoded defaults
+ * Structured session insight extraction for knowledge systems.
+ * Uses @voidwire/llm-core for LLM transport services configured
+ * via ~/.config/llm-core/services.toml, API keys via apiconf.
  *
  * Usage:
  *   llm-summarize <text>
  *   llm-summarize --stdin
  *   echo "text" | llm-summarize --stdin
  *
- * Config: ~/.config/llm/config.toml
- *   [llm]
- *   provider = "ollama"
- *   model = "Qwen2.5:3b"
- *   api_base = "https://ollama.example.com"
- *   max_tokens = 1024
- *
- * Secrets: ~/.config/llm/.env
- *   ANTHROPIC_API_KEY=sk-ant-...
- *
  * Exit codes:
  *   0 - Success
  *   1 - API error (rate limit, auth, network)
@@ -56,18 +44,13 @@ function printUsage(): void {
   console.error(`
 llm-summarize - Extract structured insights from session transcripts

-Philosophy:
-  Structured session insight extraction for knowledge systems.
-  Config-driven - specify exact provider/model.
-  JSON output for tooling integration.
-
 Usage: llm-summarize [options] <text>
        llm-summarize --stdin

 Options:
   --mode <mode>      Summarization mode: quick or insights (default: insights)
-  --model <name>     Override model from config
-  --max-tokens <n>   Max output tokens (default: from config or 1024)
+  --model <name>     Override model (default: claude-3-5-haiku-20241022)
+  --max-tokens <n>   Max output tokens (default: 1024)
   --stdin            Read text from stdin
   -h, --help         Show this help

@@ -75,26 +58,10 @@ Modes:
   quick    - Fast one-liner summary (for user prompts)
   insights - Full SessionInsights extraction (for responses)

-Config file: ~/.config/llm/config.toml
-  [llm]
-  provider = "ollama"
-  model = "Qwen2.5:3b"
-  api_base = "https://ollama.example.com"
-  max_tokens = 1024
-
-Secrets file: ~/.config/llm/.env
-  ANTHROPIC_API_KEY=sk-ant-...
-  OPENAI_API_KEY=sk-...
-
-Environment overrides:
-  LLM_PROVIDER   Override provider
-  LLM_MODEL      Override model
-  LLM_API_KEY    Override API key
-
-Supported providers:
-  anthropic - Claude models (claude-3-5-haiku-latest, claude-sonnet-4-20250514)
-  openai    - GPT models (gpt-4.1-mini, gpt-4o)
-  ollama    - Local models (Qwen2.5:3b, llama3.2:3b, etc.) - no API key needed
+Configuration:
+  LLM transport is handled by @voidwire/llm-core.
+  Services: ~/.config/llm-core/services.toml
+  API keys: managed via apiconf (see @voidwire/apiconf)

 Output format:
   {
@@ -103,18 +70,14 @@ Output format:
     "decisions": ["Specific decisions with reasoning"],
     "patterns_used": ["Development patterns observed"],
     "preferences_expressed": ["User preferences revealed"],
-    "problems_solved": ["Problems addressed and how"],
-    "tools_heavy": ["Tools used notably"]
+    "problems_solved": ["Problems addressed and how"]
   },
-  "model": "qwen2.5:3b",
+  "model": "claude-3-5-haiku-20241022",
   "tokens_used": 150
 }

 Examples:
-  # Extract insights from session transcript
   cat session.txt | llm-summarize --stdin
-
-  # From clipboard
   pbpaste | llm-summarize --stdin
 `);
 }
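The CLI's output contract is unchanged by this release: JSON on stdout, exit code 0 on success. A minimal consumer sketch, assuming only the flags and output fields shown in the help text above (the transcript content is illustrative):

```typescript
import { execFileSync } from "node:child_process";

// Hedged sketch: pipe a transcript into the CLI and parse the JSON it prints.
// Flags (--stdin, --mode) and output fields are taken from the help text above.
const transcript = "User Asked: ...\nAssistant Response: ...";
const out = execFileSync("llm-summarize", ["--stdin", "--mode", "insights"], {
  input: transcript,
  encoding: "utf-8",
});

const result = JSON.parse(out);
console.log(result.insights.summary); // one-sentence session summary
console.log(result.model);            // e.g. "claude-3-5-haiku-20241022"
console.log(result.tokens_used);      // output token count
```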
package/index.ts CHANGED
@@ -11,8 +11,7 @@
  * // result.insights.summary, result.insights.decisions, etc.
  */

-import { readFileSync, existsSync } from "fs";
-import { join } from "path";
+import { complete } from "@voidwire/llm-core";

 // ============================================================================
 // Types
@@ -46,12 +45,10 @@ export interface SummarizeResult {
   tokens_used?: number;
 }

-export interface LLMConfig {
-  provider: string | null;
-  model: string | null;
-  apiKey: string | null;
-  apiBase: string | null;
-  maxTokens: number;
+export interface SummarizeConfig {
+  service?: string; // Named service from services.toml (optional, uses default_service)
+  model: string; // Model name — required by complete()
+  maxTokens: number; // Max output tokens
 }

 export interface SummarizeOptions {
@@ -64,7 +61,6 @@ export interface SummarizeOptions {
   systemPrompt?: string;
 }

-export type ProviderType = "anthropic" | "openai" | "ollama";
 export type SummarizeMode = "quick" | "insights";

 // ============================================================================
@@ -145,52 +141,57 @@ Output valid JSON only. No markdown, no explanation.`;
  * Note: userName param kept for API compatibility but not used in insights mode
  */
 function buildInsightsPrompt(_userName?: string): string {
-  return `You are an engineering knowledge extractor. Given a development session transcript, extract reusable insights as structured JSON.
-
-Transcripts use role markers:
-- "User Asked:" = the human (directs, decides, provides context)
-- "Assistant Response:" = the AI (implements, builds, debugs)
-
-<output_format>
-Return a JSON object with these fields. Include a field ONLY when the transcript provides clear evidence. Omit empty arrays entirely.
-
-{
-  "summary": "One sentence: what was accomplished and the key outcome",
-  "current_focus": "The specific task, feature, or problem actively being worked on (omit if exploratory)",
-  "next_steps": ["Concrete action to take when work resumes — name the actual task"],
-  "decisions": ["Decision made — rationale and what alternatives were considered"],
-  "patterns_used": ["Technique or approach applied — why it was chosen over alternatives"],
-  "preferences_expressed": ["User preference revealed through direction, correction, or explicit statement"],
-  "problems_solved": ["Problem encountered — root cause identified and specific fix applied"]
-}
-</output_format>
-
-<quality_rules>
-Every value MUST be a complete sentence with context. Never output bare nouns, short phrases, or sentence fragments.
-
-BAD (will be rejected):
-- "SQLite"
-- "detached worker"
-- "Fixed bug"
-- "Continue working"
-
-GOOD (specific, contextual, reusable):
-- "Chose SQLite over Postgres for single-user CLI — no server dependency needed"
-- "Used detached worker pattern to avoid blocking the stop hook during LLM calls"
-- "Fixed state file writing to wrong directory — was using read-only data path instead of persistent home"
-- "Wire up the webhook endpoint to the event processor and verify with integration test"
-
-For next_steps specifically: never say "Continue from current position" or "Resume work" — name the actual task to be done.
-</quality_rules>
-
-<attribution>
-Users direct and decide. Assistants implement and execute.
-- User: requested, approved, directed, chose, preferred, corrected
-- Assistant: implemented, built, debugged, refactored, created, fixed
-- Never say "User implemented" or "User built"
-</attribution>
-
-Output valid JSON only. No markdown, no code blocks, no explanation.`;
+  return `You are a session state extractor. Given a development conversation, produce a JSON snapshot of the session's current state.
+
+<instructions>
+1. Read the conversation in the <transcript> section
+2. Ignore the <previous_state> section — it is background context only, not part of this session
+3. Extract ONLY what happened in the transcript
+4. Produce a JSON object with the fields described below
+</instructions>
+
+<fields>
+- summary: One sentence describing what was accomplished this session
+- current_focus: The specific task or feature being worked on (omit if exploratory)
+- next_steps: Array of concrete next actions. Name the specific task.
+- decisions: Array of decisions made this session, each with rationale
+- patterns_used: Array of techniques or approaches applied, each with context
+- preferences_expressed: Array of user preferences revealed through direction or correction
+- problems_solved: Array of problems encountered with root cause and fix
+</fields>
+
+Include a field only when the transcript contains clear evidence. Omit empty arrays. Every value must be a complete sentence.
+
+<example>
+<input>
+<previous_state>Focus: Building authentication system</previous_state>
+<transcript>
+User Asked: Let's use JWT instead of sessions for auth
+Assistant Response: Switched from express-session to jsonwebtoken. JWTs are stateless so we don't need Redis for session storage anymore. Updated the middleware to verify tokens on each request.
+User Asked: Make sure the tokens expire after 24 hours
+Assistant Response: Set expiresIn to 24h in the sign options. Also added a refresh token flow so users don't get logged out mid-work.
+</transcript>
+</input>
+<output>
+{"summary":"Implemented JWT-based authentication replacing session-based auth, with 24-hour token expiry and refresh token flow","current_focus":"Authentication system implementation","next_steps":["Test the refresh token flow with expired tokens","Add token revocation for logout"],"decisions":["Chose JWT over sessions — eliminates Redis dependency since tokens are stateless","Set 24-hour token expiry with refresh flow — balances security with user convenience"],"preferences_expressed":["User directed specific token expiry of 24 hours"]}
+</output>
+</example>
+
+<example>
+<input>
+<previous_state>Focus: Investigating test failures</previous_state>
+<transcript>
+User Asked: The CI is failing on the webhook tests
+Assistant Response: Found the issue — the test was using a hardcoded timestamp that expired. Changed it to use a relative timestamp. Also found that the webhook handler had a race condition where two events could arrive simultaneously and both pass the idempotency check. Added a mutex lock.
+User Asked: Good catch on the race condition
+</transcript>
+</input>
+<output>
+{"summary":"Fixed CI test failure caused by hardcoded timestamp and discovered a race condition in the webhook handler","current_focus":"Webhook test failures and handler reliability","problems_solved":["Fixed expired hardcoded timestamp in webhook tests — replaced with relative timestamp calculation","Fixed race condition in webhook handler where simultaneous events bypassed idempotency check — added mutex lock"],"next_steps":["Verify CI passes with the timestamp and mutex fixes"]}
+</output>
+</example>
+
+Output valid JSON only.`;
 }

 /**
@@ -254,329 +255,22 @@ function extractJson(raw: string): SessionInsights | null {
 // ============================================================================

 /**
- * Load environment variables from .env file
- */
-function loadEnvFile(envPath: string): Record<string, string> {
-  const env: Record<string, string> = {};
-
-  if (!existsSync(envPath)) {
-    return env;
-  }
-
-  try {
-    const content = readFileSync(envPath, "utf-8");
-    for (const line of content.split("\n")) {
-      const trimmed = line.trim();
-      if (!trimmed || trimmed.startsWith("#")) continue;
-
-      const eqIdx = trimmed.indexOf("=");
-      if (eqIdx === -1) continue;
-
-      const key = trimmed.slice(0, eqIdx).trim();
-      let value = trimmed.slice(eqIdx + 1).trim();
-
-      // Remove quotes if present
-      if (
-        (value.startsWith('"') && value.endsWith('"')) ||
-        (value.startsWith("'") && value.endsWith("'"))
-      ) {
-        value = value.slice(1, -1);
-      }
-
-      env[key] = value;
-    }
-  } catch {
-    // Ignore parse errors
-  }
-
-  return env;
-}
-
-/**
- * Resolve env: references in config values
- */
-function resolveEnvRef(
-  value: string,
-  envVars: Record<string, string>,
-): string | null {
-  if (value.startsWith("env:")) {
-    const varName = value.slice(4);
-    return envVars[varName] || process.env[varName] || null;
-  }
-  return value;
-}
-
-/**
- * Load configuration from config.toml with env file support
- * Config: ~/.config/llm/config.toml
- * Secrets: ~/.config/llm/.env
+ * Load configuration for llm-summarize.
+ *
+ * Returns defaults suitable for llm-core's complete() function.
+ * Service resolution (API keys, endpoints) is handled by llm-core
+ * via ~/.config/llm-core/services.toml and apiconf.
+ *
+ * To configure:
+ * 1. Set up apiconf: ~/.config/apiconf/config.toml
+ * 2. Set up services: ~/.config/llm-core/services.toml
+ * 3. Optionally override model/maxTokens via SummarizeOptions
  */
-export function loadConfig(): LLMConfig {
-  const configDir = join(process.env.HOME!, ".config", "llm");
-  const configPath = join(configDir, "config.toml");
-  const envPath = join(configDir, ".env");
-
-  // Load .env file first
-  const envVars = loadEnvFile(envPath);
-
-  // No defaults - config required
-  const config: LLMConfig = {
-    provider: null,
-    model: null,
-    apiKey: null,
-    apiBase: null,
+export function loadConfig(): SummarizeConfig {
+  return {
+    model: "claude-3-5-haiku-20241022",
     maxTokens: 1024,
   };
-
-  if (!existsSync(configPath)) {
-    return config;
-  }
-
-  try {
-    const content = readFileSync(configPath, "utf-8");
-
-    // Parse [llm] section
-    const providerMatch = content.match(/^\s*provider\s*=\s*"([^"]+)"/m);
-    if (providerMatch) {
-      config.provider = providerMatch[1];
-    }
-
-    const modelMatch = content.match(/^\s*model\s*=\s*"([^"]+)"/m);
-    if (modelMatch) {
-      config.model = modelMatch[1];
-    }
-
-    const apiKeyMatch = content.match(/^\s*api_key\s*=\s*"([^"]+)"/m);
-    if (apiKeyMatch) {
-      config.apiKey = resolveEnvRef(apiKeyMatch[1], envVars);
-    }
-
-    const apiBaseMatch = content.match(/^\s*api_base\s*=\s*"([^"]+)"/m);
-    if (apiBaseMatch) {
-      config.apiBase = apiBaseMatch[1];
-    }
-
-    const maxTokensMatch = content.match(/^\s*max_tokens\s*=\s*(\d+)/m);
-    if (maxTokensMatch) {
-      config.maxTokens = parseInt(maxTokensMatch[1], 10);
-    }
-  } catch {
-    // Ignore parse errors
-  }
-
-  // Environment variables override config
-  if (process.env.LLM_PROVIDER) config.provider = process.env.LLM_PROVIDER;
-  if (process.env.LLM_MODEL) config.model = process.env.LLM_MODEL;
-  if (process.env.LLM_API_KEY) config.apiKey = process.env.LLM_API_KEY;
-
-  return config;
-}
-
-// ============================================================================
-// Provider Implementations
-// ============================================================================
-
-/**
- * Call Anthropic API
- */
-async function callAnthropic(
-  text: string,
-  model: string,
-  maxTokens: number,
-  apiKey: string,
-  systemPrompt: string,
-  apiBase?: string,
-): Promise<SummarizeResult> {
-  const endpoint = apiBase || "https://api.anthropic.com/v1/messages";
-
-  try {
-    const response = await fetch(endpoint, {
-      method: "POST",
-      headers: {
-        "x-api-key": apiKey,
-        "anthropic-version": "2023-06-01",
-        "content-type": "application/json",
-      },
-      body: JSON.stringify({
-        model,
-        max_tokens: maxTokens,
-        temperature: 0.3,
-        system: systemPrompt,
-        messages: [
-          {
-            role: "user",
-            content: text,
-          },
-        ],
-      }),
-    });
-
-    if (!response.ok) {
-      const errorText = await response.text();
-      return {
-        error: `Anthropic API error: ${response.status} ${errorText}`,
-      };
-    }
-
-    const result = await response.json();
-    const content = result.content?.[0]?.text || "";
-    const insights = extractJson(content);
-
-    if (!insights) {
-      return {
-        rawText: content,
-        error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
-      };
-    }
-
-    return {
-      insights,
-      rawText: content,
-      model,
-      tokens_used: result.usage?.output_tokens,
-    };
-  } catch (error) {
-    return {
-      error: `Anthropic request failed: ${String(error)}`,
-    };
-  }
-}
-
-/**
- * Call OpenAI API
- */
-async function callOpenAI(
-  text: string,
-  model: string,
-  maxTokens: number,
-  apiKey: string,
-  systemPrompt: string,
-  apiBase?: string,
-): Promise<SummarizeResult> {
-  const endpoint = apiBase || "https://api.openai.com/v1/chat/completions";
-
-  try {
-    const response = await fetch(endpoint, {
-      method: "POST",
-      headers: {
-        Authorization: `Bearer ${apiKey}`,
-        "Content-Type": "application/json",
-      },
-      body: JSON.stringify({
-        model,
-        max_tokens: maxTokens,
-        temperature: 0.3,
-        messages: [
-          {
-            role: "system",
-            content: systemPrompt,
-          },
-          {
-            role: "user",
-            content: text,
-          },
-        ],
-      }),
-    });
-
-    if (!response.ok) {
-      const errorText = await response.text();
-      return {
-        error: `OpenAI API error: ${response.status} ${errorText}`,
-      };
-    }
-
-    const result = await response.json();
-    const content = result.choices?.[0]?.message?.content || "";
-    const insights = extractJson(content);
-
-    if (!insights) {
-      return {
-        rawText: content,
-        error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
-      };
-    }
-
-    return {
-      insights,
-      rawText: content,
-      model,
-      tokens_used: result.usage?.completion_tokens,
-    };
-  } catch (error) {
-    return {
-      error: `OpenAI request failed: ${String(error)}`,
-    };
-  }
-}
-
-/**
- * Call Ollama API (chat endpoint for system prompt support)
- */
-async function callOllama(
-  text: string,
-  model: string,
-  maxTokens: number,
-  apiBase: string,
-  systemPrompt: string,
-): Promise<SummarizeResult> {
-  const endpoint = `${apiBase}/api/chat`;
-
-  try {
-    const response = await fetch(endpoint, {
-      method: "POST",
-      headers: {
-        "Content-Type": "application/json",
-      },
-      body: JSON.stringify({
-        model,
-        messages: [
-          {
-            role: "system",
-            content: systemPrompt,
-          },
-          {
-            role: "user",
-            content: text,
-          },
-        ],
-        stream: false,
-        options: {
-          num_predict: maxTokens,
-          temperature: 0.3,
-        },
-      }),
-    });
-
-    if (!response.ok) {
-      const errorText = await response.text();
-      return {
-        error: `Ollama API error: ${response.status} ${errorText}`,
-      };
-    }
-
-    const result = await response.json();
-    const content = result.message?.content || "";
-    const insights = extractJson(content);
-
-    if (!insights) {
-      return {
-        rawText: content,
-        error: `Failed to parse response as JSON: ${content.slice(0, 200)}`,
-      };
-    }
-
-    return {
-      insights,
-      rawText: content,
-      model,
-      tokens_used: result.eval_count,
-    };
-  } catch (error) {
-    return {
-      error: `Ollama request failed: ${String(error)}`,
-    };
-  }
 }

 // ============================================================================
@@ -587,7 +281,7 @@ async function callOllama(
  * Summarize text using configured LLM
  *
  * @param text - Text to summarize
- * @param config - LLM configuration (from loadConfig())
+ * @param config - Summarize configuration (from loadConfig())
  * @param options - Optional overrides for model, maxTokens, and mode
  * @returns SummarizeResult with insights or error
  *
@@ -597,67 +291,50 @@ async function callOllama(
  */
 export async function summarize(
   text: string,
-  config: LLMConfig,
+  config: SummarizeConfig,
   options?: SummarizeOptions,
 ): Promise<SummarizeResult> {
-  const provider = config.provider;
-  const model = options?.model || config.model;
-  const maxTokens = options?.maxTokens || config.maxTokens;
-  const apiKey = config.apiKey;
-  const mode: SummarizeMode = options?.mode || "insights";
-  const userName = options?.userName;
-  const systemPrompt =
-    options?.systemPrompt || getPromptForMode(mode, userName);
-
-  // Validate config
-  if (!provider) {
-    return {
-      error: `No provider configured. Set provider in ~/.config/llm/config.toml`,
-    };
-  }
-
-  if (!model) {
-    return {
-      error: `No model configured. Set model in ~/.config/llm/config.toml`,
-    };
-  }
-
-  // API key required for cloud providers
-  if (!apiKey && provider !== "ollama") {
-    return {
-      error: `No API key configured. Set api_key = "env:VAR_NAME" in ~/.config/llm/config.toml`,
-    };
-  }
+  try {
+    const mode: SummarizeMode = options?.mode || "insights";
+    const userName = options?.userName;
+    const systemPrompt =
+      options?.systemPrompt || getPromptForMode(mode, userName);
+    const model = options?.model || config.model;
+
+    // Validate model before calling complete() (which throws on empty model)
+    if (!model) {
+      return {
+        error:
+          "No model configured. Set model in loadConfig() or pass via options.model",
+      };
+    }

-  // Call appropriate provider
-  if (provider === "anthropic") {
-    return callAnthropic(
-      text,
-      model,
-      maxTokens,
-      apiKey!,
-      systemPrompt,
-      config.apiBase || undefined,
-    );
-  } else if (provider === "openai") {
-    return callOpenAI(
-      text,
+    const result = await complete({
+      service: config.service,
       model,
-      maxTokens,
-      apiKey!,
+      prompt: text,
       systemPrompt,
-      config.apiBase || undefined,
-    );
-  } else if (provider === "ollama") {
-    if (!config.apiBase) {
+      maxTokens: options?.maxTokens || config.maxTokens,
+      temperature: 0.3,
+    });
+
+    const insights = extractJson(result.text);
+
+    if (!insights) {
       return {
-        error: `No api_base configured for ollama. Set api_base in ~/.config/llm/config.toml`,
+        error: "Failed to parse insights from response",
+        rawText: result.text,
       };
     }
-    return callOllama(text, model, maxTokens, config.apiBase, systemPrompt);
-  } else {
+
     return {
-      error: `Unknown provider: ${provider}. Supported: anthropic, openai, ollama`,
+      insights,
+      rawText: result.text,
+      model: result.model,
+      tokens_used: result.tokens.output,
     };
+  } catch (err) {
+    const error = err instanceof Error ? err.message : String(err);
+    return { error };
   }
 }
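With this change, index.ts is a thin wrapper over llm-core's complete(). A usage sketch based only on the exports visible in this diff (loadConfig, summarize, and the SummarizeResult shape); note that transport failures now surface as result.error rather than thrown exceptions:

```typescript
import { loadConfig, summarize } from "@voidwire/llm-summarize";

// loadConfig() returns { model: "claude-3-5-haiku-20241022", maxTokens: 1024 };
// service resolution (API keys, endpoints) happens inside llm-core via
// ~/.config/llm-core/services.toml and apiconf.
const config = loadConfig();

const result = await summarize(
  "User Asked: ...\nAssistant Response: ...",
  config,
  { mode: "insights" }, // or "quick" for a one-liner summary
);

if (result.error) {
  // errors thrown by complete() are caught inside summarize() and returned here
  console.error(result.error);
} else {
  console.log(result.insights?.summary);
  console.log(result.model, result.tokens_used);
}
```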
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@voidwire/llm-summarize",
-  "version": "3.6.0",
+  "version": "3.8.0",
   "description": "Structured session insight extraction for knowledge systems",
   "type": "module",
   "main": "./index.ts",
@@ -40,6 +40,9 @@
   "engines": {
     "bun": ">=1.0.0"
   },
+  "dependencies": {
+    "@voidwire/llm-core": "0.2.0"
+  },
   "scripts": {
     "test": "bun test"
   }