@voidwire/llm-summarize 3.8.0 → 3.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/index.ts +8 -16
  2. package/package.json +1 -1
package/index.ts CHANGED
@@ -47,7 +47,7 @@ export interface SummarizeResult {
47
47
 
48
48
  export interface SummarizeConfig {
49
49
  service?: string; // Named service from services.toml (optional, uses default_service)
50
- model: string; // Model name — required by complete()
50
+ model?: string; // Model override — falls back to service default_model if omitted
51
51
  maxTokens: number; // Max output tokens
52
52
  }
53
53
 
@@ -257,18 +257,17 @@ function extractJson(raw: string): SessionInsights | null {
257
257
  /**
258
258
  * Load configuration for llm-summarize.
259
259
  *
260
- * Returns defaults suitable for llm-core's complete() function.
261
- * Service resolution (API keys, endpoints) is handled by llm-core
262
- * via ~/.config/llm-core/services.toml and apiconf.
260
+ * Service and model are resolved by llm-core from services.toml.
261
+ * Override service/model here only when llm-summarize needs to
262
+ * differ from the default_service and its default_model.
263
263
  *
264
264
  * To configure:
265
- * 1. Set up apiconf: ~/.config/apiconf/config.toml
266
- * 2. Set up services: ~/.config/llm-core/services.toml
267
- * 3. Optionally override model/maxTokens via SummarizeOptions
265
+ * 1. Set up services: ~/.config/llm-core/services.toml (with default_model per service)
266
+ * 2. Set up API keys: ~/.config/apiconf/config.toml (for cloud services)
267
+ * 3. Optionally override service/model/maxTokens via SummarizeOptions
268
268
  */
269
269
  export function loadConfig(): SummarizeConfig {
270
270
  return {
271
- model: "claude-3-5-haiku-20241022",
272
271
  maxTokens: 1024,
273
272
  };
274
273
  }
@@ -299,16 +298,9 @@ export async function summarize(
299
298
  const userName = options?.userName;
300
299
  const systemPrompt =
301
300
  options?.systemPrompt || getPromptForMode(mode, userName);
301
+ // Model resolution: options.model > config.model > service default_model (in llm-core)
302
302
  const model = options?.model || config.model;
303
303
 
304
- // Validate model before calling complete() (which throws on empty model)
305
- if (!model) {
306
- return {
307
- error:
308
- "No model configured. Set model in loadConfig() or pass via options.model",
309
- };
310
- }
311
-
312
304
  const result = await complete({
313
305
  service: config.service,
314
306
  model,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@voidwire/llm-summarize",
3
- "version": "3.8.0",
3
+ "version": "3.9.0",
4
4
  "description": "Structured session insight extraction for knowledge systems",
5
5
  "type": "module",
6
6
  "main": "./index.ts",