@martian-engineering/lossless-claw 0.2.8 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,13 +17,33 @@
17
17
  "label": "Database Path",
18
18
  "help": "Path to LCM SQLite database (default: ~/.openclaw/lcm.db)"
19
19
  },
20
+ "ignoreSessionPatterns": {
21
+ "label": "Ignored Sessions",
22
+ "help": "Glob patterns for session keys to exclude from LCM storage"
23
+ },
24
+ "statelessSessionPatterns": {
25
+ "label": "Stateless Sessions",
26
+ "help": "Glob patterns for session keys that can read from LCM but never write to it"
27
+ },
28
+ "skipStatelessSessions": {
29
+ "label": "Skip Stateless Sessions",
30
+ "help": "When enabled, matching stateless session keys skip LCM persistence and are never granted writes"
31
+ },
20
32
  "summaryModel": {
21
33
  "label": "Summary Model",
22
- "help": "Model override for LCM summarization (e.g., 'gpt-5.4' or 'openai-resp/gpt-5.4')"
34
+ "help": "Model override for LCM summarization (e.g., 'gpt-5.4' to reuse the session provider, or 'openai-resp/gpt-5.4' for a full cross-provider ref)"
23
35
  },
24
36
  "summaryProvider": {
25
37
  "label": "Summary Provider",
26
- "help": "Provider override for LCM summarization (e.g., 'openai-resp')"
38
+ "help": "Provider override used only when summaryModel is a bare model name (e.g., 'openai-resp')"
39
+ },
40
+ "expansionModel": {
41
+ "label": "Expansion Model",
42
+ "help": "Model override for lcm_expand_query sub-agent (e.g., 'anthropic/claude-haiku-4-5')"
43
+ },
44
+ "expansionProvider": {
45
+ "label": "Expansion Provider",
46
+ "help": "Provider override for lcm_expand_query sub-agent (e.g., 'anthropic')"
27
47
  }
28
48
  },
29
49
  "configSchema": {
@@ -61,6 +81,21 @@
61
81
  "dbPath": {
62
82
  "type": "string"
63
83
  },
84
+ "ignoreSessionPatterns": {
85
+ "type": "array",
86
+ "items": {
87
+ "type": "string"
88
+ }
89
+ },
90
+ "statelessSessionPatterns": {
91
+ "type": "array",
92
+ "items": {
93
+ "type": "string"
94
+ }
95
+ },
96
+ "skipStatelessSessions": {
97
+ "type": "boolean"
98
+ },
64
99
  "largeFileThresholdTokens": {
65
100
  "type": "integer",
66
101
  "minimum": 1000
@@ -70,6 +105,12 @@
70
105
  },
71
106
  "summaryProvider": {
72
107
  "type": "string"
108
+ },
109
+ "expansionModel": {
110
+ "type": "string"
111
+ },
112
+ "expansionProvider": {
113
+ "type": "string"
73
114
  }
74
115
  }
75
116
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@martian-engineering/lossless-claw",
3
- "version": "0.2.8",
3
+ "version": "0.4.0",
4
4
  "description": "Lossless Context Management plugin for OpenClaw — DAG-based conversation summarization with incremental compaction",
5
5
  "type": "module",
6
6
  "main": "index.ts",
@@ -15,6 +15,12 @@
15
15
  "conversation-memory",
16
16
  "dag"
17
17
  ],
18
+ "scripts": {
19
+ "changeset": "changeset",
20
+ "release:verify": "npm test && npm pack --dry-run",
21
+ "test": "vitest run --dir test",
22
+ "version-packages": "changeset version"
23
+ },
18
24
  "files": [
19
25
  "index.ts",
20
26
  "src/**/*.ts",
@@ -23,21 +29,22 @@
23
29
  "README.md",
24
30
  "LICENSE"
25
31
  ],
26
- "scripts": {
27
- "test": "vitest run --dir test"
28
- },
29
32
  "dependencies": {
30
33
  "@mariozechner/pi-agent-core": "*",
31
34
  "@mariozechner/pi-ai": "*",
32
35
  "@sinclair/typebox": "0.34.48"
33
36
  },
34
37
  "devDependencies": {
38
+ "@changesets/cli": "^2.30.0",
35
39
  "typescript": "^5.7.0",
36
40
  "vitest": "^3.0.0"
37
41
  },
38
42
  "peerDependencies": {
39
43
  "openclaw": "*"
40
44
  },
45
+ "publishConfig": {
46
+ "access": "public"
47
+ },
41
48
  "openclaw": {
42
49
  "extensions": [
43
50
  "./index.ts"
package/src/assembler.ts CHANGED
@@ -65,7 +65,7 @@ function buildSystemPromptAddition(summarySignals: SummaryPromptSignal[]): strin
65
65
  "",
66
66
  "Summaries above are compressed context — maps to details, not the details themselves.",
67
67
  "",
68
- "**Recall priority:** LCM tools first, then qmd (for Granola/Limitless/pre-LCM data), then memory_search as last resort.",
68
+ "**Recall priority:** Use LCM tools first for compacted conversation history. If LCM does not cover the needed data, prefer any available memory/recall tool before falling back to raw text search.",
69
69
  "",
70
70
  "**Tool escalation:**",
71
71
  "1. `lcm_grep` — search by regex or full-text across messages and summaries",
@@ -211,7 +211,8 @@ function tryRestoreOpenAIReasoning(raw: Record<string, unknown>): Record<string,
211
211
  return null;
212
212
  }
213
213
 
214
- function toolCallBlockFromPart(part: MessagePartRecord, rawType?: string): unknown {
214
+ /** @internal Exported for testing only. */
215
+ export function toolCallBlockFromPart(part: MessagePartRecord, rawType?: string): unknown {
215
216
  const type =
216
217
  rawType === "function_call" ||
217
218
  rawType === "functionCall" ||
@@ -245,7 +246,10 @@ function toolCallBlockFromPart(part: MessagePartRecord, rawType?: string): unkno
245
246
  }
246
247
 
247
248
  if (input !== undefined) {
248
- if (type === "functionCall") {
249
+ // toolCall and functionCall use "arguments" (consumed by OpenAI/xAI Chat
250
+ // Completions extractToolCalls and Responses API paths in OpenClaw).
251
+ // tool_use and variants use "input" (Anthropic native format).
252
+ if (type === "functionCall" || type === "toolCall") {
249
253
  block.arguments = input;
250
254
  } else {
251
255
  block.input = input;
@@ -254,13 +258,40 @@ function toolCallBlockFromPart(part: MessagePartRecord, rawType?: string): unkno
254
258
  return block;
255
259
  }
256
260
 
257
- function toolResultBlockFromPart(part: MessagePartRecord, rawType?: string): unknown {
261
+ /** @internal Exported for testing only. */
262
+ export function toolResultBlockFromPart(
263
+ part: MessagePartRecord,
264
+ rawType?: string,
265
+ raw?: Record<string, unknown>,
266
+ ): unknown {
258
267
  const type =
259
268
  rawType === "function_call_output" || rawType === "toolResult" || rawType === "tool_result"
260
269
  ? rawType
261
270
  : "tool_result";
262
- const output = parseStoredValue(part.toolOutput) ?? part.textContent ?? "";
263
- const block: Record<string, unknown> = { type, output };
271
+ const output = parseStoredValue(part.toolOutput);
272
+ const block: Record<string, unknown> = { type };
273
+
274
+ if (typeof part.toolName === "string" && part.toolName.length > 0) {
275
+ block.name = part.toolName;
276
+ }
277
+
278
+ if (output !== undefined) {
279
+ block.output = output;
280
+ } else if (typeof part.textContent === "string") {
281
+ block.output = part.textContent;
282
+ } else if (raw && raw.output !== undefined) {
283
+ block.output = raw.output;
284
+ } else if (raw && raw.content !== undefined) {
285
+ block.content = raw.content;
286
+ } else {
287
+ block.output = "";
288
+ }
289
+
290
+ if (raw && typeof raw.is_error === "boolean") {
291
+ block.is_error = raw.is_error;
292
+ } else if (raw && typeof raw.isError === "boolean") {
293
+ block.isError = raw.isError;
294
+ }
264
295
 
265
296
  if (type === "function_call_output") {
266
297
  if (typeof part.toolCallId === "string" && part.toolCallId.length > 0) {
@@ -303,14 +334,34 @@ function toRuntimeRole(
303
334
  return "user"; // user | system
304
335
  }
305
336
 
306
- function blockFromPart(part: MessagePartRecord): unknown {
337
+ /** @internal Exported for testing only. */
338
+ export function blockFromPart(part: MessagePartRecord): unknown {
307
339
  const metadata = getPartMetadata(part);
308
340
  if (metadata.raw && typeof metadata.raw === "object") {
309
341
  // If this is an OpenClaw-normalised OpenAI reasoning block, restore the original
310
342
  // OpenAI format so the Responses API gets the {type:"reasoning", id:"rs_…"} it expects.
311
343
  const restored = tryRestoreOpenAIReasoning(metadata.raw as Record<string, unknown>);
312
344
  if (restored) return restored;
313
- return metadata.raw;
345
+
346
+ // Don't return raw for tool call/result blocks — they need to go through
347
+ // toolCallBlockFromPart/toolResultBlockFromPart which properly normalize
348
+ // arguments (stringify if object) and format for the target provider.
349
+ // Returning raw here causes arguments to be passed as a JS object instead
350
+ // of a JSON string, which breaks xAI/OpenAI Chat Completions API (422).
351
+ const rawType = (metadata.raw as Record<string, unknown>).type as string | undefined;
352
+ const isToolBlock =
353
+ rawType === "toolCall" ||
354
+ rawType === "tool_use" ||
355
+ rawType === "tool-use" ||
356
+ rawType === "toolUse" ||
357
+ rawType === "functionCall" ||
358
+ rawType === "function_call" ||
359
+ rawType === "function_call_output" ||
360
+ rawType === "toolResult" ||
361
+ rawType === "tool_result";
362
+ if (!isToolBlock) {
363
+ return metadata.raw;
364
+ }
314
365
  }
315
366
 
316
367
  if (part.partType === "reasoning") {
@@ -318,7 +369,13 @@ function blockFromPart(part: MessagePartRecord): unknown {
318
369
  }
319
370
  if (part.partType === "tool") {
320
371
  if (metadata.originalRole === "toolResult" || metadata.rawType === "function_call_output") {
321
- return toolResultBlockFromPart(part, metadata.rawType);
372
+ return toolResultBlockFromPart(
373
+ part,
374
+ metadata.rawType,
375
+ metadata.raw && typeof metadata.raw === "object"
376
+ ? (metadata.raw as Record<string, unknown>)
377
+ : undefined,
378
+ );
322
379
  }
323
380
  return toolCallBlockFromPart(part, metadata.rawType);
324
381
  }
@@ -337,7 +394,13 @@ function blockFromPart(part: MessagePartRecord): unknown {
337
394
  metadata.rawType === "tool_result" ||
338
395
  metadata.rawType === "toolResult"
339
396
  ) {
340
- return toolResultBlockFromPart(part, metadata.rawType);
397
+ return toolResultBlockFromPart(
398
+ part,
399
+ metadata.rawType,
400
+ metadata.raw && typeof metadata.raw === "object"
401
+ ? (metadata.raw as Record<string, unknown>)
402
+ : undefined,
403
+ );
341
404
  }
342
405
  if (part.partType === "text") {
343
406
  return { type: "text", text: part.textContent ?? "" };
@@ -395,6 +458,10 @@ function pickToolCallId(parts: MessagePartRecord[]): string | undefined {
395
458
  if (!decoded || typeof decoded !== "object") {
396
459
  continue;
397
460
  }
461
+ const metadataToolCallId = (decoded as { toolCallId?: unknown }).toolCallId;
462
+ if (typeof metadataToolCallId === "string" && metadataToolCallId.length > 0) {
463
+ return metadataToolCallId;
464
+ }
398
465
  const raw = (decoded as { raw?: unknown }).raw;
399
466
  if (!raw || typeof raw !== "object") {
400
467
  continue;
@@ -411,6 +478,49 @@ function pickToolCallId(parts: MessagePartRecord[]): string | undefined {
411
478
  return undefined;
412
479
  }
413
480
 
481
+ function pickToolName(parts: MessagePartRecord[]): string | undefined {
482
+ for (const part of parts) {
483
+ if (typeof part.toolName === "string" && part.toolName.length > 0) {
484
+ return part.toolName;
485
+ }
486
+ const decoded = parseJson(part.metadata);
487
+ if (!decoded || typeof decoded !== "object") {
488
+ continue;
489
+ }
490
+ const metadataToolName = (decoded as { toolName?: unknown }).toolName;
491
+ if (typeof metadataToolName === "string" && metadataToolName.length > 0) {
492
+ return metadataToolName;
493
+ }
494
+ const raw = (decoded as { raw?: unknown }).raw;
495
+ if (!raw || typeof raw !== "object") {
496
+ continue;
497
+ }
498
+ const maybe = (raw as { name?: unknown }).name;
499
+ if (typeof maybe === "string" && maybe.length > 0) {
500
+ return maybe;
501
+ }
502
+ const maybeCamel = (raw as { toolName?: unknown }).toolName;
503
+ if (typeof maybeCamel === "string" && maybeCamel.length > 0) {
504
+ return maybeCamel;
505
+ }
506
+ }
507
+ return undefined;
508
+ }
509
+
510
+ function pickToolIsError(parts: MessagePartRecord[]): boolean | undefined {
511
+ for (const part of parts) {
512
+ const decoded = parseJson(part.metadata);
513
+ if (!decoded || typeof decoded !== "object") {
514
+ continue;
515
+ }
516
+ const metadataIsError = (decoded as { isError?: unknown }).isError;
517
+ if (typeof metadataIsError === "boolean") {
518
+ return metadataIsError;
519
+ }
520
+ }
521
+ return undefined;
522
+ }
523
+
414
524
  /** Format a Date for XML attributes in the agent's timezone. */
415
525
  function formatDateForAttribute(date: Date, timezone?: string): string {
416
526
  const tz = timezone ?? "UTC";
@@ -674,16 +784,19 @@ export class ContextAssembler {
674
784
 
675
785
  const parts = await this.conversationStore.getMessageParts(msg.messageId);
676
786
  const roleFromStore = toRuntimeRole(msg.role, parts);
677
- const toolCallId = roleFromStore === "toolResult" ? pickToolCallId(parts) : undefined;
787
+ const isToolResult = roleFromStore === "toolResult";
788
+ const toolCallId = isToolResult ? pickToolCallId(parts) : undefined;
789
+ const toolName = isToolResult ? (pickToolName(parts) ?? "unknown") : undefined;
790
+ const toolIsError = isToolResult ? pickToolIsError(parts) : undefined;
678
791
  // Tool results without a call id cannot be serialized for Anthropic-compatible APIs.
679
792
  // This happens for legacy/bootstrap rows that have role=tool but no message_parts.
680
793
  // Preserve the text by degrading to assistant content instead of emitting invalid toolResult.
681
794
  const role: "user" | "assistant" | "toolResult" =
682
- roleFromStore === "toolResult" && !toolCallId ? "assistant" : roleFromStore;
795
+ isToolResult && !toolCallId ? "assistant" : roleFromStore;
683
796
  const content = contentFromParts(parts, role, msg.content);
684
797
  const contentText =
685
798
  typeof content === "string" ? content : (JSON.stringify(content) ?? msg.content);
686
- const tokenCount = msg.tokenCount > 0 ? msg.tokenCount : estimateTokens(contentText);
799
+ const tokenCount = estimateTokens(contentText);
687
800
 
688
801
  // Cast: these are reconstructed from DB storage, not live agent messages,
689
802
  // so they won't carry the full AgentMessage metadata (timestamp, usage, etc.)
@@ -713,6 +826,8 @@ export class ContextAssembler {
713
826
  role,
714
827
  content,
715
828
  ...(toolCallId ? { toolCallId } : {}),
829
+ ...(toolName ? { toolName } : {}),
830
+ ...(role === "toolResult" && toolIsError !== undefined ? { isError: toolIsError } : {}),
716
831
  } as AgentMessage),
717
832
  tokens: tokenCount,
718
833
  isMessage: true,
package/src/compaction.ts CHANGED
@@ -86,7 +86,7 @@ function estimateTokens(content: string): number {
86
86
  }
87
87
 
88
88
  /** Format a timestamp as `YYYY-MM-DD HH:mm TZ` for prompt source text. */
89
- function formatTimestamp(value: Date, timezone: string = "UTC"): string {
89
+ export function formatTimestamp(value: Date, timezone: string = "UTC"): string {
90
90
  try {
91
91
  const fmt = new Intl.DateTimeFormat("en-CA", {
92
92
  timeZone: timezone,
@@ -282,6 +282,14 @@ export class CompactionEngine {
282
282
  summarize,
283
283
  previousSummaryContent,
284
284
  );
285
+ if (!leafResult) {
286
+ return {
287
+ actionTaken: false,
288
+ tokensBefore,
289
+ tokensAfter: tokensBefore,
290
+ condensed: false,
291
+ };
292
+ }
285
293
  const tokensAfterLeaf = await this.summaryStore.getContextTokenCount(conversationId);
286
294
 
287
295
  await this.persistCompactionEvents({
@@ -315,6 +323,9 @@ export class CompactionEngine {
315
323
  targetDepth,
316
324
  summarize,
317
325
  );
326
+ if (!condenseResult) {
327
+ break;
328
+ }
318
329
  const passTokensAfter = await this.summaryStore.getContextTokenCount(conversationId);
319
330
  await this.persistCompactionEvents({
320
331
  conversationId,
@@ -406,6 +417,9 @@ export class CompactionEngine {
406
417
  summarize,
407
418
  previousSummaryContent,
408
419
  );
420
+ if (!leafResult) {
421
+ break;
422
+ }
409
423
  const passTokensAfter = await this.summaryStore.getContextTokenCount(conversationId);
410
424
  await this.persistCompactionEvents({
411
425
  conversationId,
@@ -421,6 +435,10 @@ export class CompactionEngine {
421
435
  level = leafResult.level;
422
436
  previousSummaryContent = leafResult.content;
423
437
 
438
+ if (!force && passTokensAfter <= threshold) {
439
+ previousTokens = passTokensAfter;
440
+ break;
441
+ }
424
442
  if (passTokensAfter >= passTokensBefore || passTokensAfter >= previousTokens) {
425
443
  break;
426
444
  }
@@ -428,7 +446,7 @@ export class CompactionEngine {
428
446
  }
429
447
 
430
448
  // Phase 2: depth-aware condensed passes, always processing shallowest depth first.
431
- while (true) {
449
+ while (force || previousTokens > threshold) {
432
450
  const candidate = await this.selectShallowestCondensationCandidate({
433
451
  conversationId,
434
452
  hardTrigger: hardTrigger === true,
@@ -444,6 +462,9 @@ export class CompactionEngine {
444
462
  candidate.targetDepth,
445
463
  summarize,
446
464
  );
465
+ if (!condenseResult) {
466
+ break;
467
+ }
447
468
  const passTokensAfter = await this.summaryStore.getContextTokenCount(conversationId);
448
469
  await this.persistCompactionEvents({
449
470
  conversationId,
@@ -459,6 +480,10 @@ export class CompactionEngine {
459
480
  createdSummaryId = condenseResult.summaryId;
460
481
  level = condenseResult.level;
461
482
 
483
+ if (!force && passTokensAfter <= threshold) {
484
+ previousTokens = passTokensAfter;
485
+ break;
486
+ }
462
487
  if (passTokensAfter >= passTokensBefore || passTokensAfter >= previousTokens) {
463
488
  break;
464
489
  }
@@ -964,7 +989,7 @@ export class CompactionEngine {
964
989
  sourceText: string;
965
990
  summarize: CompactionSummarizeFn;
966
991
  options?: CompactionSummarizeOptions;
967
- }): Promise<{ content: string; level: CompactionLevel }> {
992
+ }): Promise<{ content: string; level: CompactionLevel } | null> {
968
993
  const sourceText = params.sourceText.trim();
969
994
  if (!sourceText) {
970
995
  return {
@@ -974,11 +999,25 @@ export class CompactionEngine {
974
999
  }
975
1000
  const inputTokens = Math.max(1, estimateTokens(sourceText));
976
1001
 
977
- let summaryText = await params.summarize(sourceText, false, params.options);
1002
+ const runSummarizer = async (aggressiveMode: boolean): Promise<string | null> => {
1003
+ const output = await params.summarize(sourceText, aggressiveMode, params.options);
1004
+ const trimmed = output.trim();
1005
+ return trimmed || null;
1006
+ };
1007
+
1008
+ const initialSummary = await runSummarizer(false);
1009
+ if (initialSummary === null) {
1010
+ return null;
1011
+ }
1012
+ let summaryText = initialSummary;
978
1013
  let level: CompactionLevel = "normal";
979
1014
 
980
1015
  if (estimateTokens(summaryText) >= inputTokens) {
981
- summaryText = await params.summarize(sourceText, true, params.options);
1016
+ const aggressiveSummary = await runSummarizer(true);
1017
+ if (aggressiveSummary === null) {
1018
+ return null;
1019
+ }
1020
+ summaryText = aggressiveSummary;
982
1021
  level = "aggressive";
983
1022
 
984
1023
  if (estimateTokens(summaryText) >= inputTokens) {
@@ -986,7 +1025,8 @@ export class CompactionEngine {
986
1025
  sourceText.length > FALLBACK_MAX_CHARS
987
1026
  ? sourceText.slice(0, FALLBACK_MAX_CHARS)
988
1027
  : sourceText;
989
- summaryText = `${truncated}\n[Truncated from ${inputTokens} tokens]`;
1028
+ summaryText = `${truncated}\n[Truncated from ${inputTokens} tokens]`;
990
1030
  level = "fallback";
991
1031
  }
992
1032
  }
@@ -1004,7 +1044,7 @@ export class CompactionEngine {
1004
1044
  messageItems: ContextItemRecord[],
1005
1045
  summarize: CompactionSummarizeFn,
1006
1046
  previousSummaryContent?: string,
1007
- ): Promise<{ summaryId: string; level: CompactionLevel; content: string }> {
1047
+ ): Promise<{ summaryId: string; level: CompactionLevel; content: string } | null> {
1008
1048
  // Fetch full message content for each context item
1009
1049
  const messageContents: { messageId: number; content: string; createdAt: Date; tokenCount: number }[] =
1010
1050
  [];
@@ -1037,6 +1077,12 @@ export class CompactionEngine {
1037
1077
  isCondensed: false,
1038
1078
  },
1039
1079
  });
1080
+ if (!summary) {
1081
+ console.warn(
1082
+ `[lcm] leaf summarizer returned empty content; conversationId=${conversationId}; chunkMessages=${messageContents.length}; skipping leaf chunk`,
1083
+ );
1084
+ return null;
1085
+ }
1040
1086
 
1041
1087
  // Persist the leaf summary
1042
1088
  const summaryId = generateSummaryId(summary.content);
@@ -1095,7 +1141,7 @@ export class CompactionEngine {
1095
1141
  summaryItems: ContextItemRecord[],
1096
1142
  targetDepth: number,
1097
1143
  summarize: CompactionSummarizeFn,
1098
- ): Promise<PassResult> {
1144
+ ): Promise<PassResult | null> {
1099
1145
  // Fetch full summary records
1100
1146
  const summaryRecords: SummaryRecord[] = [];
1101
1147
  for (const item of summaryItems) {
@@ -1136,6 +1182,12 @@ export class CompactionEngine {
1136
1182
  depth: targetDepth + 1,
1137
1183
  },
1138
1184
  });
1185
+ if (!condensed) {
1186
+ console.warn(
1187
+ `[lcm] condensed summarizer returned empty content; conversationId=${conversationId}; depth=${targetDepth}; chunkSummaries=${summaryRecords.length}; skipping condensed chunk`,
1188
+ );
1189
+ return null;
1190
+ }
1139
1191
 
1140
1192
  // Persist the condensed summary
1141
1193
  const summaryId = generateSummaryId(condensed.content);
package/src/db/config.ts CHANGED
@@ -4,6 +4,12 @@ import { join } from "path";
4
4
  export type LcmConfig = {
5
5
  enabled: boolean;
6
6
  databasePath: string;
7
+ /** Glob patterns for session keys to exclude from LCM storage entirely. */
8
+ ignoreSessionPatterns: string[];
9
+ /** Glob patterns for session keys that may read from LCM but never write to it. */
10
+ statelessSessionPatterns: string[];
11
+ /** When true, stateless session pattern matching is enforced. */
12
+ skipStatelessSessions: boolean;
7
13
  contextThreshold: number;
8
14
  freshTailCount: number;
9
15
  leafMinFanout: number;
@@ -15,10 +21,22 @@ export type LcmConfig = {
15
21
  condensedTargetTokens: number;
16
22
  maxExpandTokens: number;
17
23
  largeFileTokenThreshold: number;
24
+ /** Provider override for compaction summarization. */
25
+ summaryProvider: string;
26
+ /** Model override for compaction summarization. */
27
+ summaryModel: string;
18
28
  /** Provider override for large-file text summarization. */
19
29
  largeFileSummaryProvider: string;
20
30
  /** Model override for large-file text summarization. */
21
31
  largeFileSummaryModel: string;
32
+ /** Provider override for lcm_expand_query sub-agent. */
33
+ expansionProvider: string;
34
+ /** Model override for lcm_expand_query sub-agent. */
35
+ expansionModel: string;
22
40
  autocompactDisabled: boolean;
23
41
  /** IANA timezone for timestamps in summaries (from TZ env or system default) */
24
42
  timezone: string;
@@ -53,6 +71,24 @@ function toStr(value: unknown): string | undefined {
53
71
  return undefined;
54
72
  }
55
73
 
74
+ /** Coerce a plugin config value into a trimmed string array when possible. */
75
+ function toStrArray(value: unknown): string[] | undefined {
76
+ if (Array.isArray(value)) {
77
+ const normalized = value
78
+ .map((entry) => toStr(entry))
79
+ .filter((entry): entry is string => typeof entry === "string");
80
+ return normalized.length > 0 ? normalized : [];
81
+ }
82
+ const single = toStr(value);
83
+ if (!single) {
84
+ return undefined;
85
+ }
86
+ return single
87
+ .split(",")
88
+ .map((entry) => entry.trim())
89
+ .filter(Boolean);
90
+ }
91
+
56
92
  /**
57
93
  * Resolve LCM configuration with three-tier precedence:
58
94
  * 1. Environment variables (highest — backward compat)
@@ -75,6 +111,24 @@ export function resolveLcmConfig(
75
111
  ?? toStr(pc.dbPath)
76
112
  ?? toStr(pc.databasePath)
77
113
  ?? join(homedir(), ".openclaw", "lcm.db"),
114
+ ignoreSessionPatterns:
115
+ env.LCM_IGNORE_SESSION_PATTERNS !== undefined
116
+ ? env.LCM_IGNORE_SESSION_PATTERNS
117
+ .split(",")
118
+ .map((entry) => entry.trim())
119
+ .filter(Boolean)
120
+ : toStrArray(pc.ignoreSessionPatterns) ?? [],
121
+ statelessSessionPatterns:
122
+ env.LCM_STATELESS_SESSION_PATTERNS !== undefined
123
+ ? env.LCM_STATELESS_SESSION_PATTERNS
124
+ .split(",")
125
+ .map((entry) => entry.trim())
126
+ .filter(Boolean)
127
+ : toStrArray(pc.statelessSessionPatterns) ?? [],
128
+ skipStatelessSessions:
129
+ env.LCM_SKIP_STATELESS_SESSIONS !== undefined
130
+ ? env.LCM_SKIP_STATELESS_SESSIONS === "true"
131
+ : toBool(pc.skipStatelessSessions) ?? true,
78
132
  contextThreshold:
79
133
  (env.LCM_CONTEXT_THRESHOLD !== undefined ? parseFloat(env.LCM_CONTEXT_THRESHOLD) : undefined)
80
134
  ?? toNumber(pc.contextThreshold) ?? 0.75,
@@ -110,10 +164,18 @@ export function resolveLcmConfig(
110
164
  ?? toNumber(pc.largeFileThresholdTokens)
111
165
  ?? toNumber(pc.largeFileTokenThreshold)
112
166
  ?? 25000,
167
+ summaryProvider:
168
+ env.LCM_SUMMARY_PROVIDER?.trim() ?? toStr(pc.summaryProvider) ?? "",
169
+ summaryModel:
170
+ env.LCM_SUMMARY_MODEL?.trim() ?? toStr(pc.summaryModel) ?? "",
113
171
  largeFileSummaryProvider:
114
172
  env.LCM_LARGE_FILE_SUMMARY_PROVIDER?.trim() ?? toStr(pc.largeFileSummaryProvider) ?? "",
115
173
  largeFileSummaryModel:
116
174
  env.LCM_LARGE_FILE_SUMMARY_MODEL?.trim() ?? toStr(pc.largeFileSummaryModel) ?? "",
175
+ expansionProvider:
176
+ env.LCM_EXPANSION_PROVIDER?.trim() ?? toStr(pc.expansionProvider) ?? "",
177
+ expansionModel:
178
+ env.LCM_EXPANSION_MODEL?.trim() ?? toStr(pc.expansionModel) ?? "",
117
179
  autocompactDisabled:
118
180
  env.LCM_AUTOCOMPACT_DISABLED !== undefined
119
181
  ? env.LCM_AUTOCOMPACT_DISABLED === "true"