@martian-engineering/lossless-claw 0.5.3 → 0.6.1

@@ -0,0 +1,55 @@
+ # Recall Tools
+
+ Use recall tools when the question depends on exact historical evidence from compacted context.
+
+ ## Tool selection
+
+ ### `lcm_grep`
+
+ Use for:
+
+ - finding whether a term, file name, error string, or identifier appears in compacted history
+ - narrowing the search space before deeper inspection
+
+ Do not use it for:
+
+ - answering detail-heavy questions by itself
+
+ ### `lcm_describe`
+
+ Use for:
+
+ - inspecting a specific summary or stored-file record by ID
+ - reading lineage and content for a known summary node
+
+ Do not use it for:
+
+ - broad discovery when you do not know the target ID yet
+
+ ### `lcm_expand_query`
+
+ Use for:
+
+ - focused questions that need richer detail recovered from summaries
+ - evidence-oriented follow-up after `lcm_grep` or `lcm_describe`
+
+ This is the best recall tool when the user asks for:
+
+ - exact commands
+ - exact file paths
+ - precise timestamps
+ - root-cause chains
+
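For reference, these are the two `lcm_expand_query` call patterns carried over from the prompt guidance that this release moves out of `assembler.ts` (see the diff below); `sum_xxx` is a placeholder ID, and `prompt` is always required:

```ts
// With known IDs: expand specific summaries to answer a focused question.
lcm_expand_query(summaryIds: ["sum_xxx"], prompt: "What config changes were discussed?")

// With search: locate matching summaries first, then expand them.
lcm_expand_query(query: "database migration", prompt: "What strategy was decided?")

// Optional parameters: maxTokens (default 2000), conversationId, allConversations: true
```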
+ ### `lcm_expand`
+
+ Treat as a specialized sub-agent flow, not the default first step.
+
+ ## Recommended workflow
+
+ 1. Start with `lcm_grep` to find likely evidence.
+ 2. Use `lcm_describe` when you have a summary or file ID.
+ 3. Use `lcm_expand_query` when the answer requires precise recovery rather than a high-level summary.
+
+ ## Important guardrail
+
+ Do not infer exact details from summaries alone when the user needs evidence. Expand first or state that the answer still needs expansion.
@@ -0,0 +1,59 @@
+ # Session lifecycle (`/new` and `/reset`)
+
+ This reference describes the current behavior on `main`.
+
+ ## Short version
+
+ For stock `lossless-claw` on current main:
+
+ - OpenClaw handles `/new` and `/reset` as session-reset operations.
+ - `lossless-claw` does **not** currently register its own `before_reset` hook or a custom reset policy.
+ - `lossless-claw` prefers **`sessionKey`** as the stable identity for an LCM conversation.
+ - When the same `sessionKey` reappears with a new `sessionId`, `lossless-claw` updates the stored `sessionId` on the existing LCM conversation row instead of creating a brand-new LCM conversation.
+
+ ## What that means in practice
+
+ If a user asks whether `/new` or `/reset` gives them a fresh LCM conversation, the answer is usually **no** under the current implementation.
+
+ They get a fresh OpenClaw session runtime, but LCM continuity still follows the stable `sessionKey` when one is available.
+
+ So today:
+
+ - `/new` and `/reset` can reset the runtime session
+ - but LCM history may continue in the same conversation row if the chat/thread keeps the same `sessionKey`
+
+ ## Why
+
+ Current lossless-claw conversation resolution does this:
+
+ 1. look up by `sessionKey` first
+ 2. fall back to `sessionId` only when no `sessionKey` match exists
+ 3. if the `sessionKey` already exists but the `sessionId` changed, update the stored `sessionId` on that same conversation
+
+ That behavior preserves continuity across session resets for the same chat identity.
+
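A minimal TypeScript sketch of that lookup order; the store interface and method names below are illustrative assumptions, not the package's actual API:

```ts
// Hypothetical store shape, for illustration only.
interface ConversationRow { id: number; sessionKey?: string; sessionId: string }
interface ConversationStore {
  findBySessionKey(key: string): Promise<ConversationRow | undefined>;
  findBySessionId(id: string): Promise<ConversationRow | undefined>;
  updateSessionId(rowId: number, sessionId: string): Promise<void>;
  create(row: Omit<ConversationRow, "id">): Promise<ConversationRow>;
}

async function resolveConversation(
  store: ConversationStore,
  sessionKey: string | undefined,
  sessionId: string,
): Promise<ConversationRow> {
  // 1. Stable chat identity wins: look up by sessionKey first.
  if (sessionKey) {
    const row = await store.findBySessionKey(sessionKey);
    if (row) {
      // 3. Same sessionKey, new sessionId (e.g. after /new or /reset):
      //    rotate the stored sessionId on the existing row.
      if (row.sessionId !== sessionId) await store.updateSessionId(row.id, sessionId);
      return row;
    }
  }
  // 2. No sessionKey match: fall back to the runtime sessionId.
  return (await store.findBySessionId(sessionId)) ?? store.create({ sessionKey, sessionId });
}
```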
+ ## Important limitation
+
+ There is currently **no plugin-specific `/new` vs `/reset` split** in stock lossless-claw docs or runtime behavior.
+
+ If someone is asking for semantics like:
+
+ - `/new` keeps LCM history but rotates transcript
+ - `/reset` archives old LCM conversation and starts a new one
+
+ that is a **design/spec topic**, not current stock behavior.
+
+ ## Safe operator guidance
+
+ When answering users:
+
+ - do not promise that `/new` or `/reset` clears LCM history
+ - explain that current stock behavior follows `sessionKey` continuity
+ - if they need a truly separate LCM history, use a different session key context (for example a different chat/thread/binding) or explicit non-MVP migration/surgery tools
+
+ ## Relation to `/status`
+
+ This session behavior is separate from `/status` metrics.
+
+ - `/status` reflects runtime session state and the last assembled request snapshot
+ - `/lossless` reflects LCM conversation state keyed by the plugin's conversation mapping rules
package/src/assembler.ts CHANGED
@@ -25,6 +25,8 @@ export interface AssembleContextInput {
  tokenBudget: number;
  /** Number of most recent raw turns to always include (default: 8) */
  freshTailCount?: number;
+ /** Optional user query for relevance-based eviction scoring (BM25-lite). When absent or unsearchable, falls back to chronological eviction. */
+ prompt?: string;
  }

  export interface AssembleContextResult {
@@ -52,10 +54,11 @@ function estimateTokens(text: string): number {
  type SummaryPromptSignal = Pick<SummaryRecord, "kind" | "depth" | "descendantCount">;

  /**
- * Build LCM usage guidance for the runtime system prompt.
+ * Build dynamic prompt guidance for compacted session context.
  *
  * Guidance is emitted only when summaries are present in assembled context.
- * Depth-aware: minimal for shallow compaction, full guidance for deep trees.
+ * Static recall policy lives in the plugin prompt hook so this addition
+ * remains session-specific and reflects only the current compaction state.
  */
  function buildSystemPromptAddition(summarySignals: SummaryPromptSignal[]): string | undefined {
  if (summarySignals.length === 0) {
@@ -68,36 +71,24 @@ function buildSystemPromptAddition(summarySignals: SummaryPromptSignal[]): strin

  const sections: string[] = [];

- // Core recall workflow — always present when summaries exist
+ // Dynamic compaction reminder — always present when summaries exist.
  sections.push(
- "## LCM Recall",
+ "## Compacted Conversation Context",
  "",
- "Summaries above are compressed context — maps to details, not the details themselves.",
+ "Summaries above are compressed context, not full detail.",
  "",
- "**Recall priority:** Use LCM tools first for compacted conversation history. If LCM does not cover the needed data, prefer any available memory/recall tool before falling back to raw text search.",
+ "Treat summaries as compressed recall cues rather than proof of exact wording or exact values.",
  "",
- "**Conflict handling:** If newer evidence conflicts with an older summary or recollection, prefer the newer evidence. Do not trust a stale summary over fresher contradictory information.",
- "",
- "**Contradictions/uncertainty:** If facts seem contradictory or uncertain, verify with LCM tools before answering instead of trusting the summary at face value.",
- "",
- "**Tool escalation:**",
- "1. `lcm_grep` — search by regex or full-text across messages and summaries",
- "2. `lcm_describe` — inspect a specific summary (cheap, no sub-agent)",
- "3. `lcm_expand_query` — deep recall: spawns bounded sub-agent, expands DAG, returns answer with cited summary IDs (~120s, don't ration it)",
- "",
- "**`lcm_expand_query` usage** — two patterns (always requires `prompt`):",
- "- With IDs: `lcm_expand_query(summaryIds: [\"sum_xxx\"], prompt: \"What config changes were discussed?\")`",
- "- With search: `lcm_expand_query(query: \"database migration\", prompt: \"What strategy was decided?\")`",
- "- Optional: `maxTokens` (default 2000), `conversationId`, `allConversations: true`",
- "",
- "**Summaries include \"Expand for details about:\" footers** listing compressed specifics. Use `lcm_expand_query` with that summary's ID to retrieve them.",
+ "If a summary includes an \"Expand for details about:\" footer, use it as a cue to expand before asserting specifics.",
  );

- // Precision/evidence rules — always present but stronger when heavily compacted
+ // Precision/evidence rules — always present but stronger when heavily compacted.
  if (heavilyCompacted) {
  sections.push(
  "",
- "**\u26a0 Deeply compacted context expand before asserting specifics.**",
+ "**Deeply compacted context: expand before asserting specifics.**",
+ "",
+ "Before answering with exact commands, SHAs, paths, timestamps, config values, or causal chains, expand for the missing detail.",
  "",
  "Default recall flow for precision work:",
  "1) `lcm_grep` to locate relevant summary/message IDs",
@@ -105,20 +96,20 @@ function buildSystemPromptAddition(summarySignals: SummaryPromptSignal[]): strin
  "3) Answer with citations to summary IDs used",
  "",
  "**Uncertainty checklist (run before answering):**",
- "- Am I relying on an older summary even though newer evidence disagrees?",
- "- Am I making exact factual claims from a condensed summary?",
+ "- Am I making an exact factual claim from a compressed or condensed summary?",
  "- Could compaction have omitted a crucial detail?",
- "- Would this answer fail if the user asks for proof?",
+ "- Would I need an expansion step if the user asks for proof or the exact text?",
+ "- Should I state uncertainty instead of asserting specifics until I expand?",
  "",
- "If yes to any \u2192 expand first.",
+ "If yes to any item, expand first or explicitly say that you need to expand.",
  "",
- "**Do not guess** exact commands, SHAs, file paths, timestamps, config values, or causal claims from condensed summaries. Expand first or state that you need to expand.",
+ "Do not guess exact commands, SHAs, file paths, timestamps, config values, or causal claims from condensed summaries. Expand first or explicitly say that you need to expand.",
  );
  } else {
  sections.push(
  "",
- "**For precision/evidence questions** (exact commands, SHAs, paths, timestamps, config values, root-cause chains): expand before answering.",
- "Do not guess from condensed summaries — expand first or state uncertainty.",
+ "For exact commands, SHAs, paths, timestamps, config values, or causal chains, expand for details before answering.",
+ "State uncertainty instead of guessing from compressed summaries.",
  );
  }

@@ -281,6 +272,20 @@ export function toolResultBlockFromPart(
  rawType?: string,
  raw?: Record<string, unknown>,
  ): unknown {
+ if (
+ raw &&
+ typeof raw.text === "string" &&
+ raw.output === undefined &&
+ raw.content === undefined &&
+ (part.toolOutput == null || part.toolOutput === "") &&
+ (part.textContent == null || part.textContent === raw.text)
+ ) {
+ return {
+ type: "text",
+ text: raw.text,
+ };
+ }
+
  const type =
  rawType === "function_call_output" || rawType === "toolResult" || rawType === "tool_result"
  ? rawType
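To make the new early return concrete, here is a hypothetical raw record it now passes through as plain text (illustrative values, not from the package's tests):

```ts
// Text-only raw record: has `text`, no `output` or `content` fields.
const raw = { text: "retry with --force" };
// toolResultBlockFromPart(part, rawType, raw) now returns:
// { type: "text", text: "retry with --force" }
```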
@@ -468,7 +473,8 @@ export function blockFromPart(part: MessagePartRecord): unknown {
  return { type: "text", text: "" };
  }

- function contentFromParts(
+ /** @internal Exported for transcript-maintenance reconstruction. */
+ export function contentFromParts(
  parts: MessagePartRecord[],
  role: "user" | "assistant" | "toolResult",
  fallbackContent: string,
@@ -497,7 +503,8 @@ function contentFromParts(
  return blocks;
  }

- function pickToolCallId(parts: MessagePartRecord[]): string | undefined {
+ /** @internal Exported for transcript-maintenance reconstruction. */
+ export function pickToolCallId(parts: MessagePartRecord[]): string | undefined {
  for (const part of parts) {
  if (typeof part.toolCallId === "string" && part.toolCallId.length > 0) {
  return part.toolCallId;
@@ -526,7 +533,8 @@ function pickToolCallId(parts: MessagePartRecord[]): string | undefined {
  return undefined;
  }

- function pickToolName(parts: MessagePartRecord[]): string | undefined {
+ /** @internal Exported for transcript-maintenance reconstruction. */
+ export function pickToolName(parts: MessagePartRecord[]): string | undefined {
  for (const part of parts) {
  if (typeof part.toolName === "string" && part.toolName.length > 0) {
  return part.toolName;
@@ -555,7 +563,8 @@ function pickToolName(parts: MessagePartRecord[]): string | undefined {
  return undefined;
  }

- function pickToolIsError(parts: MessagePartRecord[]): boolean | undefined {
+ /** @internal Exported for transcript-maintenance reconstruction. */
+ export function pickToolIsError(parts: MessagePartRecord[]): boolean | undefined {
  for (const part of parts) {
  const decoded = parseJson(part.metadata);
  if (!decoded || typeof decoded !== "object") {
@@ -814,10 +823,60 @@ interface ResolvedItem {
  tokens: number;
  /** Whether this came from a raw message (vs. a summary) */
  isMessage: boolean;
+ /** Pre-extracted plain text used for relevance scoring */
+ text: string;
  /** Summary metadata used for dynamic system prompt guidance */
  summarySignal?: SummaryPromptSignal;
  }

+ // ── BM25-lite relevance scorer ────────────────────────────────────────────────
+
+ /** @internal Exported for testing only. Tokenize text into lowercase alphanumeric terms. */
+ export function tokenizeText(text: string): string[] {
+ return text
+ .toLowerCase()
+ .split(/[^a-z0-9]+/)
+ .filter((t) => t.length > 1);
+ }
+
+ /**
+ * @internal Exported for testing only.
+ * Score an item's text against a prompt using BM25-lite (term-frequency overlap).
+ * Higher scores indicate stronger keyword overlap. Returns 0 when either input is empty.
+ */
+ export function scoreRelevance(itemText: string, prompt: string): number {
+ const promptTerms = tokenizeText(prompt);
+ if (promptTerms.length === 0) return 0;
+
+ const itemTerms = tokenizeText(itemText);
+ if (itemTerms.length === 0) return 0;
+
+ // Build term-frequency map for the item
+ const freq = new Map<string, number>();
+ for (const term of itemTerms) {
+ freq.set(term, (freq.get(term) ?? 0) + 1);
+ }
+
+ // Sum TF contribution for each unique prompt term
+ const seen = new Set<string>();
+ let score = 0;
+ for (const term of promptTerms) {
+ if (seen.has(term)) continue;
+ seen.add(term);
+ const tf = freq.get(term) ?? 0;
+ if (tf > 0) {
+ // Normalised TF: tf / itemTerms.length (BM25 saturation skipped for simplicity)
+ score += tf / itemTerms.length;
+ }
+ }
+ return score;
+ }
+
+ /** Return true when a prompt contains at least one searchable term. */
+ function hasSearchablePrompt(prompt?: string): prompt is string {
+ return typeof prompt === "string" && tokenizeText(prompt).length > 0;
+ }
+
  // ── ContextAssembler ─────────────────────────────────────────────────────────

  export class ContextAssembler {
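A quick worked example of the scorer above (the strings are illustrative, not from the package's test suite):

```ts
// tokenizeText("database migration strategy was decided")
//   -> ["database", "migration", "strategy", "was", "decided"]   (5 terms)
// The prompt "database migration" matches two of those terms, each with tf = 1:
scoreRelevance("database migration strategy was decided", "database migration");
// => 1/5 + 1/5 = 0.4, while unrelated text scores 0 and is evicted first
```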
@@ -910,8 +969,32 @@ export class ContextAssembler {
  // Everything fits
  selected.push(...evictable);
  evictableTokens = evictableTotalTokens;
+ } else if (hasSearchablePrompt(input.prompt)) {
+ // Prompt-aware eviction: score each evictable item by relevance to the
+ // prompt, then greedily fill budget from highest-scoring items down.
+ // Re-sort selected items by ordinal to restore chronological order.
+ const scored = evictable.map((item, idx) => ({
+ item,
+ score: scoreRelevance(item.text, input.prompt),
+ idx, // original index — higher = more recent, used as tiebreaker
+ }));
+ // Sort: highest relevance first; most recent (higher idx) breaks ties
+ scored.sort((a, b) => b.score - a.score || b.idx - a.idx);
+
+ const kept: ResolvedItem[] = [];
+ let accum = 0;
+ for (const { item } of scored) {
+ if (accum + item.tokens <= remainingBudget) {
+ kept.push(item);
+ accum += item.tokens;
+ }
+ }
+ // Restore chronological order by ordinal before appending freshTail
+ kept.sort((a, b) => a.ordinal - b.ordinal);
+ selected.push(...kept);
+ evictableTokens = accum;
  } else {
- // Need to drop oldest items until we fit.
+ // Chronological eviction (default): drop oldest items until we fit.
  // Walk from the END of evictable (newest first) accumulating tokens,
  // then reverse to restore chronological order.
  const kept: ResolvedItem[] = [];
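A sketch of how a caller opts into prompt-aware eviction; the method name and call site are hypothetical, but the fields match `AssembleContextInput` as extended in this diff:

```ts
// Hypothetical call site (other required AssembleContextInput fields omitted).
const result = await assembler.assembleContext({
  tokenBudget: 32_000,
  freshTailCount: 8,
  prompt: latestUserMessage, // enables BM25-lite eviction; omit to keep chronological order
});
```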
@@ -949,8 +1032,19 @@
  }
  }

+ // Filter out assistant messages with empty content — these can occur when
+ // tool-use-only turns are stored with content="" and zero message_parts,
+ // or when filterNonFreshAssistantToolCalls strips all tool_use blocks.
+ // Anthropic (and other providers) reject empty content arrays/strings.
+ const cleaned = rawMessages.filter(
+ (m) =>
+ !(
+ m?.role === "assistant" &&
+ (Array.isArray(m.content) ? m.content.length === 0 : !m.content)
+ ),
+ );
  return {
- messages: sanitizeToolUseResultPairing(rawMessages) as AgentMessage[],
+ messages: sanitizeToolUseResultPairing(cleaned) as AgentMessage[],
  estimatedTokens,
  systemPromptAddition,
  stats: {
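For reference, the assistant-message shapes the new filter drops (illustrative literals):

```ts
// Both are removed before sanitizeToolUseResultPairing runs:
const dropped = [
  { role: "assistant", content: [] }, // empty block array
  { role: "assistant", content: "" }, // empty string content
];
// Assistant messages with non-empty string content or at least one block pass through.
```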
@@ -1056,6 +1150,7 @@
  } as AgentMessage),
  tokens: tokenCount,
  isMessage: true,
+ text: contentText,
  };
  }

@@ -1078,6 +1173,7 @@
  message: { role: "user" as const, content } as AgentMessage,
  tokens,
  isMessage: false,
+ text: summary.content,
  summarySignal: {
  kind: summary.kind,
  depth: summary.depth,
package/src/compaction.ts CHANGED
@@ -25,6 +25,8 @@ export interface CompactionResult {
  condensed: boolean;
  /** Escalation level used: "normal" | "aggressive" | "fallback" */
  level?: CompactionLevel;
+ /** Whether compaction was blocked by a provider auth failure */
+ authFailure?: boolean;
  }

  export interface CompactionConfig {
@@ -465,6 +467,7 @@
  tokensBefore,
  tokensAfter: tokensBefore,
  condensed: false,
+ authFailure: true,
  };
  }
  const tokensAfterLeaf = await this.summaryStore.getContextTokenCount(conversationId);
@@ -581,6 +584,7 @@
  let level: CompactionLevel | undefined;
  let previousSummaryContent: string | undefined;
  let previousTokens = tokensBefore;
+ let hadAuthFailure = false;

  // Phase 1: leaf passes over oldest raw chunks outside the protected tail.
  while (true) {
@@ -598,6 +602,7 @@
  input.summaryModel,
  );
  if (!leafResult) {
+ hadAuthFailure = true;
  break;
  }
  const passTokensAfter = await this.summaryStore.getContextTokenCount(conversationId);
@@ -644,6 +649,7 @@
  input.summaryModel,
  );
  if (!condenseResult) {
+ hadAuthFailure = true;
  break;
  }
  const passTokensAfter = await this.summaryStore.getContextTokenCount(conversationId);
@@ -680,6 +686,7 @@
  createdSummaryId,
  condensed,
  level,
+ ...(hadAuthFailure ? { authFailure: true } : {}),
  };
  }

@@ -693,7 +700,7 @@
  currentTokens?: number;
  summarize: CompactionSummarizeFn;
  summaryModel?: string;
- }): Promise<{ success: boolean; rounds: number; finalTokens: number }> {
+ }): Promise<{ success: boolean; rounds: number; finalTokens: number; authFailure?: boolean }> {
  const { conversationId, tokenBudget, summarize } = input;
  const targetTokens =
  typeof input.targetTokens === "number" &&
@@ -727,6 +734,15 @@
  summaryModel: input.summaryModel,
  });

+ if (result.authFailure) {
+ return {
+ success: false,
+ rounds: round,
+ finalTokens: result.tokensAfter,
+ authFailure: true,
+ };
+ }
+
  if (result.tokensAfter <= targetTokens) {
  return {
  success: true,
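A sketch of how a caller can consume the new flag; the engine method name here is hypothetical, but the result shape matches the return type declared above:

```ts
// Stop retrying on credential problems instead of burning more compaction rounds.
const outcome = await engine.compactToBudget({ conversationId, tokenBudget, summarize });
if (outcome.authFailure) {
  // Provider auth failed mid-compaction: surface the error; more rounds will not help.
}
```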
@@ -1542,12 +1558,7 @@
  return { summaryId, level: condensed.level };
  }

- /**
- * Persist durable compaction events into canonical history as message parts.
- *
- * Event persistence is best-effort: failures are swallowed to avoid
- * compromising the core compaction path.
- */
+ /** Emit compaction telemetry without mutating canonical conversation history. */
  private async persistCompactionEvents(input: {
  conversationId: number;
  tokensBefore: number;
@@ -1608,7 +1619,7 @@
  }
  }

- /** Write one compaction event message + part atomically where possible. */
+ /** Log one compaction event without appending a synthetic chat message. */
  private async persistCompactionEvent(input: {
  conversationId: number;
  sessionId: string;
@@ -1621,43 +1632,8 @@
  condensedPassOccurred: boolean;
  }): Promise<void> {
  const content = `LCM compaction ${input.pass} pass (${input.level}): ${input.tokensBefore} -> ${input.tokensAfter}`;
- const metadata = JSON.stringify({
- conversationId: input.conversationId,
- pass: input.pass,
- level: input.level,
- tokensBefore: input.tokensBefore,
- tokensAfter: input.tokensAfter,
- createdSummaryId: input.createdSummaryId,
- createdSummaryIds: input.createdSummaryIds,
- condensedPassOccurred: input.condensedPassOccurred,
- });
-
- const writeEvent = async (): Promise<void> => {
- const seq = (await this.conversationStore.getMaxSeq(input.conversationId)) + 1;
- const eventMessage = await this.conversationStore.createMessage({
- conversationId: input.conversationId,
- seq,
- role: "system",
- content,
- tokenCount: estimateTokens(content),
- });
-
- const parts: CreateMessagePartInput[] = [
- {
- sessionId: input.sessionId,
- partType: "compaction",
- ordinal: 0,
- textContent: content,
- metadata,
- },
- ];
- await this.conversationStore.createMessageParts(eventMessage.messageId, parts);
- };
-
- try {
- await this.conversationStore.withTransaction(() => writeEvent());
- } catch {
- // Compaction should still succeed if event persistence fails.
- }
+ console.info(
+ `[lcm] ${content} conversation=${input.conversationId} summary=${input.createdSummaryId}`,
+ );
  }
  }