@xdarkicex/openclaw-memory-libravdb 1.3.18 → 1.3.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -131,7 +131,7 @@ If your daemon runs elsewhere, set an explicit `sidecarPath`, for example:
131
131
 
132
132
  ```text
133
133
  OpenClaw host
134
- -> memoryPromptSection (durable user/global recall)
134
+ -> memoryPromptSection (static capability header)
135
135
  -> memory runtime bridge (built-in memory_search)
136
136
  -> context engine (bootstrap / ingest / assemble / compact)
137
137
  -> plugin runtime
@@ -10,7 +10,7 @@ repository as of the current `main` branch.
10
10
  flowchart LR
11
11
  Host["OpenClaw host process\n(TypeScript plugin shell)"]
12
12
  CE["Context engine factory\nbootstrap / ingest / assemble / compact"]
13
- MPS["memoryPromptSection\nuser+global recall"]
13
+ MPS["memoryPromptSection\nstatic header"]
14
14
  Runtime["Plugin runtime\nlazy daemon connect + RPC client"]
15
15
  Sidecar["Go daemon process"]
16
16
  RPC["JSON-RPC over newline-delimited frames\nUnix socket or TCP loopback on Windows"]
@@ -28,7 +28,6 @@ flowchart LR
28
28
  Host --> CE
29
29
  Host --> MPS
30
30
  CE --> Runtime
31
- MPS --> Runtime
32
31
  Runtime --> RPC
33
32
  RPC --> Sidecar
34
33
  Sidecar --> Embed
@@ -80,17 +79,12 @@ Important constraints from the current implementation:
80
79
 
81
80
  Implemented in [`src/memory-provider.ts`](../src/memory-provider.ts).
82
81
 
83
- Before the main assembly path runs, the plugin builds a lightweight recall
84
- section:
82
+ Before the main assembly path runs, the plugin returns a lightweight static
83
+ header fragment informing the host that persistent memory is active.
85
84
 
86
- 1. search `user:<userId>`
87
- 2. search `global`
88
- 3. hybrid-rank the combined hits
89
- 4. fit them to a fixed prompt budget of `800` estimated tokens
90
- 5. return a textual header fragment for the host prompt
91
-
92
- This path does not search session memory. Its job is durable context recall, not
93
- active-turn recall.
85
+ This path is intentionally synchronous and does not perform RPC retrieval.
86
+ Durable recall now happens entirely inside `assemble`, which keeps embedded
87
+ prompt construction compatible with OpenClaw's synchronous memory prompt hook.
94
88
 
95
89
  ### 2.3 `assemble`
96
90
 
@@ -107,7 +101,7 @@ For the current query text (last message content), the host:
107
101
 
108
102
  Current implementation details that matter:
109
103
 
110
- - user/global hits may be reused from the earlier prompt-section cache
104
+ - user/global hits are cached within `assemble` and reused on repeated queries
111
105
  - `assemble` falls back to the unmodified message list on RPC failure
112
106
  - `assemble` does not mutate the original `messages` array in place; it returns
113
107
  a new array
@@ -146,7 +140,7 @@ from the original spec phrasing.
146
140
  |---|---|---|
147
141
  | Daemon unavailable on first RPC use | `getRpc()` rejects when first connect or health check fails | That hook fails or falls back, but plugin registration itself does not crash eagerly |
148
142
  | Daemon connection closes mid-session | `SidecarSupervisor` retries with exponential backoff until retry budget is exhausted, then enters degraded mode | Memory becomes unavailable until the daemon is reachable again |
149
- | `memoryPromptSection` RPC failure | individual searches are caught and replaced with empty result sets | Prompt section becomes empty rather than crashing the run |
143
+ | `memoryPromptSection` failure | returns a static header with no RPC dependency | Prompt section stays available and does not block the run |
150
144
  | `assemble` RPC failure | returns original messages, original token count, and empty `systemPromptAddition` | That turn gets no recall augmentation |
151
145
  | `ingest` gating or durable insert failure | session write already happened; durable promotion is skipped | Session memory survives, durable memory may miss that turn |
152
146
  | Compaction summarizer unavailable | extractive summarizer remains required; optional abstractive path is skipped | Compaction still runs extractively when extractive is healthy |
@@ -18,8 +18,8 @@ Why:
18
18
  - `ingest`
19
19
  - `assemble`
20
20
  - `compact`
21
- - the lightweight memory prompt section remains useful as a separate early
22
- durable-recall pass
21
+ - the lightweight memory prompt section remains useful as a synchronous
22
+ capability/header hook while durable recall stays in `assemble`
23
23
 
24
24
  This is why the code registers both `registerContextEngine("libravdb-memory", …)`
25
25
  and `registerMemoryPromptSection(...)` instead of relying on only one hook.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@xdarkicex/openclaw-memory-libravdb",
3
- "version": "1.3.18",
3
+ "version": "1.3.20",
4
4
  "type": "module",
5
5
  "publishConfig": {
6
6
  "access": "public"
@@ -26,18 +26,18 @@ class Libravdbd < Formula
26
26
  if OS.mac?
27
27
  resource "onnxruntime" do
28
28
  url "https://github.com/microsoft/onnxruntime/releases/download/v1.23.0/onnxruntime-osx-universal2-1.23.0.tgz"
29
- sha256 :no_check
29
+ sha256 "5e4365fb4a05aef353f6232b9a1848f37e608c421c9227e9224572205c0cfc08"
30
30
  end
31
31
  elsif OS.linux?
32
32
  if Hardware::CPU.arm?
33
33
  resource "onnxruntime" do
34
34
  url "https://github.com/microsoft/onnxruntime/releases/download/v1.23.0/onnxruntime-linux-aarch64-1.23.0.tgz"
35
- sha256 :no_check # TODO: pin real checksum when Linux ARM64 CI is available
35
+ sha256 "0b9f47d140411d938e47915824d8daaa424df95a88b5f1fc843172a75168f7a0"
36
36
  end
37
37
  else
38
38
  resource "onnxruntime" do
39
39
  url "https://github.com/microsoft/onnxruntime/releases/download/v1.23.0/onnxruntime-linux-x64-1.23.0.tgz"
40
- sha256 :no_check # TODO: pin real checksum when Linux AMD64 CI is available
40
+ sha256 "b6deea7f2e22c10c043019f294a0ea4d2a6c0ae52a009c34847640db75ec5580"
41
41
  end
42
42
  end
43
43
  end
@@ -7,6 +7,7 @@ import {
7
7
  import {
8
8
  detectRetrievalFailure,
9
9
  expandSection7HopCandidates,
10
+ rankRawUserRecoveryCandidates,
10
11
  mergeSection7VariantCandidates,
11
12
  rankSection7VariantCandidates,
12
13
  } from "./scoring.js";
@@ -179,6 +180,7 @@ export function buildContextEngineFactory(
179
180
  },
180
181
  async assemble({ sessionId, userId, messages, tokenBudget }: ContextAssembleArgs) {
181
182
  const PROFILE = process.env.OPENCLAW_PROFILE_ASSEMBLE === "1";
183
+ const DEBUG_RECOVERY = process.env.LONGMEMEVAL_DEBUG_RANKING === "1";
182
184
 
183
185
  const queryText = messages.at(-1)?.content ?? "";
184
186
  if (!queryText) {
@@ -256,6 +258,7 @@ export function buildContextEngineFactory(
256
258
  messages,
257
259
  tokenBudget,
258
260
  profiler,
261
+ debugRecovery: DEBUG_RECOVERY,
259
262
  });
260
263
 
261
264
  const profileLines = profiler?.lines() ?? [];
@@ -289,6 +292,7 @@ export function buildContextEngineFactory(
289
292
  messages,
290
293
  tokenBudget,
291
294
  profiler,
295
+ debugRecovery,
292
296
  }: {
293
297
  rpc: Awaited<ReturnType<RpcGetter>>;
294
298
  cfg: PluginConfig;
@@ -304,6 +308,7 @@ export function buildContextEngineFactory(
304
308
  messages: Array<{ role: string; content: string }>;
305
309
  tokenBudget: number;
306
310
  profiler: { mark(label: string): void; emit(): void } | null;
311
+ debugRecovery: boolean;
307
312
  }): Promise<ContextAssembleResult> {
308
313
  const memoryBudget = tokenBudget * (cfg.tokenBudgetFraction ?? 0.25);
309
314
  const hardItems = authoredHard;
@@ -517,7 +522,10 @@ export function buildContextEngineFactory(
517
522
  minTopK: cfg.recoveryMinTopK ?? 4,
518
523
  meanConfidenceThresh: cfg.recoveryMinConfidenceMean ?? 0.5,
519
524
  });
520
- const recoveryReserveTokens = recoveryTrigger.fire
525
+ const crossSessionRawRecovery =
526
+ rawSessionTurns.length === 0 &&
527
+ sessionHits.results.length === 0;
528
+ const recoveryReserveTokens = (recoveryTrigger.fire || crossSessionRawRecovery)
521
529
  ? Math.min(memoryBudget, Math.max(Math.floor(memoryBudget * 0.10), 16), 128)
522
530
  : 0;
523
531
  const elevatedGuidanceBudget = Math.max(
@@ -553,26 +561,83 @@ export function buildContextEngineFactory(
553
561
  // Recovery is a policy overlay — it appends raw content only when triggered,
554
562
  // it never modifies the C_total(q) output and does not spend from tau_V.
555
563
  let recoveryItems: SearchResult[] = [];
556
- if (recoveryTrigger.fire) {
564
+ let rawUserRecoveryDebug: NonNullable<NonNullable<ContextAssembleResult["_debug"]>["rawUserRecoveryCandidates"]> = [];
565
+ if (recoveryTrigger.fire || crossSessionRawRecovery) {
557
566
  profiler?.mark("recovery_expand");
558
- // Recovery searches immutable raw history directly — never the active view, elevated shards,
559
- // or authored collections. Raw turns are immutable (storage axiom, unchanged).
560
567
  const recoveryExcludeIDs = [...excluded, ...recentTailIDs, ...theoremSelectedIDs];
561
- const rawResults = await rpc.call<{ results: SearchResult[] }>("query_raw_session", {
562
- sessionId,
563
- text: queryText,
564
- k: Math.max(cfg.topK ?? 8, 4),
565
- excludeIds: recoveryExcludeIDs,
566
- });
567
- // Fit recovered raw items to the reserved recovery budget — never exceed it.
568
- const fittedRecovery = fitPromptBudget(rawResults.results ?? [], recoveryReserveTokens);
569
- recoveryItems = fittedRecovery.map((item: SearchResult) => ({
570
- ...item,
571
- metadata: {
572
- ...item.metadata,
573
- recovery_fallback: true,
574
- },
575
- }));
568
+ const recoveryCandidates: SearchResult[] = [];
569
+
570
+ if (recoveryTrigger.fire) {
571
+ // Recovery searches immutable raw session history directly — never the active view,
572
+ // elevated shards, or authored collections.
573
+ const rawResults = await rpc.call<{ results: SearchResult[] }>("query_raw_session", {
574
+ sessionId,
575
+ text: queryText,
576
+ k: Math.max(cfg.topK ?? 8, 4),
577
+ excludeIds: recoveryExcludeIDs,
578
+ });
579
+ recoveryCandidates.push(
580
+ ...(rawResults.results ?? []).map((item) => ({
581
+ ...item,
582
+ finalScore: typeof item.finalScore === "number" ? item.finalScore : item.score,
583
+ metadata: {
584
+ ...item.metadata,
585
+ recovery_fallback: true,
586
+ recovery_scope: "session_raw",
587
+ },
588
+ })),
589
+ );
590
+ }
591
+
592
+ if (crossSessionRawRecovery) {
593
+ // When a fresh query session has no searchable history yet, durable memory can be too
594
+ // coarse for exact-turn recall. Search the immutable per-user raw turn index instead of
595
+ // widening topK so precise historical turns still have a bounded path back into context.
596
+ const rawUserResults = await rpc.call<{ results: SearchResult[] }>("search_text", {
597
+ collection: `turns:${userId}`,
598
+ text: queryText,
599
+ k: Math.max((cfg.topK ?? 8) * 4, 8),
600
+ excludeIds: recoveryExcludeIDs,
601
+ });
602
+ const reranked = rankRawUserRecoveryCandidates(
603
+ annotateCollection(rawUserResults.results ?? [], `turns:${userId}`),
604
+ { queryText },
605
+ );
606
+ if (debugRecovery) {
607
+ rawUserRecoveryDebug = reranked.debug.slice(0, 8).map((item) => ({
608
+ ...item,
609
+ selected: false,
610
+ }));
611
+ }
612
+ recoveryCandidates.push(
613
+ ...reranked.ranked.map((item) => ({
614
+ ...item,
615
+ finalScore: typeof item.finalScore === "number" ? item.finalScore : item.score,
616
+ metadata: {
617
+ ...item.metadata,
618
+ recovery_fallback: true,
619
+ recovery_scope: "user_turns",
620
+ },
621
+ })),
622
+ );
623
+ }
624
+
625
+ const fittedRecovery = fitPromptBudget(
626
+ dedupeRecoveryCandidates(recoveryCandidates),
627
+ recoveryReserveTokens,
628
+ );
629
+ recoveryItems = fittedRecovery;
630
+ if (debugRecovery && rawUserRecoveryDebug.length > 0) {
631
+ const selectedIDs = new Set(
632
+ fittedRecovery
633
+ .filter((item) => item.metadata.recovery_scope === "user_turns")
634
+ .map((item: SearchResult) => item.id),
635
+ );
636
+ rawUserRecoveryDebug = rawUserRecoveryDebug.map((item) => ({
637
+ ...item,
638
+ selected: selectedIDs.has(item.id),
639
+ }));
640
+ }
576
641
  }
577
642
 
578
643
  const selected = [
@@ -598,6 +663,13 @@ export function buildContextEngineFactory(
598
663
  messages: [...selectedMessages, ...messages],
599
664
  estimatedTokens: countTokens(selectedMessages) + countTokens(messages),
600
665
  systemPromptAddition: buildMemoryHeader(selected),
666
+ _debug: debugRecovery
667
+ ? {
668
+ recoveryTriggerFired: recoveryTrigger.fire,
669
+ crossSessionRawRecovery,
670
+ rawUserRecoveryCandidates: rawUserRecoveryDebug,
671
+ }
672
+ : undefined,
601
673
  };
602
674
  },
603
675
  async compact({ sessionId, force, targetSize }: ContextCompactArgs) {
@@ -836,6 +908,19 @@ function groupAccessCountUpdates(items: SearchResult[]): Array<{ collection: str
836
908
  return [...grouped.entries()].map(([collection, ids]) => ({ collection, ids }));
837
909
  }
838
910
 
911
+ function dedupeRecoveryCandidates(items: SearchResult[]): SearchResult[] {
912
+ const byKey = new Map<string, SearchResult>();
913
+ for (const item of items) {
914
+ const collection = typeof item.metadata.collection === "string" ? item.metadata.collection : "";
915
+ const key = `${collection}::${item.id}`;
916
+ const existing = byKey.get(key);
917
+ if (!existing || (item.finalScore ?? item.score) > (existing.finalScore ?? existing.score)) {
918
+ byKey.set(key, item);
919
+ }
920
+ }
921
+ return [...byKey.values()].sort((left, right) => (right.finalScore ?? right.score) - (left.finalScore ?? left.score));
922
+ }
923
+
839
924
  function clampFraction(value: number | undefined): number {
840
925
  if (typeof value !== "number" || !Number.isFinite(value)) {
841
926
  return 0;
@@ -1,87 +1,25 @@
1
+ import type { MemoryPromptSectionBuilder } from "openclaw/plugin-sdk/plugin-entry";
1
2
  import type { PluginConfig, RecallCache, SearchResult } from "./types.js";
2
3
  import type { RpcGetter } from "./plugin-runtime.js";
3
- import { scoreCandidates } from "./scoring.js";
4
- import { fitPromptBudget } from "./tokens.js";
5
- import { buildMemoryHeader } from "./recall-utils.js";
6
4
 
7
- const MEMORY_PROMPT_BUDGET = 800;
5
+ const MEMORY_PROMPT_HEADER = [
6
+ "## Memory",
7
+ "LibraVDB persistent memory is configured. Recalled memories may appear",
8
+ "in context via the context-engine assembler when available and relevant.",
9
+ "",
10
+ ] as const;
8
11
 
9
12
  export function buildMemoryPromptSection(
10
- getRpc: RpcGetter,
11
- cfg: PluginConfig,
12
- recallCache: RecallCache<SearchResult>,
13
- ): (params: {
14
- availableTools: Set<string>;
15
- citationsMode?: string;
16
- messages?: Array<{ role: string; content: string }>;
17
- userId?: string;
18
- }) => Promise<string[]> {
19
- return async function memoryPromptSection(params: {
20
- availableTools: Set<string>;
21
- citationsMode?: string;
22
- messages?: Array<{ role: string; content: string }>;
23
- userId?: string;
24
- }): Promise<string[]> {
25
- const queryText = params.messages?.at(-1)?.content ?? "";
26
- const userId = params.userId ?? "default";
27
-
28
- if (!queryText) {
29
- return [
30
- "## Memory",
31
- "LibraVDB persistent memory is active. Recalled memories will appear",
32
- "in context via the context-engine assembler when relevant.",
33
- "",
34
- ];
35
- }
36
-
37
- const rpc = await getRpc();
38
-
39
- const [userHitsResult, globalHitsResult] = await Promise.all([
40
- rpc.call<{ results: SearchResult[] }>("search_text", {
41
- collection: `user:${userId}`,
42
- text: queryText,
43
- k: Math.ceil((cfg.topK ?? 8) / 2),
44
- }),
45
- rpc.call<{ results: SearchResult[] }>("search_text", {
46
- collection: "global",
47
- text: queryText,
48
- k: Math.ceil((cfg.topK ?? 8) / 4),
49
- }),
50
- ]);
51
-
52
- const userHits = userHitsResult.results;
53
- const globalHits = globalHitsResult.results;
54
-
55
- recallCache.put({
56
- userId,
57
- queryText,
58
- durableVariantHits: [],
59
- userHits,
60
- globalHits,
61
- });
62
-
63
- const ranked = scoreCandidates([...userHits, ...globalHits], {
64
- alpha: cfg.alpha,
65
- beta: cfg.beta,
66
- gamma: cfg.gamma,
67
- sessionId: "",
68
- userId,
69
- });
70
-
71
- const selected = fitPromptBudget(ranked, MEMORY_PROMPT_BUDGET);
72
- const recallHeader = buildMemoryHeader(selected);
73
-
74
- const lines: string[] = [
75
- "## Memory",
76
- "LibraVDB persistent memory is active. Recalled memories will appear",
77
- "in context via the context-engine assembler when relevant.",
78
- ];
79
-
80
- if (recallHeader) {
81
- lines.push(...recallHeader.split("\n"));
82
- }
83
-
84
- lines.push("");
85
- return lines;
13
+ _getRpc: RpcGetter,
14
+ _cfg: PluginConfig,
15
+ _recallCache: RecallCache<SearchResult>,
16
+ ): MemoryPromptSectionBuilder {
17
+ return function memoryPromptSection({
18
+ availableTools: _availableTools,
19
+ citationsMode: _citationsMode,
20
+ }): string[] {
21
+ // OpenClaw builds the memory prompt section synchronously for embedded runs.
22
+ // Actual retrieval and ranking happen in the context engine during assemble().
23
+ return [...MEMORY_PROMPT_HEADER];
86
24
  };
87
- }
25
+ }
@@ -1,4 +1,9 @@
1
1
  declare module "openclaw/plugin-sdk/plugin-entry" {
2
+ export type MemoryPromptSectionBuilder = (params: {
3
+ availableTools: Set<string>;
4
+ citationsMode?: string;
5
+ }) => string[];
6
+
2
7
  interface OpenClawCliCommand {
3
8
  commands?: OpenClawCliCommand[];
4
9
  command(name: string): OpenClawCliCommand;
@@ -18,7 +23,7 @@ declare module "openclaw/plugin-sdk/plugin-entry" {
18
23
  warn?(message: string): void;
19
24
  };
20
25
  registerContextEngine(id: string, factory: () => unknown): void;
21
- registerMemoryPromptSection(builder: unknown): void;
26
+ registerMemoryPromptSection(builder: MemoryPromptSectionBuilder): void;
22
27
  registerMemoryFlushPlan?(resolver: unknown): void;
23
28
  registerMemoryRuntime?(runtime: unknown): void;
24
29
  registerMemoryEmbeddingProvider?(provider: unknown): void;
package/src/scoring.ts CHANGED
@@ -32,6 +32,22 @@ interface HopOptions {
32
32
  thetaHop?: number;
33
33
  }
34
34
 
35
+ interface RawUserRecoveryOptions {
36
+ queryText: string;
37
+ nowMs?: number;
38
+ recencyLambda?: number;
39
+ }
40
+
41
+ export interface RawUserRecoveryDebugCandidate {
42
+ id: string;
43
+ text: string;
44
+ semanticScore: number;
45
+ lexicalCoverage: number;
46
+ recencyScore: number;
47
+ finalScore: number;
48
+ rationale: string;
49
+ }
50
+
35
51
  interface ExpansionOptions {
36
52
  confidenceThreshold?: number;
37
53
  maxDepth?: number;
@@ -296,6 +312,61 @@ export function expandSection7HopCandidates(
296
312
  .sort((left, right) => (right.finalScore ?? 0) - (left.finalScore ?? 0));
297
313
  }
298
314
 
315
+ export function rankRawUserRecoveryCandidates(
316
+ items: SearchResult[],
317
+ opts: RawUserRecoveryOptions,
318
+ ): { ranked: SearchResult[]; debug: RawUserRecoveryDebugCandidate[] } {
319
+ const now = opts.nowMs ?? Date.now();
320
+ const recencyLambda = Math.max(0, opts.recencyLambda ?? 0.00001);
321
+ const keywords = extractKeywords(opts.queryText);
322
+
323
+ const ranked = items
324
+ .map((item) => {
325
+ const semanticScore = clamp01(typeof item.score === "number" ? item.score : 0);
326
+ const lexicalCoverage = normalizedKeywordCoverage(keywords, item.text);
327
+ const recencyScore = computeRecencyScore(item, now, recencyLambda);
328
+ const finalScore = clamp01((0.30 * semanticScore) + (0.60 * lexicalCoverage) + (0.10 * recencyScore));
329
+ const rationale = buildRawUserRecoveryRationale({
330
+ semanticScore,
331
+ lexicalCoverage,
332
+ recencyScore,
333
+ });
334
+
335
+ return {
336
+ ranked: {
337
+ ...item,
338
+ finalScore,
339
+ },
340
+ debug: {
341
+ id: item.id,
342
+ text: item.text,
343
+ semanticScore,
344
+ lexicalCoverage,
345
+ recencyScore,
346
+ finalScore,
347
+ rationale,
348
+ },
349
+ };
350
+ })
351
+ .sort((left, right) => {
352
+ if (right.ranked.finalScore !== left.ranked.finalScore) {
353
+ return (right.ranked.finalScore ?? 0) - (left.ranked.finalScore ?? 0);
354
+ }
355
+ if (right.debug.lexicalCoverage !== left.debug.lexicalCoverage) {
356
+ return right.debug.lexicalCoverage - left.debug.lexicalCoverage;
357
+ }
358
+ if (right.debug.semanticScore !== left.debug.semanticScore) {
359
+ return right.debug.semanticScore - left.debug.semanticScore;
360
+ }
361
+ return left.ranked.id.localeCompare(right.ranked.id);
362
+ });
363
+
364
+ return {
365
+ ranked: ranked.map((entry) => entry.ranked),
366
+ debug: ranked.map((entry) => entry.debug),
367
+ };
368
+ }
369
+
299
370
  function clamp01(value: number): number {
300
371
  return Math.min(1, Math.max(0, value));
301
372
  }
@@ -392,6 +463,30 @@ function normalizedFrequency(accessCount: number, maxAccessCount: number): numbe
392
463
  return Math.log(1 + accessCount) / Math.log(1 + maxAccessCount + 1);
393
464
  }
394
465
 
466
+ function computeRecencyScore(item: SearchResult, now: number, recencyLambda: number): number {
467
+ const ts = typeof item.metadata.ts === "number" ? item.metadata.ts : now;
468
+ const ageSeconds = Math.max(0, now - ts) / 1000;
469
+ return Math.exp(-recencyLambda * ageSeconds);
470
+ }
471
+
472
+ function buildRawUserRecoveryRationale(scores: {
473
+ semanticScore: number;
474
+ lexicalCoverage: number;
475
+ recencyScore: number;
476
+ }): string {
477
+ const lexicalDelta = scores.lexicalCoverage - scores.semanticScore;
478
+ if (lexicalDelta > 0.15) {
479
+ return "lexical coverage lifted this candidate above its semantic score";
480
+ }
481
+ if (lexicalDelta < -0.15) {
482
+ return "semantic similarity carried this candidate despite weaker lexical coverage";
483
+ }
484
+ if (scores.recencyScore > 0.9) {
485
+ return "semantic and lexical scores were close; recency broke the tie";
486
+ }
487
+ return "semantic and lexical scores were balanced";
488
+ }
489
+
395
490
  function extractKeywords(text: string): string[] {
396
491
  const tokens = normalizeTerms(text);
397
492
  const seen = new Set<string>();
package/src/types.ts CHANGED
@@ -196,6 +196,20 @@ export interface ContextAssembleResult {
196
196
  estimatedTokens: number;
197
197
  systemPromptAddition: string;
198
198
  _profile?: string[];
199
+ _debug?: {
200
+ recoveryTriggerFired?: boolean;
201
+ crossSessionRawRecovery?: boolean;
202
+ rawUserRecoveryCandidates?: Array<{
203
+ id: string;
204
+ text: string;
205
+ selected: boolean;
206
+ semanticScore: number;
207
+ lexicalCoverage: number;
208
+ recencyScore: number;
209
+ finalScore: number;
210
+ rationale: string;
211
+ }>;
212
+ };
199
213
  }
200
214
 
201
215
  export interface ContextCompactArgs {