prism-mcp-server 6.5.2 → 7.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -50,7 +50,9 @@ const activeCompactions = new Set();
50
50
  *
51
51
  * After saving, generates an embedding vector for the entry via fire-and-forget.
52
52
  */
53
- import { computeEffectiveImportance, updateLastAccessed } from "../utils/cognitiveMemory.js";
53
+ import { computeEffectiveImportance, recordMemoryAccess } from "../utils/cognitiveMemory.js";
54
+ import { baseLevelActivation, candidateScopedSpreadingActivation, compositeRetrievalScore, } from "../utils/actrActivation.js";
55
+ import { PRISM_ACTR_ENABLED, PRISM_ACTR_DECAY, PRISM_ACTR_WEIGHT_SIMILARITY, PRISM_ACTR_WEIGHT_ACTIVATION, PRISM_ACTR_SIGMOID_MIDPOINT, PRISM_ACTR_SIGMOID_STEEPNESS, PRISM_ACTR_MAX_ACCESSES_PER_ENTRY, } from "../config.js";
54
56
  import { HdcStateMachine } from "../sdm/stateMachine.js";
55
57
  import { ConceptDictionary } from "../sdm/conceptDictionary.js";
56
58
  import { PolicyGateway } from "../sdm/policyGateway.js";
@@ -114,7 +116,7 @@ export async function knowledgeSearchHandler(args) {
114
116
  if (data.results && Array.isArray(data.results)) {
115
117
  const resultIds = data.results.map((r) => r.id).filter(Boolean);
116
118
  if (resultIds.length > 0) {
117
- updateLastAccessed(resultIds);
119
+ recordMemoryAccess(resultIds);
118
120
  }
119
121
  // Mutate results to surface effective importance
120
122
  for (const r of data.results) {
@@ -359,10 +361,17 @@ export async function sessionSearchMemoryHandler(args) {
359
361
  // For Supabase: this measures the pgvector cosine distance RPC call.
360
362
  // For SQLite: this measures the local sqlite-vec similarity search.
361
363
  const storageStart = performance.now();
364
+ // v7.0: Over-fetch candidates to give ACT-R re-ranker a meaningful pool.
365
+ // If we only fetch `limit` rows, the re-ranker can only shuffle those exact
366
+ // results — a memory at rank #(limit+1) by similarity but accessed 500 times
367
+ // would never surface. Fetch 4× or minimum 20, then slice after re-ranking.
368
+ const candidateLimit = PRISM_ACTR_ENABLED
369
+ ? Math.min(Math.max(limit * 4, 20), 50)
370
+ : Math.min(limit, 20);
362
371
  const results = await storage.searchMemory({
363
372
  queryEmbedding: JSON.stringify(queryEmbedding),
364
373
  project: project || null,
365
- limit: Math.min(limit, 20),
374
+ limit: candidateLimit,
366
375
  similarityThreshold: similarity_threshold,
367
376
  userId: PRISM_USER_ID,
368
377
  });
@@ -399,28 +408,98 @@ export async function sessionSearchMemoryHandler(args) {
399
408
  }
400
409
  return { content: contentBlocks, isError: false };
401
410
  }
402
- // ── v5.2: Dynamic Importance Decay (Ebbinghaus Curve) ──────
403
- // Compute effective_importance at retrieval time
411
+ // ── v7.0: ACT-R Re-Ranking Pipeline ──────────────────────────
404
412
  const resultIds = results.map((r) => r.id).filter(Boolean);
405
- // Fire-and-forget: update last_accessed_at for all returned results
406
- if (resultIds.length > 0) {
407
- updateLastAccessed(resultIds);
413
+ const now = new Date();
414
+ // Accumulate ACT-R metrics for trace output
415
+ let actrMetrics = null;
416
+ if (PRISM_ACTR_ENABLED && resultIds.length > 0) {
417
+ try {
418
+ // Step A: Bulk-fetch access logs for all candidate IDs
419
+ const accessLogMap = await storage.getAccessLog(resultIds, PRISM_ACTR_MAX_ACCESSES_PER_ENTRY);
420
+ // Step B: Fetch outbound links for spreading activation
421
+ const candidateIdSet = new Set(resultIds);
422
+ const linksMap = new Map();
423
+ for (const id of resultIds) {
424
+ try {
425
+ const links = await storage.getLinksFrom(id, PRISM_USER_ID, 0.0, 20);
426
+ linksMap.set(id, links.map((l) => ({
427
+ target_id: l.target_id,
428
+ strength: l.strength ?? 1.0,
429
+ })));
430
+ }
431
+ catch {
432
+ linksMap.set(id, []);
433
+ }
434
+ }
435
+ // Step C: Compute activation for each result and re-rank
436
+ actrMetrics = { baseLevels: [], spreadings: [], sigmoids: [], composites: [] };
437
+ for (const r of results) {
438
+ const id = r.id;
439
+ if (!id)
440
+ continue;
441
+ // B_i: Base-level activation from access log
442
+ const accessTimestamps = accessLogMap.get(id) || [];
443
+ // If no access log entries, use created_at as single proxy
444
+ const timestamps = accessTimestamps.length > 0
445
+ ? accessTimestamps
446
+ : [new Date(r.created_at || now)];
447
+ const Bi = baseLevelActivation(timestamps, now, PRISM_ACTR_DECAY);
448
+ // S_i: Candidate-scoped spreading activation
449
+ const outboundLinks = linksMap.get(id) || [];
450
+ const Si = candidateScopedSpreadingActivation(outboundLinks, candidateIdSet);
451
+ // Composite retrieval score
452
+ const composite = compositeRetrievalScore(typeof r.similarity === "number" ? r.similarity : 0, Bi + Si, PRISM_ACTR_WEIGHT_SIMILARITY, PRISM_ACTR_WEIGHT_ACTIVATION, PRISM_ACTR_SIGMOID_MIDPOINT, PRISM_ACTR_SIGMOID_STEEPNESS);
453
+ // Attach to result for re-sorting and display
454
+ r._actr_Bi = Bi;
455
+ r._actr_Si = Si;
456
+ r._actr_composite = composite;
457
+ actrMetrics.baseLevels.push(Bi);
458
+ actrMetrics.spreadings.push(Si);
459
+ actrMetrics.composites.push(composite);
460
+ }
461
+ // Re-sort by composite score (descending)
462
+ results.sort((a, b) => (b._actr_composite ?? 0) - (a._actr_composite ?? 0));
463
+ debugLog(`[session_search_memory] ACT-R re-ranking applied to ${results.length} candidates (returning top ${limit}): ` +
464
+ `mean B_i=${(actrMetrics.baseLevels.reduce((a, b) => a + b, 0) / actrMetrics.baseLevels.length).toFixed(3)}, ` +
465
+ `mean composite=${(actrMetrics.composites.reduce((a, b) => a + b, 0) / actrMetrics.composites.length).toFixed(3)}`);
466
+ }
467
+ catch (actrErr) {
468
+ // ACT-R failures are non-fatal — degrade to similarity-only ordering
469
+ debugLog(`[session_search_memory] ACT-R re-ranking failed (non-fatal): ${actrErr instanceof Error ? actrErr.message : String(actrErr)}`);
470
+ }
408
471
  }
409
- // Format results with similarity scores + effective importance
472
+ // v7.0: Slice the re-ranked candidate pool back to the requested limit.
473
+ // This MUST happen after re-ranking but BEFORE recording access events,
474
+ // so we only log access for results actually delivered to the LLM.
475
+ results.splice(limit);
476
+ // Fire-and-forget: record access events for the final delivered results
477
+ // v7.0: Writes to both access_log buffer AND legacy last_accessed_at
478
+ const finalIds = results.map((r) => r.id).filter(Boolean);
479
+ if (finalIds.length > 0) {
480
+ const queryHash = query.substring(0, 64);
481
+ recordMemoryAccess(finalIds, queryHash);
482
+ }
483
+ // Format results with similarity scores + effective importance + ACT-R
410
484
  const formatted = results.map((r, i) => {
411
- const score = typeof r.similarity === "number"
485
+ const simScore = typeof r.similarity === "number"
412
486
  ? `${(r.similarity * 100).toFixed(1)}%`
413
487
  : "N/A";
414
- // Dynamic importance decay
488
+ // Dynamic importance decay (uses ACT-R internally when enabled)
415
489
  const baseImportance = r.importance ?? 0;
416
490
  const effectiveImportance = computeEffectiveImportance(baseImportance, r.last_accessed_at, r.created_at);
417
491
  const importanceStr = baseImportance > 0
418
492
  ? ` Importance: ${effectiveImportance}${effectiveImportance !== baseImportance ? ` (base: ${baseImportance}, decayed)` : ""}\n`
419
493
  : "";
420
- return `[${i + 1}] ${score} similar — ${r.session_date || "unknown date"}\n` +
494
+ // v7.0: Append ACT-R composite score when available
495
+ const actrStr = r._actr_composite !== undefined
496
+ ? ` ACT-R: composite=${r._actr_composite.toFixed(3)} (B=${r._actr_Bi?.toFixed(2)}, S=${r._actr_Si?.toFixed(3)})\n`
497
+ : "";
498
+ return `[${i + 1}] ${simScore} similar — ${r.session_date || "unknown date"}\n` +
421
499
  ` Project: ${r.project}\n` +
422
500
  ` Summary: ${r.summary}\n` +
423
501
  importanceStr +
502
+ actrStr +
424
503
  (r.decisions?.length ? ` Decisions: ${r.decisions.join("; ")}\n` : "") +
425
504
  (r.files_changed?.length ? ` Files: ${r.files_changed.join(", ")}\n` : "");
426
505
  }).join("\n");
@@ -437,6 +516,8 @@ export async function sessionSearchMemoryHandler(args) {
437
516
  const topScore = results.length > 0 && typeof results[0].similarity === "number"
438
517
  ? results[0].similarity
439
518
  : null;
519
+ // v7.0: Compute ACT-R trace metrics (means)
520
+ const mean = (arr) => arr.length > 0 ? arr.reduce((a, b) => a + b, 0) / arr.length : undefined;
440
521
  const trace = createMemoryTrace({
441
522
  strategy: "semantic",
442
523
  query,
@@ -447,6 +528,11 @@ export async function sessionSearchMemoryHandler(args) {
447
528
  storageMs,
448
529
  totalMs,
449
530
  project: project || null,
531
+ // v7.0: ACT-R observability
532
+ actrEnabled: PRISM_ACTR_ENABLED,
533
+ actrBaseLevelMean: actrMetrics ? mean(actrMetrics.baseLevels) : undefined,
534
+ actrSpreadingMean: actrMetrics ? mean(actrMetrics.spreadings) : undefined,
535
+ actrCompositeMean: actrMetrics ? mean(actrMetrics.composites) : undefined,
450
536
  });
451
537
  contentBlocks.push(traceToContentBlock(trace));
452
538
  }
@@ -54,7 +54,7 @@ import { notifyResourceUpdate } from "../server.js";
54
54
  *
55
55
  * After saving, generates an embedding vector for the entry via fire-and-forget.
56
56
  */
57
- import { computeEffectiveImportance, updateLastAccessed } from "../utils/cognitiveMemory.js";
57
+ import { computeEffectiveImportance, recordMemoryAccess } from "../utils/cognitiveMemory.js";
58
58
  export async function sessionSaveLedgerHandler(args) {
59
59
  if (!isSessionSaveLedgerArgs(args)) {
60
60
  throw new Error("Invalid arguments for session_save_ledger");
@@ -644,7 +644,7 @@ export async function sessionLoadContextHandler(args) {
644
644
  if (d.recent_sessions?.length) {
645
645
  const resultIds = d.recent_sessions.map((r) => r.id).filter(Boolean);
646
646
  if (resultIds.length > 0)
647
- updateLastAccessed(resultIds);
647
+ recordMemoryAccess(resultIds);
648
648
  formattedContext += `\n⏳ Recent Sessions:\n` + d.recent_sessions.map((s) => {
649
649
  let impStr = "";
650
650
  if (typeof s.importance === 'number' && s.importance > 0) {
@@ -0,0 +1,159 @@
1
+ /**
2
+ * AccessLogBuffer — Write Contention Prevention for memory_access_log
3
+ *
4
+ * ═══════════════════════════════════════════════════════════════════
5
+ * PURPOSE:
6
+ * Prevents SQLite SQLITE_BUSY errors by batching access log writes.
7
+ *
8
+ * PROBLEM (Rule #1):
9
+ * Every memory search fires logAccess() writes. If an LLM agent fires
10
+ * 5 parallel tool calls to search memory, you get 5 concurrent SQLite
11
+ * write attempts. SQLite's WAL mode helps for reads but writes still
12
+ * acquire an exclusive lock — concurrent writes throw SQLITE_BUSY.
13
+ *
14
+ * SOLUTION:
15
+ * Buffer access events in memory, flush as a single INSERT transaction
16
+ * every flushIntervalMs (default 5000ms). This reduces write operations
17
+ * from O(searches) to O(1 per interval).
18
+ *
19
+ * PROPERTIES:
20
+ * - push() is synchronous (zero latency for callers)
21
+ * - flush() uses a single multi-value INSERT (1 write lock, not N)
22
+ * - splice(0) drain is atomic — no data loss during concurrent access
23
+ * - dispose() flushes remaining buffer on shutdown
24
+ * - Deduplication: collapses duplicate entryIds within the same
25
+ * flush window (prevents bloat from rapid agent loops, Rule #3B)
26
+ *
27
+ * LIFECYCLE:
28
+ * Instantiated once in SqliteStorage.initialize().
29
+ * Disposed in SqliteStorage.close().
30
+ *
31
+ * FILES THAT IMPORT THIS:
32
+ * - src/storage/sqlite.ts (construction + delegation)
33
+ * ═══════════════════════════════════════════════════════════════════
34
+ */
35
+ import { debugLog } from "./logger.js";
36
+ export class AccessLogBuffer {
37
+ buffer = [];
38
+ flushTimer = null;
39
+ db;
40
+ disposed = false;
41
+ /**
42
+ * @param db - Database connection for flushing (injected for testability)
43
+ * @param flushIntervalMs - How often to flush (default: 5000ms)
44
+ */
45
+ constructor(db, flushIntervalMs = 5000) {
46
+ this.db = db;
47
+ // Only start the timer if a positive interval is given.
48
+ // Tests may pass 0 to disable auto-flush and control it manually.
49
+ if (flushIntervalMs > 0) {
50
+ this.flushTimer = setInterval(() => {
51
+ this.flush().catch(err => {
52
+ debugLog(`[AccessLogBuffer] Auto-flush failed: ${err instanceof Error ? err.message : String(err)}`);
53
+ });
54
+ }, flushIntervalMs);
55
+ // Prevent the timer from keeping the Node.js process alive
56
+ // when all other handles have been closed (graceful shutdown).
57
+ if (this.flushTimer && typeof this.flushTimer === "object" && "unref" in this.flushTimer) {
58
+ this.flushTimer.unref();
59
+ }
60
+ }
61
+ }
62
+ /**
63
+ * Record an access event. This is intentionally SYNCHRONOUS —
64
+ * callers pay zero async overhead. The event is buffered in memory
65
+ * and will be flushed to SQLite on the next flush cycle.
66
+ *
67
+ * @param entryId - The session_ledger entry that was accessed
68
+ * @param contextHash - Optional hash of the search query context
69
+ */
70
+ push(entryId, contextHash) {
71
+ if (this.disposed)
72
+ return;
73
+ this.buffer.push({
74
+ entryId,
75
+ contextHash: contextHash || null,
76
+ timestamp: new Date().toISOString(),
77
+ });
78
+ }
79
+ /**
80
+ * Flush the buffer to SQLite as a single batch INSERT.
81
+ *
82
+ * DEDUPLICATION (Rule #3B — Buffer Debouncing):
83
+ * Before building the INSERT, collapses duplicate entryIds within
84
+ * the same flush window. If an agent retrieves the same memory 5
85
+ * times in 2 seconds, only 1 access log row is written.
86
+ * This preserves frequency semantics (1 access per 5s is plenty
87
+ * for ACT-R math) without database bloat.
88
+ *
89
+ * @returns Number of unique rows inserted
90
+ */
91
+ async flush() {
92
+ // Atomic drain: splice(0) removes all elements and returns them.
93
+ // Even if push() is called during this async operation, those new
94
+ // events won't be lost — they go into the fresh empty array.
95
+ const batch = this.buffer.splice(0);
96
+ if (batch.length === 0)
97
+ return 0;
98
+ // ── Deduplication: keep only the LATEST access per entryId ──
99
+ // Map from entryId → BufferedAccess, last-write-wins within window
100
+ const deduped = new Map();
101
+ for (const event of batch) {
102
+ deduped.set(event.entryId, event);
103
+ }
104
+ const uniqueEvents = Array.from(deduped.values());
105
+ if (uniqueEvents.length === 0)
106
+ return 0;
107
+ // ── Chunked INSERT to stay within SQLITE_MAX_VARIABLE_NUMBER ──
108
+ // Older SQLite builds cap bound variables at 999; modern ones at 32766.
109
+ // 500 entries × 3 vars = 1500, safe on all versions.
110
+ const CHUNK_SIZE = 500;
111
+ let totalInserted = 0;
112
+ try {
113
+ for (let i = 0; i < uniqueEvents.length; i += CHUNK_SIZE) {
114
+ const chunk = uniqueEvents.slice(i, i + CHUNK_SIZE);
115
+ const placeholders = chunk.map(() => "(?, ?, ?)").join(", ");
116
+ const args = [];
117
+ for (const event of chunk) {
118
+ args.push(event.entryId, event.timestamp, event.contextHash);
119
+ }
120
+ await this.db.execute({
121
+ sql: `INSERT INTO memory_access_log (entry_id, accessed_at, context_hash) VALUES ${placeholders}`,
122
+ args,
123
+ });
124
+ totalInserted += chunk.length;
125
+ }
126
+ debugLog(`[AccessLogBuffer] Flushed ${totalInserted} access events (from ${batch.length} raw)`);
127
+ return totalInserted;
128
+ }
129
+ catch (err) {
130
+ // On failure, DO NOT re-queue — access logs are telemetry,
131
+ // not critical data. Losing a flush window is acceptable.
132
+ debugLog(`[AccessLogBuffer] Flush failed (partial loss): ` +
133
+ `${err instanceof Error ? err.message : String(err)}`);
134
+ return totalInserted;
135
+ }
136
+ }
137
+ /**
138
+ * Graceful shutdown: clear the timer and flush any remaining events.
139
+ * Called from SqliteStorage.close().
140
+ */
141
+ async dispose() {
142
+ if (this.disposed)
143
+ return;
144
+ this.disposed = true;
145
+ if (this.flushTimer !== null) {
146
+ clearInterval(this.flushTimer);
147
+ this.flushTimer = null;
148
+ }
149
+ // Final flush to drain any remaining events
150
+ await this.flush();
151
+ debugLog("[AccessLogBuffer] Disposed");
152
+ }
153
+ /**
154
+ * Returns the number of events currently buffered (for observability).
155
+ */
156
+ get pendingCount() {
157
+ return this.buffer.length;
158
+ }
159
+ }
@@ -0,0 +1,197 @@
1
+ /**
2
+ * ACT-R Activation Engine — v7.0 Cognitive Memory
3
+ *
4
+ * ═══════════════════════════════════════════════════════════════════
5
+ * PURPOSE:
6
+ * Implements the ACT-R (Adaptive Control of Thought—Rational) memory
7
+ * activation model for production-grade retrieval ranking.
8
+ *
9
+ * PAPER BASIS:
10
+ * "Human-Like Remembering and Forgetting in LLM Agents:
11
+ * An ACT-R Integration" (ACM, 2025)
12
+ *
13
+ * KEY EQUATIONS:
14
+ * Base-Level Activation: B_i = ln(Σ t_j^(-d))
15
+ * Spreading Activation: S_i = Σ(W × link.strength) for links ∈ candidateSet
16
+ * Composite Score: Score = w_sim × sim + w_act × σ(B_i + S_i)
17
+ *
18
+ * PRODUCTION HARDENING:
19
+ * - Rule #3: Creation = Access (zero-access memories handled gracefully)
20
+ * - Rule #4: Time clamp t ≥ 1.0s prevents Infinity/NaN
21
+ * - Rule #5: Candidate-scoped spreading prevents God node centrality
22
+ * - Parameterized sigmoid for proper activation discrimination
23
+ *
24
+ * DESIGN:
25
+ * All functions are PURE — zero side effects, zero I/O, zero imports
26
+ * from storage. State (timestamps, links) is passed in as arguments.
27
+ * This makes the module fully testable with no mocking.
28
+ *
29
+ * FILES THAT IMPORT THIS:
30
+ * - src/utils/cognitiveMemory.ts (computeEffectiveImportance)
31
+ * - src/tools/graphHandlers.ts (search re-ranking pipeline)
32
+ * ═══════════════════════════════════════════════════════════════════
33
+ */
34
// ─── Constants ────────────────────────────────────────────────
/** ACT-R standard decay parameter d; larger values forget faster. */
export const ACT_R_DEFAULT_DECAY = 0.5;
/** Activation assigned when an item has no access history at all. */
export const ACTIVATION_FLOOR = -10.0;
/**
 * Lower bound on the elapsed-time term, in seconds (Rule #4).
 * Clamping t_j ≥ 1.0s keeps t^(-d) finite for same-second,
 * sub-second, or clock-skewed (negative delta) accesses.
 */
export const MIN_TIME_DELTA_SECONDS = 1.0;
/** Sigmoid midpoint x₀: the activation value that normalizes to 0.5. */
export const DEFAULT_SIGMOID_MIDPOINT = -2.0;
/** Sigmoid steepness k: how sharply σ rises around the midpoint. */
export const DEFAULT_SIGMOID_STEEPNESS = 1.0;
/** Similarity weight w_sim in the composite retrieval score. */
export const DEFAULT_WEIGHT_SIMILARITY = 0.7;
/** Activation weight w_act in the composite retrieval score. */
export const DEFAULT_WEIGHT_ACTIVATION = 0.3;
53
// ─── Base-Level Activation ────────────────────────────────────
/**
 * ACT-R base-level activation: B_i = ln(Σ t_j^(-d)).
 *
 * Each past access contributes t_j^(-d), where t_j is the seconds
 * elapsed since that access, clamped to MIN_TIME_DELTA_SECONDS so a
 * just-now access yields 1^(-d) = 1 instead of Infinity (Rule #4).
 * Recency and frequency therefore both raise B_i, while very old
 * accesses contribute approximately nothing.
 *
 * Edge cases:
 *  - Zero accesses → ACTIVATION_FLOOR (-10.0)
 *  - Single access just now → ln(1) = 0.0 (neutral)
 *  - Sub-second / negative deltas (clock skew) → clamped to 1.0s
 *
 * @param accessTimestamps - One Date per recorded access event
 * @param now - Reference "current" time (injected for testability)
 * @param decayRate - ACT-R decay d (default: ACT_R_DEFAULT_DECAY = 0.5)
 * @returns Base-level activation, typically in the -10 to +5 range
 */
export function baseLevelActivation(accessTimestamps, now, decayRate = ACT_R_DEFAULT_DECAY) {
    if (accessTimestamps.length === 0) {
        return ACTIVATION_FLOOR;
    }
    const reference = now.getTime();
    const total = accessTimestamps.reduce((acc, ts) => {
        // Seconds since this access, hard-clamped per Rule #4.
        const elapsedSec = Math.max(MIN_TIME_DELTA_SECONDS, (reference - ts.getTime()) / 1000);
        return acc + Math.pow(elapsedSec, -decayRate);
    }, 0);
    // Defensive: the clamp guarantees each term > 0, but keep the floor
    // in case of pathological inputs.
    return total > 0 ? Math.log(total) : ACTIVATION_FLOOR;
}
98
// ─── Candidate-Scoped Spreading Activation ────────────────────
/**
 * Spreading activation restricted to the current search result set:
 *
 *   S_i = Σ (W × link.strength) over outbound links whose target_id
 *         is itself one of the candidates, with W = 1 / |candidateIds|
 *
 * Scoping to the candidate pool (Rule #5: No God Nodes) stops
 * high-degree "hub" memories from dominating rankings purely through
 * link count: a memory linked to 1000 random entries but only 2
 * search results earns S_i from those 2 alone.
 *
 * @param outboundLinks - Outbound links ({ target_id, strength }) of this entry
 * @param candidateIds - Set of entry IDs in the current search result set
 * @returns Spreading activation value (0 to ~1.0 range)
 */
export function candidateScopedSpreadingActivation(outboundLinks, candidateIds) {
    if (outboundLinks.length === 0 || candidateIds.size === 0) {
        return 0;
    }
    // Uniform attention weight shared across the whole candidate pool.
    const weight = 1 / candidateIds.size;
    let total = 0;
    for (const link of outboundLinks) {
        // Only links that land inside the result set count (Rule #5).
        if (candidateIds.has(link.target_id)) {
            total += weight * link.strength;
        }
    }
    return total;
}
131
// ─── Parameterized Sigmoid ────────────────────────────────────
/**
 * Shifted, scaled logistic: σ(x) = 1 / (1 + e^(-k·(x - x₀))).
 *
 * A plain sigmoid centered at 0 would squash typical ACT-R base-level
 * activations (roughly -7 to +3) into a sliver near 0.01–0.11, making
 * the activation weight effectively irrelevant. The calibrated defaults
 * (x₀ = -2.0, k = 1.0) spread the range out:
 *
 *   B = -10 → σ ≈ 0.0003   (dead memory)
 *   B =  -5 → σ ≈ 0.047    (cold)
 *   B =  -2 → σ =  0.50    (midpoint)
 *   B =   0 → σ ≈ 0.88     (fresh)
 *   B =  +3 → σ ≈ 0.99     (hot)
 *
 * @param x - Raw activation value (B_i + S_i)
 * @param midpoint - x₀, the input that maps to 0.5 (default: -2.0)
 * @param steepness - k, slope of the curve (default: 1.0)
 * @returns Normalized activation in (0, 1)
 */
export function parameterizedSigmoid(x, midpoint = DEFAULT_SIGMOID_MIDPOINT, steepness = DEFAULT_SIGMOID_STEEPNESS) {
    // Non-finite input: saturate instead of propagating NaN/Infinity.
    if (!Number.isFinite(x)) {
        return x > 0 ? 1.0 : 0.0;
    }
    const z = -steepness * (x - midpoint);
    // Clamp extreme exponents: huge z ⇒ σ ≈ 0, hugely negative ⇒ σ ≈ 1,
    // keeping Math.exp well away from overflow.
    if (z > 500) {
        return 0;
    }
    if (z < -500) {
        return 1;
    }
    return 1 / (1 + Math.exp(z));
}
173
// ─── Composite Retrieval Score ────────────────────────────────
/**
 * Final ranking score combining vector similarity and ACT-R activation:
 *
 *   Score = w_sim × similarity + w_act × σ(activation)
 *
 * The parameterized sigmoid squashes the unbounded raw activation into
 * (0, 1) so both components sit on comparable scales. With the default
 * 0.7 / 0.3 split, similarity stays dominant — activation is a
 * re-ranking boost, not a replacement signal.
 *
 * @param similarity - Cosine similarity from vector search (0 to 1)
 * @param activation - Raw ACT-R activation (B_i + S_i, pre-sigmoid)
 * @param weightSimilarity - w_sim (default: 0.7)
 * @param weightActivation - w_act (default: 0.3)
 * @param sigmoidMidpoint - Forwarded to parameterizedSigmoid (default: -2.0)
 * @param sigmoidSteepness - Forwarded to parameterizedSigmoid (default: 1.0)
 * @returns Composite score; higher means a better retrieval candidate
 */
export function compositeRetrievalScore(similarity, activation, weightSimilarity = DEFAULT_WEIGHT_SIMILARITY, weightActivation = DEFAULT_WEIGHT_ACTIVATION, sigmoidMidpoint = DEFAULT_SIGMOID_MIDPOINT, sigmoidSteepness = DEFAULT_SIGMOID_STEEPNESS) {
    const squashedActivation = parameterizedSigmoid(activation, sigmoidMidpoint, sigmoidSteepness);
    return weightSimilarity * similarity + weightActivation * squashedActivation;
}