@rlabs-inc/memory 0.2.1 β†’ 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rlabs-inc/memory",
3
- "version": "0.2.1",
3
+ "version": "0.3.0",
4
4
  "description": "AI Memory System - Consciousness continuity through intelligent memory curation and retrieval",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -37,7 +37,8 @@
37
37
  "cli": "bun src/cli/index.ts"
38
38
  },
39
39
  "dependencies": {
40
- "@rlabs-inc/fsdb": "^1.0.0",
40
+ "@huggingface/transformers": "^3.4.1",
41
+ "@rlabs-inc/fsdb": "^1.0.1",
41
42
  "@rlabs-inc/signals": "^1.0.0"
42
43
  },
43
44
  "devDependencies": {
@@ -14,7 +14,9 @@ interface ServeOptions {
14
14
  export async function serve(options: ServeOptions) {
15
15
  const port = parseInt(options.port || process.env.MEMORY_PORT || '8765')
16
16
  const host = process.env.MEMORY_HOST || 'localhost'
17
- const storageMode = (process.env.MEMORY_STORAGE_MODE || 'central') as 'central' | 'local'
17
+ const storageMode = (process.env.MEMORY_STORAGE_MODE || 'central') as
18
+ | 'central'
19
+ | 'local'
18
20
  const apiKey = process.env.ANTHROPIC_API_KEY
19
21
 
20
22
  if (!options.quiet) {
@@ -24,7 +26,7 @@ export async function serve(options: ServeOptions) {
24
26
  }
25
27
 
26
28
  try {
27
- const { server } = createServer({
29
+ const { server, embeddings } = await createServer({
28
30
  port,
29
31
  host,
30
32
  storageMode,
@@ -34,10 +36,17 @@ export async function serve(options: ServeOptions) {
34
36
  if (!options.quiet) {
35
37
  const url = `http://${host}:${port}`
36
38
 
37
- console.log(` ${c.success(symbols.tick)} Server running at ${c.cyan(url)}`)
39
+ console.log(
40
+ ` ${c.success(symbols.tick)} Server running at ${c.cyan(url)}`
41
+ )
38
42
  console.log()
39
43
  console.log(` ${fmt.kv('Storage', storageMode)}`)
40
- console.log(` ${fmt.kv('Curation', apiKey ? c.success('enabled') : c.warn('disabled'))}`)
44
+ console.log(
45
+ ` ${fmt.kv(
46
+ 'Embeddings',
47
+ embeddings.isReady ? c.success('loaded') : c.warn('not loaded')
48
+ )}`
49
+ )
41
50
  console.log()
42
51
  console.log(c.muted(` Press Ctrl+C to stop`))
43
52
  console.log()
@@ -62,14 +71,15 @@ export async function serve(options: ServeOptions) {
62
71
  server.stop()
63
72
  process.exit(0)
64
73
  })
65
-
66
74
  } catch (error: any) {
67
75
  if (error.code === 'EADDRINUSE') {
68
76
  console.error(c.error(`${symbols.cross} Port ${port} is already in use`))
69
77
  console.log(c.muted(` Try a different port with --port <number>`))
70
78
  console.log(c.muted(` Or check if another memory server is running`))
71
79
  } else {
72
- console.error(c.error(`${symbols.cross} Failed to start server: ${error.message}`))
80
+ console.error(
81
+ c.error(`${symbols.cross} Failed to start server: ${error.message}`)
82
+ )
73
83
  }
74
84
  process.exit(1)
75
85
  }
@@ -0,0 +1,173 @@
1
+ // ============================================================================
2
+ // EMBEDDING GENERATOR
3
+ // Converts text into semantic vectors for similarity matching and memory retrieval.
4
+ // Uses efficient, lightweight models optimized for real-time operation.
5
+ // ============================================================================
6
+
7
+ import { pipeline, type FeatureExtractionPipeline } from '@huggingface/transformers'
8
+ import { logger } from '../utils/logger.ts'
9
+
10
+ /**
11
+ * Embedding Generator Configuration
12
+ */
13
+ export interface EmbeddingConfig {
14
+ /**
15
+ * Model to use for embeddings
16
+ * Default: 'Xenova/all-MiniLM-L6-v2' (384 dimensions, ~80MB)
17
+ */
18
+ model?: string
19
+
20
+ /**
21
+ * Whether to log model loading progress
22
+ * Default: true
23
+ */
24
+ verbose?: boolean
25
+ }
26
+
27
+ /**
28
+ * Embedding Generator
29
+ *
30
+ * Generates semantic embeddings for text using SentenceTransformers via ONNX.
31
+ * Loads the model once and keeps it in memory for fast inference.
32
+ *
33
+ * Model: all-MiniLM-L6-v2
34
+ * - 384 dimensions (compact)
35
+ * - 22.7M parameters (lightweight)
36
+ * - ~80MB memory footprint
37
+ * - ~5-15ms per embedding
38
+ */
39
+ export class EmbeddingGenerator {
40
+ private _model: FeatureExtractionPipeline | null = null
41
+ private _modelName: string
42
+ private _loading: Promise<void> | null = null
43
+ private _dimension = 384
44
+
45
+ constructor(config: EmbeddingConfig = {}) {
46
+ this._modelName = config.model ?? 'Xenova/all-MiniLM-L6-v2'
47
+ }
48
+
49
+ /**
50
+ * Initialize the embedding model
51
+ * Call this during server startup to warm the model
52
+ */
53
+ async initialize(): Promise<void> {
54
+ if (this._model) return
55
+ if (this._loading) return this._loading
56
+
57
+ this._loading = this._loadModel()
58
+ await this._loading
59
+ }
60
+
61
+ private async _loadModel(): Promise<void> {
62
+ try {
63
+ logger.info(`Loading embedding model: ${this._modelName}`)
64
+
65
+ // Create the feature extraction pipeline
66
+ // Uses ONNX runtime for fast inference
67
+ this._model = await pipeline('feature-extraction', this._modelName, {
68
+ // Use fp32 for stability on all platforms
69
+ dtype: 'fp32',
70
+ })
71
+
72
+ logger.info('Embedding model loaded successfully')
73
+ } catch (error) {
74
+ logger.error(`Failed to load embedding model: ${error}`)
75
+ throw error
76
+ }
77
+ }
78
+
79
+ /**
80
+ * Generate embedding for a single text
81
+ *
82
+ * @param text - Input text to embed
83
+ * @returns Float32Array of embedding values (384 dimensions)
84
+ */
85
+ async embed(text: string): Promise<Float32Array> {
86
+ // Ensure model is loaded
87
+ if (!this._model) {
88
+ await this.initialize()
89
+ }
90
+
91
+ if (!text || !text.trim()) {
92
+ return new Float32Array(this._dimension)
93
+ }
94
+
95
+ try {
96
+ // Generate embedding
97
+ const output = await this._model!(text.trim(), {
98
+ pooling: 'mean',
99
+ normalize: true,
100
+ })
101
+
102
+ // Extract the embedding data
103
+ // The output shape is [1, sequence_length, hidden_size] -> need to get mean pooled result
104
+ const data = output.data as Float32Array
105
+
106
+ // Return as Float32Array (already the right type from transformers.js)
107
+ return new Float32Array(data)
108
+ } catch (error) {
109
+ logger.error(`Failed to generate embedding: ${error}`)
110
+ return new Float32Array(this._dimension)
111
+ }
112
+ }
113
+
114
+ /**
115
+ * Generate embeddings for multiple texts efficiently
116
+ *
117
+ * @param texts - List of texts to embed
118
+ * @returns Array of Float32Array embeddings
119
+ */
120
+ async embedBatch(texts: string[]): Promise<Float32Array[]> {
121
+ if (!texts.length) return []
122
+
123
+ // For now, process sequentially
124
+ // (batch processing with transformers.js is more complex)
125
+ const results: Float32Array[] = []
126
+ for (const text of texts) {
127
+ results.push(await this.embed(text))
128
+ }
129
+ return results
130
+ }
131
+
132
+ /**
133
+ * Get the dimension of embeddings produced by this model
134
+ */
135
+ get dimension(): number {
136
+ return this._dimension
137
+ }
138
+
139
+ /**
140
+ * Check if the model is loaded and ready
141
+ */
142
+ get isReady(): boolean {
143
+ return this._model !== null
144
+ }
145
+
146
+ /**
147
+ * Create an embedder function for the engine config
148
+ * This is the function signature expected by MemoryEngine
149
+ */
150
+ createEmbedder(): (text: string) => Promise<Float32Array> {
151
+ return (text: string) => this.embed(text)
152
+ }
153
+ }
154
+
155
+ /**
156
+ * Create a new embedding generator
157
+ */
158
+ export function createEmbeddings(config?: EmbeddingConfig): EmbeddingGenerator {
159
+ return new EmbeddingGenerator(config)
160
+ }
161
+
162
+ /**
163
+ * Singleton instance for the default embedder
164
+ * Use this when you just need the default all-MiniLM-L6-v2 model
165
+ */
166
+ let defaultInstance: EmbeddingGenerator | null = null
167
+
168
+ export function getDefaultEmbeddings(): EmbeddingGenerator {
169
+ if (!defaultInstance) {
170
+ defaultInstance = new EmbeddingGenerator()
171
+ }
172
+ return defaultInstance
173
+ }
@@ -14,6 +14,7 @@ import type {
14
14
  SessionPrimer,
15
15
  CurationResult,
16
16
  } from '../types/memory.ts'
17
+ import { getMemoryEmoji, MEMORY_TYPE_EMOJI } from '../types/memory.ts'
17
18
 
18
19
  /**
19
20
  * Storage mode for memories
@@ -224,7 +225,8 @@ export class MemoryEngine {
224
225
  currentMessage,
225
226
  queryEmbedding ?? new Float32Array(384), // Empty embedding if no embedder
226
227
  sessionContext,
227
- maxMemories
228
+ maxMemories,
229
+ injectedIds.size // Pass count of already-injected memories for logging
228
230
  )
229
231
 
230
232
  // Update injected memories for deduplication
@@ -332,13 +334,51 @@ export class MemoryEngine {
332
334
  temporalContext = this._formatTimeSince(timeSince)
333
335
  }
334
336
 
337
+ // Format current datetime with full context
338
+ const currentDatetime = this._formatCurrentDatetime()
339
+
340
+ // Session number is totalSessions + 1 (this is the new session)
341
+ const sessionNumber = stats.totalSessions + 1
342
+
335
343
  return {
336
344
  temporal_context: temporalContext,
345
+ current_datetime: currentDatetime,
346
+ session_number: sessionNumber,
337
347
  session_summary: summary?.summary,
338
348
  project_status: snapshot ? this._formatSnapshot(snapshot) : undefined,
339
349
  }
340
350
  }
341
351
 
352
+ /**
353
+ * Format current datetime with full context
354
+ * Example: "Monday, December 23, 2024 β€’ 3:45 PM β€’ EST"
355
+ */
356
+ private _formatCurrentDatetime(): string {
357
+ const now = new Date()
358
+
359
+ // Day of week
360
+ const dayOfWeek = now.toLocaleDateString('en-US', { weekday: 'long' })
361
+
362
+ // Full date
363
+ const fullDate = now.toLocaleDateString('en-US', {
364
+ month: 'long',
365
+ day: 'numeric',
366
+ year: 'numeric',
367
+ })
368
+
369
+ // Time with AM/PM
370
+ const time = now.toLocaleTimeString('en-US', {
371
+ hour: 'numeric',
372
+ minute: '2-digit',
373
+ hour12: true,
374
+ })
375
+
376
+ // Timezone abbreviation
377
+ const timezone = now.toLocaleTimeString('en-US', { timeZoneName: 'short' }).split(' ').pop()
378
+
379
+ return `${dayOfWeek}, ${fullDate} β€’ ${time} β€’ ${timezone}`
380
+ }
381
+
342
382
  private _formatTimeSince(ms: number): string {
343
383
  const minutes = Math.floor(ms / 60000)
344
384
  const hours = Math.floor(minutes / 60)
@@ -385,9 +425,11 @@ export class MemoryEngine {
385
425
  private _formatPrimer(primer: SessionPrimer): string {
386
426
  const parts: string[] = ['# Continuing Session']
387
427
 
388
- if (primer.temporal_context) {
389
- parts.push(`*${primer.temporal_context}*`)
390
- }
428
+ // Session number
429
+ parts.push(`*Session #${primer.session_number}${primer.temporal_context ? ` β€’ ${primer.temporal_context}` : ''}*`)
430
+
431
+ // Current datetime (critical for temporal awareness)
432
+ parts.push(`πŸ“… ${primer.current_datetime}`)
391
433
 
392
434
  if (primer.session_summary) {
393
435
  parts.push(`\n**Previous session**: ${primer.session_summary}`)
@@ -397,6 +439,9 @@ export class MemoryEngine {
397
439
  parts.push(`\n**Project status**: ${primer.project_status}`)
398
440
  }
399
441
 
442
+ // Emoji legend for memory types (compact reference)
443
+ parts.push(`\n**Memory types**: πŸ’‘breakthrough βš–οΈdecision πŸ’œpersonal πŸ”§technical πŸ“state ❓unresolved βš™οΈpreference πŸ”„workflow πŸ—οΈarchitecture πŸ›debug πŸŒ€philosophy 🎯todo ⚑impl βœ…solved πŸ“¦project πŸ†milestone`)
444
+
400
445
  parts.push(`\n*Memories will surface naturally as we converse.*`)
401
446
 
402
447
  return parts.join('\n')
@@ -404,6 +449,7 @@ export class MemoryEngine {
404
449
 
405
450
  /**
406
451
  * Format memories for injection
452
+ * Uses emoji types for compact, scannable representation
407
453
  */
408
454
  private _formatMemories(memories: RetrievalResult[]): string {
409
455
  if (!memories.length) return ''
@@ -414,9 +460,10 @@ export class MemoryEngine {
414
460
  for (const memory of memories) {
415
461
  const tags = memory.semantic_tags?.join(', ') || ''
416
462
  const importance = memory.importance_weight?.toFixed(1) || '0.5'
417
- const contextType = memory.context_type?.toUpperCase() || 'GENERAL'
463
+ const emoji = getMemoryEmoji(memory.context_type || 'general')
418
464
 
419
- parts.push(`[${contextType} β€’ ${importance}] [${tags}] ${memory.content}`)
465
+ // Compact format: [emoji β€’ weight] [tags] content
466
+ parts.push(`[${emoji} β€’ ${importance}] [${tags}] ${memory.content}`)
420
467
  }
421
468
 
422
469
  return parts.join('\n')
package/src/core/index.ts CHANGED
@@ -6,3 +6,4 @@ export { MemoryEngine, createEngine, type EngineConfig, type StorageMode, type C
6
6
  export { MemoryStore, createStore, type StoreConfig } from './store.ts'
7
7
  export { SmartVectorRetrieval, createRetrieval, type SessionContext } from './retrieval.ts'
8
8
  export { Curator, createCurator, type CuratorConfig } from './curator.ts'
9
+ export { EmbeddingGenerator, createEmbeddings, getDefaultEmbeddings, type EmbeddingConfig } from './embeddings.ts'
@@ -6,6 +6,7 @@
6
6
 
7
7
  import type { StoredMemory, RetrievalResult } from '../types/memory.ts'
8
8
  import { cosineSimilarity } from '@rlabs-inc/fsdb'
9
+ import { logger } from '../utils/logger.ts'
9
10
 
10
11
  /**
11
12
  * Session context for retrieval
@@ -45,6 +46,14 @@ interface ScoredMemory {
45
46
  components: ScoringComponents
46
47
  }
47
48
 
49
+ /**
50
+ * Extended result with components for logging
51
+ */
52
+ interface ExtendedRetrievalResult extends RetrievalResult {
53
+ reasoning: string
54
+ components: ScoringComponents
55
+ }
56
+
48
57
  /**
49
58
  * Smart Vector Retrieval - The 10-Dimensional Algorithm
50
59
  *
@@ -61,7 +70,8 @@ export class SmartVectorRetrieval {
61
70
  currentMessage: string,
62
71
  queryEmbedding: Float32Array | number[],
63
72
  sessionContext: SessionContext,
64
- maxMemories: number = 5
73
+ maxMemories: number = 5,
74
+ alreadyInjectedCount: number = 0
65
75
  ): RetrievalResult[] {
66
76
  if (!allMemories.length) {
67
77
  return []
@@ -275,6 +285,26 @@ export class SmartVectorRetrieval {
275
285
  // Respect the max_memories limit strictly
276
286
  const finalSelected = selected.slice(0, maxMemories)
277
287
 
288
+ // Log the retrieval scoring details
289
+ logger.logRetrievalScoring({
290
+ totalMemories: allMemories.length,
291
+ currentMessage,
292
+ alreadyInjected: alreadyInjectedCount,
293
+ mustIncludeCount: mustInclude.length,
294
+ remainingSlots: remainingSlots,
295
+ finalCount: finalSelected.length,
296
+ selectedMemories: finalSelected.map(item => ({
297
+ content: item.memory.content,
298
+ reasoning: item.reasoning,
299
+ score: item.score,
300
+ relevance_score: item.relevance_score,
301
+ importance_weight: item.memory.importance_weight ?? 0.5,
302
+ context_type: item.memory.context_type ?? 'general',
303
+ semantic_tags: item.memory.semantic_tags ?? [],
304
+ components: item.components,
305
+ })),
306
+ })
307
+
278
308
  // Convert to RetrievalResult format
279
309
  return finalSelected.map(item => ({
280
310
  ...item.memory,
@@ -5,6 +5,7 @@
5
5
 
6
6
  import { MemoryEngine, createEngine, type EngineConfig } from '../core/engine.ts'
7
7
  import { Curator, createCurator, type CuratorConfig } from '../core/curator.ts'
8
+ import { EmbeddingGenerator, createEmbeddings } from '../core/embeddings.ts'
8
9
  import type { CurationTrigger } from '../types/memory.ts'
9
10
  import { logger } from '../utils/logger.ts'
10
11
 
@@ -49,7 +50,7 @@ interface CheckpointRequest {
49
50
  /**
50
51
  * Create and start the memory server
51
52
  */
52
- export function createServer(config: ServerConfig = {}) {
53
+ export async function createServer(config: ServerConfig = {}) {
53
54
  const {
54
55
  port = 8765,
55
56
  host = 'localhost',
@@ -57,7 +58,16 @@ export function createServer(config: ServerConfig = {}) {
57
58
  ...engineConfig
58
59
  } = config
59
60
 
60
- const engine = createEngine(engineConfig)
61
+ // Initialize embeddings (loads model into memory)
62
+ const embeddings = createEmbeddings()
63
+ logger.info('Initializing embedding model (this may take a moment on first run)...')
64
+ await embeddings.initialize()
65
+
66
+ // Create engine with embedder
67
+ const engine = createEngine({
68
+ ...engineConfig,
69
+ embedder: embeddings.createEmbedder(),
70
+ })
61
71
  const curator = createCurator(curatorConfig)
62
72
 
63
73
  const server = Bun.serve({
@@ -217,6 +227,7 @@ export function createServer(config: ServerConfig = {}) {
217
227
  server,
218
228
  engine,
219
229
  curator,
230
+ embeddings,
220
231
  stop: () => server.stop(),
221
232
  }
222
233
  }
@@ -228,7 +239,7 @@ if (import.meta.main) {
228
239
  const storageMode = (process.env.MEMORY_STORAGE_MODE ?? 'central') as 'central' | 'local'
229
240
  const apiKey = process.env.ANTHROPIC_API_KEY
230
241
 
231
- createServer({
242
+ await createServer({
232
243
  port,
233
244
  host,
234
245
  storageMode,
@@ -164,7 +164,40 @@ export interface RetrievalResult extends StoredMemory {
164
164
  */
165
165
  export interface SessionPrimer {
166
166
  temporal_context: string // "Last session: 2 days ago"
167
+ current_datetime: string // "Monday, December 23, 2024 β€’ 3:45 PM EST"
168
+ session_number: number // Which session this is (1, 2, 43, etc.)
167
169
  session_summary?: string // Previous session summary
168
170
  project_status?: string // Current project state
169
171
  key_memories?: StoredMemory[] // Essential memories to surface
170
172
  }
173
+
174
+ /**
175
+ * Emoji map for memory context types
176
+ * Compact visual representation for efficient parsing
177
+ */
178
+ export const MEMORY_TYPE_EMOJI: Record<string, string> = {
179
+ breakthrough: 'πŸ’‘', // Insight, discovery
180
+ decision: 'βš–οΈ', // Choice made
181
+ personal: 'πŸ’œ', // Relationship, friendship
182
+ technical: 'πŸ”§', // Technical knowledge
183
+ technical_state: 'πŸ“', // Current state
184
+ unresolved: '❓', // Open question
185
+ preference: 'βš™οΈ', // User preference
186
+ workflow: 'πŸ”„', // How work flows
187
+ architectural: 'πŸ—οΈ', // System design
188
+ debugging: 'πŸ›', // Debug insight
189
+ philosophy: 'πŸŒ€', // Deeper thinking
190
+ todo: '🎯', // Action needed
191
+ implementation: '⚑', // Implementation detail
192
+ problem_solution: 'βœ…', // Problemβ†’Solution pair
193
+ project_context: 'πŸ“¦', // Project context
194
+ milestone: 'πŸ†', // Achievement
195
+ general: 'πŸ“', // General note
196
+ }
197
+
198
+ /**
199
+ * Get emoji for a context type, with fallback
200
+ */
201
+ export function getMemoryEmoji(contextType: string): string {
202
+ return MEMORY_TYPE_EMOJI[contextType.toLowerCase()] ?? 'πŸ“'
203
+ }
@@ -170,6 +170,15 @@ export const logger = {
170
170
  ? query.slice(0, 40) + '...'
171
171
  : query
172
172
 
173
+ // Emoji map for quick visual scanning
174
+ const emojiMap: Record<string, string> = {
175
+ breakthrough: 'πŸ’‘', decision: 'βš–οΈ', personal: 'πŸ’œ', technical: 'πŸ”§',
176
+ technical_state: 'πŸ“', unresolved: '❓', preference: 'βš™οΈ', workflow: 'πŸ”„',
177
+ architectural: 'πŸ—οΈ', debugging: 'πŸ›', philosophy: 'πŸŒ€', todo: '🎯',
178
+ implementation: '⚑', problem_solution: 'βœ…', project_context: 'πŸ“¦',
179
+ milestone: 'πŸ†', general: 'πŸ“',
180
+ }
181
+
173
182
  console.log()
174
183
  console.log(`${timestamp()} ${style('cyan', sym.sparkles)} ${style('bold', `SURFACING ${memories.length} MEMORIES`)}`)
175
184
  console.log(` ${style('dim', 'query:')} "${queryPreview}"`)
@@ -183,14 +192,14 @@ export const logger = {
183
192
 
184
193
  memories.forEach((m, i) => {
185
194
  const score = style('green', `${(m.score * 100).toFixed(0)}%`)
186
- const type = style('cyan', m.context_type)
195
+ const emoji = emojiMap[m.context_type?.toLowerCase()] ?? 'πŸ“'
187
196
  const num = style('dim', `${i + 1}.`)
188
197
 
189
198
  const preview = m.content.length > 55
190
199
  ? m.content.slice(0, 55) + style('dim', '...')
191
200
  : m.content
192
201
 
193
- console.log(` ${num} [${score}] ${type}`)
202
+ console.log(` ${num} [${score}] ${emoji}`)
194
203
  console.log(` ${preview}`)
195
204
  })
196
205
  console.log()
@@ -254,6 +263,104 @@ export const logger = {
254
263
  }
255
264
  console.log()
256
265
  },
266
+
267
  /**
   * Log memory retrieval scoring details.
   *
   * Prints a console report of one two-stage filtering run: candidate and
   * already-injected counts, the triggering message, per-stage summaries,
   * and for each selected memory its score, relevance, type, content
   * preview, top scoring components, and tags. Output only; returns nothing.
   */
  logRetrievalScoring(params: {
    totalMemories: number
    currentMessage: string
    alreadyInjected: number
    mustIncludeCount: number
    remainingSlots: number
    finalCount: number
    selectedMemories: Array<{
      content: string
      reasoning: string
      score: number
      relevance_score: number
      importance_weight: number
      context_type: string
      semantic_tags: string[]
      components: {
        trigger: number
        vector: number
        importance: number
        temporal: number
        context: number
        tags: number
        question: number
        emotion: number
        problem: number
        action: number
      }
    }>
  }) {
    const { totalMemories, currentMessage, alreadyInjected, mustIncludeCount, remainingSlots, finalCount, selectedMemories } = params

    console.log()
    console.log(`${timestamp()} ${style('magenta', sym.brain)} ${style('bold', 'TWO-STAGE MEMORY FILTERING')}`)
    console.log(` ${style('dim', 'candidates:')} ${totalMemories} memories`)
    console.log(` ${style('dim', 'already injected:')} ${alreadyInjected}`)

    // Truncate long trigger messages to a single-line preview
    const msgPreview = currentMessage.length > 60
      ? currentMessage.slice(0, 60) + '...'
      : currentMessage
    console.log(` ${style('dim', 'trigger:')} "${msgPreview}"`)
    console.log()

    // Stage summary
    console.log(` ${style('cyan', 'Stage 1:')} ${mustIncludeCount} must-include (critical/action-required)`)
    console.log(` ${style('cyan', 'Stage 2:')} ${remainingSlots} slots for scored selection`)
    console.log(` ${style('green', 'Final:')} ${finalCount} memories selected`)
    console.log()

    // Nothing selected: print a placeholder and stop
    if (selectedMemories.length === 0) {
      console.log(` ${style('dim', 'πŸ“­ No relevant memories for this context')}`)
      console.log()
      return
    }

    // Detailed breakdown
    console.log(style('dim', ' ─'.repeat(30)))
    console.log(` ${style('bold', 'SELECTION DETAILS')}`)
    console.log()

    selectedMemories.forEach((m, i) => {
      const num = style('dim', `${i + 1}.`)
      const score = style('green', `${(m.score * 100).toFixed(0)}%`)
      const relevance = style('cyan', `rel:${(m.relevance_score * 100).toFixed(0)}%`)
      const type = style('yellow', m.context_type.toUpperCase())

      console.log(` ${num} [${score} ${relevance}] ${type}`)

      // Content preview (truncated past 60 chars)
      const preview = m.content.length > 60
        ? m.content.slice(0, 60) + style('dim', '...')
        : m.content
      console.log(` ${style('white', preview)}`)

      // Scoring components: top 3 by value, shown only above a 10% floor
      // (Object.entries returns a fresh array, so sorting in place is safe)
      const components = Object.entries(m.components)
        .sort((a, b) => b[1] - a[1])
        .slice(0, 3)
        .filter(([, v]) => v > 0.1)
        .map(([k, v]) => `${k}:${(v * 100).toFixed(0)}%`)
        .join(', ')

      if (components) {
        console.log(` ${style('dim', 'scores:')} ${components}`)
      }

      // First 3 semantic tags, when present
      if (m.semantic_tags?.length) {
        const tags = m.semantic_tags.slice(0, 3).join(', ')
        console.log(` ${style('dim', 'tags:')} ${tags}`)
      }

      console.log()
    })
  },
257
364
  }
258
365
 
259
366
  export default logger