@ulrichc1/sparn 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,567 @@
1
/**
 * Core memory entry types for Sparn's context optimization engine.
 * Maps to a neuroscience-inspired memory model with decay and state transitions.
 */
/**
 * Confidence state for memory entries.
 * Based on the multi-state synapse model from neuroscience:
 * - 'silent' — low confidence, excluded from context
 * - 'ready'  — medium confidence, included if space permits
 * - 'active' — high confidence, always included
 */
type ConfidenceState = 'silent' | 'ready' | 'active';
10
/**
 * Represents a single context memory entry with neuroscience-inspired metadata.
 *
 * State transitions (thresholds are the defaults; configurable via StatesConfig):
 * - score ≤ 0.3 → silent (not included in context)
 * - 0.3 < score ≤ 0.7 → ready (included if space permits)
 * - score > 0.7 → active (always included)
 * - isBTSP = true → active (bypasses the score check)
 */
interface MemoryEntry {
  /** Unique identifier (UUID v4) */
  id: string;
  /** The actual memory content/context data */
  content: string;
  /** SHA-256 hash of content for deduplication (see hashContent) */
  hash: string;
  /** Unix timestamp (seconds) of creation */
  timestamp: number;
  /** Current engram score (0.0–1.0); decays over time, see EngramScorer */
  score: number;
  /** Time-to-live in seconds remaining */
  ttl: number;
  /** Confidence state derived from score and isBTSP */
  state: ConfidenceState;
  /** Number of times accessed/retrieved */
  accessCount: number;
  /** User-defined tags for categorization */
  tags: string[];
  /** Additional key-value pairs */
  metadata: Record<string, unknown>;
  /** Flag indicating a one-shot learned entry (BTSP); exempt from score-based demotion */
  isBTSP: boolean;
}
43
/**
 * Query filters for memory store operations.
 * All fields are optional; omitted fields do not constrain the query.
 */
interface MemoryQueryFilters {
  /** Restrict results to entries in this confidence state */
  state?: ConfidenceState;
  /** Lower bound on engram score */
  minScore?: number;
  /** Upper bound on engram score */
  maxScore?: number;
  /** Restrict to entries carrying these tags */
  tags?: string[];
  /** Restrict to BTSP (or non-BTSP) entries */
  isBTSP?: boolean;
  /** Maximum number of results to return */
  limit?: number;
  /** Number of results to skip (for pagination) */
  offset?: number;
}
/**
 * State distribution aggregation — counts of entries per confidence state.
 */
interface StateDistribution {
  /** Number of entries in the 'active' state */
  active: number;
  /** Number of entries in the 'ready' state */
  ready: number;
  /** Number of entries in the 'silent' state */
  silent: number;
  /** Total entries counted (active + ready + silent) */
  total: number;
}
64
+
65
/**
 * KV Memory Store Module
 * Implements hippocampal key-value storage with dual index/value tables.
 * Maps to: Hippocampal Key-Value — the hippocampus separates what to store
 * from how to retrieve it.
 */

/**
 * Optimization statistics record.
 * Snake_case field names presumably mirror the SQLite column names — confirm
 * against the storage schema.
 */
interface OptimizationStats {
  /** Record identifier assigned by the store (callers omit it when recording) */
  id: number;
  /** Timestamp of the optimization run */
  timestamp: number;
  /** Token count before optimization */
  tokens_before: number;
  /** Token count after optimization */
  tokens_after: number;
  /** Number of entries pruned during the run */
  entries_pruned: number;
  /** Run duration in milliseconds */
  duration_ms: number;
}
82
/**
 * KV Memory interface — async CRUD plus maintenance and stats operations
 * over the memory store.
 */
interface KVMemory {
  /** Store a memory entry */
  put(entry: MemoryEntry): Promise<void>;
  /** Retrieve a memory entry by ID; resolves null when not found */
  get(id: string): Promise<MemoryEntry | null>;
  /** Query entries matching the given filters */
  query(filters: MemoryQueryFilters): Promise<MemoryEntry[]>;
  /** Delete a memory entry by ID */
  delete(id: string): Promise<void>;
  /** List all entry IDs */
  list(): Promise<string[]>;
  /**
   * Compact the database (remove expired entries).
   * Resolves with a count — presumably the number of entries removed;
   * confirm against the implementation.
   */
  compact(): Promise<number>;
  /** Close the database connection; the instance must not be used afterwards */
  close(): Promise<void>;
  /** Record optimization statistics (the store assigns the id) */
  recordOptimization(stats: Omit<OptimizationStats, 'id'>): Promise<void>;
  /** Get all recorded optimization statistics */
  getOptimizationStats(): Promise<OptimizationStats[]>;
  /** Clear all optimization statistics */
  clearOptimizationStats(): Promise<void>;
}
/**
 * Create a KV Memory store with a SQLite backend.
 *
 * Initializes the database with a dual table schema:
 * - entries_index: fast lookups (id, hash, timestamp, score, ttl, state, accessCount, isBTSP)
 * - entries_value: content storage (id, content, tags, metadata)
 *
 * @param dbPath - Path to the SQLite database file
 * @returns KVMemory instance
 */
declare function createKVMemory(dbPath: string): Promise<KVMemory>;
118
+
119
/**
 * Agent adapter interface types.
 * Enables agent-agnostic design per Article IV.
 */

/**
 * Options for optimization operations.
 */
interface OptimizeOptions {
  /** Dry-run mode (don't modify the memory store) */
  dryRun?: boolean;
  /** Verbose output; also populates OptimizationResult.details */
  verbose?: boolean;
  /** Custom pruning threshold (overrides the configured PruningConfig.threshold) */
  threshold?: number;
}
/**
 * Result of an optimization operation.
 */
interface OptimizationResult {
  /** Optimized context text */
  optimizedContext: string;
  /** Token count before optimization */
  tokensBefore: number;
  /** Token count after optimization */
  tokensAfter: number;
  /** Reduction ratio (0.0–1.0); e.g. 0.4 means 40% fewer tokens */
  reduction: number;
  /** Total entries processed */
  entriesProcessed: number;
  /** Entries kept after optimization */
  entriesKept: number;
  /** Optimization duration in milliseconds */
  durationMs: number;
  /** State distribution after optimization */
  stateDistribution: StateDistribution;
  /** Optional: detailed per-entry information (present when verbose=true) */
  details?: Array<{
    id: string;
    score: number;
    state: string;
    isBTSP: boolean;
    tokens: number;
  }>;
}
164
/**
 * Agent adapter interface.
 * All agent-specific logic must implement this contract, keeping the core
 * engine agent-agnostic.
 */
interface AgentAdapter {
  /**
   * Optimize context using this agent's strategy.
   *
   * @param context - Input context (plain text)
   * @param options - Optimization options
   * @returns Optimization result
   */
  optimize(context: string, options?: OptimizeOptions): Promise<OptimizationResult>;
}
178
+
179
/**
 * Configuration types for Sparn behavior customization.
 */
/**
 * Agent adapter type selector.
 */
type AgentType = 'claude-code' | 'generic';
/**
 * Pruning configuration (see SparsePruner).
 */
interface PruningConfig {
  /** Percentage of top-scored entries to keep (1–100, default: 5) */
  threshold: number;
  /** Aggressiveness scale 0–100 (affects TF-IDF weighting, default: 50) */
  aggressiveness: number;
}
/**
 * Decay configuration (see EngramScorer).
 */
interface DecayConfig {
  /** Default TTL in hours (default: 24) */
  defaultTTL: number;
  /** Decay threshold for pruning (0.0–1.0, default: 0.95) */
  decayThreshold: number;
}
/**
 * Confidence state threshold configuration (see ConfidenceStates).
 */
interface StatesConfig {
  /** Score threshold for the active state (default: 0.7) */
  activeThreshold: number;
  /** Score threshold for the ready state (default: 0.3) */
  readyThreshold: number;
}
/**
 * UI configuration.
 */
interface UIConfig {
  /** Enable colored output (default: true) */
  colors: boolean;
  /** Enable sound effects (default: false) */
  sounds: boolean;
  /** Verbose logging (default: false) */
  verbose: boolean;
}
/**
 * Complete Sparn configuration.
 */
interface SparnConfig {
  pruning: PruningConfig;
  decay: DecayConfig;
  states: StatesConfig;
  agent: AgentType;
  ui: UIConfig;
  /** Auto-consolidation interval in hours, or null for manual-only consolidation */
  autoConsolidate: number | null;
}
/**
 * Default configuration values (the defaults documented on the fields above).
 */
declare const DEFAULT_CONFIG: SparnConfig;
240
+
241
/**
 * Claude Code Adapter — Claude Code-specific optimization pipeline.
 *
 * Optimized for Claude Code's conversation patterns, tool use, and context
 * management. Implements the same AgentAdapter interface as the generic
 * adapter but with Claude-specific tuning.
 */

/**
 * Create a Claude Code adapter instance.
 * @param memory - KV memory store
 * @param config - Sparn configuration
 * @returns AgentAdapter instance optimized for Claude Code
 */
declare function createClaudeCodeAdapter(memory: KVMemory, config: SparnConfig): AgentAdapter;

/**
 * Generic Adapter — agent-agnostic optimization pipeline.
 *
 * Orchestrates all 6 neuroscience modules to optimize context memory.
 */

/**
 * Create a generic adapter instance.
 * @param memory - KV memory store
 * @param config - Sparn configuration
 * @returns AgentAdapter instance
 */
declare function createGenericAdapter(memory: KVMemory, config: SparnConfig): AgentAdapter;
269
+
270
/**
 * BTSP Embedder — implements behavioral timescale synaptic plasticity.
 *
 * Neuroscience: one-shot learning from critical events (errors, conflicts).
 * Application: detect high-importance patterns and mark entries for permanent
 * retention (exempt from score-based pruning).
 */

interface BTSPEmbedder {
  /**
   * Detect whether content contains BTSP patterns (errors, stack traces,
   * conflicts, git diffs).
   * @param content - Content to analyze
   * @returns True if a BTSP pattern is detected
   */
  detectBTSP(content: string): boolean;
  /**
   * Create a new memory entry marked as BTSP (one-shot learned).
   * @param content - Entry content
   * @param tags - Optional tags
   * @param metadata - Optional metadata
   * @returns BTSP-marked memory entry (isBTSP = true)
   */
  createBTSPEntry(content: string, tags?: string[], metadata?: Record<string, unknown>): MemoryEntry;
}
/**
 * Create a BTSP embedder instance.
 * @returns BTSPEmbedder instance
 */
declare function createBTSPEmbedder(): BTSPEmbedder;
298
+
299
/**
 * Confidence States — implements multi-state synapses.
 *
 * Neuroscience: synapses exist in three states: silent, ready (potentiated),
 * active.
 * Application: classify memory entries by score into silent/ready/active.
 */

interface ConfidenceStatesConfig {
  /** Score threshold for the active state (e.g., 0.7) */
  activeThreshold: number;
  /** Score threshold for the ready state (e.g., 0.3) */
  readyThreshold: number;
}
interface ConfidenceStates {
  /**
   * Calculate the state implied by an entry's score and BTSP flag
   * (does not mutate the entry).
   * @param entry - Memory entry
   * @returns Confidence state
   */
  calculateState(entry: MemoryEntry): ConfidenceState;
  /**
   * Transition an entry to the correct state based on its score.
   * @param entry - Entry to transition
   * @returns Entry with updated state
   */
  transition(entry: MemoryEntry): MemoryEntry;
  /**
   * Get the distribution of states across all entries.
   * @param entries - All memory entries
   * @returns State distribution with per-state counts
   */
  getDistribution(entries: MemoryEntry[]): StateDistribution;
}
/**
 * Create a confidence states manager.
 * @param config - States configuration (thresholds)
 * @returns ConfidenceStates instance
 */
declare function createConfidenceStates(config: ConfidenceStatesConfig): ConfidenceStates;
338
+
339
/**
 * Engram Scorer — implements engram theory (memory decay).
 *
 * Neuroscience: memories fade over time without reinforcement.
 * Application: apply an exponential decay formula to memory scores based on
 * age and access count.
 *
 * Formula: decay = 1 - e^(-age/TTL)
 * Score adjustment: score_new = score_old * (1 - decay) + (accessCount bonus)
 */

interface EngramScorerConfig {
  /** Default TTL in hours for new entries (MemoryEntry.ttl is in seconds — conversion happens in the implementation, presumably) */
  defaultTTL: number;
  /** Decay threshold (0.0–1.0) above which entries are marked for pruning */
  decayThreshold: number;
}
interface EngramScorer {
  /**
   * Calculate the current score for an entry based on decay and access count.
   * @param entry - Memory entry to score
   * @param currentTime - Current timestamp in milliseconds (for testing).
   *   NOTE(review): MemoryEntry.timestamp is documented in seconds — confirm
   *   the expected unit against the implementation.
   * @returns Updated score (0.0–1.0)
   */
  calculateScore(entry: MemoryEntry, currentTime?: number): number;
  /**
   * Refresh an entry's TTL to the default value.
   * @param entry - Entry to refresh
   * @returns Entry with refreshed TTL and timestamp
   */
  refreshTTL(entry: MemoryEntry): MemoryEntry;
  /**
   * Calculate the decay factor based on age and TTL.
   * @param ageInSeconds - Age of the entry in seconds
   * @param ttlInSeconds - TTL in seconds
   * @returns Decay factor (0.0 = fresh, 1.0 = fully decayed)
   */
  calculateDecay(ageInSeconds: number, ttlInSeconds: number): number;
}
/**
 * Create an engram scorer instance.
 * @param config - Scorer configuration
 * @returns EngramScorer instance
 */
declare function createEngramScorer(config: EngramScorerConfig): EngramScorer;
383
+
384
/**
 * Sleep compressor (consolidation) types.
 * Implements the sleep replay principle from neuroscience.
 */

/**
 * Result of a consolidation operation.
 */
interface ConsolidateResult {
  /** Entries kept after consolidation */
  kept: MemoryEntry[];
  /** Entries removed during consolidation */
  removed: MemoryEntry[];
  /** Entry count before consolidation */
  entriesBefore: number;
  /** Entry count after consolidation */
  entriesAfter: number;
  /** Number of decayed entries removed */
  decayedRemoved: number;
  /** Number of duplicate entries merged away */
  duplicatesRemoved: number;
  /** Compression ratio (0.0–1.0) */
  compressionRatio: number;
  /** Consolidation duration in milliseconds */
  durationMs: number;
}
/**
 * Group of duplicate entries.
 */
interface DuplicateGroup {
  /** Entries in this duplicate group */
  entries: MemoryEntry[];
  /** Similarity score within the group (0.0–1.0) */
  similarity: number;
}

/**
 * Sleep Compressor — implements the sleep replay principle.
 *
 * Neuroscience: during sleep, the brain consolidates memories by replaying
 * important ones and discarding irrelevant information.
 * Application: periodic consolidation removes decayed entries and merges
 * duplicates.
 */

interface SleepCompressor {
  /**
   * Consolidate entries: remove decayed entries, merge duplicates.
   * @param entries - All memory entries
   * @returns Consolidation result
   */
  consolidate(entries: MemoryEntry[]): ConsolidateResult;
  /**
   * Find duplicate entries (exact hash match or near-duplicate by similarity).
   * @param entries - Memory entries
   * @returns Groups of duplicates
   */
  findDuplicates(entries: MemoryEntry[]): DuplicateGroup[];
  /**
   * Merge duplicate entries, keeping the highest-scored one per group.
   * @param groups - Duplicate groups
   * @returns Merged entries
   */
  mergeDuplicates(groups: DuplicateGroup[]): MemoryEntry[];
}
/**
 * Create a sleep compressor instance.
 * @returns SleepCompressor instance
 */
declare function createSleepCompressor(): SleepCompressor;
453
+
454
/**
 * Sparse pruner types.
 * Implements the sparse coding principle from neuroscience.
 */

/**
 * Result of a pruning operation.
 */
interface PruneResult {
  /** Entries kept after pruning */
  kept: MemoryEntry[];
  /** Entries removed during pruning */
  removed: MemoryEntry[];
  /** Original token count before pruning */
  originalTokens: number;
  /** Token count after pruning */
  prunedTokens: number;
}

/**
 * Sparse Pruner — implements the sparse coding principle.
 *
 * Neuroscience: only 2-5% of neurons fire at any given time.
 * Application: keep only the top ~5% most relevant context entries by
 * TF-IDF score (percentage set by SparsePrunerConfig.threshold).
 */

interface SparsePrunerConfig {
  /** Percentage threshold for pruning (e.g., 5 = keep top 5%) */
  threshold: number;
}
interface SparsePruner {
  /**
   * Prune entries to keep only the top N% by relevance score.
   * @param entries - Memory entries to prune
   * @returns Result with kept/removed entries and token counts
   */
  prune(entries: MemoryEntry[]): PruneResult;
  /**
   * Calculate the TF-IDF relevance score for a single entry.
   * @param entry - Entry to score
   * @param allEntries - All entries (corpus for the IDF calculation)
   * @returns Relevance score (0.0–1.0)
   */
  scoreEntry(entry: MemoryEntry, allEntries: MemoryEntry[]): number;
}
/**
 * Create a sparse pruner instance.
 * @param config - Pruner configuration
 * @returns SparsePruner instance
 */
declare function createSparsePruner(config: SparsePrunerConfig): SparsePruner;
505
+
506
/**
 * Content hashing utilities.
 * Uses SHA-256 for deduplication.
 */
/**
 * Generate a SHA-256 hash of content for deduplication.
 *
 * @param content - Content to hash
 * @returns 64-character lowercase hex string (SHA-256 digest)
 *
 * @example
 * ```typescript
 * const hash = hashContent('Hello world');
 * console.log(hash.length); // 64
 * ```
 */
declare function hashContent(content: string): string;
523
+
524
/**
 * Logging utility.
 * Simple console wrapper with log levels.
 */
type LogLevel = 'debug' | 'info' | 'warn' | 'error';
/**
 * Logger interface — one method per log level, printf-style extra args.
 */
interface Logger {
  debug(message: string, ...args: unknown[]): void;
  info(message: string, ...args: unknown[]): void;
  warn(message: string, ...args: unknown[]): void;
  error(message: string, ...args: unknown[]): void;
}
/**
 * Create a logger with optional verbosity control.
 *
 * @param verbose - Enable debug-level logging (suppressed otherwise)
 * @returns Logger instance
 */
declare function createLogger(verbose?: boolean): Logger;
545
+
546
/**
 * Token estimation utilities.
 * Uses a whitespace heuristic (~90% accuracy vs the GPT tokenizer).
 */
/**
 * Estimate the token count for text using a heuristic.
 *
 * Approximation: 1 token ≈ 4 chars or 0.75 words.
 * Provides ~90% accuracy compared to the GPT tokenizer — sufficient for
 * optimization heuristics, not for exact billing/limits.
 *
 * @param text - Text to count
 * @returns Estimated token count
 *
 * @example
 * ```typescript
 * const tokens = estimateTokens('Hello world');
 * console.log(tokens); // ~2
 * ```
 */
declare function estimateTokens(text: string): number;
566
+
567
// Public API surface of the package; type-only symbols use the `type` export modifier.
export { type AgentAdapter, type AgentType, type BTSPEmbedder, type ConfidenceState, type ConfidenceStates, type ConfidenceStatesConfig, type ConsolidateResult, DEFAULT_CONFIG, type DecayConfig, type DuplicateGroup, type EngramScorer, type EngramScorerConfig, type KVMemory, type LogLevel, type Logger, type MemoryEntry, type MemoryQueryFilters, type OptimizationResult, type OptimizeOptions, type PruneResult, type PruningConfig, type SleepCompressor, type SparnConfig, type SparsePruner, type SparsePrunerConfig, type StateDistribution, type StatesConfig, type UIConfig, createBTSPEmbedder, createClaudeCodeAdapter, createConfidenceStates, createEngramScorer, createGenericAdapter, createKVMemory, createLogger, createSleepCompressor, createSparsePruner, estimateTokens, hashContent };