@docscode/core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "name": "@docscode/core",
3
+ "version": "1.0.0",
4
+ "description": "Core Kairo AI Collaborator Engine",
5
+ "main": "./dist/index.cjs",
6
+ "module": "./dist/index.js",
7
+ "types": "./dist/index.d.ts",
8
+ "type": "module",
9
+ "scripts": {
10
+ "build": "tsup src/index.ts --format cjs,esm --dts --clean --shims",
11
+ "dev": "tsup src/index.ts --format cjs,esm --watch --dts --shims",
12
+ "test": "vitest run"
13
+ },
14
+ "dependencies": {
15
+ "yjs": "^13.6.30",
16
+ "y-protocols": "^1.0.7",
17
+ "y-webrtc": "^10.3.0",
18
+ "@huggingface/transformers": "^3.0.0",
19
+ "@tiptap/core": "^2.11.2"
20
+ },
21
+ "devDependencies": {
22
+ "@types/node": "^25.6.0"
23
+ }
24
+ }
@@ -0,0 +1,56 @@
1
+ import sys
2
+ import json
3
+ import base64
4
+ from io import BytesIO
5
+
6
# Try to import docling; if it is missing, record that fact instead of
# crashing, so convert_to_canonical can return a structured JSON error.
try:
    from docling.document_converter import DocumentConverter
    DOCLING_AVAILABLE = True
except ImportError:
    DOCLING_AVAILABLE = False
12
+
13
def convert_to_canonical(source_path_or_buffer):
    """Convert a document into a minimal, JSON-serializable canonical form.

    source_path_or_buffer: a filesystem path string or a BytesIO buffer,
        passed straight through to docling's DocumentConverter.
        NOTE(review): docling's convert() may expect a path or a
        DocumentStream rather than a raw BytesIO — confirm against the
        installed docling version.

    Returns a dict with "metadata" and "content" keys on success, or a
    dict with a single "error" key when docling is not installed.
    """
    if not DOCLING_AVAILABLE:
        return {"error": "docling library not installed. Please run 'pip install docling'"}

    converter = DocumentConverter()
    result = converter.convert(source_path_or_buffer)

    # Simple conversion to a serializable canonical format.
    doc = {
        "metadata": {
            # Fall back to "Untitled" when the input carries no file path.
            "title": getattr(result.input, "file_path", "Untitled"),
            "format": result.input.format.name if hasattr(result.input, "format") else "unknown"
        },
        "content": []
    }

    # Extract blocks (this is a simplified example): every element exposing
    # a .text attribute becomes one paragraph ("p") block; everything else
    # (tables, images, ...) is currently dropped.
    for element in result.document.elements:
        if hasattr(element, "text"):
            doc["content"].append({
                "type": "p",
                "text": element.text
            })

    return doc
38
+
39
if __name__ == "__main__":
    # CLI entry point: accepts either a file path or "base64:<data>".
    # Output (success or error) is always one JSON object on stdout so the
    # Node.js caller can parse it; failures additionally exit non-zero.
    if len(sys.argv) < 2:
        print(json.dumps({"error": "Usage: python docling_bridge.py <file_path_or_base64>"}))
        sys.exit(1)

    input_data = sys.argv[1]

    try:
        # Check if input is a path or inline base64-encoded content.
        if input_data.startswith("base64:"):
            content = base64.b64decode(input_data[7:])
            result = convert_to_canonical(BytesIO(content))
        else:
            result = convert_to_canonical(input_data)

        print(json.dumps(result))
    except Exception as e:
        print(json.dumps({"error": str(e)}))
        # Fix: also signal failure via the exit status — previously errors
        # exited 0, so callers checking the return code saw "success".
        sys.exit(1)
@@ -0,0 +1,209 @@
1
+ import * as Y from 'yjs';
2
+ import { Awareness } from 'y-protocols/awareness';
3
+ import { KairoPlugin } from './KairoPlugin.js';
4
+ import { PromptCache } from './PromptCache.js';
5
+ import { StreamBuffer } from './StreamBuffer.js';
6
+ import type { LLMAdapter } from './LLMAdapter.js';
7
+ import { SuggestionManager } from './suggestion-manager.js';
8
+
9
/** Lifecycle states broadcast to human peers via the awareness protocol. */
export type KairoStatus = 'idle' | 'thinking' | 'writing' | 'reviewing';

/** Identity/capability card published under the 'kairo' awareness field. */
export interface KairoMetadata {
  name: string;
  version: string;
  capabilities: string[];
  /** Model identifier of the configured LLM adapter, if any. */
  model?: string;
}

/** Constructor options for AICollaborator. */
export interface AICollaboratorOptions {
  /** Fixed client ID; a random one is generated when omitted. */
  clientID?: number;
  /** Max entries held by the prompt LRU cache (default 50). */
  cacheSize?: number;
  /** LLM backend; streaming/summarization throw when not provided. */
  llm?: LLMAdapter;
  /** Flush interval (ms) for batching streamed tokens (default 50). */
  streamFlushMs?: number;
}
24
+
25
+ /**
26
+ * AICollaborator — The AI peer inside your Yjs document.
27
+ *
28
+ * Appears as a real-time participant in the awareness protocol.
29
+ * Streams LLM tokens directly into Y.Text CRDT operations.
30
+ * Human peers see a live cursor and status indicator — just like a human collaborator.
31
+ */
32
+ export class AICollaborator {
33
+ public readonly doc: Y.Doc;
34
+ public readonly awareness: Awareness;
35
+ public readonly clientID: number;
36
+ public readonly suggestions: SuggestionManager;
37
+ public readonly cache: PromptCache;
38
+ private _status: KairoStatus = 'idle';
39
+ private _plugins: KairoPlugin[] = [];
40
+ private _llm?: LLMAdapter;
41
+ private _streamFlushMs: number;
42
+
43
+ constructor(doc: Y.Doc, awareness: Awareness, options: AICollaboratorOptions = {}) {
44
+ this.doc = doc;
45
+ this.awareness = awareness;
46
+ this.clientID = options.clientID ?? Math.floor(Math.random() * 1_000_000);
47
+ this.cache = new PromptCache(options.cacheSize ?? 50);
48
+ this.suggestions = new SuggestionManager(this.doc);
49
+ this._llm = options.llm;
50
+ this._streamFlushMs = options.streamFlushMs ?? 50;
51
+
52
+ this._updateAwareness();
53
+ console.log(`[Kairo] AICollaborator initialized — clientID: ${this.clientID}, model: ${this._llm?.model ?? 'none'}`);
54
+ }
55
+
56
+ // ─── Status ──────────────────────────────────────────────────────────
57
+
58
+ public setStatus(status: KairoStatus) {
59
+ this._status = status;
60
+ this._updateAwareness();
61
+ }
62
+
63
+ public setThinking() { this.setStatus('thinking'); }
64
+ public setWriting() { this.setStatus('writing'); }
65
+ public setReviewing() { this.setStatus('reviewing'); }
66
+ public setIdle() { this.setStatus('idle'); }
67
+
68
+ private _updateAwareness() {
69
+ this.awareness.setLocalStateField('kairo', {
70
+ status: this._status,
71
+ metadata: {
72
+ name: 'Kairo',
73
+ version: '1.0.0',
74
+ capabilities: ['autocomplete', 'summarization', 'streaming', 'tracked-changes'],
75
+ model: this._llm?.model,
76
+ } as KairoMetadata,
77
+ lastUpdate: Date.now(),
78
+ });
79
+ }
80
+
81
+ // ─── Plugin System ────────────────────────────────────────────────────
82
+
83
+ public registerPlugin(plugin: KairoPlugin) {
84
+ this._plugins.push(plugin);
85
+ plugin.setup();
86
+ }
87
+
88
+ // ─── CRDT Write Operations ────────────────────────────────────────────
89
+
90
+ /**
91
+ * Insert text with AI attribution markers.
92
+ * Consuming editors can style/filter AI-generated content using these markers.
93
+ */
94
+ public insertWithAttribution(text: Y.Text, index: number, content: string) {
95
+ this.doc.transact(() => {
96
+ text.insert(index, content, {
97
+ 'ai-generated': true,
98
+ 'ai-client-id': this.clientID,
99
+ 'ai-timestamp': Date.now(),
100
+ 'ai-model': this._llm?.model ?? 'unknown',
101
+ });
102
+ }, this.clientID);
103
+ }
104
+
105
+ /**
106
+ * Stream LLM tokens directly into the document as CRDT operations.
107
+ *
108
+ * Modes:
109
+ * - 'insert': appends after current position
110
+ * - 'continue': appends to end of document
111
+ * - 'rewrite': clears existing text and rewrites
112
+ *
113
+ * Token cadence: tokens are batched with a 50ms flush interval for
114
+ * smooth real-time rendering without CRDT overload.
115
+ */
116
+ public async streamToDoc(
117
+ yText: Y.Text,
118
+ prompt: string,
119
+ options: {
120
+ systemPrompt?: string;
121
+ mode?: 'insert' | 'continue' | 'rewrite';
122
+ insertAt?: number;
123
+ signal?: AbortSignal;
124
+ onToken?: (token: string) => void;
125
+ } = {}
126
+ ): Promise<{ tokensWritten: number; durationMs: number }> {
127
+ if (!this._llm) {
128
+ throw new Error('[Kairo] No LLM configured. Pass llm to AICollaborator constructor.');
129
+ }
130
+
131
+ const startTime = Date.now();
132
+ const mode = options.mode ?? 'continue';
133
+ let startIndex: number;
134
+
135
+ if (mode === 'rewrite') {
136
+ this.doc.transact(() => yText.delete(0, yText.length));
137
+ startIndex = 0;
138
+ } else if (mode === 'insert' && options.insertAt !== undefined) {
139
+ startIndex = options.insertAt;
140
+ } else {
141
+ startIndex = yText.length;
142
+ }
143
+
144
+ const buffer = new StreamBuffer(yText, this.clientID, startIndex, this._streamFlushMs);
145
+ this.setThinking();
146
+
147
+ let tokensWritten = 0;
148
+
149
+ try {
150
+ const systemPrompt = options.systemPrompt ??
151
+ 'You are Kairo, an AI writing collaborator embedded in a shared document. ' +
152
+ 'Output clean prose only. No preamble, no meta-commentary.';
153
+
154
+ this.setWriting();
155
+
156
+ for await (const token of this._llm.stream(prompt, {
157
+ systemPrompt,
158
+ signal: options.signal,
159
+ })) {
160
+ buffer.push(token);
161
+ tokensWritten += token.length;
162
+ options.onToken?.(token);
163
+ }
164
+
165
+ buffer.flush();
166
+ } catch (err) {
167
+ buffer.stop();
168
+ this.setIdle();
169
+ throw err;
170
+ }
171
+
172
+ buffer.stop();
173
+ this.setIdle();
174
+
175
+ return { tokensWritten, durationMs: Date.now() - startTime };
176
+ }
177
+
178
+ /**
179
+ * One-shot: summarize a block of text and return the result (no streaming).
180
+ */
181
+ public async summarize(text: string, options: { maxTokens?: number } = {}): Promise<string> {
182
+ if (!this._llm) throw new Error('[Kairo] No LLM configured.');
183
+ const cached = this.cache.get(text);
184
+ if (cached) return cached;
185
+
186
+ this.setThinking();
187
+ try {
188
+ const result = await this._llm.complete(
189
+ `Summarize the following document content concisely:\n\n${text}`,
190
+ {
191
+ systemPrompt: 'You are a document summarization engine. Output only the summary.',
192
+ maxTokens: options.maxTokens ?? 300,
193
+ }
194
+ );
195
+ this.cache.set(text, result);
196
+ return result;
197
+ } finally {
198
+ this.setIdle();
199
+ }
200
+ }
201
+
202
+ // ─── Cleanup ──────────────────────────────────────────────────────────
203
+
204
+ public destroy() {
205
+ this._plugins.forEach(p => p.destroy());
206
+ this.awareness.setLocalStateField('kairo', null);
207
+ console.log(`[Kairo] AICollaborator ${this.clientID} destroyed.`);
208
+ }
209
+ }
@@ -0,0 +1,108 @@
1
+ import * as Y from 'yjs';
2
+ import { KairoPlugin } from './KairoPlugin.js';
3
+ import { AICollaborator } from './AICollaborator.js';
4
+ import type { LLMAdapter } from './LLMAdapter.js';
5
+ import { StreamBuffer } from './StreamBuffer.js';
6
+
7
+ /**
8
+ * AutocompletePlugin — Trigger AI completions with '...' in the document.
9
+ * When a human types '...' at the end of any text, Kairo's AI peer takes over
10
+ * and streams a continuation directly into the shared document.
11
+ */
12
+ export class AutocompletePlugin extends KairoPlugin {
13
+ private targetTextName: string;
14
+ private adapter: LLMAdapter;
15
+ private _deepObserver: (events: Y.YEvent<any>[]) => void;
16
+
17
+ constructor(ai: AICollaborator, adapter: LLMAdapter, targetTextName: string = 'content') {
18
+ super(ai);
19
+ this.adapter = adapter;
20
+ this.targetTextName = targetTextName;
21
+ this._deepObserver = (events: Y.YEvent<any>[]) => {
22
+ for (const event of events) {
23
+ if (event instanceof Y.YTextEvent) {
24
+ this._handleUpdate(event);
25
+ }
26
+ }
27
+ };
28
+ }
29
+
30
+ public setup() {
31
+ try {
32
+ // Deep observe canonical blocks
33
+ const arr = this.ai.doc.getArray(this.targetTextName);
34
+ arr.observeDeep(this._deepObserver);
35
+ } catch {
36
+ // Fallback
37
+ }
38
+ console.log(`[Kairo] AutocompletePlugin initialized — model: ${this.adapter.model}`);
39
+ }
40
+
41
+ public destroy() {
42
+ try {
43
+ const arr = this.ai.doc.getArray(this.targetTextName);
44
+ arr.unobserveDeep(this._deepObserver);
45
+ } catch { }
46
+ }
47
+
48
+
49
+ private _handleUpdate(event: Y.YTextEvent) {
50
+ if (!this.enabled) return;
51
+ // Ignore changes from the AI itself to avoid infinite loops
52
+ if (event.transaction.origin === this.ai.clientID) return;
53
+
54
+ const text = event.target as Y.Text;
55
+ const str = text.toString();
56
+
57
+ // Trigger on '...' at end of text (the autocomplete signal)
58
+ if (str.endsWith('...')) {
59
+ this._generateSuggestion(text);
60
+ }
61
+ }
62
+
63
+ private async _generateSuggestion(text: Y.Text) {
64
+ this.ai.setThinking();
65
+
66
+ try {
67
+ const currentText = text.toString();
68
+ const prompt = currentText.slice(0, -3); // strip '...'
69
+
70
+ // Check LRU cache first
71
+ const cached = this.ai.cache.get(prompt);
72
+ if (cached) {
73
+ this.ai.doc.transact(() => {
74
+ text.delete(text.length - 3, 3);
75
+ text.insert(text.length, cached, { 'ai-generated': true, 'ai-client-id': this.ai.clientID });
76
+ }, this.ai.clientID);
77
+ return;
78
+ }
79
+
80
+ // Remove the '...' trigger
81
+ this.ai.doc.transact(() => {
82
+ text.delete(text.length - 3, 3);
83
+ }, this.ai.clientID);
84
+
85
+ this.ai.setWriting();
86
+
87
+ const streamBuffer = new StreamBuffer(text, this.ai.clientID, text.length);
88
+ let fullResult = '';
89
+
90
+ const systemPrompt =
91
+ 'You are an inline writing assistant. ' +
92
+ 'Continue the text naturally in the same style and voice. ' +
93
+ 'Output only the continuation — no preamble.';
94
+
95
+ for await (const token of this.adapter.stream(prompt, { systemPrompt })) {
96
+ fullResult += token;
97
+ streamBuffer.push(token);
98
+ }
99
+
100
+ streamBuffer.flush();
101
+ this.ai.cache.set(prompt, fullResult);
102
+ } catch (error) {
103
+ console.error('[Kairo] Autocomplete error:', error);
104
+ } finally {
105
+ this.ai.setIdle();
106
+ }
107
+ }
108
+ }
@@ -0,0 +1,113 @@
1
+ import * as Y from 'yjs';
2
+
3
/**
 * ConflictResolver — conflict *observation* for Kairo documents.
 *
 * Yjs itself merges concurrent edits deterministically; this class does not
 * reorder or roll anything back. It watches transactions, classifies each
 * one as human- or AI-originated via the transaction origin, and emits
 * ConflictEvent records so consumers can build policies (human precedence,
 * tracked AI changes, ...) on top. Per-character attribution is available
 * separately through the 'ai-generated' formatting attributes that AI
 * writes carry (see MergePolicy below).
 */
14
+
15
/** A record describing one observed human/AI edit interaction. */
export interface ConflictEvent {
  /** Classification of the interaction. */
  type: 'human_overrides_ai' | 'ai_deferred' | 'concurrent_edit';
  /** Document position of the conflict (0 when not tracked). */
  position: number;
  /** Client ID of the human participant (-1 when unknown). */
  humanClientId: number;
  /** Client ID of the AI participant (-1 when none involved). */
  aiClientId: number;
  /** Wall-clock time the event was recorded (ms since epoch). */
  timestamp: number;
}

/** Callback invoked for every emitted ConflictEvent. */
export type ConflictHandler = (event: ConflictEvent) => void;
24
+
25
+ export class ConflictResolver {
26
+ private conflictLog: ConflictEvent[] = [];
27
+ private handlers: ConflictHandler[] = [];
28
+ private aiClientIds: Set<number>;
29
+
30
+ constructor(private doc: Y.Doc, aiClientIds: number[] = []) {
31
+ this.aiClientIds = new Set(aiClientIds);
32
+ this._observe();
33
+ }
34
+
35
+ /** Register a client ID as an AI participant */
36
+ registerAI(clientId: number): void {
37
+ this.aiClientIds.add(clientId);
38
+ }
39
+
40
+ /** Listen for conflict events */
41
+ onConflict(handler: ConflictHandler): void {
42
+ this.handlers.push(handler);
43
+ }
44
+
45
+ /** Get the conflict log */
46
+ getLog(): ConflictEvent[] {
47
+ return [...this.conflictLog];
48
+ }
49
+
50
+ /** Clear the conflict log */
51
+ clearLog(): void {
52
+ this.conflictLog = [];
53
+ }
54
+
55
+ private _observe(): void {
56
+ this.doc.on('afterTransaction', (tr: Y.Transaction) => {
57
+ // Skip if only one origin (no concurrent edit)
58
+ if (!tr.changed.size) return;
59
+
60
+ // For each changed type, analyze the origin
61
+ tr.changed.forEach((_changedKeys, type) => {
62
+ if (!(type instanceof Y.Text)) return;
63
+
64
+ const origin = tr.origin;
65
+ const isAI = this.aiClientIds.has(origin as number);
66
+
67
+ if (!isAI) {
68
+ // Human edit — check if we should defer AI operations
69
+ const event: ConflictEvent = {
70
+ type: 'concurrent_edit',
71
+ position: 0,
72
+ humanClientId: origin as number,
73
+ aiClientId: -1,
74
+ timestamp: Date.now(),
75
+ };
76
+ this._emit(event);
77
+ }
78
+ });
79
+ });
80
+ }
81
+
82
+ private _emit(event: ConflictEvent): void {
83
+ this.conflictLog.push(event);
84
+ if (this.conflictLog.length > 500) {
85
+ this.conflictLog = this.conflictLog.slice(-500);
86
+ }
87
+ for (const handler of this.handlers) {
88
+ try { handler(event); } catch { /* never let conflict handler crash the doc */ }
89
+ }
90
+ }
91
+ }
92
+
93
+ /**
94
+ * MergePolicy — Determines how concurrent AI and human edits are resolved.
95
+ *
96
+ * Kairo uses "human-precedence linear merge":
97
+ * - Human text is always displayed first at the same logical position
98
+ * - AI suggestions appear as tracked changes until accepted
99
+ * - This matches the mental model of track-changes in Word/Google Docs
100
+ */
101
+ export class MergePolicy {
102
+ static readonly HUMAN_WINS = 'human_wins';
103
+ static readonly LAST_WRITE_WINS = 'last_write_wins';
104
+ static readonly AI_WINS = 'ai_wins';
105
+
106
+ static isAIGenerated(attrs: Record<string, any> | null | undefined): boolean {
107
+ return attrs?.['ai-generated'] === true;
108
+ }
109
+
110
+ static getAuthor(attrs: Record<string, any> | null | undefined): 'ai' | 'human' {
111
+ return this.isAIGenerated(attrs) ? 'ai' : 'human';
112
+ }
113
+ }
@@ -0,0 +1,27 @@
1
+ import { AICollaborator } from './AICollaborator.js';
2
+
3
+ export abstract class KairoPlugin {
4
+ protected ai: AICollaborator;
5
+ public enabled: boolean = true;
6
+
7
+ constructor(ai: AICollaborator) {
8
+ this.ai = ai;
9
+ }
10
+
11
+ /**
12
+ * Called when the plugin is registered with the AICollaborator
13
+ */
14
+ public abstract setup(): void;
15
+
16
+ /**
17
+ * Called to cleanup the plugin
18
+ */
19
+ public abstract destroy(): void;
20
+
21
+ /**
22
+ * Enable/Disable the plugin
23
+ */
24
+ public setEnabled(enabled: boolean) {
25
+ this.enabled = enabled;
26
+ }
27
+ }
@@ -0,0 +1,27 @@
1
/** Options controlling a single LLM streaming call. */
export interface StreamOptions {
  /** Prepended system instruction for the model. */
  systemPrompt?: string;
  /** Upper bound on generated tokens (adapter default when omitted). */
  maxTokens?: number;
  /** Sampling temperature (adapter default when omitted). */
  temperature?: number;
  /** Abort the in-flight request (e.g. user cancelled the stream). */
  signal?: AbortSignal;
}

/** Options for non-streaming completions (same shape as StreamOptions). */
export interface CompleteOptions extends StreamOptions {}

/**
 * LLMAdapter — Kairo's universal LLM interface.
 * Implement this to plug any model into the CRDT streaming pipeline.
 * Zero SDK dependency by design — uses raw fetch for SSE streaming.
 */
export interface LLMAdapter {
  /** Provider identifier, e.g. 'openai' or 'mock'. */
  readonly provider: string;
  /** Model identifier, surfaced in awareness metadata and attribution. */
  readonly model: string;
  /**
   * Stream tokens one-by-one as an async generator.
   * This feeds directly into StreamBuffer → Y.Text CRDT operations.
   */
  stream(prompt: string, options?: StreamOptions): AsyncGenerator<string>;
  /** Non-streaming fallback for summarization / one-shot tasks */
  complete(prompt: string, options?: CompleteOptions): Promise<string>;
  /** Optional: embed text for semantic search / context retrieval */
  embed?(text: string): Promise<number[]>;
}
@@ -0,0 +1,45 @@
1
+ import type { LLMAdapter, StreamOptions } from './LLMAdapter.js';
2
+
3
+ /**
4
+ * MockLLMAdapter — Test/offline LLM adapter.
5
+ *
6
+ * Returns deterministic responses without any network call.
7
+ * Perfect for unit tests and offline development.
8
+ */
9
+ export class MockLLMAdapter implements LLMAdapter {
10
+ readonly provider = 'mock';
11
+ readonly model: string;
12
+ private delay: number;
13
+
14
+ constructor(options: { model?: string; delay?: number } = {}) {
15
+ this.model = options.model ?? 'mock-gpt';
16
+ this.delay = options.delay ?? 100;
17
+ }
18
+
19
+ async *stream(prompt: string, options?: StreamOptions): AsyncGenerator<string> {
20
+ const response = this._respond(prompt);
21
+ const words = response.split(' ');
22
+ for (let i = 0; i < words.length; i++) {
23
+ await new Promise(r => setTimeout(r, this.delay / words.length));
24
+ yield words[i] + (i < words.length - 1 ? ' ' : '');
25
+ }
26
+ }
27
+
28
+ async complete(prompt: string, options?: StreamOptions): Promise<string> {
29
+ await new Promise(r => setTimeout(r, this.delay));
30
+ return this._respond(prompt);
31
+ }
32
+
33
+ async embed(text: string): Promise<number[]> {
34
+ // Deterministic mock embedding (128-dim)
35
+ return Array.from({ length: 128 }, (_, i) => Math.sin(text.charCodeAt(i % text.length) * (i + 1)));
36
+ }
37
+
38
+ private _respond(prompt: string): string {
39
+ const p = prompt.toLowerCase();
40
+ if (p.includes('summar')) return 'This document discusses key concepts with clarity and precision.';
41
+ if (p.includes('improve') || p.includes('rewrite')) return 'The refined version presents ideas more effectively.';
42
+ if (p.includes('continue')) return ' Furthermore, the analysis reveals several important implications.';
43
+ return `[Mock AI response to: "${prompt.slice(0, 60)}..."]`;
44
+ }
45
+ }
@@ -0,0 +1,76 @@
1
+ /**
2
+ * PromptCache — LRU cache for LLM prompts.
3
+ *
4
+ * Prevents re-querying the LLM for identical prompts within a session.
5
+ * Critical for summarization and autocomplete performance.
6
+ *
7
+ * Uses a Map-based LRU: entries at the front are most recent,
8
+ * entries at the back are evicted first.
9
+ */
10
+ export class PromptCache {
11
+ private cache: Map<string, { value: string; hits: number; createdAt: number }>;
12
+ private readonly maxSize: number;
13
+ private totalHits = 0;
14
+ private totalMisses = 0;
15
+
16
+ constructor(maxSize: number = 100) {
17
+ this.maxSize = maxSize;
18
+ this.cache = new Map();
19
+ }
20
+
21
+ /** Retrieve cached result for a prompt. Returns null on miss. */
22
+ get(prompt: string): string | null {
23
+ const entry = this.cache.get(prompt);
24
+ if (!entry) {
25
+ this.totalMisses++;
26
+ return null;
27
+ }
28
+ // LRU: move to front by re-inserting
29
+ this.cache.delete(prompt);
30
+ entry.hits++;
31
+ this.cache.set(prompt, entry);
32
+ this.totalHits++;
33
+ return entry.value;
34
+ }
35
+
36
+ /** Cache a result for a prompt. Evicts LRU entry if at capacity. */
37
+ set(prompt: string, value: string): void {
38
+ if (this.cache.has(prompt)) {
39
+ this.cache.delete(prompt);
40
+ } else if (this.cache.size >= this.maxSize) {
41
+ // Evict least-recently-used (first entry in Map)
42
+ const lruKey = this.cache.keys().next().value;
43
+ if (lruKey !== undefined) this.cache.delete(lruKey);
44
+ }
45
+ this.cache.set(prompt, { value, hits: 0, createdAt: Date.now() });
46
+ }
47
+
48
+ /** Check if a prompt is cached without counting as a hit */
49
+ has(prompt: string): boolean {
50
+ return this.cache.has(prompt);
51
+ }
52
+
53
+ /** Remove a specific entry */
54
+ invalidate(prompt: string): void {
55
+ this.cache.delete(prompt);
56
+ }
57
+
58
+ /** Clear all entries */
59
+ clear(): void {
60
+ this.cache.clear();
61
+ this.totalHits = 0;
62
+ this.totalMisses = 0;
63
+ }
64
+
65
+ /** Cache statistics */
66
+ stats(): { size: number; maxSize: number; hitRate: number; totalHits: number; totalMisses: number } {
67
+ const total = this.totalHits + this.totalMisses;
68
+ return {
69
+ size: this.cache.size,
70
+ maxSize: this.maxSize,
71
+ hitRate: total > 0 ? this.totalHits / total : 0,
72
+ totalHits: this.totalHits,
73
+ totalMisses: this.totalMisses,
74
+ };
75
+ }
76
+ }