recursive-llm-ts 5.0.2 → 5.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. package/README.md +2 -12
  2. package/dist/cjs/bridge-factory.d.ts +7 -0
  3. package/dist/cjs/bridge-factory.js +96 -0
  4. package/dist/{bridge-interface.d.ts → cjs/bridge-interface.d.ts} +1 -2
  5. package/dist/{config.js → cjs/config.js} +0 -6
  6. package/dist/{coordinator.js → cjs/coordinator.js} +1 -1
  7. package/dist/{go-bridge.d.ts → cjs/go-bridge.d.ts} +2 -2
  8. package/dist/{go-bridge.js → cjs/go-bridge.js} +36 -4
  9. package/dist/cjs/package.json +3 -0
  10. package/dist/cjs/pkg-dir.d.ts +7 -0
  11. package/dist/cjs/pkg-dir.js +79 -0
  12. package/dist/{rlm.d.ts → cjs/rlm.d.ts} +1 -1
  13. package/dist/{rlm.js → cjs/rlm.js} +3 -3
  14. package/dist/esm/bridge-factory.d.ts +7 -0
  15. package/dist/esm/bridge-factory.js +60 -0
  16. package/dist/esm/bridge-interface.d.ts +269 -0
  17. package/dist/esm/bridge-interface.js +1 -0
  18. package/dist/esm/cache.d.ts +78 -0
  19. package/dist/esm/cache.js +207 -0
  20. package/dist/esm/config.d.ts +37 -0
  21. package/dist/esm/config.js +152 -0
  22. package/dist/esm/coordinator.d.ts +17 -0
  23. package/dist/esm/coordinator.js +41 -0
  24. package/dist/esm/errors.d.ts +113 -0
  25. package/dist/esm/errors.js +205 -0
  26. package/dist/esm/events.d.ts +126 -0
  27. package/dist/esm/events.js +73 -0
  28. package/dist/esm/file-storage.d.ts +122 -0
  29. package/dist/esm/file-storage.js +656 -0
  30. package/dist/esm/go-bridge.d.ts +5 -0
  31. package/dist/esm/go-bridge.js +133 -0
  32. package/dist/esm/index.d.ts +12 -0
  33. package/dist/esm/index.js +17 -0
  34. package/dist/esm/package.json +3 -0
  35. package/dist/esm/pkg-dir.d.ts +7 -0
  36. package/dist/esm/pkg-dir.js +43 -0
  37. package/dist/esm/retry.d.ts +56 -0
  38. package/dist/esm/retry.js +181 -0
  39. package/dist/esm/rlm.d.ts +435 -0
  40. package/dist/esm/rlm.js +1122 -0
  41. package/dist/esm/streaming.d.ts +96 -0
  42. package/dist/esm/streaming.js +205 -0
  43. package/dist/esm/structured-types.d.ts +28 -0
  44. package/dist/esm/structured-types.js +1 -0
  45. package/package.json +32 -5
  46. package/scripts/build-go-binary.js +44 -5
  47. package/dist/bridge-factory.d.ts +0 -6
  48. package/dist/bridge-factory.js +0 -134
  49. package/dist/bunpy-bridge.d.ts +0 -7
  50. package/dist/bunpy-bridge.js +0 -37
  51. package/dist/rlm-bridge.d.ts +0 -8
  52. package/dist/rlm-bridge.js +0 -179
  53. /package/dist/{bridge-interface.js → cjs/bridge-interface.js} +0 -0
  54. /package/dist/{cache.d.ts → cjs/cache.d.ts} +0 -0
  55. /package/dist/{cache.js → cjs/cache.js} +0 -0
  56. /package/dist/{config.d.ts → cjs/config.d.ts} +0 -0
  57. /package/dist/{coordinator.d.ts → cjs/coordinator.d.ts} +0 -0
  58. /package/dist/{errors.d.ts → cjs/errors.d.ts} +0 -0
  59. /package/dist/{errors.js → cjs/errors.js} +0 -0
  60. /package/dist/{events.d.ts → cjs/events.d.ts} +0 -0
  61. /package/dist/{events.js → cjs/events.js} +0 -0
  62. /package/dist/{file-storage.d.ts → cjs/file-storage.d.ts} +0 -0
  63. /package/dist/{file-storage.js → cjs/file-storage.js} +0 -0
  64. /package/dist/{index.d.ts → cjs/index.d.ts} +0 -0
  65. /package/dist/{index.js → cjs/index.js} +0 -0
  66. /package/dist/{retry.d.ts → cjs/retry.d.ts} +0 -0
  67. /package/dist/{retry.js → cjs/retry.js} +0 -0
  68. /package/dist/{streaming.d.ts → cjs/streaming.d.ts} +0 -0
  69. /package/dist/{streaming.js → cjs/streaming.js} +0 -0
  70. /package/dist/{structured-types.d.ts → cjs/structured-types.d.ts} +0 -0
  71. /package/dist/{structured-types.js → cjs/structured-types.js} +0 -0
@@ -0,0 +1,269 @@
1
+ export interface RLMStats {
2
+ llm_calls: number;
3
+ iterations: number;
4
+ depth: number;
5
+ parsing_retries?: number;
6
+ total_tokens?: number;
7
+ prompt_tokens?: number;
8
+ completion_tokens?: number;
9
+ }
10
+ export interface RLMResult {
11
+ result: string;
12
+ stats: RLMStats;
13
+ structured_result?: boolean;
14
+ trace_events?: TraceEvent[];
15
+ }
16
+ export interface MetaAgentConfig {
17
+ enabled: boolean;
18
+ model?: string;
19
+ max_optimize_len?: number;
20
+ }
21
+ export interface ObservabilityConfig {
22
+ debug?: boolean;
23
+ trace_enabled?: boolean;
24
+ trace_endpoint?: string;
25
+ service_name?: string;
26
+ log_output?: string;
27
+ langfuse_enabled?: boolean;
28
+ langfuse_public_key?: string;
29
+ langfuse_secret_key?: string;
30
+ langfuse_host?: string;
31
+ }
32
+ export interface TraceEvent {
33
+ timestamp: string;
34
+ type: string;
35
+ name: string;
36
+ attributes: Record<string, string>;
37
+ duration?: number;
38
+ trace_id?: string;
39
+ span_id?: string;
40
+ parent_id?: string;
41
+ }
42
+ export interface ContextOverflowConfig {
43
+ /** Enable automatic context overflow recovery (default: true) */
44
+ enabled?: boolean;
45
+ /** Override detected model token limit (0 = auto-detect from API errors) */
46
+ max_model_tokens?: number;
47
+ /** Strategy: 'mapreduce' (default), 'truncate', 'chunked', 'tfidf', 'textrank', or 'refine' */
48
+ strategy?: 'mapreduce' | 'truncate' | 'chunked' | 'tfidf' | 'textrank' | 'refine';
49
+ /** Fraction of token budget to reserve for prompts/overhead (default: 0.15) */
50
+ safety_margin?: number;
51
+ /** Maximum reduction attempts before giving up (default: 3) */
52
+ max_reduction_attempts?: number;
53
+ }
54
+ export interface LCMConfig {
55
+ /** Enable LCM context management (default: false for backward compat) */
56
+ enabled?: boolean;
57
+ /** Soft token threshold — async compaction begins above this (default: 70% of model limit) */
58
+ soft_threshold?: number;
59
+ /** Hard token threshold — blocking compaction above this (default: 90% of model limit) */
60
+ hard_threshold?: number;
61
+ /** Number of messages to compact at once (default: 10) */
62
+ compaction_block_size?: number;
63
+ /** Target tokens per summary node (default: 500) */
64
+ summary_target_tokens?: number;
65
+ /** Large file handling configuration */
66
+ file_handling?: LCMFileConfig;
67
+ /** Episode-based context grouping configuration */
68
+ episodes?: EpisodeConfig;
69
+ /** Persistence backend configuration (default: in-memory) */
70
+ store_backend?: StoreBackendConfig;
71
+ }
72
+ export interface LCMFileConfig {
73
+ /** Token count above which files are stored externally with exploration summaries (default: 25000) */
74
+ token_threshold?: number;
75
+ }
76
+ export interface EpisodeConfig {
77
+ /** Max tokens before auto-closing an episode (default: 2000) */
78
+ max_episode_tokens?: number;
79
+ /** Max messages before auto-closing an episode (default: 20) */
80
+ max_episode_messages?: number;
81
+ /** Topic change sensitivity 0-1 (reserved for future semantic detection) */
82
+ topic_change_threshold?: number;
83
+ /** Auto-generate summary when episode closes (default: true) */
84
+ auto_compact_after_close?: boolean;
85
+ }
86
+ export interface Episode {
87
+ id: string;
88
+ title: string;
89
+ message_ids: string[];
90
+ start_time: string;
91
+ end_time: string;
92
+ tokens: number;
93
+ summary?: string;
94
+ summary_tokens?: number;
95
+ status: 'active' | 'compacted' | 'archived';
96
+ tags?: string[];
97
+ parent_episode_id?: string;
98
+ }
99
+ export interface StoreBackendConfig {
100
+ /** Backend type: 'memory' (default) or 'sqlite' */
101
+ type?: 'memory' | 'sqlite';
102
+ /** Path for SQLite database file (required when type is 'sqlite', use ':memory:' for in-memory SQLite) */
103
+ path?: string;
104
+ }
105
+ export interface LLMMapConfig {
106
+ /** Path to JSONL input file */
107
+ input_path: string;
108
+ /** Path to JSONL output file */
109
+ output_path: string;
110
+ /** Prompt template — use {{item}} as placeholder for each item */
111
+ prompt: string;
112
+ /** JSON Schema for output validation */
113
+ output_schema?: Record<string, any>;
114
+ /** Worker pool concurrency (default: 16) */
115
+ concurrency?: number;
116
+ /** Per-item retry limit (default: 3) */
117
+ max_retries?: number;
118
+ /** Model to use (defaults to engine model) */
119
+ model?: string;
120
+ }
121
+ export interface LLMMapResult {
122
+ total_items: number;
123
+ completed: number;
124
+ failed: number;
125
+ output_path: string;
126
+ duration_ms: number;
127
+ tokens_used: number;
128
+ }
129
+ export interface AgenticMapConfig {
130
+ /** Path to JSONL input file */
131
+ input_path: string;
132
+ /** Path to JSONL output file */
133
+ output_path: string;
134
+ /** Prompt template — use {{item}} as placeholder for each item */
135
+ prompt: string;
136
+ /** JSON Schema for output validation */
137
+ output_schema?: Record<string, any>;
138
+ /** Worker pool concurrency (default: 8) */
139
+ concurrency?: number;
140
+ /** Per-item retry limit (default: 2) */
141
+ max_retries?: number;
142
+ /** Model for sub-agents (defaults to engine model) */
143
+ model?: string;
144
+ /** If true, sub-agents cannot modify filesystem */
145
+ read_only?: boolean;
146
+ /** Max recursion depth for sub-agents (default: 3) */
147
+ max_depth?: number;
148
+ /** Max iterations per sub-agent (default: 15) */
149
+ max_iterations?: number;
150
+ }
151
+ export interface AgenticMapResult {
152
+ total_items: number;
153
+ completed: number;
154
+ failed: number;
155
+ output_path: string;
156
+ duration_ms: number;
157
+ tokens_used: number;
158
+ }
159
+ export interface DelegationRequest {
160
+ /** Task description for the sub-agent */
161
+ prompt: string;
162
+ /** Specific slice of work being handed off (required for non-root) */
163
+ delegated_scope?: string;
164
+ /** Work the caller retains (required for non-root) */
165
+ kept_work?: string;
166
+ /** Read-only exploration agent (exempt from guard) */
167
+ read_only?: boolean;
168
+ /** Parallel decomposition (exempt from guard) */
169
+ parallel?: boolean;
170
+ }
171
+ export interface LCMStoreStats {
172
+ total_messages: number;
173
+ total_summaries: number;
174
+ active_context_items: number;
175
+ active_context_tokens: number;
176
+ immutable_store_tokens: number;
177
+ compression_ratio: number;
178
+ }
179
+ export interface LCMGrepResult {
180
+ message_id: string;
181
+ role: string;
182
+ content: string;
183
+ summary_id?: string;
184
+ match_line: string;
185
+ }
186
+ export interface LCMDescribeResult {
187
+ type: 'message' | 'summary';
188
+ id: string;
189
+ tokens: number;
190
+ role?: string;
191
+ kind?: 'leaf' | 'condensed';
192
+ level?: number;
193
+ covered_ids?: string[];
194
+ file_ids?: string[];
195
+ content?: string;
196
+ }
197
+ export interface EpisodeListResult {
198
+ episodes: Episode[];
199
+ active_episode_id?: string;
200
+ total_episodes: number;
201
+ }
202
+ export interface RLMConfig {
203
+ recursive_model?: string;
204
+ api_base?: string;
205
+ api_key?: string;
206
+ max_depth?: number;
207
+ max_iterations?: number;
208
+ go_binary_path?: string;
209
+ meta_agent?: MetaAgentConfig;
210
+ observability?: ObservabilityConfig;
211
+ context_overflow?: ContextOverflowConfig;
212
+ lcm?: LCMConfig;
213
+ debug?: boolean;
214
+ api_version?: string;
215
+ timeout?: number;
216
+ temperature?: number;
217
+ max_tokens?: number;
218
+ structured?: any;
219
+ [key: string]: any;
220
+ }
221
+ export interface FileStorageConfig {
222
+ /** Storage type: 'local' or 's3' */
223
+ type: 'local' | 's3';
224
+ /** For local: root directory path. For S3: bucket name */
225
+ path: string;
226
+ /** For S3: the prefix (folder path) within the bucket */
227
+ prefix?: string;
228
+ /** For S3: AWS region (falls back to AWS_REGION env var, then 'us-east-1') */
229
+ region?: string;
230
+ /**
231
+ * For S3: explicit credentials.
232
+ * Resolution order:
233
+ * 1. This field (explicit credentials)
234
+ * 2. Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN
235
+ * 3. AWS SDK default credential chain (IAM role, ~/.aws/credentials, ECS task role, etc.)
236
+ */
237
+ credentials?: {
238
+ accessKeyId: string;
239
+ secretAccessKey: string;
240
+ sessionToken?: string;
241
+ };
242
+ /**
243
+ * For S3: custom endpoint URL.
244
+ * Use for S3-compatible services: MinIO, LocalStack, DigitalOcean Spaces, Backblaze B2.
245
+ * When set, forcePathStyle is automatically enabled.
246
+ */
247
+ endpoint?: string;
248
+ /**
249
+ * For S3: force path-style addressing (bucket in path, not subdomain).
250
+ * Automatically true when endpoint is set.
251
+ */
252
+ forcePathStyle?: boolean;
253
+ /** Glob patterns to include (e.g. ['*.ts', '*.md']) */
254
+ includePatterns?: string[];
255
+ /** Glob patterns to exclude (e.g. ['node_modules/**']) */
256
+ excludePatterns?: string[];
257
+ /** Maximum file size in bytes to include (default: 1MB) */
258
+ maxFileSize?: number;
259
+ /** Maximum total context size in bytes (default: 10MB) */
260
+ maxTotalSize?: number;
261
+ /** Maximum number of files to include (default: 1000) */
262
+ maxFiles?: number;
263
+ /** File extensions to include (e.g. ['.ts', '.md', '.txt']) */
264
+ extensions?: string[];
265
+ }
266
+ export interface Bridge {
267
+ completion(model: string, query: string, context: string, rlmConfig: RLMConfig): Promise<RLMResult>;
268
+ cleanup(): Promise<void>;
269
+ }
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,78 @@
1
+ /**
2
+ * Caching layer for recursive-llm-ts completions.
3
+ *
4
+ * Provides exact-match caching to avoid redundant API calls for
5
+ * identical query+context pairs. Supports in-memory and file-based storage.
6
+ */
7
+ export interface CacheConfig {
8
+ /** Enable/disable caching (default: false) */
9
+ enabled?: boolean;
10
+ /** Cache strategy (default: 'exact') */
11
+ strategy?: 'exact' | 'none';
12
+ /** Maximum number of cached entries (default: 1000) */
13
+ maxEntries?: number;
14
+ /** Time-to-live in seconds (default: 3600 = 1 hour) */
15
+ ttl?: number;
16
+ /** Storage backend (default: 'memory') */
17
+ storage?: 'memory' | 'file';
18
+ /** Directory for file-based cache (default: .rlm-cache) */
19
+ cacheDir?: string;
20
+ }
21
+ export interface CacheStats {
22
+ hits: number;
23
+ misses: number;
24
+ size: number;
25
+ hitRate: number;
26
+ evictions: number;
27
+ }
28
+ export interface CacheProvider {
29
+ get<T>(key: string): T | undefined;
30
+ set<T>(key: string, value: T, ttl: number): void;
31
+ has(key: string): boolean;
32
+ delete(key: string): boolean;
33
+ clear(): void;
34
+ size(): number;
35
+ }
36
+ export declare class MemoryCache implements CacheProvider {
37
+ private store;
38
+ private maxEntries;
39
+ constructor(maxEntries?: number);
40
+ get<T>(key: string): T | undefined;
41
+ set<T>(key: string, value: T, ttl: number): void;
42
+ has(key: string): boolean;
43
+ delete(key: string): boolean;
44
+ clear(): void;
45
+ size(): number;
46
+ }
47
+ export declare class FileCache implements CacheProvider {
48
+ private cacheDir;
49
+ private maxEntries;
50
+ constructor(cacheDir?: string, maxEntries?: number);
51
+ private filePath;
52
+ get<T>(key: string): T | undefined;
53
+ set<T>(key: string, value: T, ttl: number): void;
54
+ has(key: string): boolean;
55
+ delete(key: string): boolean;
56
+ clear(): void;
57
+ size(): number;
58
+ }
59
+ export declare class RLMCache {
60
+ private provider;
61
+ private config;
62
+ private stats;
63
+ constructor(config?: CacheConfig);
64
+ /** Check if caching is enabled */
65
+ get enabled(): boolean;
66
+ /** Look up a cached result */
67
+ lookup<T>(model: string, query: string, context: string, extra?: Record<string, unknown>): {
68
+ hit: boolean;
69
+ value?: T;
70
+ };
71
+ /** Store a result in the cache */
72
+ store<T>(model: string, query: string, context: string, value: T, extra?: Record<string, unknown>): void;
73
+ /** Get cache statistics */
74
+ getStats(): CacheStats;
75
+ /** Clear the cache */
76
+ clear(): void;
77
+ private updateHitRate;
78
+ }
@@ -0,0 +1,207 @@
1
+ /**
2
+ * Caching layer for recursive-llm-ts completions.
3
+ *
4
+ * Provides exact-match caching to avoid redundant API calls for
5
+ * identical query+context pairs. Supports in-memory and file-based storage.
6
+ */
7
+ import * as fs from 'fs';
8
+ import * as path from 'path';
9
+ import * as crypto from 'crypto';
10
+ // ─── Cache Key Generator ─────────────────────────────────────────────────────
11
+ function generateCacheKey(model, query, context, config) {
12
+ const data = JSON.stringify({ model, query, context, config });
13
+ return crypto.createHash('sha256').update(data).digest('hex');
14
+ }
15
+ // ─── In-Memory Cache ─────────────────────────────────────────────────────────
16
+ export class MemoryCache {
17
+ constructor(maxEntries = 1000) {
18
+ this.store = new Map();
19
+ this.maxEntries = maxEntries;
20
+ }
21
+ get(key) {
22
+ const entry = this.store.get(key);
23
+ if (!entry)
24
+ return undefined;
25
+ // Check TTL
26
+ if (Date.now() - entry.createdAt > entry.ttl * 1000) {
27
+ this.store.delete(key);
28
+ return undefined;
29
+ }
30
+ entry.hitCount++;
31
+ return entry.value;
32
+ }
33
+ set(key, value, ttl) {
34
+ // Evict oldest if at capacity
35
+ if (this.store.size >= this.maxEntries && !this.store.has(key)) {
36
+ const oldestKey = this.store.keys().next().value;
37
+ if (oldestKey !== undefined) {
38
+ this.store.delete(oldestKey);
39
+ }
40
+ }
41
+ this.store.set(key, {
42
+ key,
43
+ value,
44
+ createdAt: Date.now(),
45
+ ttl,
46
+ hitCount: 0,
47
+ });
48
+ }
49
+ has(key) {
50
+ const entry = this.store.get(key);
51
+ if (!entry)
52
+ return false;
53
+ if (Date.now() - entry.createdAt > entry.ttl * 1000) {
54
+ this.store.delete(key);
55
+ return false;
56
+ }
57
+ return true;
58
+ }
59
+ delete(key) {
60
+ return this.store.delete(key);
61
+ }
62
+ clear() {
63
+ this.store.clear();
64
+ }
65
+ size() {
66
+ // Clean expired entries
67
+ const now = Date.now();
68
+ for (const [key, entry] of this.store) {
69
+ if (now - entry.createdAt > entry.ttl * 1000) {
70
+ this.store.delete(key);
71
+ }
72
+ }
73
+ return this.store.size;
74
+ }
75
+ }
76
+ // ─── File-Based Cache ────────────────────────────────────────────────────────
77
+ export class FileCache {
78
+ constructor(cacheDir = '.rlm-cache', maxEntries = 1000) {
79
+ this.cacheDir = path.resolve(cacheDir);
80
+ this.maxEntries = maxEntries;
81
+ if (!fs.existsSync(this.cacheDir)) {
82
+ fs.mkdirSync(this.cacheDir, { recursive: true });
83
+ }
84
+ }
85
+ filePath(key) {
86
+ return path.join(this.cacheDir, `${key}.json`);
87
+ }
88
+ get(key) {
89
+ const fp = this.filePath(key);
90
+ if (!fs.existsSync(fp))
91
+ return undefined;
92
+ try {
93
+ const data = JSON.parse(fs.readFileSync(fp, 'utf-8'));
94
+ if (Date.now() - data.createdAt > data.ttl * 1000) {
95
+ fs.unlinkSync(fp);
96
+ return undefined;
97
+ }
98
+ return data.value;
99
+ }
100
+ catch (_a) {
101
+ return undefined;
102
+ }
103
+ }
104
+ set(key, value, ttl) {
105
+ const entry = {
106
+ key,
107
+ value,
108
+ createdAt: Date.now(),
109
+ ttl,
110
+ hitCount: 0,
111
+ };
112
+ try {
113
+ fs.writeFileSync(this.filePath(key), JSON.stringify(entry), 'utf-8');
114
+ }
115
+ catch (_a) {
116
+ // Silently fail on write errors
117
+ }
118
+ }
119
+ has(key) {
120
+ return this.get(key) !== undefined;
121
+ }
122
+ delete(key) {
123
+ const fp = this.filePath(key);
124
+ if (fs.existsSync(fp)) {
125
+ fs.unlinkSync(fp);
126
+ return true;
127
+ }
128
+ return false;
129
+ }
130
+ clear() {
131
+ if (fs.existsSync(this.cacheDir)) {
132
+ const files = fs.readdirSync(this.cacheDir);
133
+ for (const file of files) {
134
+ if (file.endsWith('.json')) {
135
+ fs.unlinkSync(path.join(this.cacheDir, file));
136
+ }
137
+ }
138
+ }
139
+ }
140
+ size() {
141
+ if (!fs.existsSync(this.cacheDir))
142
+ return 0;
143
+ return fs.readdirSync(this.cacheDir).filter(f => f.endsWith('.json')).length;
144
+ }
145
+ }
146
+ // ─── RLM Cache Manager ──────────────────────────────────────────────────────
147
+ export class RLMCache {
148
+ constructor(config = {}) {
149
+ var _a, _b, _c, _d, _e, _f;
150
+ this.stats = { hits: 0, misses: 0, size: 0, hitRate: 0, evictions: 0 };
151
+ this.config = {
152
+ enabled: (_a = config.enabled) !== null && _a !== void 0 ? _a : false,
153
+ strategy: (_b = config.strategy) !== null && _b !== void 0 ? _b : 'exact',
154
+ maxEntries: (_c = config.maxEntries) !== null && _c !== void 0 ? _c : 1000,
155
+ ttl: (_d = config.ttl) !== null && _d !== void 0 ? _d : 3600,
156
+ storage: (_e = config.storage) !== null && _e !== void 0 ? _e : 'memory',
157
+ cacheDir: (_f = config.cacheDir) !== null && _f !== void 0 ? _f : '.rlm-cache',
158
+ };
159
+ if (this.config.storage === 'file') {
160
+ this.provider = new FileCache(this.config.cacheDir, this.config.maxEntries);
161
+ }
162
+ else {
163
+ this.provider = new MemoryCache(this.config.maxEntries);
164
+ }
165
+ }
166
+ /** Check if caching is enabled */
167
+ get enabled() {
168
+ return this.config.enabled && this.config.strategy !== 'none';
169
+ }
170
+ /** Look up a cached result */
171
+ lookup(model, query, context, extra) {
172
+ if (!this.enabled)
173
+ return { hit: false };
174
+ const key = generateCacheKey(model, query, context, extra);
175
+ const value = this.provider.get(key);
176
+ if (value !== undefined) {
177
+ this.stats.hits++;
178
+ this.updateHitRate();
179
+ return { hit: true, value };
180
+ }
181
+ this.stats.misses++;
182
+ this.updateHitRate();
183
+ return { hit: false };
184
+ }
185
+ /** Store a result in the cache */
186
+ store(model, query, context, value, extra) {
187
+ if (!this.enabled)
188
+ return;
189
+ const key = generateCacheKey(model, query, context, extra);
190
+ this.provider.set(key, value, this.config.ttl);
191
+ this.stats.size = this.provider.size();
192
+ }
193
+ /** Get cache statistics */
194
+ getStats() {
195
+ this.stats.size = this.provider.size();
196
+ return Object.assign({}, this.stats);
197
+ }
198
+ /** Clear the cache */
199
+ clear() {
200
+ this.provider.clear();
201
+ this.stats = { hits: 0, misses: 0, size: 0, hitRate: 0, evictions: 0 };
202
+ }
203
+ updateHitRate() {
204
+ const total = this.stats.hits + this.stats.misses;
205
+ this.stats.hitRate = total > 0 ? this.stats.hits / total : 0;
206
+ }
207
+ }
@@ -0,0 +1,37 @@
1
+ /**
2
+ * Configuration validation for recursive-llm-ts.
3
+ *
4
+ * Validates RLMConfig at construction time with clear error messages.
5
+ */
6
+ import { RLMConfig } from './bridge-interface';
7
+ import { CacheConfig } from './cache';
8
+ import { RetryConfig, FallbackConfig } from './retry';
9
+ export interface RLMExtendedConfig extends RLMConfig {
10
+ /** Cache configuration */
11
+ cache?: CacheConfig;
12
+ /** Retry configuration */
13
+ retry?: RetryConfig;
14
+ /** Fallback model configuration */
15
+ fallback?: FallbackConfig;
16
+ /** LiteLLM passthrough parameters */
17
+ litellm_params?: Record<string, unknown>;
18
+ }
19
+ export type ValidationLevel = 'error' | 'warning' | 'info';
20
+ export interface ValidationIssue {
21
+ level: ValidationLevel;
22
+ field: string;
23
+ message: string;
24
+ }
25
+ export interface ValidationResult {
26
+ valid: boolean;
27
+ issues: ValidationIssue[];
28
+ }
29
+ /**
30
+ * Validate an RLM configuration.
31
+ * Returns issues rather than throwing, allowing callers to handle gracefully.
32
+ */
33
+ export declare function validateConfig(config: RLMExtendedConfig): ValidationResult;
34
+ /**
35
+ * Validate config and throw on errors. Logs warnings.
36
+ */
37
+ export declare function assertValidConfig(config: RLMExtendedConfig): void;