recursive-llm-ts 5.1.1 → 5.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/dist/{bridge-factory.js → cjs/bridge-factory.js} +25 -1
  2. package/dist/{go-bridge.js → cjs/go-bridge.js} +35 -3
  3. package/dist/cjs/package.json +3 -0
  4. package/dist/cjs/pkg-dir.d.ts +7 -0
  5. package/dist/cjs/pkg-dir.js +79 -0
  6. package/dist/esm/bridge-factory.d.ts +7 -0
  7. package/dist/esm/bridge-factory.js +60 -0
  8. package/dist/esm/bridge-interface.d.ts +269 -0
  9. package/dist/esm/bridge-interface.js +1 -0
  10. package/dist/esm/cache.d.ts +78 -0
  11. package/dist/esm/cache.js +207 -0
  12. package/dist/esm/config.d.ts +37 -0
  13. package/dist/esm/config.js +152 -0
  14. package/dist/esm/coordinator.d.ts +17 -0
  15. package/dist/esm/coordinator.js +41 -0
  16. package/dist/esm/errors.d.ts +113 -0
  17. package/dist/esm/errors.js +205 -0
  18. package/dist/esm/events.d.ts +126 -0
  19. package/dist/esm/events.js +73 -0
  20. package/dist/esm/file-storage.d.ts +122 -0
  21. package/dist/esm/file-storage.js +656 -0
  22. package/dist/esm/go-bridge.d.ts +5 -0
  23. package/dist/esm/go-bridge.js +133 -0
  24. package/dist/esm/index.d.ts +12 -0
  25. package/dist/esm/index.js +17 -0
  26. package/dist/esm/package.json +3 -0
  27. package/dist/esm/pkg-dir.d.ts +7 -0
  28. package/dist/esm/pkg-dir.js +43 -0
  29. package/dist/esm/retry.d.ts +56 -0
  30. package/dist/esm/retry.js +181 -0
  31. package/dist/esm/rlm.d.ts +435 -0
  32. package/dist/esm/rlm.js +1122 -0
  33. package/dist/esm/streaming.d.ts +96 -0
  34. package/dist/esm/streaming.js +205 -0
  35. package/dist/esm/structured-types.d.ts +28 -0
  36. package/dist/esm/structured-types.js +1 -0
  37. package/package.json +20 -8
  38. package/scripts/build-go-binary.js +26 -0
  39. /package/dist/{bridge-factory.d.ts → cjs/bridge-factory.d.ts} +0 -0
  40. /package/dist/{bridge-interface.d.ts → cjs/bridge-interface.d.ts} +0 -0
  41. /package/dist/{bridge-interface.js → cjs/bridge-interface.js} +0 -0
  42. /package/dist/{cache.d.ts → cjs/cache.d.ts} +0 -0
  43. /package/dist/{cache.js → cjs/cache.js} +0 -0
  44. /package/dist/{config.d.ts → cjs/config.d.ts} +0 -0
  45. /package/dist/{config.js → cjs/config.js} +0 -0
  46. /package/dist/{coordinator.d.ts → cjs/coordinator.d.ts} +0 -0
  47. /package/dist/{coordinator.js → cjs/coordinator.js} +0 -0
  48. /package/dist/{errors.d.ts → cjs/errors.d.ts} +0 -0
  49. /package/dist/{errors.js → cjs/errors.js} +0 -0
  50. /package/dist/{events.d.ts → cjs/events.d.ts} +0 -0
  51. /package/dist/{events.js → cjs/events.js} +0 -0
  52. /package/dist/{file-storage.d.ts → cjs/file-storage.d.ts} +0 -0
  53. /package/dist/{file-storage.js → cjs/file-storage.js} +0 -0
  54. /package/dist/{go-bridge.d.ts → cjs/go-bridge.d.ts} +0 -0
  55. /package/dist/{index.d.ts → cjs/index.d.ts} +0 -0
  56. /package/dist/{index.js → cjs/index.js} +0 -0
  57. /package/dist/{retry.d.ts → cjs/retry.d.ts} +0 -0
  58. /package/dist/{retry.js → cjs/retry.js} +0 -0
  59. /package/dist/{rlm.d.ts → cjs/rlm.d.ts} +0 -0
  60. /package/dist/{rlm.js → cjs/rlm.js} +0 -0
  61. /package/dist/{streaming.d.ts → cjs/streaming.d.ts} +0 -0
  62. /package/dist/{streaming.js → cjs/streaming.js} +0 -0
  63. /package/dist/{structured-types.d.ts → cjs/structured-types.d.ts} +0 -0
  64. /package/dist/{structured-types.js → cjs/structured-types.js} +0 -0
@@ -0,0 +1,435 @@
1
+ /**
2
+ * Main RLM (Recursive Language Model) class.
3
+ *
4
+ * Provides the primary API for recursive completions, structured output,
5
+ * streaming, file-based context, caching, retry/resilience, and events.
6
+ *
7
+ * @example
8
+ * ```typescript
9
+ * import { RLM } from 'recursive-llm-ts';
10
+ *
11
+ * const rlm = new RLM('gpt-4o-mini', { api_key: process.env.OPENAI_API_KEY });
12
+ * const result = await rlm.completion('Summarize this', longDocument);
13
+ * console.log(result.result);
14
+ * ```
15
+ */
16
+ import { RLMConfig, RLMResult, RLMStats, TraceEvent, FileStorageConfig, ContextOverflowConfig } from './bridge-interface';
17
+ import { BridgeType } from './bridge-factory';
18
+ import { z } from 'zod';
19
+ import { StructuredRLMResult } from './structured-types';
20
+ import { FileStorageResult } from './file-storage';
21
+ import { RLMEventType, RLMEventMap } from './events';
22
+ import { CacheConfig } from './cache';
23
+ import { RetryConfig } from './retry';
24
+ import { RLMStream, StreamOptions } from './streaming';
25
+ import { RLMExtendedConfig, ValidationResult } from './config';
26
+ /** Extended result with cache information */
27
+ export interface RLMCompletionResult extends RLMResult {
28
+ /** Whether this result was served from cache */
29
+ cached: boolean;
30
+ /** Model that was actually used (relevant with fallback) */
31
+ model: string;
32
+ }
33
+ /** Pretty-printable result wrapper */
34
+ export declare class RLMResultFormatter {
35
+ readonly result: string;
36
+ readonly stats: RLMStats;
37
+ readonly cached: boolean;
38
+ readonly model: string;
39
+ readonly trace_events?: TraceEvent[] | undefined;
40
+ constructor(result: string, stats: RLMStats, cached: boolean, model: string, trace_events?: TraceEvent[] | undefined);
41
+ /** Format stats as a concise one-liner */
42
+ prettyStats(): string;
43
+ /** Serialize to a JSON-safe object */
44
+ toJSON(): Record<string, unknown>;
45
+ /** Format as Markdown */
46
+ toMarkdown(): string;
47
+ }
48
+ /**
49
+ * Fluent builder for configuring RLM instances.
50
+ *
51
+ * @example
52
+ * ```typescript
53
+ * const rlm = RLM.builder('gpt-4o-mini')
54
+ * .maxDepth(10)
55
+ * .withMetaAgent()
56
+ * .withDebug()
57
+ * .withCache({ strategy: 'exact' })
58
+ * .withRetry({ maxRetries: 3 })
59
+ * .build();
60
+ * ```
61
+ */
62
+ export declare class RLMBuilder {
63
+ private model;
64
+ private config;
65
+ private bridgeType;
66
+ constructor(model: string);
67
+ /** Set the API key */
68
+ apiKey(key: string): this;
69
+ /** Set the API base URL */
70
+ apiBase(url: string): this;
71
+ /** Set maximum recursion depth */
72
+ maxDepth(depth: number): this;
73
+ /** Set maximum iterations */
74
+ maxIterations(iterations: number): this;
75
+ /** Enable meta-agent query optimization */
76
+ withMetaAgent(config?: {
77
+ model?: string;
78
+ max_optimize_len?: number;
79
+ }): this;
80
+ /** Enable debug mode */
81
+ withDebug(logOutput?: string): this;
82
+ /** Configure observability */
83
+ withObservability(config: RLMConfig['observability']): this;
84
+ /** Configure caching */
85
+ withCache(config?: CacheConfig): this;
86
+ /** Configure retry behavior */
87
+ withRetry(config?: RetryConfig): this;
88
+ /** Configure fallback models */
89
+ withFallback(models: string[]): this;
90
+ /** Set the bridge type */
91
+ bridge(type: BridgeType): this;
92
+ /** Configure context overflow recovery */
93
+ withContextOverflow(config?: ContextOverflowConfig): this;
94
+ /** Set the Go binary path */
95
+ binaryPath(path: string): this;
96
+ /** Add LiteLLM passthrough parameters */
97
+ litellmParams(params: Record<string, unknown>): this;
98
+ /** Build the RLM instance */
99
+ build(): RLM;
100
+ }
101
+ export declare class RLM {
102
+ private bridge;
103
+ private model;
104
+ private rlmConfig;
105
+ private bridgeType;
106
+ private lastTraceEvents;
107
+ private events;
108
+ private cache;
109
+ /**
110
+ * Create a new RLM instance.
111
+ *
112
+ * @param model - The LLM model identifier (e.g., 'gpt-4o-mini', 'claude-sonnet-4-20250514')
113
+ * @param rlmConfig - Configuration options for the RLM engine
114
+ * @param bridgeType - Bridge selection: 'go' (default)
115
+ *
116
+ * @example
117
+ * ```typescript
118
+ * const rlm = new RLM('gpt-4o-mini', {
119
+ * api_key: process.env.OPENAI_API_KEY,
120
+ * max_depth: 5,
121
+ * cache: { enabled: true },
122
+ * retry: { maxRetries: 3 },
123
+ * });
124
+ * ```
125
+ */
126
+ constructor(model: string, rlmConfig?: RLMExtendedConfig, bridgeType?: BridgeType);
127
+ /**
128
+ * Create an RLM instance using environment variables for configuration.
129
+ *
130
+ * @param model - The LLM model identifier
131
+ * @returns RLM instance configured from environment
132
+ *
133
+ * @example
134
+ * ```typescript
135
+ * // Uses OPENAI_API_KEY from environment
136
+ * const rlm = RLM.fromEnv('gpt-4o-mini');
137
+ * ```
138
+ */
139
+ static fromEnv(model: string): RLM;
140
+ /**
141
+ * Create an RLM instance with debug logging enabled.
142
+ *
143
+ * @param model - The LLM model identifier
144
+ * @param config - Additional configuration options
145
+ * @returns RLM instance with debug mode active
146
+ */
147
+ static withDebug(model: string, config?: RLMExtendedConfig): RLM;
148
+ /**
149
+ * Create an RLM instance configured for Azure OpenAI.
150
+ *
151
+ * @param deploymentName - Azure deployment name
152
+ * @param config - Azure-specific configuration
153
+ * @returns RLM instance configured for Azure
154
+ */
155
+ static forAzure(deploymentName: string, config: {
156
+ apiBase: string;
157
+ apiKey?: string;
158
+ apiVersion?: string;
159
+ }): RLM;
160
+ /**
161
+ * Create a fluent builder for advanced configuration.
162
+ *
163
+ * @param model - The LLM model identifier
164
+ * @returns Builder instance
165
+ *
166
+ * @example
167
+ * ```typescript
168
+ * const rlm = RLM.builder('gpt-4o-mini')
169
+ * .apiKey(process.env.OPENAI_API_KEY!)
170
+ * .maxDepth(10)
171
+ * .withMetaAgent()
172
+ * .withCache({ strategy: 'exact' })
173
+ * .build();
174
+ * ```
175
+ */
176
+ static builder(model: string): RLMBuilder;
177
+ private normalizeConfig;
178
+ private ensureBridge;
179
+ /**
180
+ * Register an event listener.
181
+ *
182
+ * @param event - Event type to listen for
183
+ * @param listener - Callback function
184
+ *
185
+ * @example
186
+ * ```typescript
187
+ * rlm.on('llm_call', (e) => console.log(`Calling ${e.model}`));
188
+ * rlm.on('error', (e) => reportError(e.error));
189
+ * rlm.on('cache', (e) => console.log(`Cache ${e.action}`));
190
+ * ```
191
+ */
192
+ on<K extends RLMEventType>(event: K, listener: (event: RLMEventMap[K]) => void): this;
193
+ /**
194
+ * Register a one-time event listener.
195
+ *
196
+ * @param event - Event type to listen for
197
+ * @param listener - Callback function (called once then removed)
198
+ */
199
+ once<K extends RLMEventType>(event: K, listener: (event: RLMEventMap[K]) => void): this;
200
+ /**
201
+ * Remove an event listener.
202
+ *
203
+ * @param event - Event type
204
+ * @param listener - The listener function to remove
205
+ */
206
+ off<K extends RLMEventType>(event: K, listener: (event: RLMEventMap[K]) => void): this;
207
+ /** Remove all event listeners */
208
+ removeAllListeners(event?: RLMEventType): this;
209
+ /**
210
+ * Execute a completion against an LLM with recursive decomposition.
211
+ *
212
+ * @param query - The question or instruction for the LLM
213
+ * @param context - The document or data to process (can be very large)
214
+ * @param options - Optional completion settings
215
+ * @returns The LLM response with execution statistics
216
+ *
217
+ * @example
218
+ * ```typescript
219
+ * const result = await rlm.completion('Summarize the key points', longDocument);
220
+ * console.log(result.result);
221
+ * console.log(`Used ${result.stats.llm_calls} LLM calls`);
222
+ * ```
223
+ */
224
+ completion(query: string, context: string, options?: {
225
+ signal?: AbortSignal;
226
+ }): Promise<RLMCompletionResult>;
227
+ /**
228
+ * Extract structured, typed data from context using a Zod schema.
229
+ *
230
+ * @param query - The extraction task to perform
231
+ * @param context - The document or data to process
232
+ * @param schema - Zod schema defining the expected output structure
233
+ * @param options - Execution options (parallelExecution, maxRetries, signal)
234
+ * @returns Typed result matching your Zod schema
235
+ *
236
+ * @example
237
+ * ```typescript
238
+ * const schema = z.object({
239
+ * summary: z.string(),
240
+ * score: z.number().min(1).max(10),
241
+ * tags: z.array(z.string()),
242
+ * });
243
+ *
244
+ * const result = await rlm.structuredCompletion('Analyze this document', doc, schema);
245
+ * console.log(result.result.summary); // string
246
+ * console.log(result.result.score); // number
247
+ * console.log(result.result.tags); // string[]
248
+ * ```
249
+ */
250
+ structuredCompletion<T>(query: string, context: string, schema: z.ZodSchema<T>, options?: {
251
+ maxRetries?: number;
252
+ parallelExecution?: boolean;
253
+ signal?: AbortSignal;
254
+ }): Promise<StructuredRLMResult<T>>;
255
+ /**
256
+ * Stream a completion with progressive text output.
257
+ *
258
+ * Returns an async iterable of stream chunks. Supports AbortController
259
+ * for cancellation.
260
+ *
261
+ * Note: Currently simulates streaming by chunking the full response.
262
+ * Full streaming support (from the Go binary) is planned.
263
+ *
264
+ * @param query - The question or instruction for the LLM
265
+ * @param context - The document or data to process
266
+ * @param options - Stream options including AbortController signal
267
+ * @returns Async iterable stream of chunks
268
+ *
269
+ * @example
270
+ * ```typescript
271
+ * const stream = rlm.streamCompletion(query, context);
272
+ * for await (const chunk of stream) {
273
+ * if (chunk.type === 'text') process.stdout.write(chunk.text);
274
+ * }
275
+ *
276
+ * // Or collect as string
277
+ * const text = await rlm.streamCompletion(query, context).toText();
278
+ *
279
+ * // With abort
280
+ * const controller = new AbortController();
281
+ * const stream = rlm.streamCompletion(query, context, { signal: controller.signal });
282
+ * setTimeout(() => controller.abort(), 5000);
283
+ * ```
284
+ */
285
+ streamCompletion(query: string, context: string, options?: StreamOptions): RLMStream;
286
+ /**
287
+ * Stream a structured completion with partial object updates.
288
+ *
289
+ * @param query - The extraction task to perform
290
+ * @param context - The document or data to process
291
+ * @param schema - Zod schema for the output structure
292
+ * @param options - Stream and execution options
293
+ * @returns Async iterable stream with partial object chunks
294
+ */
295
+ streamStructuredCompletion<T>(query: string, context: string, schema: z.ZodSchema<T>, options?: StreamOptions & {
296
+ maxRetries?: number;
297
+ parallelExecution?: boolean;
298
+ }): RLMStream<T>;
299
+ /**
300
+ * Execute multiple completions in parallel with concurrency control.
301
+ *
302
+ * @param queries - Array of query+context pairs to process
303
+ * @param options - Batch options including concurrency limit
304
+ * @returns Array of results in the same order as input
305
+ *
306
+ * @example
307
+ * ```typescript
308
+ * const results = await rlm.batchCompletion([
309
+ * { query: 'Summarize chapter 1', context: ch1 },
310
+ * { query: 'Summarize chapter 2', context: ch2 },
311
+ * { query: 'Summarize chapter 3', context: ch3 },
312
+ * ], { concurrency: 2 });
313
+ * ```
314
+ */
315
+ batchCompletion(queries: Array<{
316
+ query: string;
317
+ context: string;
318
+ }>, options?: {
319
+ concurrency?: number;
320
+ signal?: AbortSignal;
321
+ }): Promise<Array<RLMCompletionResult | Error>>;
322
+ /**
323
+ * Execute multiple structured completions in parallel.
324
+ *
325
+ * @param queries - Array of query+context+schema triples
326
+ * @param options - Batch options including concurrency limit
327
+ * @returns Array of typed results
328
+ */
329
+ batchStructuredCompletion<T>(queries: Array<{
330
+ query: string;
331
+ context: string;
332
+ schema: z.ZodSchema<T>;
333
+ }>, options?: {
334
+ concurrency?: number;
335
+ signal?: AbortSignal;
336
+ }): Promise<Array<StructuredRLMResult<T> | Error>>;
337
+ /**
338
+ * Run a completion using files from a folder (local or S3) as context.
339
+ *
340
+ * @param query - The question or task to perform
341
+ * @param fileConfig - File storage configuration (local path or S3 bucket)
342
+ * @returns Result with fileStorage metadata (files included, skipped, total size)
343
+ *
344
+ * @example
345
+ * ```typescript
346
+ * const result = await rlm.completionFromFiles(
347
+ * 'Summarize the architecture',
348
+ * { type: 'local', path: './src', extensions: ['.ts'] }
349
+ * );
350
+ * console.log(result.result);
351
+ * console.log(`Processed ${result.fileStorage.files.length} files`);
352
+ * ```
353
+ */
354
+ completionFromFiles(query: string, fileConfig: FileStorageConfig): Promise<RLMCompletionResult & {
355
+ fileStorage: FileStorageResult;
356
+ }>;
357
+ /**
358
+ * Run a structured completion using files from a folder (local or S3) as context.
359
+ *
360
+ * @param query - The extraction task to perform
361
+ * @param fileConfig - File storage configuration
362
+ * @param schema - Zod schema for the output structure
363
+ * @param options - Execution options
364
+ * @returns Typed result with fileStorage metadata
365
+ */
366
+ structuredCompletionFromFiles<T>(query: string, fileConfig: FileStorageConfig, schema: z.ZodSchema<T>, options?: {
367
+ maxRetries?: number;
368
+ parallelExecution?: boolean;
369
+ }): Promise<StructuredRLMResult<T> & {
370
+ fileStorage: FileStorageResult;
371
+ }>;
372
+ /**
373
+ * Preview which files would be included from a file storage config
374
+ * without actually reading them. Useful for dry-runs.
375
+ *
376
+ * @param fileConfig - File storage configuration
377
+ * @returns Array of relative file paths that match the config
378
+ */
379
+ previewFiles(fileConfig: FileStorageConfig): Promise<string[]>;
380
+ /**
381
+ * Build context from a file storage config without running a completion.
382
+ * Useful for inspecting the generated context string.
383
+ *
384
+ * @param fileConfig - File storage configuration
385
+ * @returns Built context with metadata
386
+ */
387
+ buildFileContext(fileConfig: FileStorageConfig): Promise<FileStorageResult>;
388
+ /**
389
+ * Returns trace events from the last operation.
390
+ * Only populated when observability is enabled in the config.
391
+ *
392
+ * @returns Array of trace events from the most recent completion
393
+ */
394
+ getTraceEvents(): TraceEvent[];
395
+ /**
396
+ * Get cache statistics (hits, misses, hit rate).
397
+ *
398
+ * @returns Cache performance statistics
399
+ */
400
+ getCacheStats(): import("./cache").CacheStats;
401
+ /** Clear the completion cache */
402
+ clearCache(): void;
403
+ /**
404
+ * Validate the current configuration without making any API calls.
405
+ * Checks binary existence, config validity, and connectivity hints.
406
+ *
407
+ * @returns Validation result with issues
408
+ *
409
+ * @example
410
+ * ```typescript
411
+ * const issues = rlm.validate();
412
+ * if (!issues.valid) {
413
+ * console.error('Config issues:', issues.issues);
414
+ * }
415
+ * ```
416
+ */
417
+ validate(): ValidationResult;
418
+ /**
419
+ * Create a formatted result wrapper from a completion result.
420
+ *
421
+ * @param result - The completion result to format
422
+ * @returns Formatter with prettyStats(), toJSON(), and toMarkdown() methods
423
+ */
424
+ formatResult(result: RLMCompletionResult): RLMResultFormatter;
425
+ /**
426
+ * Clean up the bridge connection and free resources.
427
+ * Call this when you're done using the RLM instance.
428
+ */
429
+ cleanup(): Promise<void>;
430
+ /**
431
+ * Support for `Symbol.asyncDispose` (Node 22+ `await using`).
432
+ */
433
+ [Symbol.asyncDispose](): Promise<void>;
434
+ private zodToJsonSchema;
435
+ }