@elizaos/plugin-rlm 2.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,168 @@
1
+ import { Plugin } from '@elizaos/core';
2
+
3
/** Types for the RLM (Recursive Language Model) plugin. */

/** LLM providers the RLM backend can be pointed at. */
type RLMBackend = "openai" | "anthropic" | "gemini" | "groq" | "openrouter";
/** Execution environments for the Python RLM server. */
type RLMEnvironment = "local" | "docker" | "modal" | "prime";
/** Full client configuration; missing values fall back to DEFAULT_CONFIG. */
interface RLMConfig {
    /** Provider used for model calls. */
    backend: RLMBackend;
    /** Extra backend options; presumably forwarded to the Python backend — confirm against the server. */
    backendKwargs: Record<string, string>;
    /** Where the recursive REPL runs. */
    environment: RLMEnvironment;
    /** Max REPL iterations per request; validated to be >= 1. */
    maxIterations: number;
    /** Max recursion depth; validated to be >= 1. */
    maxDepth: number;
    /** Verbose logging flag, forwarded to the Python server. */
    verbose: boolean;
    /** Python interpreter used to spawn the server subprocess. */
    pythonPath: string;
    /** Retry attempts for retryable inference errors (default 3). */
    maxRetries?: number;
    /** Base delay in ms for exponential-backoff retries (default 1000). */
    retryBaseDelay?: number;
    /** Cap in ms on a single backoff delay (default 30000). */
    retryMaxDelay?: number;
}
/** Provider-neutral chat message. */
interface RLMMessage {
    role: "user" | "assistant" | "system";
    content: string;
}
/** Bookkeeping attached to every inference result. */
interface RLMMetadata {
    /** True when the result is a placeholder produced without a live backend. */
    stub: boolean;
    /** REPL iterations used, when reported by the server. */
    iterations?: number;
    /** Recursion depth reached, when reported by the server. */
    depth?: number;
    /** Error message when the request ultimately failed. */
    error?: string;
}
/** Inference result: generated text plus metadata. */
interface RLMResult {
    text: string;
    metadata: RLMMetadata;
}
/** Per-request options for RLMClient.infer. */
interface RLMInferOptions {
    /** Model identifier for this request */
    model?: string;
    /** Maximum tokens to generate */
    maxTokens?: number;
    /** Sampling temperature */
    temperature?: number;
    /** Top-p sampling parameter */
    topP?: number;
    /** Stop sequences */
    stopSequences?: string[];
    /** User identifier for tracking */
    user?: string;
    /** Enable streaming (not yet supported by RLM) */
    stream?: boolean;
    /** Override max iterations for this request */
    maxIterations?: number;
    /** Override max recursion depth for this request */
    maxDepth?: number;
    /** Override root model for this request */
    rootModel?: string;
    /** Override subcall model for this request */
    subcallModel?: string;
    /** Enable trajectory logging for this request */
    logTrajectories?: boolean;
    /** Enable cost tracking for this request */
    trackCosts?: boolean;
}
/** Response shape of the server's "status" method (also used as an offline fallback). */
interface RLMStatusResponse {
    available: boolean;
    backend: string;
    environment: string;
    maxIterations: number;
    maxDepth: number;
}
/** Parameters accepted by the plugin's text-generation model handlers. */
interface GenerateTextParams {
    prompt?: string;
    system?: string;
    messages?: RLMMessage[];
    model?: string;
    maxTokens?: number;
    temperature?: number;
    topP?: number;
    stopSequences?: string[];
    user?: string;
    stream?: boolean;
}
/** Built-in fallback configuration used when env vars and overrides are absent. */
declare const DEFAULT_CONFIG: RLMConfig;
/** Rolling operational metrics maintained by RLMClient. */
interface RLMMetrics {
    totalRequests: number;
    successfulRequests: number;
    failedRequests: number;
    /** Requests answered with a stub result (backend unavailable or retries exhausted). */
    stubResponses: number;
    totalRetries: number;
    /** Mean latency over a bounded window of recent samples. */
    averageLatencyMs: number;
    /** 95th-percentile latency over the same window. */
    p95LatencyMs: number;
    lastRequestTimestamp: number;
    lastErrorTimestamp?: number;
    lastError?: string;
}
/** Callback invoked with a fresh metrics snapshot after every request. */
type MetricsCallback = (metrics: RLMMetrics) => void;
/** Environment variable names read by configFromEnv and exported to the subprocess. */
declare const ENV_VARS: {
    readonly BACKEND: "ELIZA_RLM_BACKEND";
    readonly ENVIRONMENT: "ELIZA_RLM_ENV";
    readonly MAX_ITERATIONS: "ELIZA_RLM_MAX_ITERATIONS";
    readonly MAX_DEPTH: "ELIZA_RLM_MAX_DEPTH";
    readonly VERBOSE: "ELIZA_RLM_VERBOSE";
    readonly PYTHON_PATH: "ELIZA_RLM_PYTHON_PATH";
    readonly MAX_RETRIES: "ELIZA_RLM_MAX_RETRIES";
    readonly RETRY_BASE_DELAY: "ELIZA_RLM_RETRY_BASE_DELAY";
    readonly RETRY_MAX_DELAY: "ELIZA_RLM_RETRY_MAX_DELAY";
};
104
+
105
/** RLM Client - communicates with Python subprocess via JSON-RPC IPC. */

/** Minimal logging surface; a console-backed default is used when none is supplied. */
interface Logger {
    info: (message: string, ...args: unknown[]) => void;
    warn: (message: string, ...args: unknown[]) => void;
    error: (message: string, ...args: unknown[]) => void;
    debug: (message: string, ...args: unknown[]) => void;
}
/** Build an RLMConfig from environment variables (see ENV_VARS), falling back to DEFAULT_CONFIG. */
declare function configFromEnv(env?: NodeJS.ProcessEnv): RLMConfig;
/**
 * Client for the Python RLM server subprocess.
 *
 * Lazily spawns the server on first use, exchanges newline-delimited
 * JSON-RPC messages over stdio, retries retryable inference failures with
 * jittered exponential backoff, and degrades to stub results when the
 * backend is unavailable.
 */
declare class RLMClient {
    private config;
    private process;
    private reader;
    private requestId;
    private pendingRequests;
    private isReady;
    private isAvailable;
    private hasStartupError;
    private logger;
    private metrics;
    private static readonly MAX_LATENCY_SAMPLES;
    private latencies;
    private latencyIndex;
    private latencyCount;
    private metricsCallback;
    constructor(config?: Partial<RLMConfig>, logger?: Logger, strictValidation?: boolean);
    private startServer;
    private waitForReady;
    private handleMessage;
    private sendRequest;
    private ensureServer;
    /** True once the server reported itself ready AND its backend usable. */
    get available(): boolean;
    /** Snapshot (shallow copy) of the current metrics. */
    getMetrics(): RLMMetrics;
    /** Register a callback invoked with a metrics snapshot after every request. */
    onMetrics(callback: MetricsCallback): void;
    private updateMetrics;
    /** Wrap a bare string as a single user message; pass message arrays through. */
    static normalizeMessages(messages: string | RLMMessage[]): RLMMessage[];
    /** Run inference; returns a stub result instead of throwing on failure. */
    infer(messages: string | RLMMessage[], opts?: RLMInferOptions): Promise<RLMResult>;
    /** Query server status; returns an offline fallback on any error. */
    getStatus(): Promise<RLMStatusResponse>;
    /** Best-effort graceful shutdown, then kill the subprocess. */
    shutdown(): Promise<void>;
}
/** Placeholder result returned whenever the backend cannot serve a request. */
declare function stubResult(error?: string): RLMResult;
146
+
147
/**
 * RLM (Recursive Language Model) plugin for elizaOS.
 *
 * This plugin integrates Recursive Language Models into elizaOS, enabling
 * LLMs to process arbitrarily long contexts through recursive self-calls
 * in a REPL environment.
 *
 * Reference:
 * - Paper: https://arxiv.org/abs/2512.24601
 * - Implementation: https://github.com/alexzhang13/rlm
 */

/**
 * Reset the client singleton. Useful for testing or forced reinitialization.
 */
declare function resetClient(): Promise<void>;
/**
 * RLM plugin definition.
 */
declare const rlmPlugin: Plugin;

export { DEFAULT_CONFIG, ENV_VARS, type GenerateTextParams, RLMClient, type RLMConfig, type RLMInferOptions, type RLMMessage, type RLMMetadata, type RLMResult, type RLMStatusResponse, configFromEnv, rlmPlugin as default, resetClient, rlmPlugin, stubResult };
package/dist/index.js ADDED
@@ -0,0 +1,517 @@
1
+ // index.ts
2
+ import { ModelType, logger } from "@elizaos/core";
3
+
4
+ // client.ts
5
+ import { spawn } from "child_process";
6
+ import * as path from "path";
7
+ import * as readline from "readline";
8
+
9
// types.ts
// Fallback configuration used when neither environment variables nor
// constructor overrides supply a value (consumed by configFromEnv/RLMClient).
var DEFAULT_CONFIG = {
  backend: "gemini",
  backendKwargs: {},
  environment: "local",
  maxIterations: 4,
  maxDepth: 1,
  verbose: false,
  pythonPath: "python",
  maxRetries: 3,
  retryBaseDelay: 1e3,
  // 1 s base backoff
  retryMaxDelay: 3e4
  // 30 s backoff cap
};
// Environment variable names; read at startup and re-exported to the
// spawned Python server's environment.
var ENV_VARS = {
  BACKEND: "ELIZA_RLM_BACKEND",
  ENVIRONMENT: "ELIZA_RLM_ENV",
  MAX_ITERATIONS: "ELIZA_RLM_MAX_ITERATIONS",
  MAX_DEPTH: "ELIZA_RLM_MAX_DEPTH",
  VERBOSE: "ELIZA_RLM_VERBOSE",
  PYTHON_PATH: "ELIZA_RLM_PYTHON_PATH",
  MAX_RETRIES: "ELIZA_RLM_MAX_RETRIES",
  RETRY_BASE_DELAY: "ELIZA_RLM_RETRY_BASE_DELAY",
  RETRY_MAX_DELAY: "ELIZA_RLM_RETRY_MAX_DELAY"
};
33
// Allowed values for RLMConfig.backend / RLMConfig.environment.
var VALID_BACKENDS = [
  "openai",
  "anthropic",
  "gemini",
  "groq",
  "openrouter"
];
var VALID_ENVIRONMENTS = ["local", "docker", "modal", "prime"];
// Error thrown by validateConfig when strict mode is requested.
var RLMConfigError = class extends Error {
  constructor(message) {
    super(message);
    this.name = "RLMConfigError";
  }
};
/**
 * Validate a (possibly partial) RLM configuration.
 *
 * Collects every problem found rather than stopping at the first; fields
 * that are undefined are skipped so partial overrides validate cleanly.
 *
 * @param config - Partial configuration object to check.
 * @param strict - When true, throw RLMConfigError instead of returning errors.
 * @returns Array of human-readable error strings (empty when valid).
 * @throws RLMConfigError when strict is true and at least one error exists.
 */
function validateConfig(config, strict = false) {
  const errors = [];
  if (config.backend !== void 0 && !VALID_BACKENDS.includes(config.backend)) {
    errors.push(`Invalid backend '${config.backend}'. Valid options: ${VALID_BACKENDS.join(", ")}`);
  }
  if (config.environment !== void 0 && !VALID_ENVIRONMENTS.includes(config.environment)) {
    errors.push(
      `Invalid environment '${config.environment}'. Valid options: ${VALID_ENVIRONMENTS.join(", ")}`
    );
  }
  if (config.maxIterations !== void 0 && config.maxIterations < 1) {
    errors.push("maxIterations must be >= 1");
  }
  if (config.maxDepth !== void 0 && config.maxDepth < 1) {
    errors.push("maxDepth must be >= 1");
  }
  if (config.maxRetries !== void 0 && config.maxRetries < 0) {
    errors.push("maxRetries must be >= 0");
  }
  if (config.retryBaseDelay !== void 0 && config.retryBaseDelay < 0) {
    errors.push("retryBaseDelay must be >= 0");
  }
  // Fix: previously a negative retryMaxDelay slipped through whenever
  // retryBaseDelay was not supplied; validate it independently.
  if (config.retryMaxDelay !== void 0 && config.retryMaxDelay < 0) {
    errors.push("retryMaxDelay must be >= 0");
  }
  if (config.retryBaseDelay !== void 0 && config.retryMaxDelay !== void 0 && config.retryMaxDelay < config.retryBaseDelay) {
    errors.push("retryMaxDelay must be >= retryBaseDelay");
  }
  if (strict && errors.length > 0) {
    throw new RLMConfigError(errors.join("; "));
  }
  return errors;
}
77
+
78
+ // client.ts
79
// Console-backed fallback Logger; every line is tagged with "[RLM]".
var defaultLogger = (() => {
  const tagged = (sink) => (msg, ...rest) => sink(`[RLM] ${msg}`, ...rest);
  return {
    info: tagged(console.log),
    warn: tagged(console.warn),
    error: tagged(console.error),
    debug: tagged(console.debug)
  };
})();
85
// Build a complete RLMConfig from environment variables, falling back to
// DEFAULT_CONFIG for anything unset or unparsable.
function configFromEnv(env2 = process.env) {
  // NaN and 0 both fall back (0 is invalid for every numeric field anyway).
  const intOr = (raw, fallback) => Number.parseInt(raw ?? "", 10) || fallback;
  const isTruthy = (raw) => ["1", "true", "yes"].includes((raw ?? "").toLowerCase());
  return {
    backend: env2[ENV_VARS.BACKEND] ?? DEFAULT_CONFIG.backend,
    backendKwargs: {},
    environment: env2[ENV_VARS.ENVIRONMENT] ?? DEFAULT_CONFIG.environment,
    maxIterations: intOr(env2[ENV_VARS.MAX_ITERATIONS], DEFAULT_CONFIG.maxIterations),
    maxDepth: intOr(env2[ENV_VARS.MAX_DEPTH], DEFAULT_CONFIG.maxDepth),
    verbose: isTruthy(env2[ENV_VARS.VERBOSE]),
    pythonPath: env2[ENV_VARS.PYTHON_PATH] ?? DEFAULT_CONFIG.pythonPath,
    maxRetries: intOr(env2[ENV_VARS.MAX_RETRIES], DEFAULT_CONFIG.maxRetries),
    retryBaseDelay: intOr(env2[ENV_VARS.RETRY_BASE_DELAY], DEFAULT_CONFIG.retryBaseDelay),
    retryMaxDelay: intOr(env2[ENV_VARS.RETRY_MAX_DELAY], DEFAULT_CONFIG.retryMaxDelay)
  };
}
99
// Client for the Python RLM server. Spawns the subprocess lazily, speaks
// newline-delimited JSON-RPC over stdio, and degrades to stub results when
// the backend is unavailable.
var RLMClient = class _RLMClient {
  // Effective configuration: env-derived defaults merged with ctor overrides.
  config;
  // Spawned Python child process, or null when not running.
  process = null;
  // readline interface over the child's stdout (one JSON message per line).
  reader = null;
  // Monotonic JSON-RPC request id counter.
  requestId = 0;
  // In-flight requests keyed by id; settled from handleMessage or timeout.
  pendingRequests = /* @__PURE__ */ new Map();
  // Server has emitted its "ready" handshake line.
  isReady = false;
  // Server reported that its LLM backend is usable.
  isAvailable = false;
  // A spawn-level error fired before the ready handshake.
  hasStartupError = false;
  logger;
  // Rolling operational counters; see getMetrics/updateMetrics.
  metrics = {
    totalRequests: 0,
    successfulRequests: 0,
    failedRequests: 0,
    stubResponses: 0,
    totalRetries: 0,
    averageLatencyMs: 0,
    p95LatencyMs: 0,
    lastRequestTimestamp: 0
  };
  // Ring-buffer capacity for latency samples.
  static MAX_LATENCY_SAMPLES = 1e3;
  latencies = new Array(_RLMClient.MAX_LATENCY_SAMPLES).fill(0);
  latencyIndex = 0;
  latencyCount = 0;
  metricsCallback = null;
  // Invalid config only warns unless strictValidation is set.
  constructor(config, logger2, strictValidation = false) {
    this.config = { ...configFromEnv(), ...config };
    this.logger = logger2 ?? defaultLogger;
    const errors = validateConfig(this.config);
    if (errors.length > 0) {
      if (strictValidation) {
        throw new Error(`RLM configuration invalid: ${errors.join("; ")}`);
      }
      for (const error of errors) {
        this.logger.warn(`Configuration warning: ${error}`);
      }
    }
  }
  // Spawn the Python server and wire up stdio handlers. Idempotent while a
  // process exists. On failure it marks the client unavailable rather than
  // throwing, so callers fall through to stub results.
  async startServer() {
    if (this.process) return;
    this.hasStartupError = false;
    this.logger.debug(
      `Starting RLM server: ${this.config.pythonPath} -m elizaos_plugin_rlm.server`
    );
    try {
      this.process = spawn(this.config.pythonPath, ["-m", "elizaos_plugin_rlm.server"], {
        stdio: ["pipe", "pipe", "pipe"],
        // Forward the RLM settings to the child via its environment.
        env: {
          ...process.env,
          [ENV_VARS.BACKEND]: this.config.backend,
          [ENV_VARS.ENVIRONMENT]: this.config.environment,
          [ENV_VARS.MAX_ITERATIONS]: String(this.config.maxIterations),
          [ENV_VARS.MAX_DEPTH]: String(this.config.maxDepth),
          [ENV_VARS.VERBOSE]: this.config.verbose ? "true" : "false"
        },
        // Bundled Python sources ship next to dist/ in the package.
        cwd: path.join(__dirname, "..", "python")
      });
    } catch (error) {
      this.logger.warn(`Failed to start RLM server: ${error}`);
      this.isAvailable = false;
      return;
    }
    if (!this.process.stdout || !this.process.stdin) {
      this.logger.warn("RLM server process missing stdio streams");
      this.isAvailable = false;
      return;
    }
    this.reader = readline.createInterface({
      input: this.process.stdout,
      crlfDelay: Number.POSITIVE_INFINITY
    });
    this.reader.on("line", (line) => {
      this.handleMessage(line);
    });
    this.process.stderr?.on("data", (data) => {
      this.logger.debug(`RLM server stderr: ${data.toString()}`);
    });
    this.process.on("error", (error) => {
      this.logger.error(`RLM server error: ${error.message}`);
      this.isAvailable = false;
      this.hasStartupError = true;
    });
    // NOTE(review): on exit, pending requests are not rejected here — they
    // only settle via their 60 s timeout. Verify this is intended.
    this.process.on("exit", (code) => {
      this.logger.debug(`RLM server exited with code ${code}`);
      this.isReady = false;
      this.isAvailable = false;
      this.process = null;
    });
    await this.waitForReady();
  }
  // Poll every 100 ms (up to 10 s) for the "ready" handshake; rejects on
  // process death, startup error, or timeout.
  waitForReady() {
    return new Promise((resolve, reject) => {
      const TIMEOUT_MS = 1e4;
      const POLL_INTERVAL_MS = 100;
      const deadline = Date.now() + TIMEOUT_MS;
      const checkReady = () => {
        if (this.isReady) {
          resolve();
        } else if (!this.process || this.hasStartupError) {
          reject(new Error("RLM server process failed before ready"));
        } else if (Date.now() > deadline) {
          reject(new Error("RLM server startup timeout"));
        } else {
          setTimeout(checkReady, POLL_INTERVAL_MS);
        }
      };
      checkReady();
    });
  }
  // Parse one stdout line: either the ready handshake or a JSON-RPC
  // response matched to a pending request by id. Unparsable lines are
  // logged and dropped; responses to unknown ids are silently ignored.
  handleMessage(line) {
    try {
      const message = JSON.parse(line);
      if ("ready" in message) {
        const readyMsg = message;
        this.isReady = true;
        this.isAvailable = readyMsg.available;
        this.logger.info(`RLM server ready, available: ${this.isAvailable}`);
        return;
      }
      const response = message;
      const pending = this.pendingRequests.get(response.id);
      if (pending) {
        this.pendingRequests.delete(response.id);
        pending.resolve(response);
      }
    } catch (error) {
      this.logger.error(`Failed to parse RLM server message: ${line}`);
    }
  }
  // Send one JSON-RPC request over stdin and await its response, with a
  // 60 s per-request timeout. Server-side errors reject the promise.
  async sendRequest(method, params = {}) {
    await this.ensureServer();
    if (!this.process?.stdin) {
      throw new Error("RLM server not running");
    }
    const id = ++this.requestId;
    const request = { id, method, params };
    return new Promise((resolve, reject) => {
      const timeout = setTimeout(() => {
        this.pendingRequests.delete(id);
        reject(new Error(`RLM request timeout: ${method}`));
      }, 6e4);
      this.pendingRequests.set(id, {
        resolve: (response) => {
          clearTimeout(timeout);
          if (response.error) {
            reject(new Error(response.error));
          } else {
            resolve(response.result);
          }
        },
        reject: (error) => {
          clearTimeout(timeout);
          reject(error);
        }
      });
      // Newline-delimited framing: the embedded newline terminates the message.
      this.process?.stdin?.write(`${JSON.stringify(request)}
`);
    });
  }
  // (Re)start the server if it is not running or not yet ready.
  async ensureServer() {
    if (!this.process || !this.isReady) {
      await this.startServer();
    }
  }
  // True once the server is ready AND reported a usable backend.
  get available() {
    return this.isAvailable;
  }
  // Shallow snapshot so callers cannot mutate internal counters.
  getMetrics() {
    return { ...this.metrics };
  }
  // Register the (single) metrics observer; replaces any previous one.
  onMetrics(callback) {
    this.metricsCallback = callback;
  }
  // Record one finished request: classify it, push the latency sample into
  // the ring buffer, and recompute average/p95 over the filled window.
  updateMetrics(latencyMs, success, isStub, error) {
    this.metrics.totalRequests++;
    this.metrics.lastRequestTimestamp = Date.now();
    if (isStub) {
      this.metrics.stubResponses++;
    } else if (success) {
      this.metrics.successfulRequests++;
    } else {
      this.metrics.failedRequests++;
      this.metrics.lastErrorTimestamp = Date.now();
      this.metrics.lastError = error;
    }
    this.latencies[this.latencyIndex] = latencyMs;
    this.latencyIndex = (this.latencyIndex + 1) % _RLMClient.MAX_LATENCY_SAMPLES;
    this.latencyCount = Math.min(this.latencyCount + 1, _RLMClient.MAX_LATENCY_SAMPLES);
    if (this.latencyCount > 0) {
      // NOTE(review): slice(0, latencyCount) is only the true sample window
      // until the ring buffer wraps; after that it mixes old/new slots.
      const valid = this.latencies.slice(0, this.latencyCount);
      this.metrics.averageLatencyMs = valid.reduce((a, b) => a + b, 0) / this.latencyCount;
      this.metrics.p95LatencyMs = [...valid].sort((a, b) => a - b)[Math.floor(valid.length * 0.95)] ?? 0;
    }
    this.metricsCallback?.(this.getMetrics());
  }
  // A bare string becomes a single user message; arrays pass through as-is.
  static normalizeMessages(messages) {
    return typeof messages === "string" ? [{ role: "user", content: messages }] : messages;
  }
  // Run inference with jittered exponential-backoff retries for transient
  // errors. Never throws: non-retryable or exhausted failures, and an
  // unready server, all return a stub result instead.
  async infer(messages, opts) {
    const startTime = Date.now();
    const { maxRetries = 3, retryBaseDelay = 1e3, retryMaxDelay = 3e4 } = this.config;
    // Substrings (matched case-insensitively) that mark an error retryable.
    const RETRYABLE_PATTERNS = ["timeout", "rate limit", "connection", "503", "429", "econnreset"];
    let lastError = null;
    for (let attempt = 0; attempt < maxRetries; attempt++) {
      try {
        await this.ensureServer();
        if (!this.isReady) {
          this.updateMetrics(Date.now() - startTime, true, true);
          return stubResult();
        }
        const result = await this.sendRequest("infer", {
          messages: _RLMClient.normalizeMessages(messages),
          opts: opts ?? {}
        });
        this.updateMetrics(Date.now() - startTime, true, false);
        return result;
      } catch (error) {
        lastError = error instanceof Error ? error : new Error(String(error));
        const isRetryable = RETRYABLE_PATTERNS.some(
          (p) => lastError?.message.toLowerCase().includes(p)
        );
        if (!isRetryable || attempt === maxRetries - 1) {
          this.logger.error(`RLM inference failed after ${attempt + 1} attempts: ${error}`);
          this.updateMetrics(Date.now() - startTime, true, true, lastError.message);
          return stubResult(lastError.message);
        }
        // Exponential backoff capped at retryMaxDelay, with +/-25% jitter.
        const delay = Math.min(retryBaseDelay * 2 ** attempt, retryMaxDelay) * (0.75 + Math.random() * 0.5);
        this.metrics.totalRetries++;
        this.logger.warn(
          `RLM attempt ${attempt + 1}/${maxRetries} failed. Retrying in ${Math.round(delay)}ms`
        );
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }
    // Only reachable when maxRetries is 0 (loop body never ran).
    this.updateMetrics(Date.now() - startTime, false, false, lastError?.message);
    return stubResult(lastError?.message);
  }
  // Query live server status; on any failure return an offline snapshot
  // built from the local configuration.
  async getStatus() {
    try {
      await this.ensureServer();
      return await this.sendRequest("status");
    } catch {
      return {
        available: false,
        backend: this.config.backend,
        environment: this.config.environment,
        maxIterations: this.config.maxIterations,
        maxDepth: this.config.maxDepth
      };
    }
  }
  // Best-effort graceful shutdown (RPC ignored on failure), then close the
  // reader, kill the child, and reset all liveness flags.
  async shutdown() {
    if (!this.process) return;
    try {
      await this.sendRequest("shutdown");
    } catch {
    }
    this.reader?.close();
    this.process?.kill();
    this.process = null;
    this.isReady = false;
    this.isAvailable = false;
  }
};
363
// Build the placeholder result returned whenever the RLM backend cannot
// serve a request; `error` (optional) carries the failure reason.
function stubResult(error) {
  const metadata = { stub: true, error };
  return {
    metadata,
    text: "[RLM STUB] RLM backend not available"
  };
}
369
+
370
+ // index.ts
371
// index.ts module state.
// Safe env access so the module also loads outside Node.
var env = typeof process !== "undefined" ? process.env : {};
// Singleton holder for the shared RLMClient plus the config hash it was
// built from (used to detect config changes).
var clientState = {
  client: null,
  initPromise: null,
  configHash: null
};
// Cheap change-detection fingerprint over the config fields that require a
// client rebuild; missing fields hash as empty strings.
function computeConfigHash(config) {
  const parts = [
    config.backend ?? "",
    config.environment ?? "",
    String(config.maxIterations ?? ""),
    String(config.maxDepth ?? ""),
    config.pythonPath ?? ""
  ];
  return parts.join("|");
}
387
// Return the shared RLMClient, creating it on first use. When the runtime's
// rlmConfig fingerprint differs from the one the current client was built
// with, the old client is shut down (fire-and-forget) and replaced.
function getOrCreateClient(runtime) {
  const runtimeConfig = runtime.rlmConfig;
  const hash = computeConfigHash(runtimeConfig ?? {});
  const stale = clientState.client !== null && clientState.configHash !== hash;
  if (stale) {
    logger.info("[RLM] Config changed, recreating client");
    clientState.client.shutdown().catch((err) => {
      logger.warn("[RLM] Error shutting down old client:", err);
    });
    clientState.client = null;
    clientState.initPromise = null;
  }
  if (!clientState.client) {
    clientState.client = new RLMClient(runtimeConfig);
    clientState.configHash = hash;
  }
  return clientState.client;
}
406
// Tear down the singleton client (awaiting its shutdown) and clear all
// cached state so the next request builds a fresh client.
async function resetClient() {
  const existing = clientState.client;
  if (existing) {
    await existing.shutdown();
  }
  clientState.client = null;
  clientState.initPromise = null;
  clientState.configHash = null;
}
414
// Shared model handler: forward the prompt to the RLM client and return the
// generated text. Undefined option fields are stripped before the IPC call.
async function handleTextGeneration(runtime, params) {
  const client = getOrCreateClient(runtime);
  const candidateOpts = {
    maxTokens: params.maxTokens,
    temperature: params.temperature,
    topP: params.topP,
    stopSequences: params.stopSequences,
    user: params.user
  };
  const opts = {};
  for (const [key, value] of Object.entries(candidateOpts)) {
    if (value !== void 0) {
      opts[key] = value;
    }
  }
  const result = await client.infer(params.prompt ?? "", opts);
  return result.text;
}
428
// RLM plugin definition: registers the same RLM-backed handler for every
// text model type and exposes its settings via ELIZA_RLM_* variables.
var rlmPlugin = {
  name: "rlm",
  description: "RLM (Recursive Language Model) adapter for elizaOS - enables processing of arbitrarily long contexts through recursive self-calls",
  // Declared config surface: env var value when set, otherwise the default.
  config: {
    [ENV_VARS.BACKEND]: env[ENV_VARS.BACKEND] ?? DEFAULT_CONFIG.backend,
    [ENV_VARS.ENVIRONMENT]: env[ENV_VARS.ENVIRONMENT] ?? DEFAULT_CONFIG.environment,
    [ENV_VARS.MAX_ITERATIONS]: env[ENV_VARS.MAX_ITERATIONS] ?? String(DEFAULT_CONFIG.maxIterations),
    [ENV_VARS.MAX_DEPTH]: env[ENV_VARS.MAX_DEPTH] ?? String(DEFAULT_CONFIG.maxDepth),
    [ENV_VARS.VERBOSE]: env[ENV_VARS.VERBOSE] ?? "false",
    [ENV_VARS.PYTHON_PATH]: env[ENV_VARS.PYTHON_PATH] ?? DEFAULT_CONFIG.pythonPath
  },
  // Parse plugin config into runtime.rlmConfig, then probe the backend once
  // so availability is logged at startup.
  async init(config, runtime) {
    logger.info("[RLM] Initializing RLM plugin");
    runtime.rlmConfig = {
      backend: config[ENV_VARS.BACKEND] ?? DEFAULT_CONFIG.backend,
      environment: config[ENV_VARS.ENVIRONMENT] ?? DEFAULT_CONFIG.environment,
      maxIterations: Number.parseInt(config[ENV_VARS.MAX_ITERATIONS] ?? "", 10) || DEFAULT_CONFIG.maxIterations,
      maxDepth: Number.parseInt(config[ENV_VARS.MAX_DEPTH] ?? "", 10) || DEFAULT_CONFIG.maxDepth,
      verbose: ["1", "true", "yes"].includes((config[ENV_VARS.VERBOSE] ?? "").toLowerCase()),
      pythonPath: config[ENV_VARS.PYTHON_PATH] ?? DEFAULT_CONFIG.pythonPath
    };
    const client = getOrCreateClient(runtime);
    const status = await client.getStatus();
    if (status.available) {
      logger.info(`[RLM] Backend available: ${status.backend}`);
    } else {
      logger.warn("[RLM] Backend not available - running in stub mode");
    }
  },
  // Every text model type routes through the same RLM handler.
  models: {
    [ModelType.TEXT_SMALL]: handleTextGeneration,
    [ModelType.TEXT_LARGE]: handleTextGeneration,
    [ModelType.TEXT_REASONING_SMALL]: handleTextGeneration,
    [ModelType.TEXT_REASONING_LARGE]: handleTextGeneration,
    [ModelType.TEXT_COMPLETION]: handleTextGeneration
  },
  // Self-tests executed by the elizaOS test runner.
  tests: [
    {
      name: "rlm_plugin_tests",
      tests: [
        {
          name: "rlm_test_stub_mode",
          fn: async (_runtime) => {
            const result = stubResult();
            if (!result.metadata.stub) {
              throw new Error("Stub result should have stub=true");
            }
            logger.info("[RLM Test] Stub mode test passed");
          }
        },
        {
          name: "rlm_test_text_generation",
          fn: async (runtime) => {
            const text = await runtime.useModel(ModelType.TEXT_LARGE, {
              prompt: "Say 'hello' in exactly one word."
            });
            if (typeof text !== "string") {
              throw new Error("TEXT_LARGE should return string");
            }
            logger.info(`[RLM Test] TEXT_LARGE generated: "${text.substring(0, 50)}..."`);
          }
        },
        {
          name: "rlm_test_message_format",
          fn: async (runtime) => {
            const text = await runtime.useModel(ModelType.TEXT_LARGE, {
              prompt: "What is 2 + 2?"
            });
            if (typeof text !== "string") {
              throw new Error("TEXT_LARGE with prompt should return string");
            }
            logger.info("[RLM Test] Message format test passed");
          }
        }
      ]
    }
  ]
};
506
// The plugin object doubles as the module's default export.
var index_default = rlmPlugin;
export {
  DEFAULT_CONFIG,
  ENV_VARS,
  RLMClient,
  configFromEnv,
  index_default as default,
  resetClient,
  rlmPlugin,
  stubResult
};
//# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../index.ts","../client.ts","../types.ts"],"sourcesContent":["/**\n * RLM (Recursive Language Model) plugin for elizaOS.\n *\n * This plugin integrates Recursive Language Models into elizaOS, enabling\n * LLMs to process arbitrarily long contexts through recursive self-calls\n * in a REPL environment.\n *\n * Reference:\n * - Paper: https://arxiv.org/abs/2512.24601\n * - Implementation: https://github.com/alexzhang13/rlm\n */\n\nimport type { GenerateTextParams, IAgentRuntime, Plugin } from \"@elizaos/core\";\nimport { ModelType, logger } from \"@elizaos/core\";\n\nimport { RLMClient, stubResult } from \"./client\";\nimport type { RLMConfig } from \"./types\";\nimport { DEFAULT_CONFIG, ENV_VARS } from \"./types\";\n\n// Safe env access for browser/non-Node environments\nconst env: Record<string, string | undefined> = typeof process !== \"undefined\" ? process.env : {};\n\n// ============================================================================\n// Thread-safe Singleton Client Management\n// ============================================================================\n\n/**\n * Singleton state for thread-safe client initialization.\n *\n * In concurrent environments, multiple requests could try to initialize\n * the client simultaneously. We use a promise-based lock to ensure\n * only one initialization happens.\n */\ninterface ClientState {\n client: RLMClient | null;\n initPromise: Promise<RLMClient> | null;\n configHash: string | null;\n}\n\nconst clientState: ClientState = {\n client: null,\n initPromise: null,\n configHash: null,\n};\n\n/**\n * Compute a simple hash of config for detecting changes.\n */\nfunction computeConfigHash(config: Partial<RLMConfig>): string {\n const key = [\n config.backend ?? \"\",\n config.environment ?? \"\",\n String(config.maxIterations ?? \"\"),\n String(config.maxDepth ?? \"\"),\n config.pythonPath ?? 
\"\",\n ].join(\"|\");\n return key;\n}\n\n/**\n * Get or create the shared RLM client instance (thread-safe).\n *\n * This implementation ensures:\n * 1. Only one client is created even under concurrent initialization\n * 2. Config changes are detected and client is recreated\n * 3. Initialization errors are properly handled\n */\nfunction getOrCreateClient(runtime: IAgentRuntime): RLMClient {\n // Get config from runtime or environment\n const runtimeConfig = (runtime as unknown as Record<string, unknown>).rlmConfig as\n | Partial<RLMConfig>\n | undefined;\n const configHash = computeConfigHash(runtimeConfig ?? {});\n\n // Check if config changed - need to recreate client\n if (clientState.client && clientState.configHash !== configHash) {\n logger.info(\"[RLM] Config changed, recreating client\");\n // Shutdown old client (fire and forget)\n clientState.client.shutdown().catch((err) => {\n logger.warn(\"[RLM] Error shutting down old client:\", err);\n });\n clientState.client = null;\n clientState.initPromise = null;\n }\n\n // Fast path: client already exists with same config\n if (clientState.client) {\n return clientState.client;\n }\n\n // Slow path: need to create client\n // Create synchronously to avoid Promise in synchronous getter\n const client = new RLMClient(runtimeConfig);\n clientState.client = client;\n clientState.configHash = configHash;\n\n return client;\n}\n\n/**\n * Reset the client singleton. Useful for testing or forced reinitialization.\n */\nexport async function resetClient(): Promise<void> {\n if (clientState.client) {\n await clientState.client.shutdown();\n }\n clientState.client = null;\n clientState.initPromise = null;\n clientState.configHash = null;\n}\n\n/**\n * Handle text generation using RLM.\n */\nasync function handleTextGeneration(\n runtime: IAgentRuntime,\n params: GenerateTextParams,\n): Promise<string> {\n const client = getOrCreateClient(runtime);\n\n // Use prompt from params\n const input = params.prompt ?? 
\"\";\n\n const opts = {\n maxTokens: params.maxTokens,\n temperature: params.temperature,\n topP: params.topP,\n stopSequences: params.stopSequences,\n user: params.user,\n };\n\n // Remove undefined values\n const cleanOpts = Object.fromEntries(Object.entries(opts).filter(([, v]) => v !== undefined));\n\n const result = await client.infer(input, cleanOpts);\n return result.text;\n}\n\n/**\n * RLM plugin definition.\n */\nexport const rlmPlugin: Plugin = {\n name: \"rlm\",\n description:\n \"RLM (Recursive Language Model) adapter for elizaOS - enables processing of arbitrarily long contexts through recursive self-calls\",\n\n config: {\n [ENV_VARS.BACKEND]: env[ENV_VARS.BACKEND] ?? DEFAULT_CONFIG.backend,\n [ENV_VARS.ENVIRONMENT]: env[ENV_VARS.ENVIRONMENT] ?? DEFAULT_CONFIG.environment,\n [ENV_VARS.MAX_ITERATIONS]: env[ENV_VARS.MAX_ITERATIONS] ?? String(DEFAULT_CONFIG.maxIterations),\n [ENV_VARS.MAX_DEPTH]: env[ENV_VARS.MAX_DEPTH] ?? String(DEFAULT_CONFIG.maxDepth),\n [ENV_VARS.VERBOSE]: env[ENV_VARS.VERBOSE] ?? \"false\",\n [ENV_VARS.PYTHON_PATH]: env[ENV_VARS.PYTHON_PATH] ?? DEFAULT_CONFIG.pythonPath,\n },\n\n async init(config: Record<string, string>, runtime: IAgentRuntime): Promise<void> {\n logger.info(\"[RLM] Initializing RLM plugin\");\n\n // Store config on runtime\n (runtime as unknown as Record<string, unknown>).rlmConfig = {\n backend: config[ENV_VARS.BACKEND] ?? DEFAULT_CONFIG.backend,\n environment: config[ENV_VARS.ENVIRONMENT] ?? DEFAULT_CONFIG.environment,\n maxIterations:\n Number.parseInt(config[ENV_VARS.MAX_ITERATIONS] ?? \"\", 10) || DEFAULT_CONFIG.maxIterations,\n maxDepth: Number.parseInt(config[ENV_VARS.MAX_DEPTH] ?? \"\", 10) || DEFAULT_CONFIG.maxDepth,\n verbose: [\"1\", \"true\", \"yes\"].includes((config[ENV_VARS.VERBOSE] ?? \"\").toLowerCase()),\n pythonPath: config[ENV_VARS.PYTHON_PATH] ?? 
DEFAULT_CONFIG.pythonPath,\n };\n\n // Pre-initialize client\n const client = getOrCreateClient(runtime);\n const status = await client.getStatus();\n\n if (status.available) {\n logger.info(`[RLM] Backend available: ${status.backend}`);\n } else {\n logger.warn(\"[RLM] Backend not available - running in stub mode\");\n }\n },\n\n models: {\n [ModelType.TEXT_SMALL]: handleTextGeneration,\n [ModelType.TEXT_LARGE]: handleTextGeneration,\n [ModelType.TEXT_REASONING_SMALL]: handleTextGeneration,\n [ModelType.TEXT_REASONING_LARGE]: handleTextGeneration,\n [ModelType.TEXT_COMPLETION]: handleTextGeneration,\n },\n\n tests: [\n {\n name: \"rlm_plugin_tests\",\n tests: [\n {\n name: \"rlm_test_stub_mode\",\n fn: async (_runtime: IAgentRuntime): Promise<void> => {\n // Test that stub mode works\n const result = stubResult();\n if (!result.metadata.stub) {\n throw new Error(\"Stub result should have stub=true\");\n }\n logger.info(\"[RLM Test] Stub mode test passed\");\n },\n },\n {\n name: \"rlm_test_text_generation\",\n fn: async (runtime: IAgentRuntime): Promise<void> => {\n const text = await runtime.useModel(ModelType.TEXT_LARGE, {\n prompt: \"Say 'hello' in exactly one word.\",\n });\n\n if (typeof text !== \"string\") {\n throw new Error(\"TEXT_LARGE should return string\");\n }\n\n logger.info(`[RLM Test] TEXT_LARGE generated: \"${text.substring(0, 50)}...\"`);\n },\n },\n {\n name: \"rlm_test_message_format\",\n fn: async (runtime: IAgentRuntime): Promise<void> => {\n const text = await runtime.useModel(ModelType.TEXT_LARGE, {\n prompt: \"What is 2 + 2?\",\n });\n\n if (typeof text !== \"string\") {\n throw new Error(\"TEXT_LARGE with prompt should return string\");\n }\n\n logger.info(\"[RLM Test] Message format test passed\");\n },\n },\n ],\n },\n ],\n};\n\nexport default rlmPlugin;\n\n// Re-export types and client\nexport { configFromEnv, RLMClient, stubResult } from \"./client\";\nexport type {\n GenerateTextParams,\n RLMConfig,\n RLMInferOptions,\n 
RLMMessage,\n RLMMetadata,\n RLMResult,\n RLMStatusResponse,\n} from \"./types\";\nexport { DEFAULT_CONFIG, ENV_VARS } from \"./types\";\n","/** RLM Client - communicates with Python subprocess via JSON-RPC IPC. */\n\nimport { type ChildProcess, spawn } from \"node:child_process\";\nimport * as path from \"node:path\";\nimport * as readline from \"node:readline\";\n\nimport type {\n IPCReadyMessage,\n IPCRequest,\n IPCResponse,\n MetricsCallback,\n RLMConfig,\n RLMInferOptions,\n RLMMessage,\n RLMMetrics,\n RLMResult,\n RLMStatusResponse,\n} from \"./types\";\nimport { DEFAULT_CONFIG, ENV_VARS, validateConfig } from \"./types\";\n\nexport { DEFAULT_CONFIG };\nexport type { RLMMetrics, MetricsCallback };\n\ninterface Logger {\n info: (message: string, ...args: unknown[]) => void;\n warn: (message: string, ...args: unknown[]) => void;\n error: (message: string, ...args: unknown[]) => void;\n debug: (message: string, ...args: unknown[]) => void;\n}\n\nconst defaultLogger: Logger = {\n info: (msg, ...args) => console.log(`[RLM] ${msg}`, ...args),\n warn: (msg, ...args) => console.warn(`[RLM] ${msg}`, ...args),\n error: (msg, ...args) => console.error(`[RLM] ${msg}`, ...args),\n debug: (msg, ...args) => console.debug(`[RLM] ${msg}`, ...args),\n};\n\nexport function configFromEnv(env: NodeJS.ProcessEnv = process.env): RLMConfig {\n return {\n backend: (env[ENV_VARS.BACKEND] as RLMConfig[\"backend\"]) ?? DEFAULT_CONFIG.backend,\n backendKwargs: {},\n environment:\n (env[ENV_VARS.ENVIRONMENT] as RLMConfig[\"environment\"]) ?? DEFAULT_CONFIG.environment,\n maxIterations:\n Number.parseInt(env[ENV_VARS.MAX_ITERATIONS] ?? \"\", 10) || DEFAULT_CONFIG.maxIterations,\n maxDepth: Number.parseInt(env[ENV_VARS.MAX_DEPTH] ?? \"\", 10) || DEFAULT_CONFIG.maxDepth,\n verbose: [\"1\", \"true\", \"yes\"].includes((env[ENV_VARS.VERBOSE] ?? \"\").toLowerCase()),\n pythonPath: env[ENV_VARS.PYTHON_PATH] ?? DEFAULT_CONFIG.pythonPath,\n maxRetries: Number.parseInt(env[ENV_VARS.MAX_RETRIES] ?? 
\"\", 10) || DEFAULT_CONFIG.maxRetries,\n retryBaseDelay:\n Number.parseInt(env[ENV_VARS.RETRY_BASE_DELAY] ?? \"\", 10) || DEFAULT_CONFIG.retryBaseDelay,\n retryMaxDelay:\n Number.parseInt(env[ENV_VARS.RETRY_MAX_DELAY] ?? \"\", 10) || DEFAULT_CONFIG.retryMaxDelay,\n };\n}\n\nexport class RLMClient {\n private config: RLMConfig;\n private process: ChildProcess | null = null;\n private reader: readline.Interface | null = null;\n private requestId = 0;\n private pendingRequests = new Map<\n number,\n { resolve: (value: IPCResponse) => void; reject: (error: Error) => void }\n >();\n private isReady = false;\n private isAvailable = false;\n private hasStartupError = false;\n private logger: Logger;\n private metrics: RLMMetrics = {\n totalRequests: 0,\n successfulRequests: 0,\n failedRequests: 0,\n stubResponses: 0,\n totalRetries: 0,\n averageLatencyMs: 0,\n p95LatencyMs: 0,\n lastRequestTimestamp: 0,\n };\n private static readonly MAX_LATENCY_SAMPLES = 1000;\n private latencies: number[] = new Array(RLMClient.MAX_LATENCY_SAMPLES).fill(0);\n private latencyIndex = 0;\n private latencyCount = 0;\n private metricsCallback: MetricsCallback | null = null;\n\n constructor(config?: Partial<RLMConfig>, logger?: Logger, strictValidation = false) {\n this.config = { ...configFromEnv(), ...config };\n this.logger = logger ?? 
defaultLogger;\n\n const errors = validateConfig(this.config);\n if (errors.length > 0) {\n if (strictValidation) {\n throw new Error(`RLM configuration invalid: ${errors.join(\"; \")}`);\n }\n for (const error of errors) {\n this.logger.warn(`Configuration warning: ${error}`);\n }\n }\n }\n\n private async startServer(): Promise<void> {\n if (this.process) return;\n this.hasStartupError = false;\n\n this.logger.debug(\n `Starting RLM server: ${this.config.pythonPath} -m elizaos_plugin_rlm.server`,\n );\n\n try {\n this.process = spawn(this.config.pythonPath, [\"-m\", \"elizaos_plugin_rlm.server\"], {\n stdio: [\"pipe\", \"pipe\", \"pipe\"],\n env: {\n ...process.env,\n [ENV_VARS.BACKEND]: this.config.backend,\n [ENV_VARS.ENVIRONMENT]: this.config.environment,\n [ENV_VARS.MAX_ITERATIONS]: String(this.config.maxIterations),\n [ENV_VARS.MAX_DEPTH]: String(this.config.maxDepth),\n [ENV_VARS.VERBOSE]: this.config.verbose ? \"true\" : \"false\",\n },\n cwd: path.join(__dirname, \"..\", \"python\"),\n });\n } catch (error) {\n this.logger.warn(`Failed to start RLM server: ${error}`);\n this.isAvailable = false;\n return;\n }\n\n if (!this.process.stdout || !this.process.stdin) {\n this.logger.warn(\"RLM server process missing stdio streams\");\n this.isAvailable = false;\n return;\n }\n\n // Set up line reader for responses\n this.reader = readline.createInterface({\n input: this.process.stdout,\n crlfDelay: Number.POSITIVE_INFINITY,\n });\n\n // Handle incoming messages\n this.reader.on(\"line\", (line: string) => {\n this.handleMessage(line);\n });\n\n // Handle errors\n this.process.stderr?.on(\"data\", (data: Buffer) => {\n this.logger.debug(`RLM server stderr: ${data.toString()}`);\n });\n\n this.process.on(\"error\", (error: Error) => {\n this.logger.error(`RLM server error: ${error.message}`);\n this.isAvailable = false;\n this.hasStartupError = true;\n });\n\n this.process.on(\"exit\", (code: number | null) => {\n this.logger.debug(`RLM server exited with code 
${code}`);\n this.isReady = false;\n this.isAvailable = false;\n this.process = null;\n });\n\n await this.waitForReady();\n }\n\n private waitForReady(): Promise<void> {\n return new Promise((resolve, reject) => {\n const TIMEOUT_MS = 10000;\n const POLL_INTERVAL_MS = 100;\n const deadline = Date.now() + TIMEOUT_MS;\n\n const checkReady = () => {\n if (this.isReady) {\n resolve();\n } else if (!this.process || this.hasStartupError) {\n reject(new Error(\"RLM server process failed before ready\"));\n } else if (Date.now() > deadline) {\n reject(new Error(\"RLM server startup timeout\"));\n } else {\n setTimeout(checkReady, POLL_INTERVAL_MS);\n }\n };\n checkReady();\n });\n }\n\n private handleMessage(line: string): void {\n try {\n const message = JSON.parse(line) as IPCResponse | IPCReadyMessage;\n\n // Check for ready message\n if (\"ready\" in message) {\n const readyMsg = message as IPCReadyMessage;\n this.isReady = true;\n this.isAvailable = readyMsg.available;\n this.logger.info(`RLM server ready, available: ${this.isAvailable}`);\n return;\n }\n\n // Handle response\n const response = message as IPCResponse;\n const pending = this.pendingRequests.get(response.id);\n if (pending) {\n this.pendingRequests.delete(response.id);\n pending.resolve(response);\n }\n } catch (error) {\n this.logger.error(`Failed to parse RLM server message: ${line}`);\n }\n }\n\n private async sendRequest<T>(\n method: IPCRequest[\"method\"],\n params: Record<string, unknown> = {},\n ): Promise<T> {\n await this.ensureServer();\n\n if (!this.process?.stdin) {\n throw new Error(\"RLM server not running\");\n }\n\n const id = ++this.requestId;\n const request: IPCRequest = { id, method, params };\n\n return new Promise((resolve, reject) => {\n const timeout = setTimeout(() => {\n this.pendingRequests.delete(id);\n reject(new Error(`RLM request timeout: ${method}`));\n }, 60000);\n\n this.pendingRequests.set(id, {\n resolve: (response: IPCResponse) => {\n clearTimeout(timeout);\n if 
(response.error) {\n reject(new Error(response.error));\n } else {\n resolve(response.result as T);\n }\n },\n reject: (error: Error) => {\n clearTimeout(timeout);\n reject(error);\n },\n });\n\n this.process?.stdin?.write(`${JSON.stringify(request)}\\n`);\n });\n }\n\n private async ensureServer(): Promise<void> {\n if (!this.process || !this.isReady) {\n await this.startServer();\n }\n }\n\n get available(): boolean {\n return this.isAvailable;\n }\n\n getMetrics(): RLMMetrics {\n return { ...this.metrics };\n }\n\n onMetrics(callback: MetricsCallback): void {\n this.metricsCallback = callback;\n }\n\n private updateMetrics(\n latencyMs: number,\n success: boolean,\n isStub: boolean,\n error?: string,\n ): void {\n this.metrics.totalRequests++;\n this.metrics.lastRequestTimestamp = Date.now();\n\n if (isStub) {\n this.metrics.stubResponses++;\n } else if (success) {\n this.metrics.successfulRequests++;\n } else {\n this.metrics.failedRequests++;\n this.metrics.lastErrorTimestamp = Date.now();\n this.metrics.lastError = error;\n }\n\n // Track latency in circular buffer\n this.latencies[this.latencyIndex] = latencyMs;\n this.latencyIndex = (this.latencyIndex + 1) % RLMClient.MAX_LATENCY_SAMPLES;\n this.latencyCount = Math.min(this.latencyCount + 1, RLMClient.MAX_LATENCY_SAMPLES);\n\n // Calculate stats from valid samples\n if (this.latencyCount > 0) {\n const valid = this.latencies.slice(0, this.latencyCount);\n this.metrics.averageLatencyMs = valid.reduce((a, b) => a + b, 0) / this.latencyCount;\n this.metrics.p95LatencyMs =\n [...valid].sort((a, b) => a - b)[Math.floor(valid.length * 0.95)] ?? 0;\n }\n\n this.metricsCallback?.(this.getMetrics());\n }\n\n static normalizeMessages(messages: string | RLMMessage[]): RLMMessage[] {\n return typeof messages === \"string\" ? 
[{ role: \"user\", content: messages }] : messages;\n }\n\n async infer(messages: string | RLMMessage[], opts?: RLMInferOptions): Promise<RLMResult> {\n const startTime = Date.now();\n const { maxRetries = 3, retryBaseDelay = 1000, retryMaxDelay = 30000 } = this.config;\n const RETRYABLE_PATTERNS = [\"timeout\", \"rate limit\", \"connection\", \"503\", \"429\", \"econnreset\"];\n\n let lastError: Error | null = null;\n\n for (let attempt = 0; attempt < maxRetries; attempt++) {\n try {\n await this.ensureServer();\n\n if (!this.isReady) {\n this.updateMetrics(Date.now() - startTime, true, true);\n return stubResult();\n }\n\n const result = await this.sendRequest<RLMResult>(\"infer\", {\n messages: RLMClient.normalizeMessages(messages),\n opts: opts ?? {},\n });\n\n this.updateMetrics(Date.now() - startTime, true, false);\n return result;\n } catch (error) {\n lastError = error instanceof Error ? error : new Error(String(error));\n const isRetryable = RETRYABLE_PATTERNS.some((p) =>\n lastError?.message.toLowerCase().includes(p),\n );\n\n if (!isRetryable || attempt === maxRetries - 1) {\n this.logger.error(`RLM inference failed after ${attempt + 1} attempts: ${error}`);\n this.updateMetrics(Date.now() - startTime, true, true, lastError.message);\n return stubResult(lastError.message);\n }\n\n const delay =\n Math.min(retryBaseDelay * 2 ** attempt, retryMaxDelay) * (0.75 + Math.random() * 0.5);\n this.metrics.totalRetries++;\n this.logger.warn(\n `RLM attempt ${attempt + 1}/${maxRetries} failed. 
Retrying in ${Math.round(delay)}ms`,\n );\n await new Promise((resolve) => setTimeout(resolve, delay));\n }\n }\n\n this.updateMetrics(Date.now() - startTime, false, false, lastError?.message);\n return stubResult(lastError?.message);\n }\n\n async getStatus(): Promise<RLMStatusResponse> {\n try {\n await this.ensureServer();\n return await this.sendRequest<RLMStatusResponse>(\"status\");\n } catch {\n return {\n available: false,\n backend: this.config.backend,\n environment: this.config.environment,\n maxIterations: this.config.maxIterations,\n maxDepth: this.config.maxDepth,\n };\n }\n }\n\n async shutdown(): Promise<void> {\n if (!this.process) return;\n try {\n await this.sendRequest(\"shutdown\");\n } catch {\n /* ignore */\n }\n this.reader?.close();\n this.process?.kill();\n this.process = null;\n this.isReady = false;\n this.isAvailable = false;\n }\n}\n\nexport function stubResult(error?: string): RLMResult {\n return {\n text: \"[RLM STUB] RLM backend not available\",\n metadata: { stub: true, error },\n };\n}\n","/** Types for the RLM (Recursive Language Model) plugin. 
*/\n\nexport type RLMBackend = \"openai\" | \"anthropic\" | \"gemini\" | \"groq\" | \"openrouter\";\nexport type RLMEnvironment = \"local\" | \"docker\" | \"modal\" | \"prime\";\n\nexport interface RLMConfig {\n backend: RLMBackend;\n backendKwargs: Record<string, string>;\n environment: RLMEnvironment;\n maxIterations: number;\n maxDepth: number;\n verbose: boolean;\n pythonPath: string;\n maxRetries?: number;\n retryBaseDelay?: number;\n retryMaxDelay?: number;\n}\n\nexport interface RLMMessage {\n role: \"user\" | \"assistant\" | \"system\";\n content: string;\n}\n\nexport interface RLMMetadata {\n stub: boolean;\n iterations?: number;\n depth?: number;\n error?: string;\n}\n\nexport interface RLMResult {\n text: string;\n metadata: RLMMetadata;\n}\n\nexport interface RLMInferOptions {\n /** Model identifier for this request */\n model?: string;\n /** Maximum tokens to generate */\n maxTokens?: number;\n /** Sampling temperature */\n temperature?: number;\n /** Top-p sampling parameter */\n topP?: number;\n /** Stop sequences */\n stopSequences?: string[];\n /** User identifier for tracking */\n user?: string;\n /** Enable streaming (not yet supported by RLM) */\n stream?: boolean;\n\n // Per-request RLM overrides (Paper Algorithm 1)\n /** Override max iterations for this request */\n maxIterations?: number;\n /** Override max recursion depth for this request */\n maxDepth?: number;\n /** Override root model for this request */\n rootModel?: string;\n /** Override subcall model for this request */\n subcallModel?: string;\n /** Enable trajectory logging for this request */\n logTrajectories?: boolean;\n /** Enable cost tracking for this request */\n trackCosts?: boolean;\n\n // NOTE: Custom REPL tool injection is NOT supported by the upstream RLM library.\n // See: https://arxiv.org/abs/2512.24601 Section 3.3 - the paper describes the concept\n // but the current library implementation does not expose this capability.\n}\n\nexport interface RLMStatusResponse {\n 
available: boolean;\n backend: string;\n environment: string;\n maxIterations: number;\n maxDepth: number;\n}\n\nexport interface IPCRequest {\n id: number;\n method: \"infer\" | \"status\" | \"shutdown\";\n params: Record<string, unknown>;\n}\n\nexport interface IPCResponse<T = unknown> {\n id: number;\n result?: T;\n error?: string;\n}\n\nexport interface IPCReadyMessage {\n ready: boolean;\n available: boolean;\n}\n\nexport interface GenerateTextParams {\n prompt?: string;\n system?: string;\n messages?: RLMMessage[];\n model?: string;\n maxTokens?: number;\n temperature?: number;\n topP?: number;\n stopSequences?: string[];\n user?: string;\n stream?: boolean;\n}\n\nexport const DEFAULT_CONFIG: RLMConfig = {\n backend: \"gemini\",\n backendKwargs: {},\n environment: \"local\",\n maxIterations: 4,\n maxDepth: 1,\n verbose: false,\n pythonPath: \"python\",\n maxRetries: 3,\n retryBaseDelay: 1000,\n retryMaxDelay: 30000,\n};\n\nexport interface RLMMetrics {\n totalRequests: number;\n successfulRequests: number;\n failedRequests: number;\n stubResponses: number;\n totalRetries: number;\n averageLatencyMs: number;\n p95LatencyMs: number;\n lastRequestTimestamp: number;\n lastErrorTimestamp?: number;\n lastError?: string;\n}\n\nexport type MetricsCallback = (metrics: RLMMetrics) => void;\n\nexport const ENV_VARS = {\n BACKEND: \"ELIZA_RLM_BACKEND\",\n ENVIRONMENT: \"ELIZA_RLM_ENV\",\n MAX_ITERATIONS: \"ELIZA_RLM_MAX_ITERATIONS\",\n MAX_DEPTH: \"ELIZA_RLM_MAX_DEPTH\",\n VERBOSE: \"ELIZA_RLM_VERBOSE\",\n PYTHON_PATH: \"ELIZA_RLM_PYTHON_PATH\",\n MAX_RETRIES: \"ELIZA_RLM_MAX_RETRIES\",\n RETRY_BASE_DELAY: \"ELIZA_RLM_RETRY_BASE_DELAY\",\n RETRY_MAX_DELAY: \"ELIZA_RLM_RETRY_MAX_DELAY\",\n} as const;\n\nexport const VALID_BACKENDS: readonly RLMBackend[] = [\n \"openai\",\n \"anthropic\",\n \"gemini\",\n \"groq\",\n \"openrouter\",\n];\nexport const VALID_ENVIRONMENTS: readonly RLMEnvironment[] = [\"local\", \"docker\", \"modal\", \"prime\"];\n\nexport class RLMConfigError 
extends Error {\n constructor(message: string) {\n super(message);\n this.name = \"RLMConfigError\";\n }\n}\n\n/** Validate config. Returns errors array, or throws if strict=true. */\nexport function validateConfig(config: Partial<RLMConfig>, strict = false): string[] {\n const errors: string[] = [];\n\n if (config.backend !== undefined && !VALID_BACKENDS.includes(config.backend)) {\n errors.push(`Invalid backend '${config.backend}'. Valid options: ${VALID_BACKENDS.join(\", \")}`);\n }\n\n if (config.environment !== undefined && !VALID_ENVIRONMENTS.includes(config.environment)) {\n errors.push(\n `Invalid environment '${config.environment}'. Valid options: ${VALID_ENVIRONMENTS.join(\", \")}`,\n );\n }\n\n if (config.maxIterations !== undefined && config.maxIterations < 1) {\n errors.push(\"maxIterations must be >= 1\");\n }\n\n if (config.maxDepth !== undefined && config.maxDepth < 1) {\n errors.push(\"maxDepth must be >= 1\");\n }\n\n if (config.maxRetries !== undefined && config.maxRetries < 0) {\n errors.push(\"maxRetries must be >= 0\");\n }\n\n if (config.retryBaseDelay !== undefined && config.retryBaseDelay < 0) {\n errors.push(\"retryBaseDelay must be >= 0\");\n }\n\n if (\n config.retryBaseDelay !== undefined &&\n config.retryMaxDelay !== undefined &&\n config.retryMaxDelay < config.retryBaseDelay\n ) {\n errors.push(\"retryMaxDelay must be >= retryBaseDelay\");\n }\n\n if (strict && errors.length > 0) {\n throw new RLMConfigError(errors.join(\"; \"));\n }\n\n return 
errors;\n}\n"],"mappings":";AAaA,SAAS,WAAW,cAAc;;;ACXlC,SAA4B,aAAa;AACzC,YAAY,UAAU;AACtB,YAAY,cAAc;;;ACwGnB,IAAM,iBAA4B;AAAA,EACvC,SAAS;AAAA,EACT,eAAe,CAAC;AAAA,EAChB,aAAa;AAAA,EACb,eAAe;AAAA,EACf,UAAU;AAAA,EACV,SAAS;AAAA,EACT,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,gBAAgB;AAAA,EAChB,eAAe;AACjB;AAiBO,IAAM,WAAW;AAAA,EACtB,SAAS;AAAA,EACT,aAAa;AAAA,EACb,gBAAgB;AAAA,EAChB,WAAW;AAAA,EACX,SAAS;AAAA,EACT,aAAa;AAAA,EACb,aAAa;AAAA,EACb,kBAAkB;AAAA,EAClB,iBAAiB;AACnB;AAEO,IAAM,iBAAwC;AAAA,EACnD;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AACO,IAAM,qBAAgD,CAAC,SAAS,UAAU,SAAS,OAAO;AAE1F,IAAM,iBAAN,cAA6B,MAAM;AAAA,EACxC,YAAY,SAAiB;AAC3B,UAAM,OAAO;AACb,SAAK,OAAO;AAAA,EACd;AACF;AAGO,SAAS,eAAe,QAA4B,SAAS,OAAiB;AACnF,QAAM,SAAmB,CAAC;AAE1B,MAAI,OAAO,YAAY,UAAa,CAAC,eAAe,SAAS,OAAO,OAAO,GAAG;AAC5E,WAAO,KAAK,oBAAoB,OAAO,OAAO,qBAAqB,eAAe,KAAK,IAAI,CAAC,EAAE;AAAA,EAChG;AAEA,MAAI,OAAO,gBAAgB,UAAa,CAAC,mBAAmB,SAAS,OAAO,WAAW,GAAG;AACxF,WAAO;AAAA,MACL,wBAAwB,OAAO,WAAW,qBAAqB,mBAAmB,KAAK,IAAI,CAAC;AAAA,IAC9F;AAAA,EACF;AAEA,MAAI,OAAO,kBAAkB,UAAa,OAAO,gBAAgB,GAAG;AAClE,WAAO,KAAK,4BAA4B;AAAA,EAC1C;AAEA,MAAI,OAAO,aAAa,UAAa,OAAO,WAAW,GAAG;AACxD,WAAO,KAAK,uBAAuB;AAAA,EACrC;AAEA,MAAI,OAAO,eAAe,UAAa,OAAO,aAAa,GAAG;AAC5D,WAAO,KAAK,yBAAyB;AAAA,EACvC;AAEA,MAAI,OAAO,mBAAmB,UAAa,OAAO,iBAAiB,GAAG;AACpE,WAAO,KAAK,6BAA6B;AAAA,EAC3C;AAEA,MACE,OAAO,mBAAmB,UAC1B,OAAO,kBAAkB,UACzB,OAAO,gBAAgB,OAAO,gBAC9B;AACA,WAAO,KAAK,yCAAyC;AAAA,EACvD;AAEA,MAAI,UAAU,OAAO,SAAS,GAAG;AAC/B,UAAM,IAAI,eAAe,OAAO,KAAK,IAAI,CAAC;AAAA,EAC5C;AAEA,SAAO;AACT;;;ADjLA,IAAM,gBAAwB;AAAA,EAC5B,MAAM,CAAC,QAAQ,SAAS,QAAQ,IAAI,SAAS,GAAG,IAAI,GAAG,IAAI;AAAA,EAC3D,MAAM,CAAC,QAAQ,SAAS,QAAQ,KAAK,SAAS,GAAG,IAAI,GAAG,IAAI;AAAA,EAC5D,OAAO,CAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG,IAAI,GAAG,IAAI;AAAA,EAC9D,OAAO,CAAC,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG,IAAI,GAAG,IAAI;AAChE;AAEO,SAAS,cAAcA,OAAyB,QAAQ,KAAgB;AAC7E,SAAO;AAAA,IACL,SAAUA,KAAI,SAAS,OAAO,KAA8B,eAAe;AAAA,IAC3E,eAAe,CAAC;AAAA,IAChB,aACGA,KAAI,SAAS,WAAW,KAAkC,eAAe;AAAA,IAC5E,eACE,OAAO,SAASA,KAAI,SAAS,cAAc,KAAK,IAAI,EAAE,KAA
K,eAAe;AAAA,IAC5E,UAAU,OAAO,SAASA,KAAI,SAAS,SAAS,KAAK,IAAI,EAAE,KAAK,eAAe;AAAA,IAC/E,SAAS,CAAC,KAAK,QAAQ,KAAK,EAAE,UAAUA,KAAI,SAAS,OAAO,KAAK,IAAI,YAAY,CAAC;AAAA,IAClF,YAAYA,KAAI,SAAS,WAAW,KAAK,eAAe;AAAA,IACxD,YAAY,OAAO,SAASA,KAAI,SAAS,WAAW,KAAK,IAAI,EAAE,KAAK,eAAe;AAAA,IACnF,gBACE,OAAO,SAASA,KAAI,SAAS,gBAAgB,KAAK,IAAI,EAAE,KAAK,eAAe;AAAA,IAC9E,eACE,OAAO,SAASA,KAAI,SAAS,eAAe,KAAK,IAAI,EAAE,KAAK,eAAe;AAAA,EAC/E;AACF;AAEO,IAAM,YAAN,MAAM,WAAU;AAAA,EACb;AAAA,EACA,UAA+B;AAAA,EAC/B,SAAoC;AAAA,EACpC,YAAY;AAAA,EACZ,kBAAkB,oBAAI,IAG5B;AAAA,EACM,UAAU;AAAA,EACV,cAAc;AAAA,EACd,kBAAkB;AAAA,EAClB;AAAA,EACA,UAAsB;AAAA,IAC5B,eAAe;AAAA,IACf,oBAAoB;AAAA,IACpB,gBAAgB;AAAA,IAChB,eAAe;AAAA,IACf,cAAc;AAAA,IACd,kBAAkB;AAAA,IAClB,cAAc;AAAA,IACd,sBAAsB;AAAA,EACxB;AAAA,EACA,OAAwB,sBAAsB;AAAA,EACtC,YAAsB,IAAI,MAAM,WAAU,mBAAmB,EAAE,KAAK,CAAC;AAAA,EACrE,eAAe;AAAA,EACf,eAAe;AAAA,EACf,kBAA0C;AAAA,EAElD,YAAY,QAA6BC,SAAiB,mBAAmB,OAAO;AAClF,SAAK,SAAS,EAAE,GAAG,cAAc,GAAG,GAAG,OAAO;AAC9C,SAAK,SAASA,WAAU;AAExB,UAAM,SAAS,eAAe,KAAK,MAAM;AACzC,QAAI,OAAO,SAAS,GAAG;AACrB,UAAI,kBAAkB;AACpB,cAAM,IAAI,MAAM,8BAA8B,OAAO,KAAK,IAAI,CAAC,EAAE;AAAA,MACnE;AACA,iBAAW,SAAS,QAAQ;AAC1B,aAAK,OAAO,KAAK,0BAA0B,KAAK,EAAE;AAAA,MACpD;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAc,cAA6B;AACzC,QAAI,KAAK,QAAS;AAClB,SAAK,kBAAkB;AAEvB,SAAK,OAAO;AAAA,MACV,wBAAwB,KAAK,OAAO,UAAU;AAAA,IAChD;AAEA,QAAI;AACF,WAAK,UAAU,MAAM,KAAK,OAAO,YAAY,CAAC,MAAM,2BAA2B,GAAG;AAAA,QAChF,OAAO,CAAC,QAAQ,QAAQ,MAAM;AAAA,QAC9B,KAAK;AAAA,UACH,GAAG,QAAQ;AAAA,UACX,CAAC,SAAS,OAAO,GAAG,KAAK,OAAO;AAAA,UAChC,CAAC,SAAS,WAAW,GAAG,KAAK,OAAO;AAAA,UACpC,CAAC,SAAS,cAAc,GAAG,OAAO,KAAK,OAAO,aAAa;AAAA,UAC3D,CAAC,SAAS,SAAS,GAAG,OAAO,KAAK,OAAO,QAAQ;AAAA,UACjD,CAAC,SAAS,OAAO,GAAG,KAAK,OAAO,UAAU,SAAS;AAAA,QACrD;AAAA,QACA,KAAU,UAAK,WAAW,MAAM,QAAQ;AAAA,MAC1C,CAAC;AAAA,IACH,SAAS,OAAO;AACd,WAAK,OAAO,KAAK,+BAA+B,KAAK,EAAE;AACvD,WAAK,cAAc;AACnB;AAAA,IACF;AAEA,QAAI,CAAC,KAAK,QAAQ,UAAU,CAAC,KAAK,QAAQ,OAAO;AAC/C,WAAK,OAAO,KAAK,0CAA0C;AAC3D,WAAK,cAAc;AACnB;AAAA,IACF;AAGA,SAAK,SAAkB,yBAAgB;AAAA,MACrC,OA
AO,KAAK,QAAQ;AAAA,MACpB,WAAW,OAAO;AAAA,IACpB,CAAC;AAGD,SAAK,OAAO,GAAG,QAAQ,CAAC,SAAiB;AACvC,WAAK,cAAc,IAAI;AAAA,IACzB,CAAC;AAGD,SAAK,QAAQ,QAAQ,GAAG,QAAQ,CAAC,SAAiB;AAChD,WAAK,OAAO,MAAM,sBAAsB,KAAK,SAAS,CAAC,EAAE;AAAA,IAC3D,CAAC;AAED,SAAK,QAAQ,GAAG,SAAS,CAAC,UAAiB;AACzC,WAAK,OAAO,MAAM,qBAAqB,MAAM,OAAO,EAAE;AACtD,WAAK,cAAc;AACnB,WAAK,kBAAkB;AAAA,IACzB,CAAC;AAED,SAAK,QAAQ,GAAG,QAAQ,CAAC,SAAwB;AAC/C,WAAK,OAAO,MAAM,+BAA+B,IAAI,EAAE;AACvD,WAAK,UAAU;AACf,WAAK,cAAc;AACnB,WAAK,UAAU;AAAA,IACjB,CAAC;AAED,UAAM,KAAK,aAAa;AAAA,EAC1B;AAAA,EAEQ,eAA8B;AACpC,WAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,YAAM,aAAa;AACnB,YAAM,mBAAmB;AACzB,YAAM,WAAW,KAAK,IAAI,IAAI;AAE9B,YAAM,aAAa,MAAM;AACvB,YAAI,KAAK,SAAS;AAChB,kBAAQ;AAAA,QACV,WAAW,CAAC,KAAK,WAAW,KAAK,iBAAiB;AAChD,iBAAO,IAAI,MAAM,wCAAwC,CAAC;AAAA,QAC5D,WAAW,KAAK,IAAI,IAAI,UAAU;AAChC,iBAAO,IAAI,MAAM,4BAA4B,CAAC;AAAA,QAChD,OAAO;AACL,qBAAW,YAAY,gBAAgB;AAAA,QACzC;AAAA,MACF;AACA,iBAAW;AAAA,IACb,CAAC;AAAA,EACH;AAAA,EAEQ,cAAc,MAAoB;AACxC,QAAI;AACF,YAAM,UAAU,KAAK,MAAM,IAAI;AAG/B,UAAI,WAAW,SAAS;AACtB,cAAM,WAAW;AACjB,aAAK,UAAU;AACf,aAAK,cAAc,SAAS;AAC5B,aAAK,OAAO,KAAK,gCAAgC,KAAK,WAAW,EAAE;AACnE;AAAA,MACF;AAGA,YAAM,WAAW;AACjB,YAAM,UAAU,KAAK,gBAAgB,IAAI,SAAS,EAAE;AACpD,UAAI,SAAS;AACX,aAAK,gBAAgB,OAAO,SAAS,EAAE;AACvC,gBAAQ,QAAQ,QAAQ;AAAA,MAC1B;AAAA,IACF,SAAS,OAAO;AACd,WAAK,OAAO,MAAM,uCAAuC,IAAI,EAAE;AAAA,IACjE;AAAA,EACF;AAAA,EAEA,MAAc,YACZ,QACA,SAAkC,CAAC,GACvB;AACZ,UAAM,KAAK,aAAa;AAExB,QAAI,CAAC,KAAK,SAAS,OAAO;AACxB,YAAM,IAAI,MAAM,wBAAwB;AAAA,IAC1C;AAEA,UAAM,KAAK,EAAE,KAAK;AAClB,UAAM,UAAsB,EAAE,IAAI,QAAQ,OAAO;AAEjD,WAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,YAAM,UAAU,WAAW,MAAM;AAC/B,aAAK,gBAAgB,OAAO,EAAE;AAC9B,eAAO,IAAI,MAAM,wBAAwB,MAAM,EAAE,CAAC;AAAA,MACpD,GAAG,GAAK;AAER,WAAK,gBAAgB,IAAI,IAAI;AAAA,QAC3B,SAAS,CAAC,aAA0B;AAClC,uBAAa,OAAO;AACpB,cAAI,SAAS,OAAO;AAClB,mBAAO,IAAI,MAAM,SAAS,KAAK,CAAC;AAAA,UAClC,OAAO;AACL,oBAAQ,SAAS,MAAW;AAAA,UAC9B;AAAA,QACF;AAAA,QACA,QAAQ,CAAC,UAAiB;AACxB,uBAAa,OAAO;AACpB,iBAAO,KAAK;AAAA,QACd;AAAA,MACF,CAAC;AAED,WAAK,SAAS,OAAO,MAAM,GAAG,K
AAK,UAAU,OAAO,CAAC;AAAA,CAAI;AAAA,IAC3D,CAAC;AAAA,EACH;AAAA,EAEA,MAAc,eAA8B;AAC1C,QAAI,CAAC,KAAK,WAAW,CAAC,KAAK,SAAS;AAClC,YAAM,KAAK,YAAY;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,IAAI,YAAqB;AACvB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,aAAyB;AACvB,WAAO,EAAE,GAAG,KAAK,QAAQ;AAAA,EAC3B;AAAA,EAEA,UAAU,UAAiC;AACzC,SAAK,kBAAkB;AAAA,EACzB;AAAA,EAEQ,cACN,WACA,SACA,QACA,OACM;AACN,SAAK,QAAQ;AACb,SAAK,QAAQ,uBAAuB,KAAK,IAAI;AAE7C,QAAI,QAAQ;AACV,WAAK,QAAQ;AAAA,IACf,WAAW,SAAS;AAClB,WAAK,QAAQ;AAAA,IACf,OAAO;AACL,WAAK,QAAQ;AACb,WAAK,QAAQ,qBAAqB,KAAK,IAAI;AAC3C,WAAK,QAAQ,YAAY;AAAA,IAC3B;AAGA,SAAK,UAAU,KAAK,YAAY,IAAI;AACpC,SAAK,gBAAgB,KAAK,eAAe,KAAK,WAAU;AACxD,SAAK,eAAe,KAAK,IAAI,KAAK,eAAe,GAAG,WAAU,mBAAmB;AAGjF,QAAI,KAAK,eAAe,GAAG;AACzB,YAAM,QAAQ,KAAK,UAAU,MAAM,GAAG,KAAK,YAAY;AACvD,WAAK,QAAQ,mBAAmB,MAAM,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,KAAK;AACxE,WAAK,QAAQ,eACX,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC,EAAE,KAAK,MAAM,MAAM,SAAS,IAAI,CAAC,KAAK;AAAA,IACzE;AAEA,SAAK,kBAAkB,KAAK,WAAW,CAAC;AAAA,EAC1C;AAAA,EAEA,OAAO,kBAAkB,UAA+C;AACtE,WAAO,OAAO,aAAa,WAAW,CAAC,EAAE,MAAM,QAAQ,SAAS,SAAS,CAAC,IAAI;AAAA,EAChF;AAAA,EAEA,MAAM,MAAM,UAAiC,MAA4C;AACvF,UAAM,YAAY,KAAK,IAAI;AAC3B,UAAM,EAAE,aAAa,GAAG,iBAAiB,KAAM,gBAAgB,IAAM,IAAI,KAAK;AAC9E,UAAM,qBAAqB,CAAC,WAAW,cAAc,cAAc,OAAO,OAAO,YAAY;AAE7F,QAAI,YAA0B;AAE9B,aAAS,UAAU,GAAG,UAAU,YAAY,WAAW;AACrD,UAAI;AACF,cAAM,KAAK,aAAa;AAExB,YAAI,CAAC,KAAK,SAAS;AACjB,eAAK,cAAc,KAAK,IAAI,IAAI,WAAW,MAAM,IAAI;AACrD,iBAAO,WAAW;AAAA,QACpB;AAEA,cAAM,SAAS,MAAM,KAAK,YAAuB,SAAS;AAAA,UACxD,UAAU,WAAU,kBAAkB,QAAQ;AAAA,UAC9C,MAAM,QAAQ,CAAC;AAAA,QACjB,CAAC;AAED,aAAK,cAAc,KAAK,IAAI,IAAI,WAAW,MAAM,KAAK;AACtD,eAAO;AAAA,MACT,SAAS,OAAO;AACd,oBAAY,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AACpE,cAAM,cAAc,mBAAmB;AAAA,UAAK,CAAC,MAC3C,WAAW,QAAQ,YAAY,EAAE,SAAS,CAAC;AAAA,QAC7C;AAEA,YAAI,CAAC,eAAe,YAAY,aAAa,GAAG;AAC9C,eAAK,OAAO,MAAM,8BAA8B,UAAU,CAAC,cAAc,KAAK,EAAE;AAChF,eAAK,cAAc,KAAK,IAAI,IAAI,WAAW,MAAM,MAAM,UAAU,OAAO;AACxE,iBAAO,WAAW,UAAU,OAAO;AAAA,QACrC;AAEA,cAAM,QACJ,KAAK,IAAI,iBAAiB,KAAK,SAAS,aAA
a,KAAK,OAAO,KAAK,OAAO,IAAI;AACnF,aAAK,QAAQ;AACb,aAAK,OAAO;AAAA,UACV,eAAe,UAAU,CAAC,IAAI,UAAU,wBAAwB,KAAK,MAAM,KAAK,CAAC;AAAA,QACnF;AACA,cAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,KAAK,CAAC;AAAA,MAC3D;AAAA,IACF;AAEA,SAAK,cAAc,KAAK,IAAI,IAAI,WAAW,OAAO,OAAO,WAAW,OAAO;AAC3E,WAAO,WAAW,WAAW,OAAO;AAAA,EACtC;AAAA,EAEA,MAAM,YAAwC;AAC5C,QAAI;AACF,YAAM,KAAK,aAAa;AACxB,aAAO,MAAM,KAAK,YAA+B,QAAQ;AAAA,IAC3D,QAAQ;AACN,aAAO;AAAA,QACL,WAAW;AAAA,QACX,SAAS,KAAK,OAAO;AAAA,QACrB,aAAa,KAAK,OAAO;AAAA,QACzB,eAAe,KAAK,OAAO;AAAA,QAC3B,UAAU,KAAK,OAAO;AAAA,MACxB;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,WAA0B;AAC9B,QAAI,CAAC,KAAK,QAAS;AACnB,QAAI;AACF,YAAM,KAAK,YAAY,UAAU;AAAA,IACnC,QAAQ;AAAA,IAER;AACA,SAAK,QAAQ,MAAM;AACnB,SAAK,SAAS,KAAK;AACnB,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,cAAc;AAAA,EACrB;AACF;AAEO,SAAS,WAAW,OAA2B;AACpD,SAAO;AAAA,IACL,MAAM;AAAA,IACN,UAAU,EAAE,MAAM,MAAM,MAAM;AAAA,EAChC;AACF;;;ADlXA,IAAM,MAA0C,OAAO,YAAY,cAAc,QAAQ,MAAM,CAAC;AAmBhG,IAAM,cAA2B;AAAA,EAC/B,QAAQ;AAAA,EACR,aAAa;AAAA,EACb,YAAY;AACd;AAKA,SAAS,kBAAkB,QAAoC;AAC7D,QAAM,MAAM;AAAA,IACV,OAAO,WAAW;AAAA,IAClB,OAAO,eAAe;AAAA,IACtB,OAAO,OAAO,iBAAiB,EAAE;AAAA,IACjC,OAAO,OAAO,YAAY,EAAE;AAAA,IAC5B,OAAO,cAAc;AAAA,EACvB,EAAE,KAAK,GAAG;AACV,SAAO;AACT;AAUA,SAAS,kBAAkB,SAAmC;AAE5D,QAAM,gBAAiB,QAA+C;AAGtE,QAAM,aAAa,kBAAkB,iBAAiB,CAAC,CAAC;AAGxD,MAAI,YAAY,UAAU,YAAY,eAAe,YAAY;AAC/D,WAAO,KAAK,yCAAyC;AAErD,gBAAY,OAAO,SAAS,EAAE,MAAM,CAAC,QAAQ;AAC3C,aAAO,KAAK,yCAAyC,GAAG;AAAA,IAC1D,CAAC;AACD,gBAAY,SAAS;AACrB,gBAAY,cAAc;AAAA,EAC5B;AAGA,MAAI,YAAY,QAAQ;AACtB,WAAO,YAAY;AAAA,EACrB;AAIA,QAAM,SAAS,IAAI,UAAU,aAAa;AAC1C,cAAY,SAAS;AACrB,cAAY,aAAa;AAEzB,SAAO;AACT;AAKA,eAAsB,cAA6B;AACjD,MAAI,YAAY,QAAQ;AACtB,UAAM,YAAY,OAAO,SAAS;AAAA,EACpC;AACA,cAAY,SAAS;AACrB,cAAY,cAAc;AAC1B,cAAY,aAAa;AAC3B;AAKA,eAAe,qBACb,SACA,QACiB;AACjB,QAAM,SAAS,kBAAkB,OAAO;AAGxC,QAAM,QAAQ,OAAO,UAAU;AAE/B,QAAM,OAAO;AAAA,IACX,WAAW,OAAO;AAAA,IAClB,aAAa,OAAO;AAAA,IACpB,MAAM,OAAO;AAAA,IACb,eAAe,OAAO;AAAA,IACtB,MAAM,OAAO;AAAA,EACf;AAGA,QAAM,YAAY,OAAO,YAAY,OAAO,QAAQ,IAAI,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC,MAAM
,MAAM,MAAS,CAAC;AAE5F,QAAM,SAAS,MAAM,OAAO,MAAM,OAAO,SAAS;AAClD,SAAO,OAAO;AAChB;AAKO,IAAM,YAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,aACE;AAAA,EAEF,QAAQ;AAAA,IACN,CAAC,SAAS,OAAO,GAAG,IAAI,SAAS,OAAO,KAAK,eAAe;AAAA,IAC5D,CAAC,SAAS,WAAW,GAAG,IAAI,SAAS,WAAW,KAAK,eAAe;AAAA,IACpE,CAAC,SAAS,cAAc,GAAG,IAAI,SAAS,cAAc,KAAK,OAAO,eAAe,aAAa;AAAA,IAC9F,CAAC,SAAS,SAAS,GAAG,IAAI,SAAS,SAAS,KAAK,OAAO,eAAe,QAAQ;AAAA,IAC/E,CAAC,SAAS,OAAO,GAAG,IAAI,SAAS,OAAO,KAAK;AAAA,IAC7C,CAAC,SAAS,WAAW,GAAG,IAAI,SAAS,WAAW,KAAK,eAAe;AAAA,EACtE;AAAA,EAEA,MAAM,KAAK,QAAgC,SAAuC;AAChF,WAAO,KAAK,+BAA+B;AAG3C,IAAC,QAA+C,YAAY;AAAA,MAC1D,SAAS,OAAO,SAAS,OAAO,KAAK,eAAe;AAAA,MACpD,aAAa,OAAO,SAAS,WAAW,KAAK,eAAe;AAAA,MAC5D,eACE,OAAO,SAAS,OAAO,SAAS,cAAc,KAAK,IAAI,EAAE,KAAK,eAAe;AAAA,MAC/E,UAAU,OAAO,SAAS,OAAO,SAAS,SAAS,KAAK,IAAI,EAAE,KAAK,eAAe;AAAA,MAClF,SAAS,CAAC,KAAK,QAAQ,KAAK,EAAE,UAAU,OAAO,SAAS,OAAO,KAAK,IAAI,YAAY,CAAC;AAAA,MACrF,YAAY,OAAO,SAAS,WAAW,KAAK,eAAe;AAAA,IAC7D;AAGA,UAAM,SAAS,kBAAkB,OAAO;AACxC,UAAM,SAAS,MAAM,OAAO,UAAU;AAEtC,QAAI,OAAO,WAAW;AACpB,aAAO,KAAK,4BAA4B,OAAO,OAAO,EAAE;AAAA,IAC1D,OAAO;AACL,aAAO,KAAK,oDAAoD;AAAA,IAClE;AAAA,EACF;AAAA,EAEA,QAAQ;AAAA,IACN,CAAC,UAAU,UAAU,GAAG;AAAA,IACxB,CAAC,UAAU,UAAU,GAAG;AAAA,IACxB,CAAC,UAAU,oBAAoB,GAAG;AAAA,IAClC,CAAC,UAAU,oBAAoB,GAAG;AAAA,IAClC,CAAC,UAAU,eAAe,GAAG;AAAA,EAC/B;AAAA,EAEA,OAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,QACL;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,aAA2C;AAEpD,kBAAM,SAAS,WAAW;AAC1B,gBAAI,CAAC,OAAO,SAAS,MAAM;AACzB,oBAAM,IAAI,MAAM,mCAAmC;AAAA,YACrD;AACA,mBAAO,KAAK,kCAAkC;AAAA,UAChD;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA0C;AACnD,kBAAM,OAAO,MAAM,QAAQ,SAAS,UAAU,YAAY;AAAA,cACxD,QAAQ;AAAA,YACV,CAAC;AAED,gBAAI,OAAO,SAAS,UAAU;AAC5B,oBAAM,IAAI,MAAM,iCAAiC;AAAA,YACnD;AAEA,mBAAO,KAAK,qCAAqC,KAAK,UAAU,GAAG,EAAE,CAAC,MAAM;AAAA,UAC9E;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA0C;AACnD,kBAAM,OAAO,MAAM,QAAQ,SAAS,UAAU,YAAY;AAAA,cACxD,QAAQ;AAAA,YACV,CAAC;AAED,gBAAI,OAAO,SAAS,UAAU;AAC5B,oBAAM,IAAI,MAAM,6CAA6C;AAAA,YAC/D;AAEA,mBAAO,KAAK,
uCAAuC;AAAA,UACrD;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEA,IAAO,gBAAQ;","names":["env","logger"]}
package/package.json ADDED
@@ -0,0 +1,74 @@
1
+ {
2
+ "name": "@elizaos/plugin-rlm",
3
+ "version": "2.0.0-alpha.3",
4
+ "description": "RLM (Recursive Language Model) plugin for elizaOS - enables processing of arbitrarily long contexts",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.js"
12
+ }
13
+ },
14
+ "files": [
15
+ "dist",
16
+ "src"
17
+ ],
18
+ "scripts": {
19
+ "build": "tsup",
20
+ "dev": "tsup --watch",
21
+ "test": "vitest run",
22
+ "test:watch": "vitest",
23
+ "lint": "bunx @biomejs/biome check --write --unsafe .",
24
+ "lint:fix": "biome check --write .",
25
+ "typecheck": "tsc --noEmit",
26
+ "lint:check": "bunx @biomejs/biome check .",
27
+ "format": "bunx @biomejs/biome format --write .",
28
+ "format:check": "bunx @biomejs/biome format ."
29
+ },
30
+ "keywords": [
31
+ "elizaos",
32
+ "plugin",
33
+ "rlm",
34
+ "recursive-language-model",
35
+ "ai",
36
+ "agent",
37
+ "long-context"
38
+ ],
39
+ "author": "elizaOS Contributors",
40
+ "license": "MIT",
41
+ "repository": {
42
+ "type": "git",
43
+ "url": "git+https://github.com/elizaos/eliza.git",
44
+ "directory": "plugins/plugin-rlm/typescript"
45
+ },
46
+ "dependencies": {
47
+ "@elizaos/core": "2.0.0-alpha.3"
48
+ },
49
+ "devDependencies": {
50
+ "@biomejs/biome": "^1.9.4",
51
+ "@types/node": "^22.10.0",
52
+ "tsup": "^8.3.5",
53
+ "typescript": "^5.7.2",
54
+ "vitest": "^3.0.0"
55
+ },
56
+ "peerDependencies": {
57
+ "@elizaos/core": "2.0.0-alpha.3"
58
+ },
59
+ "engines": {
60
+ "node": ">=20"
61
+ },
62
+ "milaidy": {
63
+ "platforms": [
64
+ "node"
65
+ ],
66
+ "runtime": "node",
67
+ "platformDetails": {
68
+ "node": "ESM build available via exports.import"
69
+ }
70
+ },
71
+ "publishConfig": {
72
+ "access": "public"
73
+ }
74
+ }