mr-memory 1.0.11 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.ts +317 -213
  2. package/package.json +1 -1
package/index.ts CHANGED
@@ -2,118 +2,73 @@
2
2
  * MemoryRouter Plugin for OpenClaw
3
3
  *
4
4
  * Persistent AI memory via MemoryRouter (memoryrouter.ai).
5
- * Routes LLM calls through MemoryRouter's API which injects relevant
6
- * past context and captures conversations automatically.
5
+ * Uses before_agent_start + agent_end hooks to inject/store memories
6
+ * via the MemoryRouter relay API. No proxy interception needed.
7
7
  *
8
- * BYOK — provider API keys pass through untouched.
8
+ * BYOK — provider API keys never touch MemoryRouter.
9
9
  */
10
10
 
11
+ import { readFile, readdir, stat } from "node:fs/promises";
12
+ import { join } from "node:path";
11
13
  import { spawn } from "node:child_process";
12
14
  import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
13
15
 
14
16
  const DEFAULT_ENDPOINT = "https://api.memoryrouter.ai";
15
17
 
18
+ // Workspace files OpenClaw loads into the system prompt
19
+ const WORKSPACE_FILES = [
20
+ "IDENTITY.md", "USER.md", "MEMORY.md", "HEARTBEAT.md",
21
+ "TOOLS.md", "AGENTS.md", "SOUL.md", "BOOTSTRAP.md",
22
+ ];
23
+
16
24
  type MemoryRouterConfig = {
17
25
  key: string;
18
26
  endpoint?: string;
19
- density?: 'low' | 'high' | 'xhigh';
20
- };
21
-
22
- type CompatApi = OpenClawPluginApi & {
23
- updatePluginConfig?: (config: Record<string, unknown>) => Promise<void>;
24
- updatePluginEnabled?: (enabled: boolean) => Promise<void>;
27
+ density?: "low" | "high" | "xhigh";
28
+ mode?: "relay" | "proxy";
25
29
  };
26
30
 
27
- /**
28
- * Supported provider APIs that MemoryRouter can proxy.
29
- */
30
- const SUPPORTED_APIS = new Set([
31
- "anthropic-messages",
32
- "openai-completions",
33
- "openai-responses",
34
- "azure-openai-responses",
35
- "ollama",
36
- ]);
37
-
38
- /**
39
- * Detect if the current LLM call is a tool-use iteration (not direct user conversation).
40
- * Tool iterations have tool_result (Anthropic) or tool-role (OpenAI) messages
41
- * after the last real user message.
42
- */
43
- function isToolUseIteration(context: { messages?: Array<{ role: string; content?: unknown }> }): boolean {
44
- const messages = context.messages;
45
- if (!messages || messages.length === 0) return false;
46
-
47
- for (let i = messages.length - 1; i >= 0; i--) {
48
- const msg = messages[i];
49
-
50
- if (msg.role === "tool") return true;
51
-
52
- if (msg.role === "user" && Array.isArray(msg.content)) {
53
- const hasToolResult = (msg.content as Array<{ type?: string }>).some(
54
- (block) => block.type === "tool_result",
55
- );
56
- if (hasToolResult) return true;
57
- }
58
-
59
- if (msg.role === "user" && typeof msg.content === "string") return false;
60
- if (msg.role === "assistant") continue;
61
- }
62
-
63
- return false;
64
- }
31
+ // ──────────────────────────────────────────────────────
32
+ // Helpers
33
+ // ──────────────────────────────────────────────────────
65
34
 
66
35
  function resolveOpenClawInvocation(): { command: string; args: string[] } {
67
36
  const entry = process.argv[1];
68
- if (entry) {
69
- return {
70
- command: process.execPath,
71
- args: [entry],
72
- };
73
- }
74
-
75
- return {
76
- command: "openclaw",
77
- args: [],
78
- };
37
+ if (entry) return { command: process.execPath, args: [entry] };
38
+ return { command: "openclaw", args: [] };
79
39
  }
80
40
 
81
41
  async function runOpenClawConfigSet(path: string, value: string, json = false): Promise<void> {
82
42
  const base = resolveOpenClawInvocation();
83
43
  const args = [...base.args, "config", "set", path, value];
84
- if (json) {
85
- args.push("--json");
86
- }
44
+ if (json) args.push("--json");
87
45
 
88
46
  await new Promise<void>((resolve, reject) => {
89
47
  const child = spawn(base.command, args, {
90
48
  stdio: ["ignore", "ignore", "pipe"],
91
49
  env: process.env,
92
50
  });
93
-
94
51
  let stderr = "";
95
- child.stderr.on("data", (chunk) => {
96
- stderr += String(chunk);
97
- });
98
-
99
- child.on("error", (err) => reject(err));
52
+ child.stderr.on("data", (chunk) => { stderr += String(chunk); });
53
+ child.on("error", reject);
100
54
  child.on("close", (code) => {
101
- if (code === 0) {
102
- resolve();
103
- return;
104
- }
105
- reject(new Error(`openclaw config set failed (exit ${code}): ${stderr.trim()}`));
55
+ if (code === 0) resolve();
56
+ else reject(new Error(`openclaw config set failed (exit ${code}): ${stderr.trim()}`));
106
57
  });
107
58
  });
108
59
  }
109
60
 
61
+ type CompatApi = OpenClawPluginApi & {
62
+ updatePluginConfig?: (config: Record<string, unknown>) => Promise<void>;
63
+ updatePluginEnabled?: (enabled: boolean) => Promise<void>;
64
+ };
65
+
110
66
  async function setPluginConfig(api: OpenClawPluginApi, config: Record<string, unknown>): Promise<void> {
111
67
  const compat = api as CompatApi;
112
68
  if (typeof compat.updatePluginConfig === "function") {
113
69
  await compat.updatePluginConfig(config);
114
70
  return;
115
71
  }
116
-
117
72
  await runOpenClawConfigSet(`plugins.entries.${api.id}.config`, JSON.stringify(config), true);
118
73
  }
119
74
 
@@ -123,10 +78,50 @@ async function setPluginEnabled(api: OpenClawPluginApi, enabled: boolean): Promi
123
78
  await compat.updatePluginEnabled(enabled);
124
79
  return;
125
80
  }
126
-
127
81
  await runOpenClawConfigSet(`plugins.entries.${api.id}.enabled`, enabled ? "true" : "false", true);
128
82
  }
129
83
 
84
+ /**
85
+ * Read all workspace files and return as a single text blob for token counting.
86
+ */
87
+ async function readWorkspaceFiles(workspaceDir: string): Promise<string> {
88
+ const parts: string[] = [];
89
+ for (const file of WORKSPACE_FILES) {
90
+ try {
91
+ const content = await readFile(join(workspaceDir, file), "utf8");
92
+ if (content.trim()) parts.push(`## ${file}\n${content}`);
93
+ } catch { /* file doesn't exist — skip */ }
94
+ }
95
+ return parts.join("\n\n");
96
+ }
97
+
98
+ /**
99
+ * Build a text representation of tools config for token counting.
100
+ */
101
+ function serializeToolsConfig(config: Record<string, unknown>): string {
102
+ const tools = config.tools;
103
+ if (!tools) return "";
104
+ try {
105
+ return `## Tools Config\n${JSON.stringify(tools, null, 2)}`;
106
+ } catch { return ""; }
107
+ }
108
+
109
+ /**
110
+ * Build a text representation of skills for token counting.
111
+ */
112
+ function serializeSkillsConfig(config: Record<string, unknown>): string {
113
+ const skills = (config as any).skills?.entries;
114
+ if (!skills || typeof skills !== "object") return "";
115
+ try {
116
+ const names = Object.keys(skills);
117
+ return `## Skills (${names.length})\n${names.join(", ")}`;
118
+ } catch { return ""; }
119
+ }
120
+
121
+ // ──────────────────────────────────────────────────────
122
+ // Plugin Definition
123
+ // ──────────────────────────────────────────────────────
124
+
130
125
  const memoryRouterPlugin = {
131
126
  id: "mr-memory",
132
127
  name: "MemoryRouter",
@@ -136,57 +131,210 @@ const memoryRouterPlugin = {
136
131
  const cfg = api.pluginConfig as MemoryRouterConfig | undefined;
137
132
  const endpoint = cfg?.endpoint?.replace(/\/v1\/?$/, "") || DEFAULT_ENDPOINT;
138
133
  const memoryKey = cfg?.key;
139
- const density = cfg?.density || 'high';
140
-
141
- // ==================================================================
142
- // Core: Route LLM calls through MemoryRouter (only when key is set)
143
- // ==================================================================
134
+ const density = cfg?.density || "high";
135
+ const mode = cfg?.mode || "relay";
144
136
 
145
137
  if (memoryKey) {
146
- api.logger.info?.(`memoryrouter: registered (endpoint: ${endpoint})`);
138
+ api.logger.info?.(`memoryrouter: active (key: ${memoryKey.slice(0, 6)}..., mode: ${mode})`);
147
139
  } else {
148
140
  api.logger.info?.("memoryrouter: no key configured — run: openclaw mr <key>");
149
141
  }
150
142
 
143
+ // ==================================================================
144
+ // Core: before_agent_start — search memories, inject context
145
+ // ==================================================================
146
+
151
147
  if (memoryKey) {
152
- api.registerStreamFnWrapper((next) => {
153
- return (model, context, options) => {
154
- // Only proxy supported APIs
155
- if (!SUPPORTED_APIS.has(model.api)) {
156
- return next(model, context, options);
148
+ // Track whether we've already fired for this prompt (dedup double-fire)
149
+ let lastPreparedPrompt = "";
150
+
151
+ api.on("before_prompt_build", async (event, ctx) => {
152
+ try {
153
+ const prompt = event.prompt;
154
+
155
+ // Deduplicate — if we already prepared this exact prompt, skip
156
+ if (prompt === lastPreparedPrompt && lastPreparedPrompt !== "") {
157
+ return;
158
+ }
159
+ lastPreparedPrompt = prompt;
160
+
161
+ // 1. Read workspace files for full token count
162
+ const workspaceDir = ctx.workspaceDir || "";
163
+ let workspaceText = "";
164
+ if (workspaceDir) {
165
+ workspaceText = await readWorkspaceFiles(workspaceDir);
166
+ }
167
+
168
+ // 2. Serialize tools + skills from config
169
+ const toolsText = serializeToolsConfig(api.config as unknown as Record<string, unknown>);
170
+ const skillsText = serializeSkillsConfig(api.config as unknown as Record<string, unknown>);
171
+
172
+ // 3. Build full context payload (messages + workspace + tools + skills)
173
+ const contextPayload: Array<{ role: string; content: string }> = [];
174
+
175
+ // Add workspace context as a system-level entry
176
+ const fullContext = [workspaceText, toolsText, skillsText].filter(Boolean).join("\n\n");
177
+ if (fullContext) {
178
+ contextPayload.push({ role: "system", content: fullContext });
179
+ }
180
+
181
+ // Add conversation history
182
+ if (event.messages && Array.isArray(event.messages)) {
183
+ let skipped = 0;
184
+ for (const msg of event.messages) {
185
+ const m = msg as { role?: string; content?: unknown };
186
+ if (!m.role) continue;
187
+
188
+ let text = "";
189
+ if (typeof m.content === "string") {
190
+ text = m.content;
191
+ } else if (Array.isArray(m.content)) {
192
+ // Handle Anthropic-style content blocks [{type:"text", text:"..."}, ...]
193
+ text = (m.content as Array<{ type?: string; text?: string }>)
194
+ .filter(b => b.type === "text" && b.text)
195
+ .map(b => b.text!)
196
+ .join("\n");
197
+ }
198
+
199
+ if (text) {
200
+ contextPayload.push({ role: m.role, content: text });
201
+ } else {
202
+ skipped++;
203
+ }
204
+ }
205
+ }
206
+
207
+ // Add current user prompt
208
+ contextPayload.push({ role: "user", content: prompt });
209
+
210
+ // 4. Call /v1/memory/prepare
211
+ const densityMap: Record<string, number> = { low: 40, high: 80, xhigh: 160 };
212
+ const contextLimit = densityMap[density] || 80;
213
+
214
+ const res = await fetch(`${endpoint}/v1/memory/prepare`, {
215
+ method: "POST",
216
+ headers: {
217
+ "Content-Type": "application/json",
218
+ Authorization: `Bearer ${memoryKey}`,
219
+ },
220
+ body: JSON.stringify({
221
+ messages: contextPayload,
222
+ density,
223
+ context_limit: contextLimit,
224
+ }),
225
+ });
226
+
227
+ if (!res.ok) {
228
+ api.logger.warn?.(`memoryrouter: prepare failed (${res.status})`);
229
+ return;
230
+ }
231
+
232
+ const data = (await res.json()) as {
233
+ context?: string;
234
+ memories_found?: number;
235
+ tokens_billed?: number;
236
+ };
237
+
238
+ if (data.context) {
239
+ api.logger.info?.(
240
+ `memoryrouter: injected ${data.memories_found || 0} memories (${data.tokens_billed || 0} tokens billed)`,
241
+ );
242
+ return { prependContext: data.context };
243
+ }
244
+ } catch (err) {
245
+ api.logger.warn?.(
246
+ `memoryrouter: prepare error — ${err instanceof Error ? err.message : String(err)}`,
247
+ );
157
248
  }
249
+ });
250
+
251
+ // ==================================================================
252
+ // Core: agent_end — store this turn's conversation
253
+ // ==================================================================
254
+
255
+ api.on("agent_end", async (event, ctx) => {
256
+ try {
257
+ const msgs = event.messages;
258
+ if (!msgs || !Array.isArray(msgs) || msgs.length === 0) return;
259
+
260
+ // Extract text from a message (handles string + content block arrays)
261
+ function extractText(content: unknown): string {
262
+ if (typeof content === "string") return content;
263
+ if (Array.isArray(content)) {
264
+ return (content as Array<{ type?: string; text?: string }>)
265
+ .filter(b => b.type === "text" && b.text)
266
+ .map(b => b.text!)
267
+ .join("\n");
268
+ }
269
+ return "";
270
+ }
158
271
 
159
- // Route through MemoryRouter
160
- const mrModel = {
161
- ...model,
162
- baseUrl: model.api === "anthropic-messages"
163
- ? endpoint // Anthropic: baseUrl is without /v1
164
- : `${endpoint}/v1`,
165
- };
272
+ // Find the last user message, then collect ALL assistant messages after it
273
+ // This captures the full response even if the assistant sent multiple messages
274
+ let lastUserIdx = -1;
275
+ for (let i = msgs.length - 1; i >= 0; i--) {
276
+ const msg = msgs[i] as { role?: string; content?: unknown };
277
+ const text = extractText(msg.content);
278
+ if (msg.role === "user" && text) {
279
+ lastUserIdx = i;
280
+ break;
281
+ }
282
+ }
166
283
 
167
- // Detect tool iterations don't store intermediate work
168
- const toolIteration = isToolUseIteration(
169
- context as { messages?: Array<{ role: string; content?: unknown }> },
170
- );
171
-
172
- // Inject MemoryRouter headers
173
- const mrOptions = {
174
- ...options,
175
- headers: {
176
- ...options?.headers,
177
- "X-Memory-Key": memoryKey,
178
- "X-Memory-Store": toolIteration ? "false" : "true",
179
- "X-Memory-Density": density,
180
- },
181
- };
284
+ const toStore: Array<{ role: string; content: string }> = [];
182
285
 
183
- return next(mrModel, context, mrOptions);
184
- };
185
- });
186
- } // end if (memoryKey) for streamFn wrapper
286
+ // Add the user message
287
+ if (lastUserIdx >= 0) {
288
+ const userMsg = msgs[lastUserIdx] as { content?: unknown };
289
+ const userText = extractText(userMsg.content);
290
+ if (userText) toStore.push({ role: "user", content: userText });
291
+ }
292
+
293
+ // Collect ALL assistant messages after the last user message
294
+ const assistantParts: string[] = [];
295
+ for (let i = (lastUserIdx >= 0 ? lastUserIdx + 1 : 0); i < msgs.length; i++) {
296
+ const msg = msgs[i] as { role?: string; content?: unknown };
297
+ if (msg.role === "assistant") {
298
+ const text = extractText(msg.content);
299
+ if (text) assistantParts.push(text);
300
+ }
301
+ }
302
+ if (assistantParts.length > 0) {
303
+ toStore.push({ role: "assistant", content: assistantParts.join("\n\n") });
304
+ }
305
+
306
+ if (toStore.length === 0) return;
307
+
308
+ // Fire and forget — don't block the response
309
+ fetch(`${endpoint}/v1/memory/ingest`, {
310
+ method: "POST",
311
+ headers: {
312
+ "Content-Type": "application/json",
313
+ Authorization: `Bearer ${memoryKey}`,
314
+ },
315
+ body: JSON.stringify({
316
+ messages: toStore,
317
+ session_id: ctx.sessionKey,
318
+ }),
319
+ }).then(async (res) => {
320
+ if (!res.ok) {
321
+ api.logger.warn?.(`memoryrouter: ingest failed (${res.status})`);
322
+ }
323
+ }).catch((err) => {
324
+ api.logger.warn?.(
325
+ `memoryrouter: ingest error — ${err instanceof Error ? err.message : String(err)}`,
326
+ );
327
+ });
328
+ } catch (err) {
329
+ api.logger.warn?.(
330
+ `memoryrouter: agent_end error — ${err instanceof Error ? err.message : String(err)}`,
331
+ );
332
+ }
333
+ });
334
+ } // end if (memoryKey)
187
335
 
188
336
  // ==================================================================
189
- // CLI Commands (always registered — even without key, for enable/off)
337
+ // CLI Commands
190
338
  // ==================================================================
191
339
 
192
340
  api.registerCli(
@@ -196,7 +344,6 @@ const memoryRouterPlugin = {
196
344
  console.error("Invalid key format. Keys start with 'mk' (e.g. mk_xxx)");
197
345
  return;
198
346
  }
199
-
200
347
  try {
201
348
  await setPluginConfig(api, { key });
202
349
  await setPluginEnabled(api, true);
@@ -205,30 +352,22 @@ const memoryRouterPlugin = {
205
352
  } catch (err) {
206
353
  const message = err instanceof Error ? err.message : String(err);
207
354
  console.error(`Failed to enable MemoryRouter: ${message}`);
208
- console.error("Fallback: openclaw config set plugins.entries.mr-memory.config.key <key>");
209
- console.error("Then: openclaw config set plugins.entries.mr-memory.enabled true --json");
210
355
  }
211
356
  };
212
357
 
213
- const mr = program.command("mr")
358
+ const mr = program
359
+ .command("mr")
214
360
  .description("MemoryRouter memory commands")
215
361
  .argument("[key]", "Your MemoryRouter memory key (mk_xxx)")
216
362
  .action(async (key: string | undefined) => {
217
- if (!key) {
218
- // No key provided — show help
219
- mr.help();
220
- return;
221
- }
363
+ if (!key) { mr.help(); return; }
222
364
  await applyKey(key);
223
365
  });
224
366
 
225
- // Backward compat: `openclaw mr enable <key>` still works
226
367
  mr.command("enable")
227
368
  .description("Enable MemoryRouter with a memory key (alias)")
228
369
  .argument("<key>", "Your MemoryRouter memory key (mk_xxx)")
229
- .action(async (key: string) => {
230
- await applyKey(key);
231
- });
370
+ .action(async (key: string) => { await applyKey(key); });
232
371
 
233
372
  mr.command("off")
234
373
  .description("Disable MemoryRouter (removes key)")
@@ -236,143 +375,110 @@ const memoryRouterPlugin = {
236
375
  try {
237
376
  await setPluginConfig(api, {});
238
377
  await setPluginEnabled(api, false);
239
- console.log("✓ MemoryRouter disabled. LLM calls go direct to provider.");
240
- console.log(" Key removed. Re-enable with: openclaw mr <key>");
378
+ console.log("✓ MemoryRouter disabled.");
241
379
  } catch (err) {
242
- console.error(`Failed to disable MemoryRouter: ${err instanceof Error ? err.message : String(err)}`);
380
+ console.error(`Failed to disable: ${err instanceof Error ? err.message : String(err)}`);
243
381
  }
244
382
  });
245
383
 
246
384
  // Density commands
247
- mr.command("xhigh")
248
- .description("Set memory density to xhigh (160 results, ~50k tokens)")
249
- .action(async () => {
250
- if (!memoryKey) {
251
- console.error("MemoryRouter not configured. Run: openclaw mr <key>");
252
- return;
253
- }
254
- try {
255
- await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density: 'xhigh' });
256
- console.log("✓ Memory density set to xhigh (160 results, ~50k tokens)");
257
- console.log(" Maximum context injection for deepest memory.");
258
- } catch (err) {
259
- console.error(`Failed to set density: ${err instanceof Error ? err.message : String(err)}`);
260
- }
261
- });
262
-
263
- mr.command("high")
264
- .description("Set memory density to high (80 results, ~24k tokens) [default]")
265
- .action(async () => {
266
- if (!memoryKey) {
267
- console.error("MemoryRouter not configured. Run: openclaw mr <key>");
268
- return;
269
- }
270
- try {
271
- await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density: 'high' });
272
- console.log("✓ Memory density set to high (80 results, ~24k tokens)");
273
- } catch (err) {
274
- console.error(`Failed to set density: ${err instanceof Error ? err.message : String(err)}`);
275
- }
276
- });
385
+ for (const [name, desc] of [
386
+ ["xhigh", "Set density to xhigh (160 results, ~50k tokens)"],
387
+ ["high", "Set density to high (80 results, ~24k tokens) [default]"],
388
+ ["low", "Set density to low (40 results, ~12k tokens)"],
389
+ ] as const) {
390
+ mr.command(name)
391
+ .description(desc)
392
+ .action(async () => {
393
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
394
+ try {
395
+ await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density: name });
396
+ console.log(`✓ Memory density set to ${name}`);
397
+ } catch (err) {
398
+ console.error(`Failed: ${err instanceof Error ? err.message : String(err)}`);
399
+ }
400
+ });
401
+ }
277
402
 
278
- mr.command("low")
279
- .description("Set memory density to low (40 results, ~12k tokens)")
280
- .action(async () => {
281
- if (!memoryKey) {
282
- console.error("MemoryRouter not configured. Run: openclaw mr <key>");
283
- return;
284
- }
285
- try {
286
- await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density: 'low' });
287
- console.log("✓ Memory density set to low (40 results, ~12k tokens)");
288
- console.log(" Lighter context for faster responses or smaller models.");
289
- } catch (err) {
290
- console.error(`Failed to set density: ${err instanceof Error ? err.message : String(err)}`);
291
- }
292
- });
403
+ // Mode commands
404
+ for (const [modeName, modeDesc] of [
405
+ ["relay", "Relay mode — hooks only, works on stock OpenClaw [default]"],
406
+ ["proxy", "Proxy mode — memory on every LLM call (requires registerStreamFnWrapper)"],
407
+ ] as const) {
408
+ mr.command(modeName)
409
+ .description(modeDesc)
410
+ .action(async () => {
411
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
412
+ try {
413
+ await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density, mode: modeName });
414
+ console.log(`✓ Mode set to ${modeName}`);
415
+ } catch (err) {
416
+ console.error(`Failed: ${err instanceof Error ? err.message : String(err)}`);
417
+ }
418
+ });
419
+ }
293
420
 
294
421
  mr.command("status")
295
422
  .description("Show MemoryRouter vault stats")
296
423
  .option("--json", "JSON output")
297
424
  .action(async (opts) => {
298
- if (!memoryKey) {
299
- console.error("MemoryRouter not configured. Run: openclaw mr <key>");
300
- return;
301
- }
425
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
302
426
  try {
303
427
  const res = await fetch(`${endpoint}/v1/memory/stats`, {
304
428
  headers: { Authorization: `Bearer ${memoryKey}` },
305
429
  });
306
- const data = await res.json() as Record<string, unknown>;
307
-
430
+ const data = (await res.json()) as { totalVectors?: number; totalTokens?: number };
308
431
  if (opts.json) {
309
- console.log(JSON.stringify({ enabled: true, key: memoryKey, density, stats: data }, null, 2));
432
+ console.log(JSON.stringify({ enabled: true, key: memoryKey, density, mode, stats: data }, null, 2));
310
433
  } else {
311
434
  console.log("MemoryRouter Status");
312
435
  console.log("───────────────────────────");
313
436
  console.log(`Enabled: ✓ Yes`);
314
437
  console.log(`Key: ${memoryKey.slice(0, 6)}...${memoryKey.slice(-3)}`);
315
- console.log(`Density: ${density} (openclaw mr xhigh|high|low)`);
438
+ console.log(`Mode: ${mode}`);
439
+ console.log(`Density: ${density}`);
316
440
  console.log(`Endpoint: ${endpoint}`);
317
- console.log("");
318
- console.log("Vault Stats:");
319
- const stats = data as { totalVectors?: number; totalTokens?: number };
320
- console.log(` Memories: ${stats.totalVectors ?? 0}`);
321
- console.log(` Tokens: ${stats.totalTokens ?? 0}`);
441
+ console.log(`Memories: ${data.totalVectors ?? 0}`);
442
+ console.log(`Tokens: ${data.totalTokens ?? 0}`);
322
443
  }
323
444
  } catch (err) {
324
- console.error(`Failed to fetch stats: ${err instanceof Error ? err.message : String(err)}`);
445
+ console.error(`Failed: ${err instanceof Error ? err.message : String(err)}`);
325
446
  }
326
447
  });
327
448
 
328
449
  mr.command("upload")
329
450
  .description("Upload workspace + session history to vault")
330
451
  .argument("[path]", "Specific file or directory to upload")
331
- .option("--workspace <dir>", "Workspace directory (default: cwd)")
332
- .option("--brain <dir>", "State directory with sessions (default: ~/.openclaw)")
452
+ .option("--workspace <dir>", "Workspace directory")
453
+ .option("--brain <dir>", "State directory with sessions")
333
454
  .action(async (targetPath: string | undefined, opts: { workspace?: string; brain?: string }) => {
334
- if (!memoryKey) {
335
- console.error("MemoryRouter not configured. Run: openclaw mr <key>");
336
- return;
337
- }
455
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
338
456
  const os = await import("node:os");
339
457
  const path = await import("node:path");
340
458
  const stateDir = opts.brain ? path.resolve(opts.brain) : path.join(os.homedir(), ".openclaw");
341
- // Use OpenClaw's configured workspace, not cwd
342
- const configWorkspace = api.config.workspace || api.config.agents?.defaults?.workspace;
459
+ const configWorkspace = (api.config as any).workspace || (api.config as any).agents?.defaults?.workspace;
343
460
  const workspacePath = opts.workspace
344
461
  ? path.resolve(opts.workspace)
345
462
  : configWorkspace
346
463
  ? path.resolve(configWorkspace.replace(/^~/, os.homedir()))
347
464
  : path.join(os.homedir(), ".openclaw", "workspace");
348
465
  const { runUpload } = await import("./upload.js");
349
- await runUpload({
350
- memoryKey,
351
- endpoint,
352
- targetPath,
353
- stateDir,
354
- workspacePath,
355
- hasWorkspaceFlag: !!opts.workspace,
356
- hasBrainFlag: !!opts.brain,
357
- });
466
+ await runUpload({ memoryKey, endpoint, targetPath, stateDir, workspacePath, hasWorkspaceFlag: !!opts.workspace, hasBrainFlag: !!opts.brain });
358
467
  });
359
468
 
360
469
  mr.command("delete")
361
470
  .description("Clear all memories from vault")
362
471
  .action(async () => {
363
- if (!memoryKey) {
364
- console.error("MemoryRouter not configured. Run: openclaw mr <key>");
365
- return;
366
- }
472
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
367
473
  try {
368
474
  const res = await fetch(`${endpoint}/v1/memory`, {
369
475
  method: "DELETE",
370
476
  headers: { Authorization: `Bearer ${memoryKey}` },
371
477
  });
372
- const data = await res.json() as { message?: string };
478
+ const data = (await res.json()) as { message?: string };
373
479
  console.log(`✓ ${data.message || "Vault cleared"}`);
374
480
  } catch (err) {
375
- console.error(`Failed to clear vault: ${err instanceof Error ? err.message : String(err)}`);
481
+ console.error(`Failed: ${err instanceof Error ? err.message : String(err)}`);
376
482
  }
377
483
  });
378
484
  },
@@ -386,9 +492,7 @@ const memoryRouterPlugin = {
386
492
  api.registerService({
387
493
  id: "mr-memory",
388
494
  start: () => {
389
- if (memoryKey) {
390
- api.logger.info?.(`memoryrouter: active (key: ${memoryKey.slice(0, 6)}...)`);
391
- }
495
+ if (memoryKey) api.logger.info?.(`memoryrouter: active (key: ${memoryKey.slice(0, 6)}...)`);
392
496
  },
393
497
  stop: () => {
394
498
  api.logger.info?.("memoryrouter: stopped");
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mr-memory",
3
- "version": "1.0.11",
3
+ "version": "2.2.0",
4
4
  "description": "MemoryRouter persistent memory plugin for OpenClaw — your AI remembers every conversation",
5
5
  "type": "module",
6
6
  "files": [