mr-memory 2.8.0 → 2.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.ts +133 -201
  2. package/package.json +1 -1
package/index.ts CHANGED
@@ -8,23 +8,33 @@
8
8
  * BYOK — provider API keys never touch MemoryRouter.
9
9
  */
10
10
 
11
- import { readFile } from "node:fs/promises";
11
+ import { readFile, readdir, stat } from "node:fs/promises";
12
12
  import { join } from "node:path";
13
13
  import { spawn } from "node:child_process";
14
14
  import type { OpenClawPluginApi } from "openclaw/plugin-sdk";
15
15
 
16
16
  const DEFAULT_ENDPOINT = "https://api.memoryrouter.ai";
17
17
 
18
+ /** Wrap raw memory context in <mr-memory> XML tags for injection */
19
+ /** Wrap API response in extraction markers so we can strip it next turn. */
20
+ function wrapForInjection(context: string): string {
21
+ return `<mr-memory>\n${context}\n</mr-memory>`;
22
+ }
23
+
24
+ /** Strip previous memory injections from message text to prevent stacking.
25
+ * prependContext persists in conversation history — without stripping,
26
+ * each turn accumulates another full injection (~20K tokens). */
27
+ const MEMORY_TAG_RE = /<mr-memory>[\s\S]*?<\/mr-memory>\s*/g;
28
+ /** Legacy tag pattern for backward compat (pre-2.7.0 injections still in history) */
29
+ const LEGACY_TAG_RE = /<memory_context>[\s\S]*?<\/memory_context>\s*(?:The above are retrieved memories|IMPORTANT: The above block contains retrieved memories)[^\n]*\n*/g;
30
+ function stripOldMemory(text: string): string {
31
+ return text.replace(MEMORY_TAG_RE, "").replace(LEGACY_TAG_RE, "").trim();
32
+ }
33
+
18
34
  // Workspace files OpenClaw loads into the system prompt
19
35
  const WORKSPACE_FILES = [
20
- "IDENTITY.md",
21
- "USER.md",
22
- "MEMORY.md",
23
- "HEARTBEAT.md",
24
- "TOOLS.md",
25
- "AGENTS.md",
26
- "SOUL.md",
27
- "BOOTSTRAP.md",
36
+ "IDENTITY.md", "USER.md", "MEMORY.md", "HEARTBEAT.md",
37
+ "TOOLS.md", "AGENTS.md", "SOUL.md", "BOOTSTRAP.md",
28
38
  ];
29
39
 
30
40
  type MemoryRouterConfig = {
@@ -56,9 +66,7 @@ async function runOpenClawConfigSet(path: string, value: string, json = false):
56
66
  env: process.env,
57
67
  });
58
68
  let stderr = "";
59
- child.stderr.on("data", (chunk) => {
60
- stderr += String(chunk);
61
- });
69
+ child.stderr.on("data", (chunk) => { stderr += String(chunk); });
62
70
  child.on("error", reject);
63
71
  child.on("close", (code) => {
64
72
  if (code === 0) resolve();
@@ -72,10 +80,7 @@ type CompatApi = OpenClawPluginApi & {
72
80
  updatePluginEnabled?: (enabled: boolean) => Promise<void>;
73
81
  };
74
82
 
75
- async function setPluginConfig(
76
- api: OpenClawPluginApi,
77
- config: Record<string, unknown>,
78
- ): Promise<void> {
83
+ async function setPluginConfig(api: OpenClawPluginApi, config: Record<string, unknown>): Promise<void> {
79
84
  const compat = api as CompatApi;
80
85
  if (typeof compat.updatePluginConfig === "function") {
81
86
  await compat.updatePluginConfig(config);
@@ -102,9 +107,7 @@ async function readWorkspaceFiles(workspaceDir: string): Promise<string> {
102
107
  try {
103
108
  const content = await readFile(join(workspaceDir, file), "utf8");
104
109
  if (content.trim()) parts.push(`## ${file}\n${content}`);
105
- } catch {
106
- /* file doesn't exist — skip */
107
- }
110
+ } catch { /* file doesn't exist — skip */ }
108
111
  }
109
112
  return parts.join("\n\n");
110
113
  }
@@ -117,9 +120,7 @@ function serializeToolsConfig(config: Record<string, unknown>): string {
117
120
  if (!tools) return "";
118
121
  try {
119
122
  return `## Tools Config\n${JSON.stringify(tools, null, 2)}`;
120
- } catch {
121
- return "";
122
- }
123
+ } catch { return ""; }
123
124
  }
124
125
 
125
126
  /**
@@ -131,9 +132,7 @@ function serializeSkillsConfig(config: Record<string, unknown>): string {
131
132
  try {
132
133
  const names = Object.keys(skills);
133
134
  return `## Skills (${names.length})\n${names.join(", ")}`;
134
- } catch {
135
- return "";
136
- }
135
+ } catch { return ""; }
137
136
  }
138
137
 
139
138
  // ──────────────────────────────────────────────────────
@@ -151,7 +150,7 @@ const memoryRouterPlugin = {
151
150
  const memoryKey = cfg?.key;
152
151
  const density = cfg?.density || "high";
153
152
  const mode = cfg?.mode || "relay";
154
- const logging = cfg?.logging ?? false; // off by default
153
+ const logging = cfg?.logging ?? false;
155
154
  const log = (msg: string) => { if (logging) api.logger.info?.(msg); };
156
155
 
157
156
  if (memoryKey) {
@@ -165,42 +164,28 @@ const memoryRouterPlugin = {
165
164
  // ==================================================================
166
165
 
167
166
  if (memoryKey) {
168
- // Track prompt-build dedupe independently from llm_input dedupe.
169
- let lastPromptBuildPrompt = "";
170
- // Skip exactly one matching llm_input after a successful before_prompt_build call.
171
- let skipNextLlmInput: { prompt: string; sessionKey?: string } | null = null;
172
- // Deduplicate repeated llm_input events without suppressing unrelated calls.
173
- let lastLlmInputSignature = "";
167
+ // Track whether we've already fired for this prompt (dedup double-fire)
168
+ let lastPreparedPrompt = "";
169
+ // Track whether before_prompt_build already handled the first call in this run
170
+ let promptBuildFiredThisRun = false;
174
171
 
175
172
  // ── llm_input: fires on EVERY LLM call (tool iterations, cron, sub-agents)
176
173
  // On stock OpenClaw, the return value is ignored (fire-and-forget).
177
174
  // When PR #24122 merges, OpenClaw will use the returned prependContext.
178
175
  // This gives forward compatibility — no plugin update needed.
179
176
  api.on("llm_input", async (event, ctx) => {
177
+ log(`memoryrouter: llm_input fired (sessionKey=${ctx.sessionKey}, promptBuildFired=${promptBuildFiredThisRun})`);
178
+ // Skip the first call — before_prompt_build already handled it
179
+ // (before_prompt_build includes workspace+tools+skills for accurate billing)
180
+ if (promptBuildFiredThisRun) {
181
+ promptBuildFiredThisRun = false; // reset so subsequent calls go through
182
+ return;
183
+ }
184
+
180
185
  try {
181
186
  const prompt = event.prompt;
182
-
183
- // Skip one duplicate llm_input right after a successful before_prompt_build.
184
- if (
185
- skipNextLlmInput &&
186
- skipNextLlmInput.prompt === prompt &&
187
- skipNextLlmInput.sessionKey === ctx.sessionKey
188
- ) {
189
- skipNextLlmInput = null;
190
- return;
191
- }
192
- if (skipNextLlmInput && skipNextLlmInput.sessionKey !== ctx.sessionKey) {
193
- skipNextLlmInput = null;
194
- }
195
-
196
- // Dedupe exact repeated llm_input events only (same run/session/prompt/history size).
197
- const llmInputSignature = [
198
- ctx.sessionKey || "",
199
- prompt,
200
- String(Array.isArray(event.historyMessages) ? event.historyMessages.length : 0),
201
- ].join("|");
202
- if (llmInputSignature === lastLlmInputSignature) return;
203
- lastLlmInputSignature = llmInputSignature;
187
+ if (prompt === lastPreparedPrompt && lastPreparedPrompt !== "") return;
188
+ lastPreparedPrompt = prompt;
204
189
 
205
190
  // Build lightweight context (no workspace/tools — just history + prompt)
206
191
  const contextPayload: Array<{ role: string; content: string }> = [];
@@ -212,14 +197,16 @@ const memoryRouterPlugin = {
212
197
  if (typeof m.content === "string") text = m.content;
213
198
  else if (Array.isArray(m.content)) {
214
199
  text = (m.content as Array<{ type?: string; text?: string }>)
215
- .filter((b) => b.type === "text" && b.text)
216
- .map((b) => b.text!)
200
+ .filter(b => b.type === "text" && b.text)
201
+ .map(b => b.text!)
217
202
  .join("\n");
218
203
  }
219
- if (text) contextPayload.push({ role: m.role, content: text });
204
+ // Strip old memory injections to prevent stacking
205
+ if (text) contextPayload.push({ role: m.role, content: m.role === "user" ? stripOldMemory(text) : text });
220
206
  }
221
207
  }
222
- contextPayload.push({ role: "user", content: prompt });
208
+ contextPayload.push({ role: "user", content: stripOldMemory(prompt) });
209
+
223
210
 
224
211
  const res = await fetch(`${endpoint}/v1/memory/prepare`, {
225
212
  method: "POST",
@@ -231,15 +218,11 @@ const memoryRouterPlugin = {
231
218
  messages: contextPayload,
232
219
  session_id: ctx.sessionKey,
233
220
  density,
221
+
234
222
  }),
235
223
  });
236
224
 
237
- if (!res.ok) {
238
- const details = await res.text().catch(() => "");
239
- const suffix = details ? ` — ${details.slice(0, 200)}` : "";
240
- log(`memoryrouter: llm_input prepare failed (${res.status})${suffix}`);
241
- return;
242
- }
225
+ if (!res.ok) return;
243
226
 
244
227
  const data = (await res.json()) as {
245
228
  context?: string;
@@ -248,26 +231,28 @@ const memoryRouterPlugin = {
248
231
  };
249
232
 
250
233
  if (data.context) {
251
- log(`memoryrouter: injected ${data.memories_found || 0} memories on tool iteration (${data.tokens_billed || 0} tokens)`);
252
- // llm_input is typed as void on current OpenClaw builds; cast keeps
253
- // runtime forward-compat for builds that consume prependContext.
254
- return { prependContext: data.context } as unknown as void;
234
+ log(
235
+ `memoryrouter: injected ${data.memories_found || 0} memories on tool iteration (${data.tokens_billed || 0} tokens)`,
236
+ );
237
+ return { prependContext: wrapForInjection(data.context) };
255
238
  }
256
- } catch (err) {
257
- log(`memoryrouter: llm_input prepare error ${err instanceof Error ? err.message : String(err)}`);
239
+ } catch {
240
+ // Silent fail — errors on tool iterations shouldn't block the agent
258
241
  }
259
242
  });
260
243
 
261
244
  // ── before_prompt_build: fires once per run (primary, includes full billing context)
262
245
  api.on("before_prompt_build", async (event, ctx) => {
246
+ promptBuildFiredThisRun = true;
247
+ log(`memoryrouter: before_prompt_build fired (sessionKey=${ctx.sessionKey}, promptLen=${event.prompt?.length})`);
263
248
  try {
264
249
  const prompt = event.prompt;
265
250
 
266
- // Dedupe only within before_prompt_build path.
267
- if (prompt === lastPromptBuildPrompt && lastPromptBuildPrompt !== "") {
251
+ // Deduplicate: if we already prepared this exact prompt, skip
252
+ if (prompt === lastPreparedPrompt && lastPreparedPrompt !== "") {
268
253
  return;
269
254
  }
270
- lastPromptBuildPrompt = prompt;
255
+ lastPreparedPrompt = prompt;
271
256
 
272
257
  // 1. Read workspace files for full token count
273
258
  const workspaceDir = ctx.workspaceDir || "";
@@ -278,9 +263,7 @@ const memoryRouterPlugin = {
278
263
 
279
264
  // 2. Serialize tools + skills from config
280
265
  const toolsText = serializeToolsConfig(api.config as unknown as Record<string, unknown>);
281
- const skillsText = serializeSkillsConfig(
282
- api.config as unknown as Record<string, unknown>,
283
- );
266
+ const skillsText = serializeSkillsConfig(api.config as unknown as Record<string, unknown>);
284
267
 
285
268
  // 3. Build full context payload (messages + workspace + tools + skills)
286
269
  const contextPayload: Array<{ role: string; content: string }> = [];
@@ -293,29 +276,33 @@ const memoryRouterPlugin = {
293
276
 
294
277
  // Add conversation history
295
278
  if (event.messages && Array.isArray(event.messages)) {
279
+ let skipped = 0;
296
280
  for (const msg of event.messages) {
297
281
  const m = msg as { role?: string; content?: unknown };
298
282
  if (!m.role) continue;
299
-
283
+
300
284
  let text = "";
301
285
  if (typeof m.content === "string") {
302
286
  text = m.content;
303
287
  } else if (Array.isArray(m.content)) {
304
288
  // Handle Anthropic-style content blocks [{type:"text", text:"..."}, ...]
305
289
  text = (m.content as Array<{ type?: string; text?: string }>)
306
- .filter((b) => b.type === "text" && b.text)
307
- .map((b) => b.text!)
290
+ .filter(b => b.type === "text" && b.text)
291
+ .map(b => b.text!)
308
292
  .join("\n");
309
293
  }
310
-
294
+
311
295
  if (text) {
312
- contextPayload.push({ role: m.role, content: text });
296
+ // Strip old memory injections to prevent stacking
297
+ contextPayload.push({ role: m.role, content: m.role === "user" ? stripOldMemory(text) : text });
298
+ } else {
299
+ skipped++;
313
300
  }
314
301
  }
315
302
  }
316
303
 
317
- // Add current user prompt
318
- contextPayload.push({ role: "user", content: prompt });
304
+ // Add current user prompt (strip any residual memory tags)
305
+ contextPayload.push({ role: "user", content: stripOldMemory(prompt) });
319
306
 
320
307
  // 4. Call /v1/memory/prepare
321
308
 
@@ -329,6 +316,7 @@ const memoryRouterPlugin = {
329
316
  messages: contextPayload,
330
317
  session_id: ctx.sessionKey,
331
318
  density,
319
+
332
320
  }),
333
321
  });
334
322
 
@@ -337,9 +325,6 @@ const memoryRouterPlugin = {
337
325
  return;
338
326
  }
339
327
 
340
- // Suppress the immediately-following duplicate llm_input for this prompt/session.
341
- skipNextLlmInput = { prompt, sessionKey: ctx.sessionKey };
342
-
343
328
  const data = (await res.json()) as {
344
329
  context?: string;
345
330
  memories_found?: number;
@@ -347,11 +332,15 @@ const memoryRouterPlugin = {
347
332
  };
348
333
 
349
334
  if (data.context) {
350
- log(`memoryrouter: injected ${data.memories_found || 0} memories (${data.tokens_billed || 0} tokens)`);
351
- return { prependContext: data.context };
335
+ log(
336
+ `memoryrouter: injected ${data.memories_found || 0} memories (${data.tokens_billed || 0} tokens)`,
337
+ );
338
+ return { prependContext: wrapForInjection(data.context) };
352
339
  }
353
340
  } catch (err) {
354
- log(`memoryrouter: prepare error — ${err instanceof Error ? err.message : String(err)}`);
341
+ log(
342
+ `memoryrouter: prepare error — ${err instanceof Error ? err.message : String(err)}`,
343
+ );
355
344
  }
356
345
  });
357
346
 
@@ -364,22 +353,13 @@ const memoryRouterPlugin = {
364
353
  const msgs = event.messages;
365
354
  if (!msgs || !Array.isArray(msgs) || msgs.length === 0) return;
366
355
 
367
- // Default relay behavior: do not store subagent sessions.
368
- const sessionKey = ctx.sessionKey || "";
369
- if (typeof sessionKey === "string" && sessionKey.includes(":subagent:")) {
370
- api.logger.debug?.(
371
- `memoryrouter: skipping ingest for subagent session (${sessionKey})`,
372
- );
373
- return;
374
- }
375
-
376
356
  // Extract text from a message (handles string + content block arrays)
377
357
  function extractText(content: unknown): string {
378
358
  if (typeof content === "string") return content;
379
359
  if (Array.isArray(content)) {
380
360
  return (content as Array<{ type?: string; text?: string }>)
381
- .filter((b) => b.type === "text" && b.text)
382
- .map((b) => b.text!)
361
+ .filter(b => b.type === "text" && b.text)
362
+ .map(b => b.text!)
383
363
  .join("\n");
384
364
  }
385
365
  return "";
@@ -408,7 +388,7 @@ const memoryRouterPlugin = {
408
388
 
409
389
  // Collect ALL assistant messages after the last user message
410
390
  const assistantParts: string[] = [];
411
- for (let i = lastUserIdx >= 0 ? lastUserIdx + 1 : 0; i < msgs.length; i++) {
391
+ for (let i = (lastUserIdx >= 0 ? lastUserIdx + 1 : 0); i < msgs.length; i++) {
412
392
  const msg = msgs[i] as { role?: string; content?: unknown };
413
393
  if (msg.role === "assistant") {
414
394
  const text = extractText(msg.content);
@@ -421,31 +401,36 @@ const memoryRouterPlugin = {
421
401
 
422
402
  if (toStore.length === 0) return;
423
403
 
424
- // Fire-and-forget: don't block agent completion on memory ingestion.
425
- void fetch(`${endpoint}/v1/memory/ingest`, {
426
- method: "POST",
427
- headers: {
428
- "Content-Type": "application/json",
429
- Authorization: `Bearer ${memoryKey}`,
430
- },
431
- body: JSON.stringify({
432
- messages: toStore,
433
- session_id: ctx.sessionKey,
434
- model: "unknown",
435
- }),
436
- })
437
- .then(async (res) => {
438
- if (!res.ok) {
439
- const details = await res.text().catch(() => "");
440
- const suffix = details ? ` — ${details.slice(0, 200)}` : "";
441
- log(`memoryrouter: ingest failed (${res.status})${suffix}`);
442
- }
443
- })
444
- .catch((err) => {
445
- log(`memoryrouter: ingest error — ${err instanceof Error ? err.message : String(err)}`);
404
+ // Await the fetch so OpenClaw's runVoidHook keeps the event loop alive.
405
+ try {
406
+ const res = await fetch(`${endpoint}/v1/memory/ingest`, {
407
+ method: "POST",
408
+ headers: {
409
+ "Content-Type": "application/json",
410
+ Authorization: `Bearer ${memoryKey}`,
411
+ },
412
+ body: JSON.stringify({
413
+ messages: toStore,
414
+ session_id: ctx.sessionKey,
415
+ model: "unknown",
416
+ }),
446
417
  });
418
+ if (!res.ok) {
419
+ const details = await res.text().catch(() => "");
420
+ const suffix = details ? ` — ${details.slice(0, 200)}` : "";
421
+ log(`memoryrouter: ingest failed (${res.status})${suffix}`);
422
+ } else {
423
+ api.logger.debug?.(`memoryrouter: ingest accepted (${toStore.length} messages)`);
424
+ }
425
+ } catch (err) {
426
+ log(
427
+ `memoryrouter: ingest error — ${err instanceof Error ? err.message : String(err)}`,
428
+ );
429
+ }
447
430
  } catch (err) {
448
- log(`memoryrouter: agent_end error — ${err instanceof Error ? err.message : String(err)}`);
431
+ log(
432
+ `memoryrouter: agent_end error — ${err instanceof Error ? err.message : String(err)}`,
433
+ );
449
434
  }
450
435
  });
451
436
  } // end if (memoryKey)
@@ -477,19 +462,14 @@ const memoryRouterPlugin = {
477
462
  .description("MemoryRouter memory commands")
478
463
  .argument("[key]", "Your MemoryRouter memory key (mk_xxx)")
479
464
  .action(async (key: string | undefined) => {
480
- if (!key) {
481
- mr.help();
482
- return;
483
- }
465
+ if (!key) { mr.help(); return; }
484
466
  await applyKey(key);
485
467
  });
486
468
 
487
469
  mr.command("enable")
488
470
  .description("Enable MemoryRouter with a memory key (alias)")
489
471
  .argument("<key>", "Your MemoryRouter memory key (mk_xxx)")
490
- .action(async (key: string) => {
491
- await applyKey(key);
492
- });
472
+ .action(async (key: string) => { await applyKey(key); });
493
473
 
494
474
  mr.command("off")
495
475
  .description("Disable MemoryRouter (removes key)")
@@ -499,9 +479,7 @@ const memoryRouterPlugin = {
499
479
  await setPluginEnabled(api, false);
500
480
  console.log("✓ MemoryRouter disabled.");
501
481
  } catch (err) {
502
- console.error(
503
- `Failed to disable: ${err instanceof Error ? err.message : String(err)}`,
504
- );
482
+ console.error(`Failed to disable: ${err instanceof Error ? err.message : String(err)}`);
505
483
  }
506
484
  });
507
485
 
@@ -514,16 +492,9 @@ const memoryRouterPlugin = {
514
492
  mr.command(name)
515
493
  .description(desc)
516
494
  .action(async () => {
517
- if (!memoryKey) {
518
- console.error("Not configured. Run: openclaw mr <key>");
519
- return;
520
- }
495
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
521
496
  try {
522
- await setPluginConfig(api, {
523
- key: memoryKey,
524
- endpoint: cfg?.endpoint,
525
- density: name,
526
- });
497
+ await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density: name });
527
498
  console.log(`✓ Memory density set to ${name}`);
528
499
  } catch (err) {
529
500
  console.error(`Failed: ${err instanceof Error ? err.message : String(err)}`);
@@ -562,17 +533,9 @@ const memoryRouterPlugin = {
562
533
  mr.command(modeName)
563
534
  .description(modeDesc)
564
535
  .action(async () => {
565
- if (!memoryKey) {
566
- console.error("Not configured. Run: openclaw mr <key>");
567
- return;
568
- }
536
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
569
537
  try {
570
- await setPluginConfig(api, {
571
- key: memoryKey,
572
- endpoint: cfg?.endpoint,
573
- density,
574
- mode: modeName,
575
- });
538
+ await setPluginConfig(api, { key: memoryKey, endpoint: cfg?.endpoint, density, mode: modeName });
576
539
  console.log(`✓ Mode set to ${modeName}`);
577
540
  } catch (err) {
578
541
  console.error(`Failed: ${err instanceof Error ? err.message : String(err)}`);
@@ -584,23 +547,14 @@ const memoryRouterPlugin = {
584
547
  .description("Show MemoryRouter vault stats")
585
548
  .option("--json", "JSON output")
586
549
  .action(async (opts) => {
587
- if (!memoryKey) {
588
- console.error("Not configured. Run: openclaw mr <key>");
589
- return;
590
- }
550
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
591
551
  try {
592
552
  const res = await fetch(`${endpoint}/v1/memory/stats`, {
593
553
  headers: { Authorization: `Bearer ${memoryKey}` },
594
554
  });
595
555
  const data = (await res.json()) as { totalVectors?: number; totalTokens?: number };
596
556
  if (opts.json) {
597
- console.log(
598
- JSON.stringify(
599
- { enabled: true, key: memoryKey, density, mode, stats: data },
600
- null,
601
- 2,
602
- ),
603
- );
557
+ console.log(JSON.stringify({ enabled: true, key: memoryKey, density, mode, stats: data }, null, 2));
604
558
  } else {
605
559
  console.log("MemoryRouter Status");
606
560
  console.log("───────────────────────────");
@@ -622,47 +576,25 @@ const memoryRouterPlugin = {
622
576
  .argument("[path]", "Specific file or directory to upload")
623
577
  .option("--workspace <dir>", "Workspace directory")
624
578
  .option("--brain <dir>", "State directory with sessions")
625
- .action(
626
- async (
627
- targetPath: string | undefined,
628
- opts: { workspace?: string; brain?: string },
629
- ) => {
630
- if (!memoryKey) {
631
- console.error("Not configured. Run: openclaw mr <key>");
632
- return;
633
- }
634
- const os = await import("node:os");
635
- const path = await import("node:path");
636
- const stateDir = opts.brain
637
- ? path.resolve(opts.brain)
638
- : path.join(os.homedir(), ".openclaw");
639
- const configWorkspace =
640
- (api.config as any).workspace || (api.config as any).agents?.defaults?.workspace;
641
- const workspacePath = opts.workspace
642
- ? path.resolve(opts.workspace)
643
- : configWorkspace
644
- ? path.resolve(configWorkspace.replace(/^~/, os.homedir()))
645
- : path.join(os.homedir(), ".openclaw", "workspace");
646
- const { runUpload } = await import("./upload.js");
647
- await runUpload({
648
- memoryKey,
649
- endpoint,
650
- targetPath,
651
- stateDir,
652
- workspacePath,
653
- hasWorkspaceFlag: !!opts.workspace,
654
- hasBrainFlag: !!opts.brain,
655
- });
656
- },
657
- );
579
+ .action(async (targetPath: string | undefined, opts: { workspace?: string; brain?: string }) => {
580
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
581
+ const os = await import("node:os");
582
+ const path = await import("node:path");
583
+ const stateDir = opts.brain ? path.resolve(opts.brain) : path.join(os.homedir(), ".openclaw");
584
+ const configWorkspace = (api.config as any).workspace || (api.config as any).agents?.defaults?.workspace;
585
+ const workspacePath = opts.workspace
586
+ ? path.resolve(opts.workspace)
587
+ : configWorkspace
588
+ ? path.resolve(configWorkspace.replace(/^~/, os.homedir()))
589
+ : path.join(os.homedir(), ".openclaw", "workspace");
590
+ const { runUpload } = await import("./upload.js");
591
+ await runUpload({ memoryKey, endpoint, targetPath, stateDir, workspacePath, hasWorkspaceFlag: !!opts.workspace, hasBrainFlag: !!opts.brain });
592
+ });
658
593
 
659
594
  mr.command("delete")
660
595
  .description("Clear all memories from vault")
661
596
  .action(async () => {
662
- if (!memoryKey) {
663
- console.error("Not configured. Run: openclaw mr <key>");
664
- return;
665
- }
597
+ if (!memoryKey) { console.error("Not configured. Run: openclaw mr <key>"); return; }
666
598
  try {
667
599
  const res = await fetch(`${endpoint}/v1/memory`, {
668
600
  method: "DELETE",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mr-memory",
3
- "version": "2.8.0",
3
+ "version": "2.9.0",
4
4
  "description": "MemoryRouter persistent memory plugin for OpenClaw — your AI remembers every conversation",
5
5
  "type": "module",
6
6
  "files": [