mr-memory 1.0.11 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.ts +115 -86
  2. package/package.json +1 -1
package/index.ts CHANGED
@@ -1,11 +1,11 @@
1
1
  /**
2
- * MemoryRouter Plugin for OpenClaw
2
+ * MemoryRouter Plugin for OpenClaw (v2 — Relay Architecture)
3
3
  *
4
4
  * Persistent AI memory via MemoryRouter (memoryrouter.ai).
5
- * Routes LLM calls through MemoryRouter's API which injects relevant
6
- * past context and captures conversations automatically.
5
+ * Uses OpenClaw's native plugin hooks (before_agent_start + agent_end)
6
+ * to inject relevant memories and capture conversations.
7
7
  *
8
- * BYOK provider API keys pass through untouched.
8
+ * No patching required. Works with stock OpenClaw.
9
9
  */
10
10
 
11
11
  import { spawn } from "node:child_process";
@@ -19,50 +19,6 @@ type MemoryRouterConfig = {
19
19
  density?: 'low' | 'high' | 'xhigh';
20
20
  };
21
21
 
22
- type CompatApi = OpenClawPluginApi & {
23
- updatePluginConfig?: (config: Record<string, unknown>) => Promise<void>;
24
- updatePluginEnabled?: (enabled: boolean) => Promise<void>;
25
- };
26
-
27
- /**
28
- * Supported provider APIs that MemoryRouter can proxy.
29
- */
30
- const SUPPORTED_APIS = new Set([
31
- "anthropic-messages",
32
- "openai-completions",
33
- "openai-responses",
34
- "azure-openai-responses",
35
- "ollama",
36
- ]);
37
-
38
- /**
39
- * Detect if the current LLM call is a tool-use iteration (not direct user conversation).
40
- * Tool iterations have tool_result (Anthropic) or tool-role (OpenAI) messages
41
- * after the last real user message.
42
- */
43
- function isToolUseIteration(context: { messages?: Array<{ role: string; content?: unknown }> }): boolean {
44
- const messages = context.messages;
45
- if (!messages || messages.length === 0) return false;
46
-
47
- for (let i = messages.length - 1; i >= 0; i--) {
48
- const msg = messages[i];
49
-
50
- if (msg.role === "tool") return true;
51
-
52
- if (msg.role === "user" && Array.isArray(msg.content)) {
53
- const hasToolResult = (msg.content as Array<{ type?: string }>).some(
54
- (block) => block.type === "tool_result",
55
- );
56
- if (hasToolResult) return true;
57
- }
58
-
59
- if (msg.role === "user" && typeof msg.content === "string") return false;
60
- if (msg.role === "assistant") continue;
61
- }
62
-
63
- return false;
64
- }
65
-
66
22
  function resolveOpenClawInvocation(): { command: string; args: string[] } {
67
23
  const entry = process.argv[1];
68
24
  if (entry) {
@@ -108,9 +64,9 @@ async function runOpenClawConfigSet(path: string, value: string, json = false):
108
64
  }
109
65
 
110
66
  async function setPluginConfig(api: OpenClawPluginApi, config: Record<string, unknown>): Promise<void> {
111
- const compat = api as CompatApi;
112
- if (typeof compat.updatePluginConfig === "function") {
113
- await compat.updatePluginConfig(config);
67
+ const anyApi = api as any;
68
+ if (typeof anyApi.updatePluginConfig === "function") {
69
+ await anyApi.updatePluginConfig(config);
114
70
  return;
115
71
  }
116
72
 
@@ -118,9 +74,9 @@ async function setPluginConfig(api: OpenClawPluginApi, config: Record<string, un
118
74
  }
119
75
 
120
76
  async function setPluginEnabled(api: OpenClawPluginApi, enabled: boolean): Promise<void> {
121
- const compat = api as CompatApi;
122
- if (typeof compat.updatePluginEnabled === "function") {
123
- await compat.updatePluginEnabled(enabled);
77
+ const anyApi = api as any;
78
+ if (typeof anyApi.updatePluginEnabled === "function") {
79
+ await anyApi.updatePluginEnabled(enabled);
124
80
  return;
125
81
  }
126
82
 
@@ -139,7 +95,7 @@ const memoryRouterPlugin = {
139
95
  const density = cfg?.density || 'high';
140
96
 
141
97
  // ==================================================================
142
- // Core: Route LLM calls through MemoryRouter (only when key is set)
98
+ // Core: Relay architecture memory via API hooks (no patching needed)
143
99
  // ==================================================================
144
100
 
145
101
  if (memoryKey) {
@@ -149,41 +105,114 @@ const memoryRouterPlugin = {
149
105
  }
150
106
 
151
107
  if (memoryKey) {
152
- api.registerStreamFnWrapper((next) => {
153
- return (model, context, options) => {
154
- // Only proxy supported APIs
155
- if (!SUPPORTED_APIS.has(model.api)) {
156
- return next(model, context, options);
108
+ // RETRIEVAL — inject relevant memories before every agent turn
109
+ api.on("before_agent_start", async (event: Record<string, unknown>) => {
110
+ const prompt = event.prompt as string | undefined;
111
+ if (!prompt || prompt.length < 5) return;
112
+
113
+ try {
114
+ const res = await fetch(`${endpoint}/v1/memory/prepare`, {
115
+ method: "POST",
116
+ headers: {
117
+ "Authorization": `Bearer ${memoryKey}`,
118
+ "Content-Type": "application/json",
119
+ },
120
+ body: JSON.stringify({
121
+ messages: [{ role: "user", content: prompt }],
122
+ density,
123
+ }),
124
+ });
125
+
126
+ if (!res.ok) {
127
+ api.logger.warn?.(`memoryrouter: prepare failed (${res.status})`);
128
+ return;
129
+ }
130
+
131
+ const data = await res.json() as { context: string | null; tokens_billed: number; memories_found: number };
132
+
133
+ if (data.context) {
134
+ api.logger.info?.(`memoryrouter: injected ${data.memories_found} memories (${data.tokens_billed} tokens)`);
135
+ return { prependContext: data.context };
136
+ }
137
+ } catch (err) {
138
+ api.logger.warn?.(`memoryrouter: prepare error: ${String(err)}`);
157
139
  }
140
+ });
141
+
142
+ // STORAGE — capture conversation after every agent turn
143
+ // Only stores the LAST user message + assistant response per turn (no duplication)
144
+ api.on("agent_end", async (event: Record<string, unknown>) => {
145
+ if (!event.success || !event.messages) return;
146
+
147
+ const msgs = event.messages as Array<Record<string, unknown>>;
148
+ if (!msgs.length) return;
149
+
150
+ try {
151
+ // Walk backwards: find the last assistant, then the last user before it
152
+ let assistantMsg: { role: string; content: string } | null = null;
153
+ let userMsg: { role: string; content: string } | null = null;
154
+
155
+ for (let i = msgs.length - 1; i >= 0; i--) {
156
+ const role = msgs[i].role as string;
157
+ if (!role) continue;
158
+
159
+ // Extract text content
160
+ let text = "";
161
+ const content = msgs[i].content;
162
+ if (typeof content === "string") {
163
+ text = content;
164
+ } else if (Array.isArray(content)) {
165
+ for (const block of content as Array<Record<string, unknown>>) {
166
+ if (typeof block.text === "string") {
167
+ text += (text ? "\n" : "") + block.text;
168
+ }
169
+ }
170
+ }
158
171
 
159
- // Route through MemoryRouter
160
- const mrModel = {
161
- ...model,
162
- baseUrl: model.api === "anthropic-messages"
163
- ? endpoint // Anthropic: baseUrl is without /v1
164
- : `${endpoint}/v1`,
165
- };
172
+ if (!text || text.length < 10) continue;
166
173
 
167
- // Detect tool iterations don't store intermediate work
168
- const toolIteration = isToolUseIteration(
169
- context as { messages?: Array<{ role: string; content?: unknown }> },
170
- );
171
-
172
- // Inject MemoryRouter headers
173
- const mrOptions = {
174
- ...options,
175
- headers: {
176
- ...options?.headers,
177
- "X-Memory-Key": memoryKey,
178
- "X-Memory-Store": toolIteration ? "false" : "true",
179
- "X-Memory-Density": density,
180
- },
181
- };
174
+ if (!assistantMsg && role === "assistant") {
175
+ assistantMsg = { role, content: text };
176
+ }
177
+ if (assistantMsg && !userMsg && role === "user") {
178
+ // Strip any injected memory context from user messages
179
+ let cleanText = text;
180
+ if (cleanText.includes("[Memory Context]")) {
181
+ cleanText = cleanText.replace(/\[Memory Context\][\s\S]*?\n\n/g, "").trim();
182
+ }
183
+ if (cleanText.length >= 10) {
184
+ userMsg = { role, content: cleanText };
185
+ }
186
+ break;
187
+ }
188
+ }
182
189
 
183
- return next(mrModel, context, mrOptions);
184
- };
185
- });
186
- } // end if (memoryKey) for streamFn wrapper
190
+ // Only store this turn's pair — no duplication
191
+ const toStore: Array<{ role: string; content: string }> = [];
192
+ if (userMsg) toStore.push(userMsg);
193
+ if (assistantMsg) toStore.push(assistantMsg);
194
+
195
+ if (toStore.length === 0) return;
196
+
197
+ const res = await fetch(`${endpoint}/v1/memory/ingest`, {
198
+ method: "POST",
199
+ headers: {
200
+ "Authorization": `Bearer ${memoryKey}`,
201
+ "Content-Type": "application/json",
202
+ },
203
+ body: JSON.stringify({
204
+ messages: toStore,
205
+ }),
206
+ });
207
+
208
+ if (!res.ok) {
209
+ api.logger.warn?.(`memoryrouter: ingest failed (${res.status})`);
210
+ }
211
+ } catch (err) {
212
+ api.logger.warn?.(`memoryrouter: ingest error: ${String(err)}`);
213
+ }
214
+ });
215
+ } // end if (memoryKey) for relay hooks
187
216
 
188
217
  // ==================================================================
189
218
  // CLI Commands (always registered — even without key, for enable/off)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mr-memory",
3
- "version": "1.0.11",
3
+ "version": "2.0.0",
4
4
  "description": "MemoryRouter persistent memory plugin for OpenClaw — your AI remembers every conversation",
5
5
  "type": "module",
6
6
  "files": [