mr-memory 2.2.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.ts +75 -0
  2. package/package.json +1 -1
package/index.ts CHANGED
@@ -147,8 +147,83 @@ const memoryRouterPlugin = {
   if (memoryKey) {
     // Track whether we've already fired for this prompt (dedup double-fire)
     let lastPreparedPrompt = "";
+    // Track whether before_prompt_build already handled the first call in this run
+    let promptBuildFiredThisRun = false;
+
+    // ── llm_input: fires on EVERY LLM call (tool iterations, cron, sub-agents)
+    // On stock OpenClaw, the return value is ignored (fire-and-forget).
+    // When PR #24122 merges, OpenClaw will use the returned prependContext.
+    // This gives forward compatibility — no plugin update needed.
+    api.on("llm_input", async (event, ctx) => {
+      // Skip the first call — before_prompt_build already handled it
+      // (before_prompt_build includes workspace+tools+skills for accurate billing)
+      if (promptBuildFiredThisRun) {
+        promptBuildFiredThisRun = false; // reset so subsequent calls go through
+        return;
+      }
+
+      try {
+        const prompt = event.prompt;
+        if (prompt === lastPreparedPrompt && lastPreparedPrompt !== "") return;
+        lastPreparedPrompt = prompt;
+
+        // Build lightweight context (no workspace/tools — just history + prompt)
+        const contextPayload: Array<{ role: string; content: string }> = [];
+        if (event.historyMessages && Array.isArray(event.historyMessages)) {
+          for (const msg of event.historyMessages) {
+            const m = msg as { role?: string; content?: unknown };
+            if (!m.role) continue;
+            let text = "";
+            if (typeof m.content === "string") text = m.content;
+            else if (Array.isArray(m.content)) {
+              text = (m.content as Array<{ type?: string; text?: string }>)
+                .filter(b => b.type === "text" && b.text)
+                .map(b => b.text!)
+                .join("\n");
+            }
+            if (text) contextPayload.push({ role: m.role, content: text });
+          }
+        }
+        contextPayload.push({ role: "user", content: prompt });
+
+        const densityMap: Record<string, number> = { low: 40, high: 80, xhigh: 160 };
+        const contextLimit = densityMap[density] || 80;
+
+        const res = await fetch(`${endpoint}/v1/memory/prepare`, {
+          method: "POST",
+          headers: {
+            "Content-Type": "application/json",
+            Authorization: `Bearer ${memoryKey}`,
+          },
+          body: JSON.stringify({
+            messages: contextPayload,
+            density,
+            context_limit: contextLimit,
+          }),
+        });
+
+        if (!res.ok) return;
+
+        const data = (await res.json()) as {
+          context?: string;
+          memories_found?: number;
+          tokens_billed?: number;
+        };
+
+        if (data.context) {
+          api.logger.info?.(
+            `memoryrouter: injected ${data.memories_found || 0} memories on tool iteration (${data.tokens_billed || 0} tokens billed)`,
+          );
+          return { prependContext: data.context };
+        }
+      } catch {
+        // Silent fail on tool iterations — don't block the agent
+      }
+    });
 
+    // ── before_prompt_build: fires once per run (primary, includes full billing context)
     api.on("before_prompt_build", async (event, ctx) => {
+      promptBuildFiredThisRun = true;
       try {
         const prompt = event.prompt;
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "mr-memory",
-  "version": "2.2.0",
+  "version": "2.3.0",
   "description": "MemoryRouter persistent memory plugin for OpenClaw — your AI remembers every conversation",
   "type": "module",
   "files": [