@wipcomputer/wip-ldm-os 0.4.73-alpha.30 → 0.4.73-alpha.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/deploy.mjs +63 -1
- package/package.json +1 -1
- package/src/hooks/inbox-check-hook.mjs +41 -3
- package/src/hooks/inbox-rewake-hook.mjs +79 -29
package/lib/deploy.mjs
CHANGED
|
@@ -558,6 +558,55 @@ function updateToolsAllow(pluginName) {
|
|
|
558
558
|
}
|
|
559
559
|
}
|
|
560
560
|
|
|
561
|
+
/**
 * Reconcile tools.allow against plugins.entries in ~/.openclaw/openclaw.json.
 *
 * In OpenClaw 2026.4.8+, a plugin that is registered in plugins.entries but
 * absent from tools.allow is silently blocked at runtime, and every blocked
 * tool call spawns a root-key approval prompt that floods iMessage with
 * approve-ids. Observed 2026-04-11 for the model-provider plugins (anthropic,
 * openai, xai) and imessage: they were enabled in plugins.entries but never
 * added to tools.allow, because the per-plugin updateToolsAllow path only runs
 * on new plugin deploys and the alpha.27/28 ReferenceError had silently
 * dropped those entries anyway.
 *
 * Self-healing step: at install time, walk plugins.entries, collect every
 * enabled plugin whose name is not yet in tools.allow, and append them.
 * Idempotent; disabled plugins are skipped. Invoked at both ends of
 * installFromPath so one `ldm install` repairs pre-existing broken state
 * without needing a fresh plugin deploy.
 *
 * Background:
 * ai/product/bugs/code-fka-devopstoolkit/2026-04-11--cc-mini--update-tools-allow-reference-error.md
 *
 * This function MUST remain at module top level, same as updateToolsAllow.
 */
function reconcileToolsAllow() {
  const configPath = join(OC_ROOT, 'openclaw.json');
  if (!existsSync(configPath)) return;
  try {
    const config = JSON.parse(readFileSync(configPath, 'utf8'));

    const entries = config.plugins?.entries;
    if (!entries || typeof entries !== 'object') return;
    const allow = config.tools?.allow;
    if (!allow || !Array.isArray(allow)) return;

    // Enabled plugins (enabled !== false) not yet present in tools.allow,
    // in plugins.entries insertion order.
    const missing = [];
    for (const [name, entry] of Object.entries(entries)) {
      if (!entry || entry.enabled === false) continue;
      if (!allow.includes(name)) missing.push(name);
    }
    if (missing.length === 0) return;

    allow.push(...missing);
    writeFileSync(configPath, JSON.stringify(config, null, 2) + '\n');
    log(`Reconciled openclaw.json tools.allow: added ${missing.join(', ')}`);
  } catch (e) {
    // Best effort: a malformed config must not abort the install.
    log(`Warning: failed to reconcile tools.allow: ${e.message}`);
  }
}
|
|
609
|
+
|
|
561
610
|
// ── OpenClaw plugin naming (fix #8) ──
|
|
562
611
|
|
|
563
612
|
function resolveOcPluginName(repoPath, toolName) {
|
|
@@ -1199,10 +1248,19 @@ export function installToolbox(repoPath) {
|
|
|
1199
1248
|
// ── Full install pipeline ──
|
|
1200
1249
|
|
|
1201
1250
|
export async function installFromPath(repoPath) {
|
|
1251
|
+
// Heal tools.allow before any install runs, so the current session picks up
|
|
1252
|
+
// any drift left by earlier broken installs (alpha.27/28 ReferenceError).
|
|
1253
|
+
// Idempotent: no-op if plugins.entries and tools.allow are already in sync.
|
|
1254
|
+
reconcileToolsAllow();
|
|
1255
|
+
|
|
1202
1256
|
const subTools = detectToolbox(repoPath);
|
|
1203
1257
|
|
|
1204
1258
|
if (subTools.length > 0) {
|
|
1205
|
-
|
|
1259
|
+
const result = installToolbox(repoPath);
|
|
1260
|
+
// Heal again after toolbox install in case any plugin was newly registered
|
|
1261
|
+
// in plugins.entries but never added to tools.allow by its deploy path.
|
|
1262
|
+
reconcileToolsAllow();
|
|
1263
|
+
return result;
|
|
1206
1264
|
}
|
|
1207
1265
|
|
|
1208
1266
|
const installed = installSingleTool(repoPath);
|
|
@@ -1219,6 +1277,10 @@ export async function installFromPath(repoPath) {
|
|
|
1219
1277
|
console.log('');
|
|
1220
1278
|
}
|
|
1221
1279
|
|
|
1280
|
+
// Final reconcile pass after single-tool install, for the same reason as
|
|
1281
|
+
// the toolbox branch above.
|
|
1282
|
+
reconcileToolsAllow();
|
|
1283
|
+
|
|
1222
1284
|
return { tools: 1, interfaces: installed };
|
|
1223
1285
|
}
|
|
1224
1286
|
|
package/package.json
CHANGED
|
@@ -6,11 +6,23 @@
|
|
|
6
6
|
* and surfaces them as additionalContext before CC responds.
|
|
7
7
|
*
|
|
8
8
|
* Follows guard.mjs pattern: stdin JSON in, stdout JSON out, exit 0 always.
|
|
9
|
-
*
|
|
9
|
+
*
|
|
10
|
+
* As of alpha.31 this hook DOES mark messages as `read: true` after
|
|
11
|
+
* surfacing them. Previously we deferred draining to `lesa_check_inbox`,
|
|
12
|
+
* but that caused a dedup race with `inbox-rewake-hook.mjs`: if layer 2
|
|
13
|
+
* (this hook) surfaced a message without marking it read, then layer 1
|
|
14
|
+
* (the rewake Stop hook) would fire on the same unread message on the
|
|
15
|
+
* next Stop event and re-deliver it, costing another model turn. Marking
|
|
16
|
+
* read here makes the two layers cooperative ... each unread message
|
|
17
|
+
* surfaces exactly once regardless of which layer catches it first.
|
|
18
|
+
*
|
|
19
|
+
* See the dedup diagnosis in:
|
|
20
|
+
* ai/product/plans-prds/bridge/2026-04-11--cc-mini--autonomous-push-architecture.md
|
|
21
|
+
*
|
|
10
22
|
* Zero external dependencies beyond node:fs and node:path.
|
|
11
23
|
*/
|
|
12
24
|
|
|
13
|
-
import { existsSync, readFileSync, readdirSync } from 'node:fs';
|
|
25
|
+
import { existsSync, readFileSync, readdirSync, writeFileSync } from 'node:fs';
|
|
14
26
|
import { join, basename } from 'node:path';
|
|
15
27
|
import { homedir } from 'node:os';
|
|
16
28
|
|
|
@@ -29,6 +41,25 @@ function readJSON(path) {
|
|
|
29
41
|
}
|
|
30
42
|
}
|
|
31
43
|
|
|
44
|
+
/**
 * Flip a message file's `read` field to true so the rewake hook and
 * future UserPromptSubmit invocations skip it. Idempotent and best
 * effort: all failures are swallowed because they are not fatal — the
 * worst case is re-surfacing the message one extra time, which is
 * exactly the old (pre-alpha.31) behavior.
 */
function markRead(filePath) {
  try {
    const msg = readJSON(filePath);
    if (!msg || msg.read === true) return;
    msg.read = true;
    writeFileSync(filePath, JSON.stringify(msg, null, 2) + '\n');
  } catch {
    // Best effort only; never fatal.
  }
}
|
|
62
|
+
|
|
32
63
|
function getAgentId() {
|
|
33
64
|
// Try LDM config first
|
|
34
65
|
const config = readJSON(LDM_CONFIG_PATH);
|
|
@@ -141,7 +172,8 @@ async function main() {
|
|
|
141
172
|
const seen = new Set();
|
|
142
173
|
|
|
143
174
|
for (const file of files) {
|
|
144
|
-
const
|
|
175
|
+
const fullPath = join(MESSAGES_DIR, file);
|
|
176
|
+
const data = readJSON(fullPath);
|
|
145
177
|
if (!data) continue;
|
|
146
178
|
|
|
147
179
|
// Skip already-read messages (if the field exists)
|
|
@@ -155,6 +187,12 @@ async function main() {
|
|
|
155
187
|
if (data.id) seen.add(data.id);
|
|
156
188
|
|
|
157
189
|
pending.push(data);
|
|
190
|
+
|
|
191
|
+
// Mark the message read on disk so the rewake hook (layer 1) does
|
|
192
|
+
// not re-deliver it on the next Stop event and cost another model
|
|
193
|
+
// turn. This was the dedup race observed and reported by the canary
|
|
194
|
+
// session during the alpha.30 autonomous-push test.
|
|
195
|
+
markRead(fullPath);
|
|
158
196
|
}
|
|
159
197
|
|
|
160
198
|
// Fast exit if nothing pending
|
|
@@ -195,19 +195,62 @@ function markRead(filePath) {
|
|
|
195
195
|
}
|
|
196
196
|
}
|
|
197
197
|
|
|
198
|
-
|
|
199
|
-
|
|
198
|
+
/**
 * Batch fire: mark every matching pending file read, then emit one
 * combined payload on stderr and exit with code 2. A single wake cycle
 * therefore surfaces every message pending at scan time, rather than
 * one wake (= one model turn) per message, which adds up fast under
 * load. Shipped in alpha.31 after the canary session reported "each
 * message = one wake = one Opus turn" during the alpha.30
 * autonomous-push test.
 *
 * Messages are sorted oldest-first so the reader sees original arrival
 * order. The read flags are persisted BEFORE any stderr output, so a
 * mid-fire death (SIGKILL, crash) cannot cause the same batch to be
 * re-delivered on the next wake.
 *
 * Does not return: releases the lock and calls process.exit(2).
 */
function fireBatch(matches, lockPath, agentId, sessionName) {
  // Persist read flags first — atomicity against mid-fire death.
  matches.forEach(({ filePath }) => markRead(filePath));

  // Oldest first (missing timestamps sort as empty strings).
  matches.sort((a, b) =>
    (a.data.timestamp || '').localeCompare(b.data.timestamp || ''),
  );

  const count = matches.length;
  const plural = count > 1 ? 's' : '';

  const header =
    `== Bridge Push (autonomous) ==\n` +
    `You have ${count} new message${plural} delivered by the inbox-rewake ` +
    `hook while you were idle. They are addressed to ${agentId}:${sessionName} and ` +
    `are now marked read in the inbox.\n\n`;

  const sections = [];
  for (const { data: m } of matches) {
    const heading =
      `[${m.type || 'chat'}] from ${m.from || 'unknown'} ` +
      `(${m.timestamp || 'no timestamp'}):`;
    sections.push(`${heading}\n${m.body || '(empty)'}`);
  }

  const footer =
    `\n\nAcknowledge or respond as appropriate. Use lesa_check_inbox or ` +
    `ldm_send_message to continue the thread.`;

  process.stderr.write(header + sections.join('\n\n---\n\n') + footer);

  // Diagnostic trailer: at most five ids, plus an overflow count.
  const idList = matches
    .slice(0, 5)
    .map((m) => m.data.id || '(no id)')
    .join(', ');
  const trailer = count > 5 ? ` (+${count - 5} more)` : '';
  process.stderr.write(
    `\n${TAG} fired for ${count} message${plural} to ${agentId}:${sessionName}: ${idList}${trailer}\n`,
  );

  releaseLock(lockPath);
  process.exit(2);
}
|
@@ -261,31 +304,38 @@ async function main() {
|
|
|
261
304
|
// writing `read: true` to disk and the next scan picking it up.
|
|
262
305
|
const seen = new Set();
|
|
263
306
|
|
|
264
|
-
function
|
|
265
|
-
const
|
|
266
|
-
if (!data) return false;
|
|
267
|
-
if (data.read === true) return false;
|
|
268
|
-
if (data.id && seen.has(data.id)) return false;
|
|
269
|
-
if (!messageMatchesAgent(data.to, agentId, sessionName)) return false;
|
|
270
|
-
if (data.id) seen.add(data.id);
|
|
271
|
-
|
|
272
|
-
fireMessage(data, filePath, lockPath, agentId, sessionName);
|
|
273
|
-
return true; // fireMessage exits, but be explicit.
|
|
274
|
-
}
|
|
275
|
-
|
|
276
|
-
function scanDir() {
|
|
307
|
+
// Scan MESSAGES_DIR for unread messages addressed to this agent/session
// that we have not already surfaced (per the `seen` id set). Returns an
// array of { data, filePath } pairs; a missing or unreadable directory
// yields an empty batch (best effort).
function collectPending() {
  const pending = [];
  try {
    for (const file of readdirSync(MESSAGES_DIR)) {
      if (!file.endsWith('.json')) continue;
      const filePath = join(MESSAGES_DIR, file);
      const data = readJSON(filePath);
      if (!data || data.read === true) continue;
      if (data.id && seen.has(data.id)) continue;
      if (!messageMatchesAgent(data.to, agentId, sessionName)) continue;
      if (data.id) seen.add(data.id);
      pending.push({ data, filePath });
    }
  } catch {}
  return pending;
}
|
|
324
|
+
|
|
325
|
+
// Collect pending messages and, if any exist, hand them to fireBatch.
// fireBatch marks them read, writes stderr, releases the lock, and
// exits the process — control never returns here when a batch fires.
function scanAndFire() {
  const batch = collectPending();
  if (batch.length === 0) return;
  fireBatch(batch, lockPath, agentId, sessionName);
}
|
|
285
333
|
|
|
286
334
|
// Initial scan: catch any messages that arrived between the previous
|
|
287
|
-
// hook instance exiting and this one starting up.
|
|
288
|
-
|
|
335
|
+
// hook instance exiting and this one starting up. If any match, we
|
|
336
|
+
// fire immediately and exit; the caller never sees this function
|
|
337
|
+
// return.
|
|
338
|
+
scanAndFire();
|
|
289
339
|
|
|
290
340
|
// Set up the fs.watch for new messages.
|
|
291
341
|
let watcher;
|
|
@@ -295,7 +345,7 @@ async function main() {
|
|
|
295
345
|
// Re-scan on every event. fs.watch can coalesce or miss events
|
|
296
346
|
// under load, so scanning the directory is more reliable than
|
|
297
347
|
// trusting the filename argument alone.
|
|
298
|
-
|
|
348
|
+
scanAndFire();
|
|
299
349
|
});
|
|
300
350
|
} catch (e) {
|
|
301
351
|
process.stderr.write(`${TAG} fs.watch failed: ${e.message}\n`);
|