@wipcomputer/wip-ldm-os 0.4.73-alpha.9 → 0.4.75-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +52 -0
- package/SKILL.md +8 -1
- package/bin/ldm.js +600 -81
- package/dist/bridge/chunk-3RG5ZIWI.js +10 -0
- package/dist/bridge/{chunk-LF7EMFBY.js → chunk-7NH6JBIO.js} +127 -49
- package/dist/bridge/cli.js +2 -1
- package/dist/bridge/core.d.ts +13 -1
- package/dist/bridge/core.js +4 -1
- package/dist/bridge/mcp-server.js +52 -7
- package/dist/bridge/openclaw.d.ts +5 -0
- package/dist/bridge/openclaw.js +11 -0
- package/docs/bridge/TECHNICAL.md +86 -0
- package/docs/doc-pipeline/README.md +74 -0
- package/docs/doc-pipeline/TECHNICAL.md +79 -0
- package/lib/deploy.mjs +175 -13
- package/lib/detect.mjs +20 -6
- package/package.json +2 -2
- package/shared/docs/README.md.tmpl +2 -2
- package/shared/docs/dev-guide-wipcomputerinc.md.tmpl +378 -0
- package/shared/docs/how-releases-work.md.tmpl +3 -1
- package/shared/docs/how-worktrees-work.md.tmpl +12 -7
- package/shared/rules/git-conventions.md +3 -3
- package/shared/rules/release-pipeline.md +1 -1
- package/shared/rules/security.md +1 -1
- package/shared/rules/workspace-boundaries.md +1 -1
- package/shared/rules/writing-style.md +1 -1
- package/shared/templates/claude-md-level1.md +7 -3
- package/src/bridge/core.ts +160 -56
- package/src/bridge/mcp-server.ts +93 -8
- package/src/bridge/openclaw.ts +14 -0
- package/src/hooks/inbox-check-hook.mjs +232 -0
- package/src/hooks/inbox-rewake-hook.mjs +388 -0
- package/src/hosted-mcp/.env.example +3 -0
- package/src/hosted-mcp/demo/agent.html +300 -0
- package/src/hosted-mcp/demo/agent.txt +84 -0
- package/src/hosted-mcp/demo/fallback.jpg +0 -0
- package/src/hosted-mcp/demo/footer.js +74 -0
- package/src/hosted-mcp/demo/index.html +1303 -0
- package/src/hosted-mcp/demo/login.html +548 -0
- package/src/hosted-mcp/demo/privacy.html +223 -0
- package/src/hosted-mcp/demo/sprites.jpg +0 -0
- package/src/hosted-mcp/demo/sprites.png +0 -0
- package/src/hosted-mcp/demo/tos.html +198 -0
- package/src/hosted-mcp/deploy.sh +70 -0
- package/src/hosted-mcp/ecosystem.config.cjs +14 -0
- package/src/hosted-mcp/inbox.mjs +64 -0
- package/src/hosted-mcp/legal/internet-services/terms/site.html +205 -0
- package/src/hosted-mcp/legal/privacy/en-ww/index.html +230 -0
- package/src/hosted-mcp/nginx/mcp-oauth.conf +98 -0
- package/src/hosted-mcp/nginx/mcp-server.conf +17 -0
- package/src/hosted-mcp/nginx/wip.computer.conf +45 -0
- package/src/hosted-mcp/package-lock.json +2092 -0
- package/src/hosted-mcp/package.json +23 -0
- package/src/hosted-mcp/prisma/migrations/20260406233014_init/migration.sql +68 -0
- package/src/hosted-mcp/prisma/migrations/migration_lock.toml +3 -0
- package/src/hosted-mcp/prisma/schema.prisma +57 -0
- package/src/hosted-mcp/prisma.config.ts +14 -0
- package/src/hosted-mcp/server.mjs +2093 -0
- package/src/hosted-mcp/shared/kaleidoscope.css +139 -0
- package/src/hosted-mcp/shared/kaleidoscope.js +192 -0
- package/src/hosted-mcp/tools.mjs +73 -0
- package/templates/hooks/pre-commit +5 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* LDM OS Inbox Check Hook
|
|
4
|
+
* UserPromptSubmit hook for Claude Code.
|
|
5
|
+
* Scans ~/.ldm/messages/ for pending messages addressed to this agent
|
|
6
|
+
* and surfaces them as additionalContext before CC responds.
|
|
7
|
+
*
|
|
8
|
+
* Follows guard.mjs pattern: stdin JSON in, stdout JSON out, exit 0 always.
|
|
9
|
+
*
|
|
10
|
+
* As of alpha.31 this hook DOES mark messages as `read: true` after
|
|
11
|
+
* surfacing them. Previously we deferred draining to `lesa_check_inbox`,
|
|
12
|
+
* but that caused a dedup race with `inbox-rewake-hook.mjs`: if layer 2
|
|
13
|
+
* (this hook) surfaced a message without marking it read, then layer 1
|
|
14
|
+
* (the rewake Stop hook) would fire on the same unread message on the
|
|
15
|
+
* next Stop event and re-deliver it, costing another model turn. Marking
|
|
16
|
+
* read here makes the two layers cooperative ... each unread message
|
|
17
|
+
* surfaces exactly once regardless of which layer catches it first.
|
|
18
|
+
*
|
|
19
|
+
* See the dedup diagnosis in:
|
|
20
|
+
* ai/product/plans-prds/bridge/2026-04-11--cc-mini--autonomous-push-architecture.md
|
|
21
|
+
*
|
|
22
|
+
* Zero external dependencies beyond node:fs and node:path.
|
|
23
|
+
*/
|
|
24
|
+
|
|
25
|
+
import { existsSync, readFileSync, readdirSync, writeFileSync } from 'node:fs';
|
|
26
|
+
import { join, basename } from 'node:path';
|
|
27
|
+
import { homedir } from 'node:os';
|
|
28
|
+
|
|
29
|
+
// Hook working set: the LDM message spool and config under the user's home.
const HOME = homedir();
const MESSAGES_DIR = join(HOME, '.ldm', 'messages');        // message spool scanned on every prompt
const LDM_CONFIG_PATH = join(HOME, '.ldm', 'config.json');  // agent registry (see getAgentId)
const TAG = '[inbox-check-hook]';                           // stderr log prefix
|
|
33
|
+
|
|
34
|
+
// ── Helpers ──
|
|
35
|
+
|
|
36
|
+
/**
 * Parse a JSON file, returning null when the file is missing,
 * unreadable, or not valid JSON.
 */
function readJSON(path) {
  let parsed = null;
  try {
    parsed = JSON.parse(readFileSync(path, 'utf8'));
  } catch {
    // Unreadable or malformed file — treat as absent.
  }
  return parsed;
}
|
|
43
|
+
|
|
44
|
+
/**
 * Flag a message file as read (`read: true`) so the rewake hook and
 * future UserPromptSubmit invocations skip it. Idempotent and best
 * effort; failures are swallowed because they are not fatal ... the
 * worst case is that we re-surface the message once more, which is
 * the old (pre-alpha.31) behavior.
 */
function markRead(filePath) {
  try {
    const message = readJSON(filePath);
    if (!message || message.read === true) return;
    message.read = true;
    writeFileSync(filePath, `${JSON.stringify(message, null, 2)}\n`);
  } catch {
    // Best effort — see docstring.
  }
}
|
|
62
|
+
|
|
63
|
+
/**
 * Resolve this machine's agent ID from the LDM config: the first
 * configured agent whose harness is claude-code wins; "cc-mini" is
 * the fallback when no config or no match exists.
 */
function getAgentId() {
  const config = readJSON(LDM_CONFIG_PATH);
  const entries = config?.agents ? Object.entries(config.agents) : [];
  const match = entries.find(([, agent]) => agent.harness === 'claude-code');
  return match ? match[0] : 'cc-mini';
}
|
|
74
|
+
|
|
75
|
+
/**
 * Resolve the current session label, in priority order:
 *   1. CC session file for the parent PID — CC writes /rename labels to
 *      ~/.claude/sessions/<pid>.json, and this hook is spawned fresh by
 *      CC each time, so ppid = CC PID. Reading the file picks up
 *      /rename and /resume labels without any env var or restart.
 *   2. Env var override (LDM_SESSION_NAME / CLAUDE_SESSION_NAME).
 *   3. CWD basename fallback.
 *   4. Literal 'default'.
 */
function getSessionName(input) {
  try {
    const sessionFile = join(HOME, '.claude', 'sessions', `${process.ppid}.json`);
    const session = JSON.parse(readFileSync(sessionFile, 'utf8'));
    if (typeof session.name === 'string' && session.name) {
      return session.name;
    }
  } catch {
    // No session file. Normal for non-CC harnesses.
  }

  const fallbackCwd = input?.cwd || process.cwd();
  return (
    process.env.LDM_SESSION_NAME ||
    process.env.CLAUDE_SESSION_NAME ||
    basename(fallbackCwd) ||
    'default'
  );
}
|
|
101
|
+
|
|
102
|
+
/**
 * Decide whether a message "to" field targets this agent/session.
 * Supported targets:
 *   - "*" or "all" ... broadcast to everyone
 *   - exact agent ID (e.g. "cc-mini")
 *   - agent:* (e.g. "cc-mini:*" ... all sessions of this agent)
 *   - agent:session (e.g. "cc-mini:my-session")
 *   - exact session name match
 */
function messageMatchesAgent(to, agentId, sessionName) {
  if (!to) return false;
  const accepted = [
    '*',
    'all',
    agentId,
    `${agentId}:*`,
    `${agentId}:${sessionName}`,
    sessionName,
  ];
  return accepted.includes(to);
}
|
|
131
|
+
|
|
132
|
+
// ── Main ──
|
|
133
|
+
|
|
134
|
+
/**
 * Entry point. Protocol: read the hook JSON from stdin, scan
 * ~/.ldm/messages/ for unread messages addressed to this agent:session,
 * surface them as UserPromptSubmit additionalContext on stdout, and
 * always exit 0 — every failure path degrades to "no context" ({}).
 */
async function main() {
  let raw = '';
  for await (const chunk of process.stdin) raw += chunk;

  let input;
  try {
    input = JSON.parse(raw);
  } catch {
    // Bad input... exit clean with no context
    process.stdout.write(JSON.stringify({}));
    process.exit(0);
  }

  // Fast exit if messages dir doesn't exist
  if (!existsSync(MESSAGES_DIR)) {
    process.stdout.write(JSON.stringify({}));
    process.exit(0);
  }

  const agentId = getAgentId();
  const sessionName = getSessionName(input);

  // Scan for pending messages
  let files;
  try {
    files = readdirSync(MESSAGES_DIR).filter(f => f.endsWith('.json'));
  } catch {
    process.stdout.write(JSON.stringify({}));
    process.exit(0);
  }

  // Fast exit if no message files
  if (files.length === 0) {
    process.stdout.write(JSON.stringify({}));
    process.exit(0);
  }

  const pending = [];
  const seen = new Set();

  for (const file of files) {
    const fullPath = join(MESSAGES_DIR, file);
    const data = readJSON(fullPath);
    if (!data) continue;

    // Skip already-read messages (if the field exists)
    if (data.read === true) continue;

    // Check if addressed to us
    if (!messageMatchesAgent(data.to, agentId, sessionName)) continue;

    // Deduplicate by message ID
    if (data.id && seen.has(data.id)) continue;
    if (data.id) seen.add(data.id);

    pending.push(data);

    // Mark the message read on disk so the rewake hook (layer 1) does
    // not re-deliver it on the next Stop event and cost another model
    // turn. This was the dedup race observed and reported by the canary
    // session during the alpha.30 autonomous-push test.
    markRead(fullPath);
  }

  // Fast exit if nothing pending
  if (pending.length === 0) {
    process.stdout.write(JSON.stringify({}));
    process.exit(0);
  }

  // Sort by timestamp (oldest first)
  pending.sort((a, b) => (a.timestamp || '').localeCompare(b.timestamp || ''));

  // Format output
  const msgLines = pending
    .map(m => `[${m.type || 'chat'}] from ${m.from || 'unknown'} (${m.timestamp || 'no timestamp'}):\n ${m.body || '(empty)'}`)
    .join('\n\n');

  // NOTE(review): this text still instructs the model to mark messages
  // read via lesa_check_inbox, but markRead() above has already flagged
  // them read on disk — consider rewording on a future pass.
  const additionalContext =
    `== Pending Messages (${pending.length}) ==\n` +
    `You have ${pending.length} unread message(s). Review them and respond if needed. Use lesa_check_inbox to mark as read when done.\n\n` +
    msgLines;

  const output = {
    hookSpecificOutput: {
      hookEventName: 'UserPromptSubmit',
      additionalContext,
    },
  };

  process.stdout.write(JSON.stringify(output));
  process.stderr.write(`${TAG} ${pending.length} pending message(s) for ${agentId}:${sessionName}\n`);
  process.exit(0);
}

// Any unexpected failure still produces valid hook output and exit 0.
main().catch(() => {
  process.stdout.write(JSON.stringify({}));
  process.exit(0);
});
|
|
@@ -0,0 +1,388 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* LDM OS Inbox Rewake Hook
|
|
4
|
+
*
|
|
5
|
+
* asyncRewake background hook for Claude Code. Watches ~/.ldm/messages/
|
|
6
|
+
* with fs.watch and, when a new message addressed to this agent:session
|
|
7
|
+
* arrives, writes the message as a system-reminder to stderr and exits
|
|
8
|
+
* with code 2. Claude Code's harness wraps the stderr in a system-reminder
|
|
9
|
+
* task-notification that wakes the model if idle or gets injected mid-query
|
|
10
|
+
* if the model is busy. See:
|
|
11
|
+
*
|
|
12
|
+
* ai/product/plans-prds/bridge/2026-04-11--cc-mini--autonomous-push-architecture.md
|
|
13
|
+
*
|
|
14
|
+
* Attached as a Stop hook with asyncRewake: true. Fires after every CC
|
|
15
|
+
* turn. A lockfile prevents multiple instances from stacking across many
|
|
16
|
+
* Stop events: only the first instance acquires the lock and watches;
|
|
17
|
+
* subsequent instances see the lock held by a live process and exit 0
|
|
18
|
+
* silently. The lock is released when the watching instance exits
|
|
19
|
+
* (message caught, parent dead, hard timeout, or hard cancel).
|
|
20
|
+
*
|
|
21
|
+
* This hook is the "true push" layer 1 from the plan. Layers 2-4
|
|
22
|
+
* (UserPromptSubmit inbox-check hook, SessionStart boot hook, manual
|
|
23
|
+
* lesa_check_inbox) remain as independent fallbacks.
|
|
24
|
+
*
|
|
25
|
+
* Idempotent with inbox-check-hook.mjs: after firing, this hook marks
|
|
26
|
+
* the message file's `read` field to true so the UserPromptSubmit hook
|
|
27
|
+
* on the next user prompt does not re-surface the same message.
|
|
28
|
+
*
|
|
29
|
+
* Zero external dependencies beyond node:fs, node:path, node:os.
|
|
30
|
+
*/
|
|
31
|
+
|
|
32
|
+
import {
|
|
33
|
+
existsSync,
|
|
34
|
+
readFileSync,
|
|
35
|
+
readdirSync,
|
|
36
|
+
writeFileSync,
|
|
37
|
+
unlinkSync,
|
|
38
|
+
watch,
|
|
39
|
+
} from 'node:fs';
|
|
40
|
+
import { join, basename } from 'node:path';
|
|
41
|
+
import { homedir } from 'node:os';
|
|
42
|
+
|
|
43
|
+
const HOME = homedir();
const MESSAGES_DIR = join(HOME, '.ldm', 'messages');
// NOTE(review): LOCK_PATH appears unused in this file — the actual lock
// files are built per agent:session by lockPathFor(). Possibly a legacy
// constant; confirm before removing.
const LOCK_PATH = join(MESSAGES_DIR, '.rewake.lock');
const LDM_CONFIG_PATH = join(HOME, '.ldm', 'config.json');
const TAG = '[inbox-rewake-hook]';   // stderr log prefix

// Hard safety timeout: the watcher exits after 6 hours no matter what,
// so it cannot leak forever if the parent check or lock cleanup misses.
const HARD_TIMEOUT_MS = 6 * 60 * 60 * 1000;

// Parent CC process liveness check: every minute, verify the parent PID
// is still alive. If the parent died while this background hook is
// running, exit cleanly so we do not orphan.
const PARENT_CHECK_INTERVAL_MS = 60 * 1000;
|
|
57
|
+
|
|
58
|
+
// ── Helpers (mirror inbox-check-hook.mjs) ──
|
|
59
|
+
|
|
60
|
+
/**
 * Parse a JSON file, returning null when the file is missing,
 * unreadable, or not valid JSON. (Mirrors inbox-check-hook.mjs.)
 */
function readJSON(path) {
  let parsed = null;
  try {
    parsed = JSON.parse(readFileSync(path, 'utf8'));
  } catch {
    // Unreadable or malformed file — treat as absent.
  }
  return parsed;
}
|
|
67
|
+
|
|
68
|
+
/**
 * Resolve this machine's agent ID from the LDM config: the first
 * configured agent whose harness is claude-code wins; "cc-mini" is
 * the fallback. (Mirrors inbox-check-hook.mjs.)
 */
function getAgentId() {
  const config = readJSON(LDM_CONFIG_PATH);
  const entries = config?.agents ? Object.entries(config.agents) : [];
  const match = entries.find(([, agent]) => agent.harness === 'claude-code');
  return match ? match[0] : 'cc-mini';
}
|
|
77
|
+
|
|
78
|
+
/**
 * Resolve the current session label. Mirrors inbox-check-hook.mjs:
 * CC writes /rename labels to ~/.claude/sessions/<pid>.json, and this
 * hook is spawned fresh by CC so ppid = CC PID. Reading the session
 * file picks up /rename and /resume labels without any env var or
 * restart; env vars, CWD basename, and 'default' are the fallbacks.
 */
function getSessionName(input) {
  try {
    const sessionFile = join(HOME, '.claude', 'sessions', `${process.ppid}.json`);
    const session = JSON.parse(readFileSync(sessionFile, 'utf8'));
    if (typeof session.name === 'string' && session.name) {
      return session.name;
    }
  } catch {
    // No session file. Normal for non-CC harnesses.
  }

  const fallbackCwd = input?.cwd || process.cwd();
  return (
    process.env.LDM_SESSION_NAME ||
    process.env.CLAUDE_SESSION_NAME ||
    basename(fallbackCwd) ||
    'default'
  );
}
|
|
99
|
+
|
|
100
|
+
/**
 * Check if a message's "to" field matches this agent:session.
 * Same logic as inbox-check-hook.mjs so both hooks agree on routing:
 * broadcast ("*"/"all"), exact agent ID, "<agent>:*" wildcard,
 * "<agent>:<session>", or bare session name.
 */
function messageMatchesAgent(to, agentId, sessionName) {
  if (!to) return false;
  const accepted = [
    '*',
    'all',
    agentId,
    `${agentId}:*`,
    `${agentId}:${sessionName}`,
    sessionName,
  ];
  return accepted.includes(to);
}
|
|
113
|
+
|
|
114
|
+
// ── Lock management ──
|
|
115
|
+
//
|
|
116
|
+
// A single machine may have seven or more concurrent CC sessions, each
|
|
117
|
+
// spawning its own rewake hook on every Stop event. Without a lock, we
|
|
118
|
+
// accumulate fs.watch handles forever and every new message fires all
|
|
119
|
+
// pending hooks simultaneously.
|
|
120
|
+
//
|
|
121
|
+
// The lock is per-session, keyed by agent:session, so different CC
|
|
122
|
+
// sessions do not block each other. The lock file lives at
|
|
123
|
+
// ~/.ldm/messages/.rewake.<agent>-<session>.lock and contains the PID
|
|
124
|
+
// of the watching process. Subsequent hook spawns in the same session
|
|
125
|
+
// see the lock, verify the PID is alive, and exit silently.
|
|
126
|
+
|
|
127
|
+
/**
 * Build the per-session lock file path. Sanitizes the agent/session
 * pair — keeping alphanumerics, dots, dashes, underscores — so session
 * names like "memory:crystal" or "brainstorm (oc)" do not produce
 * invalid filenames.
 */
function lockPathFor(agentId, sessionName) {
  const sanitized = `${agentId}-${sessionName}`.replace(/[^\w.-]/g, '-');
  return join(MESSAGES_DIR, `.rewake.${sanitized}.lock`);
}
|
|
134
|
+
|
|
135
|
+
/**
 * Probe whether a PID refers to a live process. Signal 0 delivers
 * nothing but performs the existence/permission check; a throw means
 * the process is gone (or unreachable, which we treat the same).
 */
function pidIsAlive(pid) {
  try {
    process.kill(pid, 0);
  } catch {
    return false;
  }
  return true;
}
|
|
143
|
+
|
|
144
|
+
/**
 * Try to take the per-session rewake lock.
 *
 * Returns true when this process now owns the lock (its PID is written
 * to the lock file), false when a live process already holds it or the
 * write fails. A lock whose recorded PID is dead is treated as stale
 * and taken over.
 *
 * The final write uses the 'wx' (exclusive-create) flag so two hook
 * instances racing through the stale-check cannot both think they won:
 * whichever creates the file first succeeds, the other gets EEXIST and
 * returns false. (The original check-then-write had a TOCTOU window.)
 */
function acquireLock(lockPath) {
  try {
    if (existsSync(lockPath)) {
      const raw = readFileSync(lockPath, 'utf8').trim();
      const existing = parseInt(raw, 10);
      if (existing && existing > 0 && pidIsAlive(existing)) {
        return false;
      }
      // Stale lock: previous holder is dead, take over.
      try { unlinkSync(lockPath); } catch {}
    }
  } catch {}

  try {
    // 'wx' fails with EEXIST if another process recreated the file
    // between our check above and this write.
    writeFileSync(lockPath, String(process.pid), { flag: 'wx' });
    return true;
  } catch {
    return false;
  }
}
|
|
164
|
+
|
|
165
|
+
/**
 * Delete the lock file, but only when this process owns it (the file
 * content is our own PID). Best effort; all failures are swallowed so
 * release never interferes with an exit path.
 */
function releaseLock(lockPath) {
  try {
    if (!existsSync(lockPath)) return;
    const owner = parseInt(readFileSync(lockPath, 'utf8').trim(), 10);
    if (owner !== process.pid) return;
    unlinkSync(lockPath);
  } catch {
    // Best effort.
  }
}
|
|
175
|
+
|
|
176
|
+
// ── Message delivery ──
|
|
177
|
+
//
|
|
178
|
+
// When a match is found, we:
|
|
179
|
+
// 1. Mark the file's `read` field to true (so inbox-check-hook.mjs on
|
|
180
|
+
// the next UserPromptSubmit does not re-surface it).
|
|
181
|
+
// 2. Write the formatted message body to stderr.
|
|
182
|
+
// 3. Release the lock.
|
|
183
|
+
// 4. process.exit(2) to trigger Claude Code's asyncRewake wake path.
|
|
184
|
+
|
|
185
|
+
/**
 * Mark a message file's `read` field true so the UserPromptSubmit
 * inbox-check hook does not re-surface the same message on the next
 * user prompt.
 *
 * Idempotent: a file already flagged `read: true` is left untouched
 * (matching the inbox-check-hook.mjs implementation). This also avoids
 * a redundant write into MESSAGES_DIR — the very directory fs.watch is
 * watching — which would trigger a needless re-scan event.
 *
 * Best effort; failures are swallowed. Worst case: inbox-check-hook
 * surfaces the same message on the next UserPromptSubmit. That is
 * still correct behavior; it just means the same message gets two
 * surfaces.
 */
function markRead(filePath) {
  try {
    const data = JSON.parse(readFileSync(filePath, 'utf8'));
    if (!data) return;
    if (data.read === true) return; // already flagged — skip the write
    data.read = true;
    writeFileSync(filePath, JSON.stringify(data, null, 2) + '\n');
  } catch {
    // Non-fatal.
  }
}
|
|
197
|
+
|
|
198
|
+
/**
 * Batch fire: mark every matching pending file read, then write a
 * single combined payload to stderr and exit code 2. One wake cycle
 * surfaces every message that was pending at the moment we scanned,
 * instead of one wake per message (which costs one model turn each
 * and adds up quickly under load). Shipped in alpha.31 after the
 * canary session reported "each message = one wake = one Opus turn"
 * during the alpha.30 autonomous-push test.
 *
 * Messages are sorted oldest first before output so the reader sees
 * them in the original arrival order.
 *
 * Marks are written before stderr output so that if the process dies
 * mid-fire (SIGKILL, crash), the files are still flagged read and we
 * do not re-deliver the same batch on the next wake.
 *
 * Does not return: always ends with process.exit(2), the asyncRewake
 * wake signal.
 */
function fireBatch(matches, lockPath, agentId, sessionName) {
  // Mark read first for atomicity against mid-fire death.
  for (const { filePath } of matches) markRead(filePath);

  // In-place sort is fine: the caller never reuses `matches` after
  // this call (we exit below).
  matches.sort((a, b) => {
    const ta = a.data.timestamp || '';
    const tb = b.data.timestamp || '';
    return ta.localeCompare(tb);
  });

  const plural = matches.length > 1 ? 's' : '';
  const header =
    `== Bridge Push (autonomous) ==\n` +
    `You have ${matches.length} new message${plural} delivered by the inbox-rewake ` +
    `hook while you were idle. They are addressed to ${agentId}:${sessionName} and ` +
    `are now marked read in the inbox.\n\n`;

  const body = matches
    .map(({ data: m }) => {
      const h =
        `[${m.type || 'chat'}] from ${m.from || 'unknown'} ` +
        `(${m.timestamp || 'no timestamp'}):`;
      return `${h}\n${m.body || '(empty)'}`;
    })
    .join('\n\n---\n\n');

  const footer =
    `\n\nAcknowledge or respond as appropriate. Use lesa_check_inbox or ` +
    `ldm_send_message to continue the thread.`;

  // stderr carries the payload: CC's asyncRewake wraps it in a
  // system-reminder when we exit 2.
  process.stderr.write(header + body + footer);

  // Diagnostic trailer: at most five message IDs, then a count.
  const idList = matches
    .map((m) => m.data.id || '(no id)')
    .slice(0, 5)
    .join(', ');
  const trailer = matches.length > 5 ? ` (+${matches.length - 5} more)` : '';
  process.stderr.write(
    `\n${TAG} fired for ${matches.length} message${plural} to ${agentId}:${sessionName}: ${idList}${trailer}\n`,
  );

  releaseLock(lockPath);
  process.exit(2);
}
|
|
258
|
+
|
|
259
|
+
// ── Main ──
|
|
260
|
+
|
|
261
|
+
/**
 * Entry point. Acquires the per-session lock, does one initial inbox
 * scan, then watches MESSAGES_DIR until a matching message arrives
 * (exit 2 via fireBatch), the parent CC process dies, the hard timeout
 * elapses, or a signal arrives (all exit 0 after releasing the lock).
 */
async function main() {
  // Drain stdin even if we ignore it. Hooks receive JSON per the CC
  // hook protocol; we do not need any of the fields here.
  let raw = '';
  try {
    for await (const chunk of process.stdin) raw += chunk;
  } catch {}

  let input = {};
  try { input = JSON.parse(raw); } catch {}

  if (!existsSync(MESSAGES_DIR)) {
    // Nothing to watch. Exit silently. A future install will recreate.
    process.exit(0);
  }

  const agentId = getAgentId();
  const sessionName = getSessionName(input);
  const lockPath = lockPathFor(agentId, sessionName);
  const parentPid = process.ppid;

  // Try to take the lock. If another instance of this session's rewake
  // hook is already watching, exit silently and let them handle it.
  if (!acquireLock(lockPath)) {
    process.stderr.write(
      `${TAG} another live instance holds ${basename(lockPath)}, exiting\n`,
    );
    process.exit(0);
  }

  // Guarantee we release the lock on any exit path.
  process.on('exit', () => releaseLock(lockPath));
  process.on('SIGTERM', () => { releaseLock(lockPath); process.exit(0); });
  process.on('SIGINT', () => { releaseLock(lockPath); process.exit(0); });
  process.on('uncaughtException', (err) => {
    process.stderr.write(`${TAG} uncaughtException: ${err.message}\n`);
    releaseLock(lockPath);
    process.exit(0);
  });

  // Track which message IDs we have already fired for in this run.
  // fs.watch can fire multiple events for a single file write; the
  // in-memory set prevents duplicate firings in the window between
  // writing `read: true` to disk and the next scan picking it up.
  // NOTE(review): since fireBatch exits the process on any match, this
  // set only matters when markRead fails to persist `read: true` — it
  // then stops the same id from re-firing within this instance.
  const seen = new Set();

  // One spool scan: every unread message addressed to this
  // agent:session that we have not already fired for.
  function collectPending() {
    const matches = [];
    try {
      const files = readdirSync(MESSAGES_DIR).filter((f) => f.endsWith('.json'));
      for (const file of files) {
        const filePath = join(MESSAGES_DIR, file);
        const data = readJSON(filePath);
        if (!data) continue;
        if (data.read === true) continue;
        if (data.id && seen.has(data.id)) continue;
        if (!messageMatchesAgent(data.to, agentId, sessionName)) continue;
        if (data.id) seen.add(data.id);
        matches.push({ data, filePath });
      }
    } catch {}
    return matches;
  }

  function scanAndFire() {
    const matches = collectPending();
    if (matches.length > 0) {
      // fireBatch marks read, writes stderr, releases lock, and exits
      // the process. Control does not return.
      fireBatch(matches, lockPath, agentId, sessionName);
    }
  }

  // Initial scan: catch any messages that arrived between the previous
  // hook instance exiting and this one starting up. If any match, we
  // fire immediately and exit; the caller never sees this function
  // return.
  scanAndFire();

  // Set up the fs.watch for new messages.
  let watcher;
  try {
    watcher = watch(MESSAGES_DIR, { persistent: true }, (eventType, filename) => {
      if (!filename || !filename.endsWith('.json')) return;
      // Re-scan on every event. fs.watch can coalesce or miss events
      // under load, so scanning the directory is more reliable than
      // trusting the filename argument alone.
      scanAndFire();
    });
  } catch (e) {
    process.stderr.write(`${TAG} fs.watch failed: ${e.message}\n`);
    releaseLock(lockPath);
    process.exit(0);
  }

  // Parent process liveness check. If the CC session that spawned us
  // has exited, stop watching and release the lock.
  const parentCheck = setInterval(() => {
    if (!pidIsAlive(parentPid)) {
      process.stderr.write(`${TAG} parent pid ${parentPid} is dead, exiting\n`);
      clearInterval(parentCheck);
      if (watcher) watcher.close();
      releaseLock(lockPath);
      process.exit(0);
    }
  }, PARENT_CHECK_INTERVAL_MS);

  // Hard safety timeout.
  const hardTimeout = setTimeout(() => {
    process.stderr.write(`${TAG} hard timeout after ${HARD_TIMEOUT_MS / 1000}s\n`);
    clearInterval(parentCheck);
    if (watcher) watcher.close();
    releaseLock(lockPath);
    process.exit(0);
  }, HARD_TIMEOUT_MS);

  // Let the timers keep the event loop alive. If either timer fires or
  // the watcher fires a message match, the process exits and releases
  // the lock.
  process.stderr.write(
    `${TAG} watching ${MESSAGES_DIR} for ${agentId}:${sessionName} (parent pid ${parentPid})\n`,
  );
}

// Fatal failures log and exit 0 (never block CC's Stop processing).
main().catch((err) => {
  process.stderr.write(`${TAG} fatal in main: ${err && err.message}\n`);
  process.exit(0);
});
|