@wipcomputer/wip-ldm-os 0.4.73-alpha.9 → 0.4.75-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. package/LICENSE +52 -0
  2. package/SKILL.md +8 -1
  3. package/bin/ldm.js +600 -81
  4. package/dist/bridge/chunk-3RG5ZIWI.js +10 -0
  5. package/dist/bridge/{chunk-LF7EMFBY.js → chunk-7NH6JBIO.js} +127 -49
  6. package/dist/bridge/cli.js +2 -1
  7. package/dist/bridge/core.d.ts +13 -1
  8. package/dist/bridge/core.js +4 -1
  9. package/dist/bridge/mcp-server.js +52 -7
  10. package/dist/bridge/openclaw.d.ts +5 -0
  11. package/dist/bridge/openclaw.js +11 -0
  12. package/docs/bridge/TECHNICAL.md +86 -0
  13. package/docs/doc-pipeline/README.md +74 -0
  14. package/docs/doc-pipeline/TECHNICAL.md +79 -0
  15. package/lib/deploy.mjs +175 -13
  16. package/lib/detect.mjs +20 -6
  17. package/package.json +2 -2
  18. package/shared/docs/README.md.tmpl +2 -2
  19. package/shared/docs/dev-guide-wipcomputerinc.md.tmpl +378 -0
  20. package/shared/docs/how-releases-work.md.tmpl +3 -1
  21. package/shared/docs/how-worktrees-work.md.tmpl +12 -7
  22. package/shared/rules/git-conventions.md +3 -3
  23. package/shared/rules/release-pipeline.md +1 -1
  24. package/shared/rules/security.md +1 -1
  25. package/shared/rules/workspace-boundaries.md +1 -1
  26. package/shared/rules/writing-style.md +1 -1
  27. package/shared/templates/claude-md-level1.md +7 -3
  28. package/src/bridge/core.ts +160 -56
  29. package/src/bridge/mcp-server.ts +93 -8
  30. package/src/bridge/openclaw.ts +14 -0
  31. package/src/hooks/inbox-check-hook.mjs +232 -0
  32. package/src/hooks/inbox-rewake-hook.mjs +388 -0
  33. package/src/hosted-mcp/.env.example +3 -0
  34. package/src/hosted-mcp/demo/agent.html +300 -0
  35. package/src/hosted-mcp/demo/agent.txt +84 -0
  36. package/src/hosted-mcp/demo/fallback.jpg +0 -0
  37. package/src/hosted-mcp/demo/footer.js +74 -0
  38. package/src/hosted-mcp/demo/index.html +1303 -0
  39. package/src/hosted-mcp/demo/login.html +548 -0
  40. package/src/hosted-mcp/demo/privacy.html +223 -0
  41. package/src/hosted-mcp/demo/sprites.jpg +0 -0
  42. package/src/hosted-mcp/demo/sprites.png +0 -0
  43. package/src/hosted-mcp/demo/tos.html +198 -0
  44. package/src/hosted-mcp/deploy.sh +70 -0
  45. package/src/hosted-mcp/ecosystem.config.cjs +14 -0
  46. package/src/hosted-mcp/inbox.mjs +64 -0
  47. package/src/hosted-mcp/legal/internet-services/terms/site.html +205 -0
  48. package/src/hosted-mcp/legal/privacy/en-ww/index.html +230 -0
  49. package/src/hosted-mcp/nginx/mcp-oauth.conf +98 -0
  50. package/src/hosted-mcp/nginx/mcp-server.conf +17 -0
  51. package/src/hosted-mcp/nginx/wip.computer.conf +45 -0
  52. package/src/hosted-mcp/package-lock.json +2092 -0
  53. package/src/hosted-mcp/package.json +23 -0
  54. package/src/hosted-mcp/prisma/migrations/20260406233014_init/migration.sql +68 -0
  55. package/src/hosted-mcp/prisma/migrations/migration_lock.toml +3 -0
  56. package/src/hosted-mcp/prisma/schema.prisma +57 -0
  57. package/src/hosted-mcp/prisma.config.ts +14 -0
  58. package/src/hosted-mcp/server.mjs +2093 -0
  59. package/src/hosted-mcp/shared/kaleidoscope.css +139 -0
  60. package/src/hosted-mcp/shared/kaleidoscope.js +192 -0
  61. package/src/hosted-mcp/tools.mjs +73 -0
  62. package/templates/hooks/pre-commit +5 -0
@@ -0,0 +1,10 @@
1
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
2
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
3
+ }) : x)(function(x) {
4
+ if (typeof require !== "undefined") return require.apply(this, arguments);
5
+ throw Error('Dynamic require of "' + x + '" is not supported');
6
+ });
7
+
8
+ export {
9
+ __require
10
+ };
@@ -1,3 +1,7 @@
1
+ import {
2
+ __require
3
+ } from "./chunk-3RG5ZIWI.js";
4
+
1
5
  // core.ts
2
6
  import { execSync, exec } from "child_process";
3
7
  import { readdirSync, readFileSync, writeFileSync, existsSync, statSync, mkdirSync, renameSync, unlinkSync } from "fs";
@@ -6,6 +10,27 @@ import { homedir } from "os";
6
10
  import { promisify } from "util";
7
11
  import { randomUUID } from "crypto";
8
12
  var execAsync = promisify(exec);
13
+ var GATEWAY_HOST = "127.0.0.1";
14
+ var DEFAULT_GATEWAY_PORT = 18789;
15
+ var DEFAULT_INBOX_PORT = 18790;
16
+ var GATEWAY_TIMEOUT_MS = 12e4;
17
+ var OP_CLI_TIMEOUT_MS = 1e4;
18
+ var EMBEDDING_API_URL = "https://api.openai.com/v1/embeddings";
19
+ var DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small";
20
+ var DEFAULT_EMBEDDING_DIMS = 1536;
21
+ var VECTOR_SEARCH_ROW_LIMIT = 1e3;
22
+ var RECENCY_DECAY_RATE = 0.01;
23
+ var RECENCY_FLOOR = 0.5;
24
+ var FRESHNESS_FRESH_DAYS = 3;
25
+ var FRESHNESS_RECENT_DAYS = 7;
26
+ var FRESHNESS_AGING_DAYS = 14;
27
+ var DEFAULT_SEARCH_LIMIT = 5;
28
+ var WORKSPACE_MAX_DEPTH = 4;
29
+ var WORKSPACE_MAX_EXCERPTS = 5;
30
+ var WORKSPACE_MAX_RESULTS = 10;
31
+ var SKILL_EXEC_TIMEOUT_MS = 12e4;
32
+ var SKILL_EXEC_MAX_BUFFER = 10 * 1024 * 1024;
33
+ var MS_PER_DAY = 1e3 * 60 * 60 * 24;
9
34
  var HOME = process.env.HOME || homedir();
10
35
  var LDM_ROOT = process.env.LDM_ROOT || join(HOME, ".ldm");
11
36
  function resolveConfig(overrides) {
@@ -14,9 +39,9 @@ function resolveConfig(overrides) {
14
39
  openclawDir,
15
40
  workspaceDir: overrides?.workspaceDir || join(openclawDir, "workspace"),
16
41
  dbPath: overrides?.dbPath || join(openclawDir, "memory", "context-embeddings.sqlite"),
17
- inboxPort: overrides?.inboxPort || parseInt(process.env.LESA_BRIDGE_INBOX_PORT || "18790", 10),
18
- embeddingModel: overrides?.embeddingModel || "text-embedding-3-small",
19
- embeddingDimensions: overrides?.embeddingDimensions || 1536
42
+ inboxPort: overrides?.inboxPort || parseInt(process.env.LESA_BRIDGE_INBOX_PORT || String(DEFAULT_INBOX_PORT), 10),
43
+ embeddingModel: overrides?.embeddingModel || DEFAULT_EMBEDDING_MODEL,
44
+ embeddingDimensions: overrides?.embeddingDimensions || DEFAULT_EMBEDDING_DIMS
20
45
  };
21
46
  }
22
47
  function resolveConfigMulti(overrides) {
@@ -29,9 +54,9 @@ function resolveConfigMulti(overrides) {
29
54
  openclawDir,
30
55
  workspaceDir: raw.workspaceDir || overrides?.workspaceDir || join(openclawDir, "workspace"),
31
56
  dbPath: raw.dbPath || overrides?.dbPath || join(openclawDir, "memory", "context-embeddings.sqlite"),
32
- inboxPort: raw.inboxPort || overrides?.inboxPort || parseInt(process.env.LESA_BRIDGE_INBOX_PORT || "18790", 10),
33
- embeddingModel: raw.embeddingModel || overrides?.embeddingModel || "text-embedding-3-small",
34
- embeddingDimensions: raw.embeddingDimensions || overrides?.embeddingDimensions || 1536
57
+ inboxPort: raw.inboxPort || overrides?.inboxPort || parseInt(process.env.LESA_BRIDGE_INBOX_PORT || String(DEFAULT_INBOX_PORT), 10),
58
+ embeddingModel: raw.embeddingModel || overrides?.embeddingModel || DEFAULT_EMBEDDING_MODEL,
59
+ embeddingDimensions: raw.embeddingDimensions || overrides?.embeddingDimensions || DEFAULT_EMBEDDING_DIMS
35
60
  };
36
61
  } catch {
37
62
  }
@@ -53,7 +78,7 @@ function resolveApiKey(openclawDir) {
53
78
  `op read "op://Agent Secrets/OpenAI API/api key" 2>/dev/null`,
54
79
  {
55
80
  env: { ...process.env, OP_SERVICE_ACCOUNT_TOKEN: saToken },
56
- timeout: 1e4,
81
+ timeout: OP_CLI_TIMEOUT_MS,
57
82
  encoding: "utf-8"
58
83
  }
59
84
  ).trim();
@@ -76,7 +101,7 @@ function resolveGatewayConfig(openclawDir) {
76
101
  }
77
102
  const config = JSON.parse(readFileSync(configPath, "utf-8"));
78
103
  const token = config?.gateway?.auth?.token;
79
- const port = config?.gateway?.port || 18789;
104
+ const port = config?.gateway?.port || DEFAULT_GATEWAY_PORT;
80
105
  if (!token) {
81
106
  throw new Error("No gateway.auth.token found in openclaw.json");
82
107
  }
@@ -94,10 +119,34 @@ function setSessionIdentity(agentId, sessionName) {
94
119
  function getSessionIdentity() {
95
120
  return { agentId: _sessionAgentId, sessionName: _sessionName };
96
121
  }
122
+ function refreshSessionIdentity() {
123
+ try {
124
+ const sessionPath = join(
125
+ process.env.HOME || __require("os").homedir(),
126
+ ".claude",
127
+ "sessions",
128
+ `${process.ppid}.json`
129
+ );
130
+ const data = JSON.parse(readFileSync(sessionPath, "utf-8"));
131
+ if (data.name && typeof data.name === "string" && data.name !== _sessionName) {
132
+ const oldName = _sessionName;
133
+ _sessionName = data.name;
134
+ try {
135
+ registerBridgeSession();
136
+ } catch {
137
+ }
138
+ if (oldName !== _sessionName) {
139
+ process.stderr.write(`wip-bridge: session name updated: ${oldName} -> ${_sessionName}
140
+ `);
141
+ }
142
+ }
143
+ } catch {
144
+ }
145
+ }
97
146
  function parseTarget(to) {
98
147
  if (to === "*") return { agent: "*", session: "*" };
99
148
  const colonIdx = to.indexOf(":");
100
- if (colonIdx === -1) return { agent: to, session: "default" };
149
+ if (colonIdx === -1) return { agent: to, session: "*" };
101
150
  return { agent: to.slice(0, colonIdx), session: to.slice(colonIdx + 1) };
102
151
  }
103
152
  function messageMatchesSession(msgTo, agentId, sessionName) {
@@ -127,6 +176,7 @@ function pushInbox(msg) {
127
176
  }
128
177
  }
129
178
  function drainInbox() {
179
+ refreshSessionIdentity();
130
180
  try {
131
181
  if (!existsSync(MESSAGES_DIR)) return [];
132
182
  const files = readdirSync(MESSAGES_DIR).filter((f) => f.endsWith(".json"));
@@ -157,6 +207,7 @@ function drainInbox() {
157
207
  }
158
208
  }
159
209
  function inboxCount() {
210
+ refreshSessionIdentity();
160
211
  try {
161
212
  if (!existsSync(MESSAGES_DIR)) return 0;
162
213
  const files = readdirSync(MESSAGES_DIR).filter((f) => f.endsWith(".json"));
@@ -262,37 +313,64 @@ async function sendMessage(openclawDir, message, options) {
262
313
  const { token, port } = resolveGatewayConfig(openclawDir);
263
314
  const agentId = options?.agentId || "main";
264
315
  const senderLabel = options?.senderLabel || "Claude Code";
265
- const response = await fetch(`http://127.0.0.1:${port}/v1/chat/completions`, {
266
- method: "POST",
267
- headers: {
268
- Authorization: `Bearer ${token}`,
269
- "Content-Type": "application/json",
270
- "x-openclaw-scopes": "operator.read,operator.write",
271
- "x-openclaw-session-key": `agent:${agentId}:main`
272
- },
273
- body: JSON.stringify({
274
- model: `openclaw/${agentId}`,
275
- messages: [
276
- {
277
- role: "user",
278
- content: `[${senderLabel}]: ${message}`
279
- }
280
- ]
281
- })
316
+ const fireAndForget = options?.fireAndForget ?? false;
317
+ const requestBody = JSON.stringify({
318
+ model: `openclaw/${agentId}`,
319
+ messages: [
320
+ {
321
+ role: "user",
322
+ content: `[${senderLabel}]: ${message}`
323
+ }
324
+ ]
282
325
  });
283
- if (!response.ok) {
284
- const body = await response.text();
285
- throw new Error(`Gateway returned ${response.status}: ${body}`);
326
+ const requestHeaders = {
327
+ Authorization: `Bearer ${token}`,
328
+ "Content-Type": "application/json",
329
+ "x-openclaw-scopes": "operator.read,operator.write",
330
+ "x-openclaw-session-key": `agent:${agentId}:main`
331
+ };
332
+ const url = `http://${GATEWAY_HOST}:${port}/v1/chat/completions`;
333
+ if (fireAndForget) {
334
+ fetch(url, {
335
+ method: "POST",
336
+ headers: requestHeaders,
337
+ body: requestBody
338
+ }).catch(() => {
339
+ });
340
+ return "Message sent (queued). Response will arrive in the TUI.";
286
341
  }
287
- const data = await response.json();
288
- const reply = data.choices?.[0]?.message?.content;
289
- if (!reply) {
290
- throw new Error("No response content from gateway");
342
+ const controller = new AbortController();
343
+ const timeoutId = setTimeout(() => controller.abort(), GATEWAY_TIMEOUT_MS);
344
+ try {
345
+ const response = await fetch(url, {
346
+ method: "POST",
347
+ headers: requestHeaders,
348
+ body: requestBody,
349
+ signal: controller.signal
350
+ });
351
+ clearTimeout(timeoutId);
352
+ if (!response.ok) {
353
+ const body = await response.text();
354
+ throw new Error(`Gateway returned ${response.status}: ${body}`);
355
+ }
356
+ const data = await response.json();
357
+ const reply = data.choices?.[0]?.message?.content;
358
+ if (!reply) {
359
+ throw new Error("No response content from gateway");
360
+ }
361
+ return reply;
362
+ } catch (err) {
363
+ clearTimeout(timeoutId);
364
+ if (err.name === "AbortError") {
365
+ throw new Error(
366
+ "Gateway timeout: Lesa may be busy or the gateway is processing another request. Try again in a moment."
367
+ );
368
+ }
369
+ throw err;
291
370
  }
292
- return reply;
293
371
  }
294
- async function getQueryEmbedding(text, apiKey, model = "text-embedding-3-small", dimensions = 1536) {
295
- const response = await fetch("https://api.openai.com/v1/embeddings", {
372
+ async function getQueryEmbedding(text, apiKey, model = DEFAULT_EMBEDDING_MODEL, dimensions = DEFAULT_EMBEDDING_DIMS) {
373
+ const response = await fetch(EMBEDDING_API_URL, {
296
374
  method: "POST",
297
375
  headers: {
298
376
  Authorization: `Bearer ${apiKey}`,
@@ -331,15 +409,15 @@ function cosineSimilarity(a, b) {
331
409
  return denom === 0 ? 0 : dot / denom;
332
410
  }
333
411
  function recencyWeight(ageDays) {
334
- return Math.max(0.5, 1 - ageDays * 0.01);
412
+ return Math.max(RECENCY_FLOOR, 1 - ageDays * RECENCY_DECAY_RATE);
335
413
  }
336
414
  function freshnessLabel(ageDays) {
337
- if (ageDays < 3) return "fresh";
338
- if (ageDays < 7) return "recent";
339
- if (ageDays < 14) return "aging";
415
+ if (ageDays < FRESHNESS_FRESH_DAYS) return "fresh";
416
+ if (ageDays < FRESHNESS_RECENT_DAYS) return "recent";
417
+ if (ageDays < FRESHNESS_AGING_DAYS) return "aging";
340
418
  return "stale";
341
419
  }
342
- async function searchConversations(config, query, limit = 5) {
420
+ async function searchConversations(config, query, limit = DEFAULT_SEARCH_LIMIT) {
343
421
  const Database = (await import("better-sqlite3")).default;
344
422
  if (!existsSync(config.dbPath)) {
345
423
  throw new Error(`Database not found: ${config.dbPath}`);
@@ -360,12 +438,12 @@ async function searchConversations(config, query, limit = 5) {
360
438
  FROM conversation_chunks
361
439
  WHERE embedding IS NOT NULL
362
440
  ORDER BY timestamp DESC
363
- LIMIT 1000`
441
+ LIMIT ${VECTOR_SEARCH_ROW_LIMIT}`
364
442
  ).all();
365
443
  const now = Date.now();
366
444
  return rows.map((row) => {
367
445
  const cosine = cosineSimilarity(queryEmbedding, blobToEmbedding(row.embedding));
368
- const ageDays = (now - row.timestamp) / (1e3 * 60 * 60 * 24);
446
+ const ageDays = (now - row.timestamp) / MS_PER_DAY;
369
447
  const weight = recencyWeight(ageDays);
370
448
  return {
371
449
  text: row.chunk_text,
@@ -396,7 +474,7 @@ async function searchConversations(config, query, limit = 5) {
396
474
  db.close();
397
475
  }
398
476
  }
399
- function findMarkdownFiles(dir, maxDepth = 4, depth = 0) {
477
+ function findMarkdownFiles(dir, maxDepth = WORKSPACE_MAX_DEPTH, depth = 0) {
400
478
  if (depth > maxDepth || !existsSync(dir)) return [];
401
479
  const files = [];
402
480
  for (const entry of readdirSync(dir, { withFileTypes: true })) {
@@ -426,7 +504,7 @@ function searchWorkspace(workspaceDir, query) {
426
504
  if (score === 0) continue;
427
505
  const lines = content.split("\n");
428
506
  const excerpts = [];
429
- for (let i = 0; i < lines.length && excerpts.length < 5; i++) {
507
+ for (let i = 0; i < lines.length && excerpts.length < WORKSPACE_MAX_EXCERPTS; i++) {
430
508
  const lineLower = lines[i].toLowerCase();
431
509
  if (words.some((w) => lineLower.includes(w))) {
432
510
  const start = Math.max(0, i - 1);
@@ -438,7 +516,7 @@ function searchWorkspace(workspaceDir, query) {
438
516
  } catch {
439
517
  }
440
518
  }
441
- return results.sort((a, b) => b.score - a.score).slice(0, 10);
519
+ return results.sort((a, b) => b.score - a.score).slice(0, WORKSPACE_MAX_RESULTS);
442
520
  }
443
521
  function parseSkillFrontmatter(content) {
444
522
  const match = content.match(/^---\n([\s\S]*?)\n---/);
@@ -532,9 +610,8 @@ async function executeSkillScript(skillDir, scripts, scriptName, args) {
532
610
  `${interpreter} "${scriptPath}" ${args}`,
533
611
  {
534
612
  env: { ...process.env },
535
- timeout: 12e4,
536
- maxBuffer: 10 * 1024 * 1024
537
- // 10MB
613
+ timeout: SKILL_EXEC_TIMEOUT_MS,
614
+ maxBuffer: SKILL_EXEC_MAX_BUFFER
538
615
  }
539
616
  );
540
617
  return stdout || stderr || "(no output)";
@@ -574,6 +651,7 @@ export {
574
651
  resolveGatewayConfig,
575
652
  setSessionIdentity,
576
653
  getSessionIdentity,
654
+ refreshSessionIdentity,
577
655
  pushInbox,
578
656
  drainInbox,
579
657
  inboxCount,
@@ -8,7 +8,8 @@ import {
8
8
  searchConversations,
9
9
  searchWorkspace,
10
10
  sendMessage
11
- } from "./chunk-LF7EMFBY.js";
11
+ } from "./chunk-7NH6JBIO.js";
12
+ import "./chunk-3RG5ZIWI.js";
12
13
 
13
14
  // cli.ts
14
15
  import { existsSync, statSync } from "fs";
@@ -49,6 +49,17 @@ declare function getSessionIdentity(): {
49
49
  agentId: string;
50
50
  sessionName: string;
51
51
  };
52
+ /**
53
+ * Re-read the session name from CC's session metadata file.
54
+ *
55
+ * CC writes the /rename label to ~/.claude/sessions/<pid>.json. The bridge
56
+ * reads this once on boot, but the name can change at any time via /rename
57
+ * or /resume. Calling this before each inbox check ensures the bridge
58
+ * always uses the current label for message targeting.
59
+ *
60
+ * Cheap: one file read per call. No network. No delay.
61
+ */
62
+ declare function refreshSessionIdentity(): void;
52
63
  /**
53
64
  * Write a message to the file-based inbox.
54
65
  * Creates a JSON file at ~/.ldm/messages/{uuid}.json.
@@ -107,6 +118,7 @@ declare function sendMessage(openclawDir: string, message: string, options?: {
107
118
  agentId?: string;
108
119
  user?: string;
109
120
  senderLabel?: string;
121
+ fireAndForget?: boolean;
110
122
  }): Promise<string>;
111
123
  declare function getQueryEmbedding(text: string, apiKey: string, model?: string, dimensions?: number): Promise<number[]>;
112
124
  declare function blobToEmbedding(blob: Buffer): number[];
@@ -132,4 +144,4 @@ declare function discoverSkills(openclawDir: string): SkillInfo[];
132
144
  declare function executeSkillScript(skillDir: string, scripts: string[], scriptName: string | undefined, args: string): Promise<string>;
133
145
  declare function readWorkspaceFile(workspaceDir: string, filePath: string): WorkspaceFileResult;
134
146
 
135
- export { type BridgeConfig, type ConversationResult, type GatewayConfig, type InboxMessage, LDM_ROOT, type SessionInfo, type SkillInfo, type WorkspaceFileResult, type WorkspaceSearchResult, blobToEmbedding, cosineSimilarity, discoverSkills, drainInbox, executeSkillScript, findMarkdownFiles, getQueryEmbedding, getSessionIdentity, inboxCount, inboxCountBySession, listActiveSessions, pushInbox, readWorkspaceFile, registerBridgeSession, resolveApiKey, resolveConfig, resolveConfigMulti, resolveGatewayConfig, searchConversations, searchWorkspace, sendLdmMessage, sendMessage, setSessionIdentity };
147
+ export { type BridgeConfig, type ConversationResult, type GatewayConfig, type InboxMessage, LDM_ROOT, type SessionInfo, type SkillInfo, type WorkspaceFileResult, type WorkspaceSearchResult, blobToEmbedding, cosineSimilarity, discoverSkills, drainInbox, executeSkillScript, findMarkdownFiles, getQueryEmbedding, getSessionIdentity, inboxCount, inboxCountBySession, listActiveSessions, pushInbox, readWorkspaceFile, refreshSessionIdentity, registerBridgeSession, resolveApiKey, resolveConfig, resolveConfigMulti, resolveGatewayConfig, searchConversations, searchWorkspace, sendLdmMessage, sendMessage, setSessionIdentity };
@@ -13,6 +13,7 @@ import {
13
13
  listActiveSessions,
14
14
  pushInbox,
15
15
  readWorkspaceFile,
16
+ refreshSessionIdentity,
16
17
  registerBridgeSession,
17
18
  resolveApiKey,
18
19
  resolveConfig,
@@ -23,7 +24,8 @@ import {
23
24
  sendLdmMessage,
24
25
  sendMessage,
25
26
  setSessionIdentity
26
- } from "./chunk-LF7EMFBY.js";
27
+ } from "./chunk-7NH6JBIO.js";
28
+ import "./chunk-3RG5ZIWI.js";
27
29
  export {
28
30
  LDM_ROOT,
29
31
  blobToEmbedding,
@@ -39,6 +41,7 @@ export {
39
41
  listActiveSessions,
40
42
  pushInbox,
41
43
  readWorkspaceFile,
44
+ refreshSessionIdentity,
42
45
  registerBridgeSession,
43
46
  resolveApiKey,
44
47
  resolveConfig,
@@ -15,13 +15,16 @@ import {
15
15
  sendLdmMessage,
16
16
  sendMessage,
17
17
  setSessionIdentity
18
- } from "./chunk-LF7EMFBY.js";
18
+ } from "./chunk-7NH6JBIO.js";
19
+ import {
20
+ __require
21
+ } from "./chunk-3RG5ZIWI.js";
19
22
 
20
23
  // mcp-server.ts
21
24
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
22
25
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
23
26
  import { createServer } from "http";
24
- import { appendFileSync, mkdirSync } from "fs";
27
+ import { appendFileSync, mkdirSync, readFileSync } from "fs";
25
28
  import { join } from "path";
26
29
  import { homedir } from "os";
27
30
  import { z } from "zod";
@@ -191,15 +194,29 @@ ${result.content}` }] };
191
194
  server.registerTool(
192
195
  "lesa_send_message",
193
196
  {
194
- description: "Send a message to the OpenClaw agent through the gateway. Routes through the agent's full pipeline: memory, tools, personality, workspace. Use this for direct communication: asking questions, sharing findings, coordinating work, or having a discussion. Messages are prefixed with [Claude Code] so the agent knows the source.",
197
+ description: "Send a message to the OpenClaw agent through the gateway. Routes through the agent's full pipeline: memory, tools, personality, workspace. Use this for direct communication: asking questions, sharing findings, coordinating work, or having a discussion. Messages are prefixed with [Claude Code] so the agent knows the source.\n\nThis is async: returns immediately after sending. The agent's reply will arrive in your inbox (check via lesa_check_inbox or it appears automatically on your next turn).",
195
198
  inputSchema: {
196
199
  message: z.string().describe("Message to send to the OpenClaw agent")
197
200
  }
198
201
  },
199
202
  async ({ message }) => {
200
203
  try {
201
- const reply = await sendMessage(config.openclawDir, message);
202
- return { content: [{ type: "text", text: reply }] };
204
+ await sendMessage(config.openclawDir, message, { fireAndForget: true });
205
+ const { agentId, sessionName } = getSessionIdentity();
206
+ sendLdmMessage({
207
+ from: `${agentId}:${sessionName}`,
208
+ to: "lesa",
209
+ body: message,
210
+ type: "chat"
211
+ });
212
+ return {
213
+ content: [{
214
+ type: "text",
215
+ text: `Sent to L\u0113sa: "${message}"
216
+
217
+ Message delivered to the gateway (fire-and-forget). L\u0113sa will process it through her full pipeline. Her reply will arrive in your inbox. Use lesa_check_inbox to check, or it will appear automatically on your next turn.`
218
+ }]
219
+ };
203
220
  } catch (err) {
204
221
  return { content: [{ type: "text", text: `Error sending message: ${err.message}` }], isError: true };
205
222
  }
@@ -308,11 +325,39 @@ ${lines.join("\n")}` }] };
308
325
  );
309
326
  console.error(`wip-bridge: registered ${executableSkills.length} skill tools + oc_skills_list (${skills.length} total skills)`);
310
327
  }
328
+ function resolveSessionName() {
329
+ const ccSessionDir = join(process.env.HOME || homedir(), ".claude", "sessions");
330
+ const ccSessionPath = join(ccSessionDir, `${process.ppid}.json`);
331
+ for (let attempt = 0; attempt < 3; attempt++) {
332
+ try {
333
+ const data = JSON.parse(readFileSync(ccSessionPath, "utf-8"));
334
+ if (data.name && typeof data.name === "string") {
335
+ return data.name;
336
+ }
337
+ if (attempt < 2) {
338
+ const { execSync } = __require("child_process");
339
+ execSync("sleep 0.5", { stdio: "ignore" });
340
+ }
341
+ } catch {
342
+ if (attempt < 2) {
343
+ try {
344
+ const { execSync } = __require("child_process");
345
+ execSync("sleep 0.5", { stdio: "ignore" });
346
+ } catch {
347
+ }
348
+ }
349
+ }
350
+ }
351
+ if (process.env.LDM_SESSION_NAME) {
352
+ return process.env.LDM_SESSION_NAME;
353
+ }
354
+ return "default";
355
+ }
311
356
  async function main() {
312
357
  const agentId = process.env.LDM_AGENT_ID || "cc-mini";
313
- const sessionName = process.env.LDM_SESSION_NAME || "default";
358
+ const sessionName = resolveSessionName();
314
359
  setSessionIdentity(agentId, sessionName);
315
- console.error(`wip-bridge: session identity: ${agentId}:${sessionName}`);
360
+ console.error(`wip-bridge: session identity: ${agentId}:${sessionName} (resolved from ${sessionName !== "default" ? "CC session file or env" : "default"})`);
316
361
  const session = registerBridgeSession();
317
362
  if (session) {
318
363
  console.error(`wip-bridge: registered session ${agentId}--${sessionName} (pid ${session.pid})`);
@@ -0,0 +1,5 @@
1
+ declare const _default: {
2
+ register(api: any): void;
3
+ };
4
+
5
+ export { _default as default };
@@ -0,0 +1,11 @@
1
+ import "./chunk-3RG5ZIWI.js";
2
+
3
+ // openclaw.ts
4
+ var openclaw_default = {
5
+ register(api) {
6
+ api.logger.info("lesa-bridge plugin registered (skill-only; MCP server runs out of process)");
7
+ }
8
+ };
9
+ export {
10
+ openclaw_default as default
11
+ };
@@ -100,6 +100,92 @@ This enables CC-to-CC awareness without a broker daemon. Any session can discove
100
100
  | `lib/sessions.mjs` | Session registration, discovery, PID liveness |
101
101
  | `dist/bridge/` | Compiled output (ships with npm package) |
102
102
 
103
+ ## ChatCompletions Routing (Fork Patches)
104
+
105
+ OpenClaw's gateway exposes an OpenAI-compatible chatCompletions endpoint at `http://localhost:18789/v1/chat/completions`. Upstream OpenClaw does not route these requests to the main agent session. We carry 4 patches on our fork to make this work.
106
+
107
+ **Patch 1: Session routing via `user=main`.**
108
+ When a CC session or external client sends a chatCompletions request, the gateway needs to know which OpenClaw session to route it to. This patch reads the `user` field from the request body. If `user=main` (or `user=openclaw`), the request routes to the main agent session (`agent:main:main`). Without this, bridge messages get "no session found" errors.
109
+
110
+ ```
111
+ POST /v1/chat/completions
112
+ Authorization: Bearer <gateway-token>
113
+ Content-Type: application/json
114
+
115
+ {"model":"openclaw","messages":[{"role":"user","content":"hi"}],"user":"main"}
116
+ ```
117
+
118
+ **Patch 2-3: Steer-backlog queue integration.**
119
+ When the agent is already busy (processing an iMessage from Parker), a concurrent chatCompletions request would fail or get dropped. These patches wire the chatCompletions endpoint into OpenClaw's `steer-backlog` queue (config: `messages.queue.mode: "steer-backlog"`). The message waits and gets processed after the current turn finishes. Works for both streaming and non-streaming responses. The gateway returns an `x-openclaw-queued: next-turn` header when a message is queued.
120
+
121
+ **Patch 4: Header rename.**
122
+ Cosmetic rename of the queue response header from `x-openclaw-queued: steer` to `x-openclaw-queued: next-turn` for clarity.
123
+
124
+ **Source:** `src/gateway/openai-http.ts`. Total patch size: ~100 lines. Carried on branch `cc-mini/chat-completions-v<version>`, rebased on each OpenClaw upgrade.
125
+
126
+ **Why not upstream:** OpenClaw's chatCompletions endpoint is designed for external API compatibility, not for multi-agent bridge routing. Our use case (CC sessions talking to an OpenClaw agent on the same machine) is specific to the LDM OS architecture.
127
+
128
+ ## Cooperative Push Architecture (Shipped Apr 11)
129
+
130
+ The original bridge used a pull model: CC sessions called `lesa_check_inbox` to check for messages. Messages sat unread until the next manual check. This was replaced with a cooperative push system where messages are delivered automatically.
131
+
132
+ ### Four Delivery Layers
133
+
134
+ Messages flow through four layers in order of priority. All four cooperate via shared `read: true` state on disk so a message delivered by one layer is skipped by the others.
135
+
136
+ | # | Layer | Fires when | Hook type | File |
137
+ |---|-------|-----------|-----------|------|
138
+ | 1 | **asyncRewake** (Stop hook) | New message arrives while session is idle | `fs.watch` on `~/.ldm/messages/` | `src/hooks/inbox-rewake-hook.mjs` |
139
+ | 2 | **UserPromptSubmit** | Next user prompt (typed or automated) | Claude Code hook | `src/hooks/inbox-check-hook.mjs` |
140
+ | 3 | **SessionStart** | New CC session boots | Claude Code hook | `src/hooks/boot-hook.mjs` |
141
+ | 4 | **Manual** | Explicit tool call | MCP tool | `lesa_check_inbox` |
142
+
143
+ **Layer 1 (asyncRewake)** is the autonomous push mechanism. It holds a long-lived `fs.watch` on `~/.ldm/messages/`, uses a per-session lockfile to prevent watcher stacking, and exits code 2 on a match to wake the idle model via Claude Code's task-notification path. It fires `fireBatch()` to deliver all pending matches in one wake cycle (cost linear in unique messages, not in layers).
144
+
145
+ **Layer 2 (UserPromptSubmit)** surfaces messages as `additionalContext` before each prompt. Messages appear in the session context without the user calling `lesa_check_inbox`.
146
+
147
+ **Deduplication:** Each layer marks messages `read: true` on disk after delivery. Subsequent layers check this flag and skip already-delivered messages. No double delivery. Cost is linear in unique messages, not in layers.
148
+
149
+ ### File Inbox
150
+
151
+ Messages live as JSON files at `~/.ldm/messages/`:
152
+
153
+ ```json
154
+ {
155
+ "id": "uuid",
156
+ "type": "chat",
157
+ "from": "lesa",
158
+ "to": "cc-mini:session-name",
159
+ "body": "message text",
160
+ "read": false,
161
+ "timestamp": "2026-04-11T19:05:00-07:00"
162
+ }
163
+ ```
164
+
165
+ ### Addressing
166
+
167
+ | Format | Meaning |
168
+ |--------|---------|
169
+ | `cc-mini` | Default session of agent cc-mini |
170
+ | `cc-mini:brainstorm` | Named session "brainstorm" on cc-mini |
171
+ | `cc-mini:*` | Broadcast to ALL sessions of cc-mini |
172
+ | `*` | Broadcast to all agents on the machine |
173
+ | `lesa` | The OpenClaw agent (routes through gateway chatCompletions) |
174
+
175
+ **Known issue (Apr 11):** Agent-broadcast without session specifier (`to: cc-mini`) fans out to ALL matching sessions. Three sessions replied independently to the same message. The addressing logic needs dedup for agent-broadcast targeting.
176
+
177
+ ### Tools
178
+
179
+ | Tool | Direction | Transport |
180
+ |------|-----------|-----------|
181
+ | `ldm_send_message` | Any agent → file inbox | Writes JSON to `~/.ldm/messages/` |
182
+ | `lesa_send_message` | CC → OpenClaw agent | HTTP POST to gateway chatCompletions |
183
+ | `lesa_check_inbox` | CC ← OpenClaw agent | Reads + drains `~/.ldm/messages/` for this session |
184
+
185
+ ### Plan Document
186
+
187
+ Full architecture: `ai/product/plans-prds/bridge/2026-04-11--cc-mini--autonomous-push-architecture.md` (377 lines, 8 open questions, Phase A shipped, Phase B deferred for CloudKit cross-machine transport).
188
+
103
189
  ## Node Communication (Future)
104
190
 
105
191
  Bridge currently works localhost only (Core). For Node -> Core communication:
@@ -0,0 +1,74 @@
1
+ # Documentation Pipeline
2
+
3
+ Documentation lives in three places. They stay in sync through the installer. This is not optional.
4
+
5
+ ## The Three Levels (plus ai/, which is process, not documentation)
6
+
7
+ ### 1. Repo Docs (source of truth)
8
+
9
+ Every repo has documentation at its root and in `docs/` for features:
10
+
11
+ ```
12
+ repo/
13
+ ├── README.md What this repo is
14
+ ├── TECHNICAL.md How it works
15
+ ├── SKILL.md Agent instructions
16
+ ├── CLAUDE.md Agent context for Claude Code
17
+ ├── docs/
18
+ │ └── <feature>/
19
+ │ ├── README.md What this feature is
20
+ │ └── TECHNICAL.md How this feature works
21
+ ```
22
+
23
+ When a feature gets absorbed into a repo, its README and TECHNICAL move into `docs/<feature>/`.
24
+
25
+ Repo docs are the source of truth. Everything else is derived from them.
26
+
27
+ ### 2. Home Docs (human readable, personalized)
28
+
29
+ Location: `~/wipcomputerinc/library/documentation/`
30
+
31
+ These are personalized for YOUR system. "Here's how releases work on YOUR machine." Generated by the installer from repo doc templates + your `~/.ldm/config.json`.
32
+
33
+ The human reads these. They describe how the system is set up on this specific machine, with this specific configuration.
34
+
35
+ ### 3. Agent Docs (OS reference)
36
+
37
+ Location: `~/.ldm/shared/`
38
+
39
+ ```
40
+ ~/.ldm/shared/
41
+ ├── rules/ Thin rules deployed to ~/.claude/rules/
42
+ ├── dev-guide-*.md Org-specific dev conventions
43
+ ├── boot/ Boot sequence config
44
+ └── prompts/ Cron prompts
45
+ ```
46
+
47
+ These are what agents reference. Rules, dev guide, boot config. The installer deploys them so agents always have current instructions.
48
+
49
+ ### 4. ai/ (development process)
50
+
51
+ Location: `<repo>/ai/`
52
+
53
+ Plans, bugs, research, dev updates. Private repo only. Never ships to public. Updated by the dev team (humans + AI agents) during development.
54
+
55
+ ## The Update Flow
56
+
57
+ ### On merge to private main
58
+
59
+ 1. **Repo docs** updated. README, TECHNICAL, docs/<feature>/, SKILL.md, CLAUDE.md. Part of the PR. Code and docs ship together.
60
+ 2. **ai/** updated. Plan archived, bugs closed, dev update written. Notes the version is on alpha.
61
+
62
+ ### On `ldm install`
63
+
64
+ 3. **Home docs** regenerated. Installer reads repo doc templates + config.json, generates personalized `library/documentation/` files.
65
+ 4. **Agent docs** deployed. Installer copies rules, dev guide, boot config from the installed package to `~/.ldm/shared/` and `~/.claude/rules/`.
66
+
67
+ ### On deploy to public
68
+
69
+ 5. **Public repo** updated. `deploy-public.sh` syncs everything except `ai/`.
70
+ 6. **ai/** dev update notes the version moved from alpha to release.
71
+
72
+ ## The Rule
73
+
74
+ Three places, one update, never out of sync. The installer is the bridge between "code landed" and "docs are current everywhere." Developers write repo docs. The installer propagates them. Nobody manually updates home docs or agent docs.