openclaw-node-harness 2.0.4 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +646 -3
- package/bin/hyperagent.mjs +419 -0
- package/bin/lane-watchdog.js +23 -2
- package/bin/mesh-agent.js +439 -28
- package/bin/mesh-bridge.js +69 -3
- package/bin/mesh-health-publisher.js +41 -1
- package/bin/mesh-task-daemon.js +821 -26
- package/bin/mesh.js +411 -20
- package/config/claude-settings.json +95 -0
- package/config/daemon.json.template +2 -1
- package/config/git-hooks/pre-commit +13 -0
- package/config/git-hooks/pre-push +12 -0
- package/config/harness-rules.json +174 -0
- package/config/plan-templates/team-bugfix.yaml +52 -0
- package/config/plan-templates/team-deploy.yaml +50 -0
- package/config/plan-templates/team-feature.yaml +71 -0
- package/config/roles/qa-engineer.yaml +36 -0
- package/config/roles/solidity-dev.yaml +51 -0
- package/config/roles/tech-architect.yaml +36 -0
- package/config/rules/framework/solidity.md +22 -0
- package/config/rules/framework/typescript.md +21 -0
- package/config/rules/framework/unity.md +21 -0
- package/config/rules/universal/design-docs.md +18 -0
- package/config/rules/universal/git-hygiene.md +18 -0
- package/config/rules/universal/security.md +19 -0
- package/config/rules/universal/test-standards.md +19 -0
- package/identity/DELEGATION.md +6 -6
- package/install.sh +296 -10
- package/lib/agent-activity.js +2 -2
- package/lib/circling-parser.js +119 -0
- package/lib/exec-safety.js +105 -0
- package/lib/hyperagent-store.mjs +652 -0
- package/lib/kanban-io.js +24 -31
- package/lib/llm-providers.js +16 -0
- package/lib/mcp-knowledge/bench.mjs +118 -0
- package/lib/mcp-knowledge/core.mjs +530 -0
- package/lib/mcp-knowledge/package.json +25 -0
- package/lib/mcp-knowledge/server.mjs +252 -0
- package/lib/mcp-knowledge/test.mjs +802 -0
- package/lib/memory-budget.mjs +261 -0
- package/lib/mesh-collab.js +483 -165
- package/lib/mesh-harness.js +427 -0
- package/lib/mesh-plans.js +79 -50
- package/lib/mesh-tasks.js +132 -49
- package/lib/nats-resolve.js +4 -4
- package/lib/plan-templates.js +226 -0
- package/lib/pre-compression-flush.mjs +322 -0
- package/lib/role-loader.js +292 -0
- package/lib/rule-loader.js +358 -0
- package/lib/session-store.mjs +461 -0
- package/lib/transcript-parser.mjs +292 -0
- package/mission-control/drizzle/soul_schema_update.sql +29 -0
- package/mission-control/drizzle.config.ts +1 -4
- package/mission-control/package-lock.json +1571 -83
- package/mission-control/package.json +6 -2
- package/mission-control/scripts/gen-chronology.js +3 -3
- package/mission-control/scripts/import-pipeline-v2.js +0 -16
- package/mission-control/scripts/import-pipeline.js +0 -15
- package/mission-control/src/app/api/cowork/clusters/[id]/members/route.ts +117 -0
- package/mission-control/src/app/api/cowork/clusters/[id]/route.ts +84 -0
- package/mission-control/src/app/api/cowork/clusters/route.ts +141 -0
- package/mission-control/src/app/api/cowork/dispatch/route.ts +128 -0
- package/mission-control/src/app/api/cowork/events/route.ts +65 -0
- package/mission-control/src/app/api/cowork/intervene/route.ts +259 -0
- package/mission-control/src/app/api/cowork/sessions/[id]/route.ts +37 -0
- package/mission-control/src/app/api/cowork/sessions/route.ts +64 -0
- package/mission-control/src/app/api/diagnostics/route.ts +97 -0
- package/mission-control/src/app/api/diagnostics/test-runner/route.ts +990 -0
- package/mission-control/src/app/api/memory/search/route.ts +6 -3
- package/mission-control/src/app/api/mesh/events/route.ts +95 -19
- package/mission-control/src/app/api/mesh/identity/route.ts +11 -0
- package/mission-control/src/app/api/mesh/tasks/[id]/route.ts +92 -0
- package/mission-control/src/app/api/mesh/tasks/route.ts +91 -0
- package/mission-control/src/app/api/souls/[id]/evolution/route.ts +21 -5
- package/mission-control/src/app/api/souls/[id]/prompt/route.ts +7 -1
- package/mission-control/src/app/api/souls/[id]/propagate/route.ts +14 -2
- package/mission-control/src/app/api/tasks/[id]/handoff/route.ts +8 -2
- package/mission-control/src/app/api/tasks/[id]/route.ts +90 -4
- package/mission-control/src/app/api/tasks/route.ts +21 -30
- package/mission-control/src/app/api/workspace/read/route.ts +11 -0
- package/mission-control/src/app/cowork/page.tsx +261 -0
- package/mission-control/src/app/diagnostics/page.tsx +385 -0
- package/mission-control/src/app/graph/page.tsx +26 -0
- package/mission-control/src/app/memory/page.tsx +1 -1
- package/mission-control/src/app/obsidian/page.tsx +36 -6
- package/mission-control/src/app/roadmap/page.tsx +24 -0
- package/mission-control/src/app/souls/page.tsx +2 -2
- package/mission-control/src/components/board/execution-config.tsx +431 -0
- package/mission-control/src/components/board/kanban-board.tsx +75 -9
- package/mission-control/src/components/board/kanban-column.tsx +135 -19
- package/mission-control/src/components/board/task-card.tsx +55 -2
- package/mission-control/src/components/board/unified-task-dialog.tsx +82 -4
- package/mission-control/src/components/cowork/cluster-card.tsx +176 -0
- package/mission-control/src/components/cowork/create-cluster-dialog.tsx +251 -0
- package/mission-control/src/components/cowork/dispatch-form.tsx +423 -0
- package/mission-control/src/components/cowork/role-picker.tsx +102 -0
- package/mission-control/src/components/cowork/session-card.tsx +284 -0
- package/mission-control/src/components/layout/sidebar.tsx +39 -2
- package/mission-control/src/lib/__tests__/daily-log.test.ts +82 -0
- package/mission-control/src/lib/__tests__/memory-md.test.ts +87 -0
- package/mission-control/src/lib/__tests__/mesh-kv-sync.test.ts +465 -0
- package/mission-control/src/lib/__tests__/mocks/mock-kv.ts +131 -0
- package/mission-control/src/lib/__tests__/status-kanban.test.ts +46 -0
- package/mission-control/src/lib/__tests__/task-markdown.test.ts +188 -0
- package/mission-control/src/lib/__tests__/wikilinks.test.ts +175 -0
- package/mission-control/src/lib/config.ts +67 -0
- package/mission-control/src/lib/db/index.ts +85 -1
- package/mission-control/src/lib/db/schema.ts +61 -3
- package/mission-control/src/lib/hooks.ts +309 -0
- package/mission-control/src/lib/memory/entities.ts +3 -2
- package/mission-control/src/lib/memory/extract.ts +2 -1
- package/mission-control/src/lib/memory/retrieval.ts +3 -2
- package/mission-control/src/lib/nats.ts +66 -1
- package/mission-control/src/lib/parsers/task-markdown.ts +52 -2
- package/mission-control/src/lib/parsers/transcript.ts +4 -4
- package/mission-control/src/lib/scheduler.ts +12 -11
- package/mission-control/src/lib/sync/mesh-kv.ts +279 -0
- package/mission-control/src/lib/sync/tasks.ts +23 -1
- package/mission-control/src/lib/task-id.ts +32 -0
- package/mission-control/src/lib/tts/index.ts +33 -9
- package/mission-control/src/middleware.ts +82 -0
- package/mission-control/tsconfig.json +2 -1
- package/mission-control/vitest.config.ts +14 -0
- package/package.json +15 -2
- package/services/launchd/ai.openclaw.log-rotate.plist +11 -0
- package/services/launchd/ai.openclaw.mesh-deploy-listener.plist +4 -0
- package/services/launchd/ai.openclaw.mesh-health-publisher.plist +4 -0
- package/services/launchd/ai.openclaw.mission-control.plist +1 -1
- package/services/service-manifest.json +1 -1
- package/skills/cc-godmode/references/agents.md +8 -8
- package/uninstall.sh +37 -9
- package/workspace-bin/memory-daemon.mjs +199 -5
- package/workspace-bin/session-search.mjs +204 -0
- package/workspace-bin/web-fetch.mjs +65 -0
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* pre-compression-flush.mjs — Pre-compression memory extraction
|
|
3
|
+
*
|
|
4
|
+
* Detects when a session is approaching context compression
|
|
5
|
+
* (by JSONL size / estimated token count) and extracts durable facts from
|
|
6
|
+
* the conversation tail before they're lost.
|
|
7
|
+
*
|
|
8
|
+
* LLM-agnostic: uses transcript-parser.mjs to handle any JSONL format
|
|
9
|
+
* (Claude Code, OpenClaw Gateway, or future backends).
|
|
10
|
+
*
|
|
11
|
+
* Zero token cost — pure JSONL parsing + heuristic extraction.
|
|
12
|
+
* Writes to MEMORY.md with bigram-similarity dedup to prevent bloat.
|
|
13
|
+
*
|
|
14
|
+
* Adapted from Hermes's pre-compression flush pattern, fitted to
|
|
15
|
+
* OpenClaw's daemon architecture.
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
import fs from 'fs';
|
|
19
|
+
import path from 'path';
|
|
20
|
+
import { parseJsonlFile, estimateFileTokens } from './transcript-parser.mjs';
|
|
21
|
+
|
|
22
|
+
// ── Token Estimation ────────────────────────────────────
|
|
23
|
+
|
|
24
|
+
const CHARS_PER_TOKEN = 4; // rough approximation across common LLM tokenizers

/**
 * Estimate token count from character length.
 * Good enough for flush threshold — no tokenizer dependency needed.
 *
 * @param {string} text
 * @returns {number} estimated token count (ceil of chars / 4)
 */
export function estimateTokens(text) {
  const { length } = text;
  return Math.ceil(length / CHARS_PER_TOKEN);
}
|
|
33
|
+
|
|
34
|
+
/**
 * Estimate total conversation tokens from a JSONL session file.
 * Format-agnostic — delegates parsing to transcript-parser.
 *
 * @param {string} jsonlPath - Path to the session's JSONL file
 * @param {number} tailCount - How many trailing messages to return (default 40)
 * @param {Object} opts
 * @param {string} opts.format - Transcript format (auto-detected if omitted)
 * @returns {Promise<{ totalTokens: number, messageCount: number, tailMessages: Array }>}
 */
export async function estimateSessionTokens(jsonlPath, tailCount = 40, opts = {}) {
  if (!fs.existsSync(jsonlPath)) {
    return { totalTokens: 0, messageCount: 0, tailMessages: [] };
  }

  const messages = await parseJsonlFile(jsonlPath, { format: opts.format });
  // Sum content lengths; token count is chars / CHARS_PER_TOKEN rounded up.
  const totalChars = messages.reduce((sum, msg) => sum + msg.content.length, 0);

  return {
    totalTokens: Math.ceil(totalChars / CHARS_PER_TOKEN),
    messageCount: messages.length,
    tailMessages: messages.slice(-tailCount),
  };
}
|
|
61
|
+
|
|
62
|
+
// ── Flush Threshold ────────────────────────────────────
|
|
63
|
+
|
|
64
|
+
/**
 * Check if a session should trigger a pre-compression flush.
 * Uses a cheap file-size heuristic only — no JSONL parsing.
 *
 * @param {string} jsonlPath - Path to the session's JSONL file
 * @param {Object} opts
 * @param {number} opts.contextWindowTokens - Model's context window size in tokens (default: 200000)
 * @param {number} opts.flushPct - Flush at this % of context window (default: 0.75)
 * @returns {{ shouldFlush: boolean, estimatedTokens: number, pctUsed: number, threshold: number }}
 */
export async function shouldFlush(jsonlPath, opts = {}) {
  const { contextWindowTokens = 200000, flushPct = 0.75 } = opts;
  const threshold = Math.floor(contextWindowTokens * flushPct);

  if (!fs.existsSync(jsonlPath)) {
    return { shouldFlush: false, estimatedTokens: 0, pctUsed: 0, threshold };
  }

  // Quick estimate from file size — ~4 chars/token, doubled because JSONL
  // framing roughly doubles the byte count of the raw conversation text.
  const { size } = fs.statSync(jsonlPath);
  const estimatedTokens = Math.ceil(size / (CHARS_PER_TOKEN * 2));

  return {
    shouldFlush: estimatedTokens >= threshold,
    estimatedTokens,
    pctUsed: Math.round((estimatedTokens / contextWindowTokens) * 100),
    threshold,
  };
}
|
|
90
|
+
|
|
91
|
+
// ── Bigram Similarity ────────────────────────────────────
|
|
92
|
+
|
|
93
|
+
/**
 * Compute bigram similarity between two strings (0.0 - 1.0).
 * Jaccard index over word bigrams plus unigrams (unigrams help short strings).
 * Used for dedup when merging new facts into MEMORY.md.
 *
 * Fix: empty tokens are filtered out after normalization. Previously a
 * punctuation-only string normalized to '' and split to [''], so two
 * all-punctuation strings shared the spurious '' gram and compared as
 * identical (similarity 1.0).
 *
 * @param {string} a
 * @param {string} b
 * @returns {number} similarity in [0, 1]
 */
export function bigramSimilarity(a, b) {
  if (!a || !b) return 0;

  // Lowercase, strip non-alphanumerics, and drop empty tokens.
  const tokenize = s =>
    s.toLowerCase().replace(/[^a-z0-9\s]/g, '').trim().split(/\s+/).filter(Boolean);

  const grams = s => {
    const tokens = tokenize(s);
    const bag = new Set(tokens); // unigrams, for very short strings
    for (let i = 0; i < tokens.length - 1; i++) {
      bag.add(`${tokens[i]} ${tokens[i + 1]}`);
    }
    return bag;
  };

  const setA = grams(a);
  const setB = grams(b);
  if (setA.size === 0 || setB.size === 0) return 0;

  let intersection = 0;
  for (const g of setA) {
    if (setB.has(g)) intersection++;
  }

  const union = new Set([...setA, ...setB]).size;
  return union === 0 ? 0 : intersection / union;
}
|
|
125
|
+
|
|
126
|
+
// ── Fact Extraction ────────────────────────────────────
|
|
127
|
+
|
|
128
|
+
/**
 * Extract durable facts from conversation tail messages.
 * Heuristic approach — looks for:
 * - User corrections / preferences ("don't...", "always...", "I prefer...")
 * - Decisions ("we decided...", "let's go with...")
 * - Environment discoveries ("the API is at...", "config is in...")
 * - Named entities + context (URLs, file paths, project names)
 *
 * Only user messages are scanned; matches are deduped case-insensitively
 * within a single extraction run.
 *
 * Fix: regexes are compiled ONCE up front (with 'g' and 'i' flags
 * normalized) instead of per message × per pattern inside the loop.
 * Safe with matchAll, which clones the regex and does not mutate lastIndex.
 *
 * @param {Array<{role: string, content: string}>} tailMessages
 * @returns {Array<{fact: string, category: string, confidence: number}>}
 */
export function extractFacts(tailMessages) {
  const facts = [];
  const seen = new Set();

  const patterns = [
    // User corrections / preferences
    { re: /(?:don'?t|never|always|stop|prefer|please)\s+(.{10,80})/i, category: 'preference', confidence: 85 },
    // Decisions
    { re: /(?:decided|let'?s go with|we'?ll use|switching to|going with)\s+(.{10,80})/i, category: 'decision', confidence: 80 },
    // Environment / config
    { re: /(?:api|endpoint|url|port|config|database|db)\s+(?:is|at|on|in)\s+(.{5,80})/i, category: 'environment', confidence: 75 },
    // File paths
    { re: /((?:\/[\w.-]+){3,})/g, category: 'reference', confidence: 60 },
    // URLs
    { re: /(https?:\/\/\S{10,80})/g, category: 'reference', confidence: 65 },
  ];

  // Precompile with normalized flags: 'g' required by matchAll, 'i' for
  // case-insensitive matching across all patterns.
  const compiled = patterns.map(({ re, category, confidence }) => {
    let flags = re.flags;
    if (!flags.includes('g')) flags += 'g';
    if (!flags.includes('i')) flags += 'i';
    return { re: new RegExp(re.source, flags), category, confidence };
  });

  for (const msg of tailMessages) {
    if (msg.role !== 'user') continue; // focus on user statements
    const content = msg.content;

    for (const { re, category, confidence } of compiled) {
      for (const match of content.matchAll(re)) {
        const factText = match[0].trim().slice(0, 120);

        // Dedup within extraction (case/whitespace insensitive)
        const key = factText.toLowerCase().replace(/\s+/g, ' ');
        if (seen.has(key)) continue;
        seen.add(key);

        facts.push({ fact: factText, category, confidence });
      }
    }
  }

  return facts;
}
|
|
177
|
+
|
|
178
|
+
// ── MEMORY.md Merge ────────────────────────────────────
|
|
179
|
+
|
|
180
|
+
/**
 * Parse MEMORY.md into structured entries.
 * An entry is a "- " or "* " bullet line; it is tagged with the text of the
 * nearest preceding "##" (or deeper) heading.
 *
 * @param {string} content - Raw MEMORY.md text
 * @returns {Array<{section: string, text: string, raw: string}>}
 */
export function parseMemoryMd(content) {
  const entries = [];
  let section = '';

  for (const raw of content.split('\n')) {
    if (raw.startsWith('##')) {
      // New section heading — strip leading hashes and whitespace.
      section = raw.replace(/^#+\s*/, '').trim();
      continue;
    }
    const isBullet = raw.startsWith('- ') || raw.startsWith('* ');
    if (!isBullet) continue;
    entries.push({
      section,
      text: raw.replace(/^[-*]\s*/, '').trim(),
      raw,
    });
  }

  return entries;
}
|
|
203
|
+
|
|
204
|
+
/**
 * Merge new facts into MEMORY.md content with dedup.
 *
 * Strategy (per fact, vs. the most similar existing entry):
 * - >90% similarity → skip (already known)
 * - >70% similarity → merge (append new info to existing entry)
 * - otherwise       → append as new entry under "## Recent" (within budget)
 *
 * Fixes:
 * - The replacement is passed to String.replace as a function so `$`
 *   sequences in fact text (`$&`, `$'`, …) are never interpreted as
 *   special replacement patterns.
 * - After a merge, the in-memory entry's text/raw are updated so later
 *   facts in the same batch dedup and replace against the line actually
 *   present in `content`, not a stale copy.
 *
 * @param {string} memoryContent - Current MEMORY.md content
 * @param {Array<{fact: string, category: string, confidence: number}>} facts
 * @param {number} charBudget - Max character budget (default 2200)
 * @returns {{ content: string, added: number, merged: number, skipped: number }}
 */
export function mergeFacts(memoryContent, facts, charBudget = 2200) {
  const entries = parseMemoryMd(memoryContent);
  let content = memoryContent;
  let added = 0, merged = 0, skipped = 0;

  for (const { fact } of facts) {
    // Find the most similar existing entry.
    let bestSim = 0;
    let bestEntry = null;
    for (const entry of entries) {
      const sim = bigramSimilarity(fact, entry.text);
      if (sim > bestSim) {
        bestSim = sim;
        bestEntry = entry;
      }
    }

    if (bestSim > 0.9) {
      skipped++; // already known
      continue;
    }

    if (bestSim > 0.7 && bestEntry) {
      // Merge: replace the existing line with a combined version.
      const combined = `${bestEntry.text} (updated: ${fact.slice(0, 60)})`;
      const newRaw = `- ${combined}`;
      // Function replacement avoids $-pattern expansion from fact text.
      content = content.replace(bestEntry.raw, () => newRaw);
      // Keep the entry in sync with what is now in `content`.
      bestEntry.text = combined;
      bestEntry.raw = newRaw;
      merged++;
      continue;
    }

    // Budget check before appending.
    if (content.length + fact.length + 10 > charBudget) {
      break; // respect character budget
    }

    // Append under "## Recent" section (create if missing).
    if (!content.includes('## Recent')) {
      content = content.trimEnd() + '\n\n## Recent\n';
    }
    content = content.trimEnd() + `\n- ${fact}`;
    added++;
    entries.push({ section: 'Recent', text: fact, raw: `- ${fact}` });
  }

  return { content: content.trimEnd() + '\n', added, merged, skipped };
}
|
|
264
|
+
|
|
265
|
+
// ── Main Flush Pipeline ────────────────────────────────────
|
|
266
|
+
|
|
267
|
+
/**
 * Run the pre-compression flush pipeline.
 *
 * 1. Read tail of JSONL conversation
 * 2. Extract durable facts
 * 3. Merge into MEMORY.md with dedup
 * 4. Return stats
 *
 * @param {string} jsonlPath - Path to current session JSONL
 * @param {string} memoryMdPath - Path to MEMORY.md
 * @param {Object} opts
 * @param {number} opts.tailCount - Number of tail messages to scan (default 40)
 * @param {number} opts.charBudget - MEMORY.md character budget (default 2200)
 * @param {string} opts.format - Transcript format (auto-detected if omitted)
 * @returns {Promise<{ flushed: boolean, facts: number, added: number, merged: number, skipped: number }>}
 */
export async function runFlush(jsonlPath, memoryMdPath, opts = {}) {
  const { tailCount = 40, charBudget = 2200, format } = opts;

  // Uniform result shape; zeroed counters unless overridden.
  const stats = (flushed, extra = {}) =>
    ({ flushed, facts: 0, added: 0, merged: 0, skipped: 0, ...extra });

  if (!fs.existsSync(jsonlPath)) return stats(false);

  // 1. Tail messages (format-agnostic via transcript-parser).
  const { tailMessages } = await estimateSessionTokens(jsonlPath, tailCount, { format });
  if (tailMessages.length === 0) return stats(false);

  // 2. Heuristic fact extraction.
  const facts = extractFacts(tailMessages);
  if (facts.length === 0) return stats(true);

  // 3. Merge into MEMORY.md (missing file treated as empty).
  const existing = fs.existsSync(memoryMdPath)
    ? fs.readFileSync(memoryMdPath, 'utf-8')
    : '';
  const result = mergeFacts(existing, facts, charBudget);

  // 4. Persist and report.
  fs.writeFileSync(memoryMdPath, result.content);

  return stats(true, {
    facts: facts.length,
    added: result.added,
    merged: result.merged,
    skipped: result.skipped,
  });
}
|
|
@@ -0,0 +1,292 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* role-loader.js — Load, validate, and format role profiles for mesh tasks.
|
|
3
|
+
*
|
|
4
|
+
* Role profiles define:
|
|
5
|
+
* - responsibilities: what the agent SHOULD do (prompt injection)
|
|
6
|
+
* - must_not: what the agent must NOT do (prompt + post-validation)
|
|
7
|
+
* - framework: structured thinking scaffold (prompt injection)
|
|
8
|
+
* - required_outputs: post-completion structural validation
|
|
9
|
+
* - forbidden_patterns: post-completion negative validation
|
|
10
|
+
* - scope_paths: default scope if task doesn't specify one
|
|
11
|
+
* - escalation: failure routing map
|
|
12
|
+
*
|
|
13
|
+
* Roles live in config/roles/*.yaml (shipped) and ~/.openclaw/roles/ (user).
|
|
14
|
+
* Uses js-yaml for parsing (already a dependency via plan-templates).
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
const fs = require('fs');
|
|
18
|
+
const path = require('path');
|
|
19
|
+
const yaml = require('js-yaml');
|
|
20
|
+
|
|
21
|
+
// ── Role Loading ─────────────────────────────────────
|
|
22
|
+
|
|
23
|
+
/**
 * Load a single role profile from a YAML file.
 *
 * Fixes:
 * - The fallback id now strips whatever extension the file actually has;
 *   previously only '.yaml' was stripped, so a role loaded from "foo.yml"
 *   got the id "foo.yml" (findRole searches both extensions).
 * - Non-mapping YAML (null, scalar, sequence) now throws a clear error
 *   instead of failing later on property access.
 *
 * @param {string} rolePath - Path to the role YAML file
 * @returns {object} parsed role profile (id defaulted from filename)
 * @throws {Error} if the file is unreadable, invalid YAML, or not a mapping
 */
function loadRole(rolePath) {
  const content = fs.readFileSync(rolePath, 'utf-8');
  const role = yaml.load(content);
  if (!role || typeof role !== 'object' || Array.isArray(role)) {
    throw new Error(`Role file is not a YAML mapping: ${rolePath}`);
  }
  if (!role.id) role.id = path.basename(rolePath, path.extname(rolePath));
  return role;
}
|
|
32
|
+
|
|
33
|
+
/**
 * Find and load a role by ID, searching user dir first then shipped config.
 * Candidates are tried dir-major, then '.yaml' before '.yml'; load failures
 * are logged and the search continues.
 *
 * @param {string} roleId — e.g. "solidity-dev"
 * @param {string[]} searchDirs — directories to search (first match wins)
 * @returns {object|null} — role profile or null if not found
 */
function findRole(roleId, searchDirs) {
  const candidates = searchDirs.flatMap(dir =>
    ['.yaml', '.yml'].map(ext => path.join(dir, `${roleId}${ext}`)));

  for (const candidate of candidates) {
    if (!fs.existsSync(candidate)) continue;
    try {
      return loadRole(candidate);
    } catch (err) {
      console.error(`[role-loader] Failed to load ${candidate}: ${err.message}`);
    }
  }
  return null;
}
|
|
54
|
+
|
|
55
|
+
/**
 * List all available roles across search directories.
 * Earlier directories win on duplicate ids; malformed files are skipped.
 *
 * @param {string[]} searchDirs — directories to scan
 * @returns {Array<{id, name, description, file}>}
 */
function listRoles(searchDirs) {
  const byId = new Map(); // id → summary, insertion order preserved

  for (const dir of searchDirs) {
    if (!fs.existsSync(dir)) continue;

    for (const file of fs.readdirSync(dir)) {
      if (!file.endsWith('.yaml') && !file.endsWith('.yml')) continue;
      try {
        const role = loadRole(path.join(dir, file));
        if (byId.has(role.id)) continue; // first definition wins
        byId.set(role.id, {
          id: role.id,
          name: role.name || role.id,
          description: role.description || '',
          file,
        });
      } catch { /* skip malformed */ }
    }
  }

  return [...byId.values()];
}
|
|
84
|
+
|
|
85
|
+
// ── Role Validation ──────────────────────────────────
|
|
86
|
+
|
|
87
|
+
/**
 * Validate a role profile for structural correctness.
 *
 * Checks: id is present; list-shaped fields are arrays when present;
 * escalation is a plain object when present.
 *
 * Fix: arrays satisfy `typeof x === 'object'`, so `escalation: []`
 * previously validated; arrays are now rejected for escalation.
 *
 * @param {object} role — parsed role profile
 * @returns {{ valid: boolean, errors: string[] }}
 */
function validateRole(role) {
  const errors = [];
  if (!role.id) errors.push('Missing role id');

  // All list-shaped fields must be arrays when present.
  for (const field of ['responsibilities', 'must_not', 'required_outputs', 'forbidden_patterns']) {
    if (role[field] && !Array.isArray(role[field])) {
      errors.push(`${field} must be an array`);
    }
  }

  // escalation is a routing map — must be a non-array object.
  if (role.escalation && (typeof role.escalation !== 'object' || Array.isArray(role.escalation))) {
    errors.push('escalation must be an object');
  }

  return { valid: errors.length === 0, errors };
}
|
|
111
|
+
|
|
112
|
+
// ── Prompt Formatting ────────────────────────────────
|
|
113
|
+
|
|
114
|
+
/**
 * Format a role profile into markdown for prompt injection.
 * Injected between Scope and Instructions in the agent prompt.
 * LLM-agnostic: standard markdown that any LLM can consume.
 *
 * @param {object|null} role — role profile (null/undefined → empty string)
 * @returns {string} markdown block (empty string when role is falsy)
 */
function formatRoleForPrompt(role) {
  if (!role) return '';

  const lines = [`## Role: ${role.name || role.id}`, ''];

  // Render a heading plus one prefixed line per item, then a blank line.
  const pushList = (heading, items, prefix) => {
    if (!items || items.length === 0) return;
    lines.push(heading);
    for (const item of items) lines.push(`${prefix}${item}`);
    lines.push('');
  };

  pushList('### Responsibilities', role.responsibilities, '- ');
  pushList('### Boundaries (Must NOT Do)', role.must_not, '- ❌ ');

  if (role.framework) {
    lines.push(`### Framework: ${role.framework.name}`, role.framework.prompt, '');
  }

  return lines.join('\n');
}
|
|
150
|
+
|
|
151
|
+
// ── Post-Completion Validation ───────────────────────
|
|
152
|
+
|
|
153
|
+
/**
 * Validate task output against role's required_outputs.
 *
 * Requirement types:
 * - file_match:    at least one output file must match `pattern`
 * - content_check: at least one output file matching `pattern` must
 *                  contain the literal string `check`
 *
 * Fixes:
 * - `require('./rule-loader')` is hoisted out of the loop (was re-required
 *   once per requirement).
 * - A content_check now FAILS when no output file matches its pattern;
 *   previously it passed silently even though the required content could
 *   not possibly be present. (Skipping when worktreePath is absent is kept
 *   as best-effort, unchanged.)
 *
 * @param {object} role — role profile
 * @param {string[]} outputFiles — files created/modified by the task
 * @param {string} worktreePath — path to task worktree
 * @returns {{ passed: boolean, failures: Array<{type, description, detail}> }}
 */
function validateRequiredOutputs(role, outputFiles, worktreePath) {
  if (!role || !role.required_outputs) return { passed: true, failures: [] };

  const { globMatch } = require('./rule-loader');
  const failures = [];

  for (const req of role.required_outputs) {
    if (req.type === 'file_match') {
      // At least one output file must match the pattern.
      const matched = outputFiles.some(f => globMatch(req.pattern, f));
      if (!matched) {
        failures.push({
          type: 'file_match',
          description: req.description,
          detail: `No output file matches pattern: ${req.pattern}`,
        });
      }
    } else if (req.type === 'content_check') {
      const matchingFiles = outputFiles.filter(f => globMatch(req.pattern, f));

      // No matching files → the required content cannot exist.
      if (matchingFiles.length === 0) {
        failures.push({
          type: 'content_check',
          description: req.description,
          detail: `Required content "${req.check}" not found in ${req.pattern} files`,
        });
        continue;
      }

      // Without a worktree we cannot read files — best-effort pass (unchanged).
      if (!worktreePath) continue;

      let found = false;
      for (const file of matchingFiles) {
        try {
          const content = fs.readFileSync(path.join(worktreePath, file), 'utf-8');
          if (content.includes(req.check)) {
            found = true;
            break;
          }
        } catch { /* file not readable */ }
      }
      if (!found) {
        failures.push({
          type: 'content_check',
          description: req.description,
          detail: `Required content "${req.check}" not found in ${req.pattern} files`,
        });
      }
    }
  }

  return { passed: failures.length === 0, failures };
}
|
|
205
|
+
|
|
206
|
+
/**
 * Check output against role's forbidden_patterns.
 * Each pattern is compiled with 'gm' flags and run against every in-scope
 * output file (optionally narrowed by the pattern's `in` glob). Files that
 * cannot be read are skipped. Without a worktree path nothing is scanned.
 *
 * @param {object} role — role profile
 * @param {string[]} outputFiles — files created/modified
 * @param {string} worktreePath — path to task worktree
 * @returns {{ passed: boolean, violations: Array<{pattern, in, description, file, match}> }}
 */
function checkForbiddenPatterns(role, outputFiles, worktreePath) {
  if (!role || !role.forbidden_patterns) return { passed: true, violations: [] };

  const { globMatch } = require('./rule-loader');
  const violations = [];

  for (const fp of role.forbidden_patterns) {
    const regex = new RegExp(fp.pattern, 'gm');
    const scoped = fp.in
      ? outputFiles.filter(f => globMatch(fp.in, f))
      : outputFiles;

    if (!worktreePath) continue; // nothing to read from

    for (const file of scoped) {
      let content;
      try {
        content = fs.readFileSync(path.join(worktreePath, file), 'utf-8');
      } catch {
        continue; // skip unreadable
      }
      const hits = content.match(regex);
      if (!hits) continue;
      violations.push({
        pattern: fp.pattern,
        in: fp.in,
        description: fp.description,
        file,
        match: hits[0].slice(0, 100),
      });
    }
  }

  return { passed: violations.length === 0, violations };
}
|
|
245
|
+
|
|
246
|
+
/**
 * Find the best-matching role for a set of task scope paths.
 * Each task path contributes at most one point per role (if it matches any
 * of the role's scope_paths globs); the first role with the highest strictly
 * positive score wins.
 *
 * @param {string[]} scopePaths — task scope paths
 * @param {string[]} searchDirs — role directories to search
 * @returns {object|null} best-matching full role profile, or null
 */
function findRoleByScope(scopePaths, searchDirs) {
  if (!scopePaths || scopePaths.length === 0) return null;

  const { globMatch } = require('./rule-loader');

  let best = { role: null, score: 0 };

  for (const { id } of listRoles(searchDirs)) {
    const role = findRole(id, searchDirs);
    if (!role || !role.scope_paths) continue;

    // Count task paths covered by any of this role's scope globs.
    const score = scopePaths.filter(taskPath =>
      role.scope_paths.some(pattern => globMatch(pattern, taskPath))
    ).length;

    if (score > best.score) best = { role, score };
  }

  return best.role;
}
|
|
282
|
+
|
|
283
|
+
// Public API: role loading/lookup (loadRole, findRole, findRoleByScope,
// listRoles), structural validation (validateRole), prompt formatting
// (formatRoleForPrompt), and post-completion output checks
// (validateRequiredOutputs, checkForbiddenPatterns).
module.exports = {
  loadRole,
  findRole,
  findRoleByScope,
  listRoles,
  validateRole,
  formatRoleForPrompt,
  validateRequiredOutputs,
  checkForbiddenPatterns,
};
|