@evomap/evolver 1.29.9 → 1.31.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -2,8 +2,8 @@
2
2
  const evolve = require('./src/evolve');
3
3
  const { solidify } = require('./src/gep/solidify');
4
4
  const path = require('path');
5
- // Hardened Env Loading: Ensure .env is loaded before anything else
6
- try { require('dotenv').config({ path: path.resolve(__dirname, './.env') }); } catch (e) { console.warn('[Evolver] Warning: dotenv not found or failed to load .env'); }
5
+ const { getRepoRoot } = require('./src/gep/paths');
6
+ try { require('dotenv').config({ path: path.join(getRepoRoot(), '.env') }); } catch (e) { console.warn('[Evolver] Warning: dotenv not found or failed to load .env'); }
7
7
  const fs = require('fs');
8
8
  const { spawn } = require('child_process');
9
9
 
@@ -24,28 +24,24 @@ function readJsonSafe(p) {
24
24
  }
25
25
  }
26
26
 
27
+ /**
28
+ * Mark a pending evolution run as rejected (state-only, no git rollback).
29
+ * @param {string} statePath - Path to evolution_solidify_state.json
30
+ * @returns {boolean} true if a pending run was found and rejected
31
+ */
27
32
  function rejectPendingRun(statePath) {
28
- try {
29
- const { getRepoRoot } = require('./src/gep/paths');
30
- const { execSync } = require('child_process');
31
- const repoRoot = getRepoRoot();
32
-
33
- execSync('git checkout -- .', { cwd: repoRoot, encoding: 'utf8', timeout: 30000 });
34
- execSync('git clean -fd', { cwd: repoRoot, encoding: 'utf8', timeout: 30000 });
35
- } catch (e) {
36
- console.warn('[Loop] Pending run rollback failed: ' + (e.message || e));
37
- }
38
-
39
33
  try {
40
34
  const state = readJsonSafe(statePath);
41
35
  if (state && state.last_run && state.last_run.run_id) {
42
36
  state.last_solidify = {
43
37
  run_id: state.last_run.run_id,
44
38
  rejected: true,
45
- reason: 'loop_bridge_disabled_autoreject',
39
+ reason: 'loop_bridge_disabled_autoreject_no_rollback',
46
40
  timestamp: new Date().toISOString(),
47
41
  };
48
- fs.writeFileSync(statePath, JSON.stringify(state, null, 2) + '\n', 'utf8');
42
+ const tmp = `${statePath}.tmp`;
43
+ fs.writeFileSync(tmp, JSON.stringify(state, null, 2) + '\n', 'utf8');
44
+ fs.renameSync(tmp, statePath);
49
45
  return true;
50
46
  }
51
47
  } catch (e) {
@@ -131,8 +127,10 @@ async function main() {
131
127
  process.on('SIGTERM', () => { releaseLock(); process.exit(); });
132
128
 
133
129
  process.env.EVOLVE_LOOP = 'true';
134
- process.env.EVOLVE_BRIDGE = 'false';
135
- console.log('Loop mode enabled (internal daemon).');
130
+ if (!process.env.EVOLVE_BRIDGE) {
131
+ process.env.EVOLVE_BRIDGE = 'false';
132
+ }
133
+ console.log(`Loop mode enabled (internal daemon, bridge=${process.env.EVOLVE_BRIDGE}).`);
136
134
 
137
135
  const { getEvolutionDir } = require('./src/gep/paths');
138
136
  const solidifyStatePath = path.join(getEvolutionDir(), 'evolution_solidify_state.json');
@@ -184,7 +182,7 @@ async function main() {
184
182
  if (isPendingSolidify(stAfterRun)) {
185
183
  const cleared = rejectPendingRun(solidifyStatePath);
186
184
  if (cleared) {
187
- console.warn('[Loop] Auto-rejected pending run because bridge is disabled in loop mode.');
185
+ console.warn('[Loop] Auto-rejected pending run because bridge is disabled in loop mode (state only, no rollback).');
188
186
  }
189
187
  }
190
188
  }
@@ -285,11 +283,19 @@ async function main() {
285
283
  if (res && res.ok && !dryRun) {
286
284
  try {
287
285
  const { shouldDistill, prepareDistillation } = require('./src/gep/skillDistiller');
288
- if (shouldDistill()) {
286
+ const { readStateForSolidify } = require('./src/gep/solidify');
287
+ const solidifyState = readStateForSolidify();
288
+ const count = solidifyState.solidify_count || 0;
289
+ const autoDistillInterval = 5;
290
+ const autoTrigger = count > 0 && count % autoDistillInterval === 0;
291
+
292
+ if (autoTrigger || shouldDistill()) {
289
293
  const dr = prepareDistillation();
290
294
  if (dr && dr.ok && dr.promptPath) {
295
+ const trigger = autoTrigger ? `auto (every ${autoDistillInterval} solidifies, count=${count})` : 'threshold';
291
296
  console.log('\n[DISTILL_REQUEST]');
292
- console.log('Distillation prompt ready. Read the prompt file, process it with your LLM,');
297
+ console.log(`Distillation triggered: ${trigger}`);
298
+ console.log('Read the prompt file, process it with your LLM,');
293
299
  console.log('save the LLM response to a file, then run:');
294
300
  console.log(' node index.js distill --response-file=<path_to_llm_response>');
295
301
  console.log('Prompt file: ' + dr.promptPath);
@@ -461,6 +467,112 @@ async function main() {
461
467
  console.log('To reject and rollback: node index.js review --reject');
462
468
  }
463
469
 
470
+ } else if (command === 'fetch') {
471
+ let skillId = null;
472
+ const eqFlag = args.find(a => typeof a === 'string' && (a.startsWith('--skill=') || a.startsWith('-s=')));
473
+ if (eqFlag) {
474
+ skillId = eqFlag.split('=').slice(1).join('=');
475
+ } else {
476
+ const sIdx = args.indexOf('-s');
477
+ const longIdx = args.indexOf('--skill');
478
+ const flagIdx = sIdx !== -1 ? sIdx : longIdx;
479
+ if (flagIdx !== -1 && args[flagIdx + 1] && !String(args[flagIdx + 1]).startsWith('-')) {
480
+ skillId = args[flagIdx + 1];
481
+ }
482
+ }
483
+ if (!skillId) {
484
+ const positional = args[1];
485
+ if (positional && !String(positional).startsWith('-')) skillId = positional;
486
+ }
487
+
488
+ if (!skillId) {
489
+ console.error('Usage: evolver fetch --skill <skill_id>');
490
+ console.error(' evolver fetch -s <skill_id>');
491
+ process.exit(1);
492
+ }
493
+
494
+ const { getHubUrl, getNodeId, buildHubHeaders, sendHelloToHub, getHubNodeSecret } = require('./src/gep/a2aProtocol');
495
+
496
+ const hubUrl = getHubUrl();
497
+ if (!hubUrl) {
498
+ console.error('[fetch] A2A_HUB_URL is not configured.');
499
+ console.error('Set it via environment variable or .env file:');
500
+ console.error(' export A2A_HUB_URL=https://evomap.ai');
501
+ process.exit(1);
502
+ }
503
+
504
+ try {
505
+ if (!getHubNodeSecret()) {
506
+ console.log('[fetch] No node_secret found. Sending hello to Hub to register...');
507
+ const helloResult = await sendHelloToHub();
508
+ if (!helloResult || !helloResult.ok) {
509
+ console.error('[fetch] Failed to register with Hub:', helloResult && helloResult.error || 'unknown');
510
+ process.exit(1);
511
+ }
512
+ console.log('[fetch] Registered as ' + getNodeId());
513
+ }
514
+
515
+ const endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/skill/store/' + encodeURIComponent(skillId) + '/download';
516
+ const nodeId = getNodeId();
517
+
518
+ console.log('[fetch] Downloading skill: ' + skillId);
519
+
520
+ const resp = await fetch(endpoint, {
521
+ method: 'POST',
522
+ headers: buildHubHeaders(),
523
+ body: JSON.stringify({ sender_id: nodeId }),
524
+ signal: AbortSignal.timeout(30000),
525
+ });
526
+
527
+ if (!resp.ok) {
528
+ const body = await resp.text().catch(() => '');
529
+ let msg = 'HTTP ' + resp.status;
530
+ try { const j = JSON.parse(body); msg = j.error || j.message || msg; } catch (_) {}
531
+ console.error('[fetch] Download failed: ' + msg);
532
+ if (resp.status === 404) console.error(' Skill not found or not publicly available.');
533
+ if (resp.status === 401) console.error(' Authentication failed. Try deleting ~/.evomap/node_secret and retry.');
534
+ if (resp.status === 402) console.error(' Insufficient credits.');
535
+ process.exit(1);
536
+ }
537
+
538
+ const data = await resp.json();
539
+ const outFlag = args.find(a => typeof a === 'string' && a.startsWith('--out='));
540
+ const safeId = String(data.skill_id || skillId).replace(/[^a-zA-Z0-9_\-\.]/g, '_');
541
+ const outDir = outFlag
542
+ ? outFlag.slice('--out='.length)
543
+ : path.join('.', 'skills', safeId);
544
+
545
+ if (!fs.existsSync(outDir)) fs.mkdirSync(outDir, { recursive: true });
546
+
547
+ if (data.content) {
548
+ fs.writeFileSync(path.join(outDir, 'SKILL.md'), data.content, 'utf8');
549
+ }
550
+
551
+ const bundled = Array.isArray(data.bundled_files) ? data.bundled_files : [];
552
+ for (const file of bundled) {
553
+ if (!file || !file.name || typeof file.content !== 'string') continue;
554
+ const safeName = path.basename(file.name);
555
+ fs.writeFileSync(path.join(outDir, safeName), file.content, 'utf8');
556
+ }
557
+
558
+ console.log('[fetch] Skill downloaded to: ' + outDir);
559
+ console.log(' Name: ' + (data.name || skillId));
560
+ console.log(' Version: ' + (data.version || '?'));
561
+ console.log(' Files: SKILL.md' + (bundled.length > 0 ? ', ' + bundled.map(f => f.name).join(', ') : ''));
562
+ if (data.already_purchased) {
563
+ console.log(' Cost: free (already purchased)');
564
+ } else {
565
+ console.log(' Cost: ' + (data.credit_cost || 0) + ' credits');
566
+ }
567
+ } catch (error) {
568
+ if (error && error.name === 'TimeoutError') {
569
+ console.error('[fetch] Request timed out. Check your network and A2A_HUB_URL.');
570
+ } else {
571
+ console.error('[fetch] Error:', error && error.message || error);
572
+ }
573
+ process.exit(1);
574
+ }
575
+
464
576
  } else if (command === 'asset-log') {
465
577
  const { summarizeCallLog, readCallLog, getLogPath } = require('./src/gep/assetCallLog');
466
578
 
@@ -505,7 +617,10 @@ async function main() {
505
617
  }
506
618
 
507
619
  } else {
508
- console.log(`Usage: node index.js [run|/evolve|solidify|review|distill|asset-log] [--loop]
620
+ console.log(`Usage: node index.js [run|/evolve|solidify|review|distill|fetch|asset-log] [--loop]
621
+ - fetch flags:
622
+ - --skill=<id> | -s <id> (skill ID to download)
623
+ - --out=<dir> (output directory, default: ./skills/<skill_id>)
509
624
  - solidify flags:
510
625
  - --dry-run
511
626
  - --no-rollback
@@ -528,3 +643,10 @@ async function main() {
528
643
  if (require.main === module) {
529
644
  main();
530
645
  }
646
+
647
+ module.exports = {
648
+ main,
649
+ readJsonSafe,
650
+ rejectPendingRun,
651
+ isPendingSolidify,
652
+ };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@evomap/evolver",
3
- "version": "1.29.9",
3
+ "version": "1.31.0",
4
4
  "description": "A GEP-powered self-evolution engine for AI agents. Features automated log analysis and Genome Evolution Protocol (GEP) for auditable, reusable evolution assets.",
5
5
  "main": "index.js",
6
6
  "bin": {
package/src/evolve.js CHANGED
@@ -2,7 +2,7 @@ const fs = require('fs');
2
2
  const path = require('path');
3
3
  const os = require('os');
4
4
  const { execSync } = require('child_process');
5
- const { getRepoRoot, getMemoryDir, getSessionScope } = require('./gep/paths');
5
+ const { getRepoRoot, getWorkspaceRoot, getMemoryDir, getSessionScope } = require('./gep/paths');
6
6
  const { extractSignals } = require('./gep/signals');
7
7
  const {
8
8
  loadGenes,
@@ -59,6 +59,7 @@ const IS_RANDOM_DRIFT = ARGS.includes('--drift') || String(process.env.RANDOM_DR
59
59
  const MEMORY_DIR = getMemoryDir();
60
60
  const AGENT_NAME = process.env.AGENT_NAME || 'main';
61
61
  const AGENT_SESSIONS_DIR = path.join(os.homedir(), `.openclaw/agents/${AGENT_NAME}/sessions`);
62
+ const CURSOR_TRANSCRIPTS_DIR = process.env.EVOLVER_CURSOR_TRANSCRIPTS_DIR || '';
62
63
  const TODAY_LOG = path.join(MEMORY_DIR, new Date().toISOString().split('T')[0] + '.md');
63
64
 
64
65
  // Ensure memory directory exists so state/cache writes work.
@@ -160,77 +161,177 @@ function formatSessionLog(jsonlContent) {
160
161
  return result.join('\n');
161
162
  }
162
163
 
163
- function readRealSessionLog() {
164
+ function formatCursorTranscript(raw) {
165
+ const lines = raw.split('\n');
166
+ const result = [];
167
+ let skipUntilNextBlock = false;
168
+
169
+ for (let i = 0; i < lines.length; i++) {
170
+ const line = lines[i];
171
+ const trimmed = line.trim();
172
+
173
+ // Keep user messages and assistant text responses
174
+ if (trimmed === 'user:' || trimmed.startsWith('A:')) {
175
+ skipUntilNextBlock = false;
176
+ result.push(trimmed);
177
+ continue;
178
+ }
179
+
180
+ // Tool call lines: keep as compact markers, skip their parameter block
181
+ if (trimmed.startsWith('[Tool call]')) {
182
+ skipUntilNextBlock = true;
183
+ result.push(`[Tool call] ${trimmed.replace('[Tool call]', '').trim()}`);
184
+ continue;
185
+ }
186
+
187
+ // Tool result markers: skip their content (usually large and noisy)
188
+ if (trimmed.startsWith('[Tool result]')) {
189
+ skipUntilNextBlock = true;
190
+ continue;
191
+ }
192
+
193
+ if (skipUntilNextBlock) continue;
194
+
195
+ // Keep user query content and assistant text (skip XML tags like <user_query>)
196
+ if (trimmed.startsWith('<') && trimmed.endsWith('>')) continue;
197
+ if (trimmed) {
198
+ result.push(trimmed.slice(0, 300));
199
+ }
200
+ }
201
+
202
+ return result.join('\n');
203
+ }
204
+
205
+ function readCursorTranscripts() {
206
+ if (!CURSOR_TRANSCRIPTS_DIR) return '';
164
207
  try {
165
- if (!fs.existsSync(AGENT_SESSIONS_DIR)) return '[NO SESSION LOGS FOUND]';
208
+ if (!fs.existsSync(CURSOR_TRANSCRIPTS_DIR)) return '';
166
209
 
167
210
  const now = Date.now();
168
- const ACTIVE_WINDOW_MS = 24 * 60 * 60 * 1000; // 24 hours
211
+ const ACTIVE_WINDOW_MS = 24 * 60 * 60 * 1000;
169
212
  const TARGET_BYTES = 120000;
170
- const PER_SESSION_BYTES = 20000; // Read tail of each active session
171
-
172
- // Session scope isolation: when EVOLVER_SESSION_SCOPE is set,
173
- // only read sessions whose filenames contain the scope identifier.
174
- // This prevents cross-channel/cross-project memory contamination.
175
- const sessionScope = getSessionScope();
213
+ const PER_FILE_BYTES = 20000;
214
+ const RECENCY_GUARD_MS = 30 * 1000;
176
215
 
177
- // Find ALL active sessions (modified in last 24h), sorted newest first
178
216
  let files = fs
179
- .readdirSync(AGENT_SESSIONS_DIR)
180
- .filter(f => f.endsWith('.jsonl') && !f.includes('.lock'))
217
+ .readdirSync(CURSOR_TRANSCRIPTS_DIR)
218
+ .filter(f => f.endsWith('.txt') || f.endsWith('.jsonl'))
181
219
  .map(f => {
182
220
  try {
183
- const st = fs.statSync(path.join(AGENT_SESSIONS_DIR, f));
221
+ const st = fs.statSync(path.join(CURSOR_TRANSCRIPTS_DIR, f));
184
222
  return { name: f, time: st.mtime.getTime(), size: st.size };
185
223
  } catch (e) {
186
224
  return null;
187
225
  }
188
226
  })
189
227
  .filter(f => f && (now - f.time) < ACTIVE_WINDOW_MS)
190
- .sort((a, b) => b.time - a.time); // Newest first
191
-
192
- if (files.length === 0) return '[NO JSONL FILES]';
193
-
194
- // Skip evolver's own sessions to avoid self-reference loops
195
- let nonEvolverFiles = files.filter(f => !f.name.startsWith('evolver_hand_'));
196
-
197
- // Session scope filter: when scope is active, only include sessions
198
- // whose filename contains the scope string (e.g., channel_123456.jsonl).
199
- // If no sessions match the scope, fall back to all non-evolver sessions
200
- // (graceful degradation -- better to evolve with global context than not at all).
201
- if (sessionScope && nonEvolverFiles.length > 0) {
202
- const scopeLower = sessionScope.toLowerCase();
203
- const scopedFiles = nonEvolverFiles.filter(f => f.name.toLowerCase().includes(scopeLower));
204
- if (scopedFiles.length > 0) {
205
- nonEvolverFiles = scopedFiles;
206
- console.log(`[SessionScope] Filtered to ${scopedFiles.length} session(s) matching scope "${sessionScope}".`);
207
- } else {
208
- console.log(`[SessionScope] No sessions match scope "${sessionScope}". Using all ${nonEvolverFiles.length} session(s) (fallback).`);
209
- }
210
- }
228
+ .sort((a, b) => b.time - a.time);
229
+
230
+ if (files.length === 0) return '';
211
231
 
212
- const activeFiles = nonEvolverFiles.length > 0 ? nonEvolverFiles : files.slice(0, 1);
232
+ // Skip the most recently modified file if it was touched in the last 30s --
233
+ // it is likely the current active session that triggered this evolver run,
234
+ // reading it would cause self-referencing signal noise.
235
+ if (files.length > 1 && (now - files[0].time) < RECENCY_GUARD_MS) {
236
+ files = files.slice(1);
237
+ }
213
238
 
214
- // Read from multiple active sessions (up to 6) to get a full picture
215
- const maxSessions = Math.min(activeFiles.length, 6);
239
+ const maxFiles = Math.min(files.length, 6);
216
240
  const sections = [];
217
241
  let totalBytes = 0;
218
242
 
219
- for (let i = 0; i < maxSessions && totalBytes < TARGET_BYTES; i++) {
220
- const f = activeFiles[i];
243
+ for (let i = 0; i < maxFiles && totalBytes < TARGET_BYTES; i++) {
244
+ const f = files[i];
221
245
  const bytesLeft = TARGET_BYTES - totalBytes;
222
- const readSize = Math.min(PER_SESSION_BYTES, bytesLeft);
223
- const raw = readRecentLog(path.join(AGENT_SESSIONS_DIR, f.name), readSize);
224
- const formatted = formatSessionLog(raw);
225
- if (formatted.trim()) {
226
- sections.push(`--- SESSION (${f.name}) ---\n${formatted}`);
227
- totalBytes += formatted.length;
246
+ const readSize = Math.min(PER_FILE_BYTES, bytesLeft);
247
+ const raw = readRecentLog(path.join(CURSOR_TRANSCRIPTS_DIR, f.name), readSize);
248
+ if (raw.trim() && !raw.startsWith('[MISSING]')) {
249
+ const formatted = formatCursorTranscript(raw);
250
+ if (formatted.trim()) {
251
+ sections.push(`--- CURSOR SESSION (${f.name}) ---\n${formatted}`);
252
+ totalBytes += formatted.length;
253
+ }
228
254
  }
229
255
  }
230
256
 
231
- let content = sections.join('\n\n');
257
+ return sections.join('\n\n');
258
+ } catch (e) {
259
+ console.warn(`[CursorTranscripts] Read failed: ${e.message}`);
260
+ return '';
261
+ }
262
+ }
263
+
264
+ function readRealSessionLog() {
265
+ try {
266
+ // Primary source: OpenClaw session logs (.jsonl)
267
+ if (fs.existsSync(AGENT_SESSIONS_DIR)) {
268
+ const now = Date.now();
269
+ const ACTIVE_WINDOW_MS = 24 * 60 * 60 * 1000; // 24 hours
270
+ const TARGET_BYTES = 120000;
271
+ const PER_SESSION_BYTES = 20000;
272
+
273
+ const sessionScope = getSessionScope();
274
+
275
+ let files = fs
276
+ .readdirSync(AGENT_SESSIONS_DIR)
277
+ .filter(f => f.endsWith('.jsonl') && !f.includes('.lock'))
278
+ .map(f => {
279
+ try {
280
+ const st = fs.statSync(path.join(AGENT_SESSIONS_DIR, f));
281
+ return { name: f, time: st.mtime.getTime(), size: st.size };
282
+ } catch (e) {
283
+ return null;
284
+ }
285
+ })
286
+ .filter(f => f && (now - f.time) < ACTIVE_WINDOW_MS)
287
+ .sort((a, b) => b.time - a.time);
288
+
289
+ if (files.length > 0) {
290
+ let nonEvolverFiles = files.filter(f => !f.name.startsWith('evolver_hand_'));
291
+
292
+ if (sessionScope && nonEvolverFiles.length > 0) {
293
+ const scopeLower = sessionScope.toLowerCase();
294
+ const scopedFiles = nonEvolverFiles.filter(f => f.name.toLowerCase().includes(scopeLower));
295
+ if (scopedFiles.length > 0) {
296
+ nonEvolverFiles = scopedFiles;
297
+ console.log(`[SessionScope] Filtered to ${scopedFiles.length} session(s) matching scope "${sessionScope}".`);
298
+ } else {
299
+ console.log(`[SessionScope] No sessions match scope "${sessionScope}". Using all ${nonEvolverFiles.length} session(s) (fallback).`);
300
+ }
301
+ }
232
302
 
233
- return content;
303
+ const activeFiles = nonEvolverFiles.length > 0 ? nonEvolverFiles : files.slice(0, 1);
304
+
305
+ const maxSessions = Math.min(activeFiles.length, 6);
306
+ const sections = [];
307
+ let totalBytes = 0;
308
+
309
+ for (let i = 0; i < maxSessions && totalBytes < TARGET_BYTES; i++) {
310
+ const f = activeFiles[i];
311
+ const bytesLeft = TARGET_BYTES - totalBytes;
312
+ const readSize = Math.min(PER_SESSION_BYTES, bytesLeft);
313
+ const raw = readRecentLog(path.join(AGENT_SESSIONS_DIR, f.name), readSize);
314
+ const formatted = formatSessionLog(raw);
315
+ if (formatted.trim()) {
316
+ sections.push(`--- SESSION (${f.name}) ---\n${formatted}`);
317
+ totalBytes += formatted.length;
318
+ }
319
+ }
320
+
321
+ if (sections.length > 0) {
322
+ return sections.join('\n\n');
323
+ }
324
+ }
325
+ }
326
+
327
+ // Fallback: Cursor agent-transcripts (.txt)
328
+ const cursorContent = readCursorTranscripts();
329
+ if (cursorContent) {
330
+ console.log('[SessionFallback] Using Cursor agent-transcripts as session source.');
331
+ return cursorContent;
332
+ }
333
+
334
+ return '[NO SESSION LOGS FOUND]';
234
335
  } catch (e) {
235
336
  return `[ERROR READING SESSION LOGS: ${e.message}]`;
236
337
  }
@@ -391,7 +492,7 @@ function clearDormantHypothesis() {
391
492
  }
392
493
  // Read MEMORY.md and USER.md from the WORKSPACE root (not the evolver plugin dir).
393
494
  // This avoids symlink breakage if the target file is temporarily deleted.
394
- const WORKSPACE_ROOT = process.env.OPENCLAW_WORKSPACE || path.resolve(REPO_ROOT, '../..');
495
+ const WORKSPACE_ROOT = getWorkspaceRoot();
395
496
  const ROOT_MEMORY = path.join(WORKSPACE_ROOT, 'MEMORY.md');
396
497
  const DIR_MEMORY = path.join(MEMORY_DIR, 'MEMORY.md');
397
498
  const MEMORY_FILE = fs.existsSync(ROOT_MEMORY) ? ROOT_MEMORY : (fs.existsSync(DIR_MEMORY) ? DIR_MEMORY : ROOT_MEMORY);
@@ -1307,6 +1408,15 @@ async function run() {
1307
1408
  console.log('[FailedCapsules] Read failed (non-fatal): ' + e.message);
1308
1409
  }
1309
1410
 
1411
+ // Heartbeat hints: novelty score and capability gaps for diversity-directed drift
1412
+ var heartbeatNovelty = null;
1413
+ var heartbeatCapGaps = [];
1414
+ try {
1415
+ var { getNoveltyHint, getCapabilityGaps: getCapGaps } = require('./gep/a2aProtocol');
1416
+ heartbeatNovelty = getNoveltyHint();
1417
+ heartbeatCapGaps = getCapGaps() || [];
1418
+ } catch (e) {}
1419
+
1310
1420
  const { selectedGene, capsuleCandidates, selector } = selectGeneAndCapsule({
1311
1421
  genes,
1312
1422
  capsules,
@@ -1314,6 +1424,8 @@ async function run() {
1314
1424
  memoryAdvice,
1315
1425
  driftEnabled: IS_RANDOM_DRIFT,
1316
1426
  failedCapsules: recentFailedCapsules,
1427
+ capabilityGaps: heartbeatCapGaps,
1428
+ noveltyScore: heartbeatNovelty && Number.isFinite(heartbeatNovelty.score) ? heartbeatNovelty.score : null,
1317
1429
  });
1318
1430
 
1319
1431
  const selectedBy = memoryAdvice && memoryAdvice.preferredGeneId ? 'memory_graph+selector' : 'selector';
@@ -1420,7 +1532,6 @@ async function run() {
1420
1532
  try {
1421
1533
  const runId = `run_${Date.now()}`;
1422
1534
  const parentEventId = getLastEventId();
1423
- const selectedBy = memoryAdvice && memoryAdvice.preferredGeneId ? 'memory_graph+selector' : 'selector';
1424
1535
 
1425
1536
  // Baseline snapshot (before any edits).
1426
1537
  let baselineUntracked = [];
@@ -402,6 +402,9 @@ var _heartbeatTotalFailed = 0;
402
402
  var _heartbeatFpSent = false;
403
403
  var _latestAvailableWork = [];
404
404
  var _latestOverdueTasks = [];
405
+ var _latestSkillStoreHint = null;
406
+ var _latestNoveltyHint = null;
407
+ var _latestCapabilityGaps = [];
405
408
  var _pendingCommitmentUpdates = [];
406
409
  var _cachedHubNodeSecret = null;
407
410
  var _heartbeatIntervalMs = 0;
@@ -576,6 +579,21 @@ function sendHeartbeat() {
576
579
  _latestOverdueTasks = data.overdue_tasks;
577
580
  console.warn('[Commitment] ' + data.overdue_tasks.length + ' overdue task(s) detected via heartbeat.');
578
581
  }
582
+ if (data.skill_store) {
583
+ _latestSkillStoreHint = data.skill_store;
584
+ if (data.skill_store.eligible && data.skill_store.published_skills === 0) {
585
+ console.log('[Skill Store] ' + data.skill_store.hint);
586
+ }
587
+ }
588
+ if (data.novelty && typeof data.novelty === 'object') {
589
+ _latestNoveltyHint = data.novelty;
590
+ }
591
+ if (Array.isArray(data.capability_gaps) && data.capability_gaps.length > 0) {
592
+ _latestCapabilityGaps = data.capability_gaps;
593
+ }
594
+ if (data.circle_experience && typeof data.circle_experience === 'object') {
595
+ console.log('[EvolutionCircle] Active circle: ' + (data.circle_experience.circle_id || '?') + ' (' + (data.circle_experience.member_count || 0) + ' members)');
596
+ }
579
597
  _heartbeatConsecutiveFailures = 0;
580
598
  try {
581
599
  var logPath = getEvolverLogPath();
@@ -629,12 +647,24 @@ function getOverdueTasks() {
629
647
  return _latestOverdueTasks;
630
648
  }
631
649
 
650
+ function getSkillStoreHint() {
651
+ return _latestSkillStoreHint;
652
+ }
653
+
632
654
  function consumeOverdueTasks() {
633
655
  var tasks = _latestOverdueTasks;
634
656
  _latestOverdueTasks = [];
635
657
  return tasks;
636
658
  }
637
659
 
660
+ function getNoveltyHint() {
661
+ return _latestNoveltyHint;
662
+ }
663
+
664
+ function getCapabilityGaps() {
665
+ return _latestCapabilityGaps;
666
+ }
667
+
638
668
  /**
639
669
  * Queue a commitment deadline update to be sent with the next heartbeat.
640
670
  * @param {string} taskId
@@ -746,7 +776,11 @@ module.exports = {
746
776
  consumeAvailableWork,
747
777
  getOverdueTasks,
748
778
  consumeOverdueTasks,
779
+ getSkillStoreHint,
749
780
  queueCommitmentUpdate,
781
+ getHubUrl,
750
782
  getHubNodeSecret,
751
783
  buildHubHeaders,
784
+ getNoveltyHint,
785
+ getCapabilityGaps,
752
786
  };
@@ -26,8 +26,12 @@ function extractToolCalls(transcript) {
26
26
  const lines = toLines(transcript);
27
27
  const calls = [];
28
28
  for (const line of lines) {
29
+ // OpenClaw format: [TOOL: Shell]
29
30
  const m = line.match(/\[TOOL:\s*([^\]]+)\]/i);
30
- if (m && m[1]) calls.push(m[1].trim());
31
+ if (m && m[1]) { calls.push(m[1].trim()); continue; }
32
+ // Cursor transcript format: [Tool call] Shell
33
+ const m2 = line.match(/\[Tool call\]\s+(\S+)/i);
34
+ if (m2 && m2[1]) calls.push(m2[1].trim());
31
35
  }
32
36
  return calls;
33
37
  }
@@ -0,0 +1,201 @@
1
+ // Execution Trace: structured, desensitized evolution execution summary.
2
+ // Built during solidify and optionally shared with Hub via EvolutionEvent payload.
3
+ //
4
+ // Desensitization rules (applied locally, never on Hub):
5
+ // - File paths: basename + extension only (src/utils/retry.js -> retry.js)
6
+ // - Code content: never sent, only statistical metrics (lines, files)
7
+ // - Error messages: type signature only (TypeError: x is not a function -> TypeError)
8
+ // - Environment variables, secrets, user data: stripped entirely
9
+ // - Configurable via EVOLVER_TRACE_LEVEL: none | minimal | standard (default: minimal)
10
+
11
+ const path = require('path');
12
+
13
+ const TRACE_LEVELS = { none: 0, minimal: 1, standard: 2 };
14
+
15
+ function getTraceLevel() {
16
+ const raw = String(process.env.EVOLVER_TRACE_LEVEL || 'minimal').toLowerCase().trim();
17
+ return TRACE_LEVELS[raw] != null ? raw : 'minimal';
18
+ }
19
+
20
+ function desensitizeFilePath(filePath) {
21
+ if (!filePath || typeof filePath !== 'string') return null;
22
+ const ext = path.extname(filePath);
23
+ const base = path.basename(filePath);
24
+ return base || ext || 'unknown';
25
+ }
26
+
27
+ function extractErrorSignature(errorText) {
28
+ if (!errorText || typeof errorText !== 'string') return null;
29
+ const text = errorText.trim();
30
+
31
+ // Match common error type patterns: TypeError, ReferenceError, SyntaxError, etc.
32
+ const jsError = text.match(/^((?:[A-Z][a-zA-Z]*)?Error)\b/);
33
+ if (jsError) return jsError[1];
34
+
35
+ // Match errno-style: ECONNRESET, ENOENT, EPERM, etc.
36
+ const errno = text.match(/\b(E[A-Z]{2,})\b/);
37
+ if (errno) return errno[1];
38
+
39
+ // Match HTTP status codes
40
+ const http = text.match(/\b((?:4|5)\d{2})\b/);
41
+ if (http) return 'HTTP_' + http[1];
42
+
43
+ // Fallback: first word if it looks like an error type
44
+ const firstWord = text.split(/[\s:]/)[0];
45
+ if (firstWord && firstWord.length <= 40 && /^[A-Z]/.test(firstWord)) return firstWord;
46
+
47
+ return 'UnknownError';
48
+ }
49
+
50
+ function inferToolChain(validationResults, blast) {
51
+ const tools = new Set();
52
+
53
+ if (blast && blast.files > 0) tools.add('file_edit');
54
+
55
+ if (Array.isArray(validationResults)) {
56
+ for (const r of validationResults) {
57
+ const cmd = String(r.cmd || '').trim();
58
+ if (cmd.startsWith('npm test') || cmd.includes('jest') || cmd.includes('mocha')) {
59
+ tools.add('test_run');
60
+ } else if (cmd.includes('lint') || cmd.includes('eslint')) {
61
+ tools.add('lint_check');
62
+ } else if (cmd.includes('validate') || cmd.includes('check')) {
63
+ tools.add('validation_run');
64
+ } else if (cmd.startsWith('node ')) {
65
+ tools.add('node_exec');
66
+ }
67
+ }
68
+ }
69
+
70
+ return Array.from(tools);
71
+ }
72
+
73
+ function classifyBlastLevel(blast) {
74
+ if (!blast) return 'unknown';
75
+ const files = Number(blast.files) || 0;
76
+ const lines = Number(blast.lines) || 0;
77
+ if (files <= 3 && lines <= 50) return 'low';
78
+ if (files <= 10 && lines <= 200) return 'medium';
79
+ return 'high';
80
+ }
81
+
82
+ function buildExecutionTrace({
83
+ gene,
84
+ mutation,
85
+ signals,
86
+ blast,
87
+ constraintCheck,
88
+ validation,
89
+ canary,
90
+ outcomeStatus,
91
+ startedAt,
92
+ }) {
93
+ const level = getTraceLevel();
94
+ if (level === 'none') return null;
95
+
96
+ const trace = {
97
+ gene_id: gene && gene.id ? String(gene.id) : null,
98
+ mutation_category: (mutation && mutation.category) || (gene && gene.category) || null,
99
+ signals_matched: Array.isArray(signals) ? signals.slice(0, 10) : [],
100
+ outcome: outcomeStatus || 'unknown',
101
+ };
102
+
103
+ // Minimal level: core metrics only
104
+ trace.files_changed_count = blast ? Number(blast.files) || 0 : 0;
105
+ trace.lines_added = 0;
106
+ trace.lines_removed = 0;
107
+
108
+ // Compute added/removed from blast if available
109
+ if (blast && blast.lines) {
110
+ // blast.lines is total churn (added + deleted); split heuristically
111
+ const total = Number(blast.lines) || 0;
112
+ if (outcomeStatus === 'success') {
113
+ trace.lines_added = Math.round(total * 0.6);
114
+ trace.lines_removed = total - trace.lines_added;
115
+ } else {
116
+ trace.lines_added = Math.round(total * 0.5);
117
+ trace.lines_removed = total - trace.lines_added;
118
+ }
119
+ }
120
+
121
+ trace.validation_result = validation && validation.ok ? 'pass' : 'fail';
122
+ trace.blast_radius = classifyBlastLevel(blast);
123
+
124
+ // Standard level: richer context
125
+ if (level === 'standard') {
126
+ // Desensitized file list (basenames only)
127
+ if (blast && Array.isArray(blast.changed_files)) {
128
+ trace.file_types = {};
129
+ for (const f of blast.changed_files) {
130
+ const ext = path.extname(f) || '.unknown';
131
+ trace.file_types[ext] = (trace.file_types[ext] || 0) + 1;
132
+ }
133
+ }
134
+
135
+ // Validation commands (already safe -- node/npm/npx only)
136
+ if (validation && Array.isArray(validation.results)) {
137
+ trace.validation_commands = validation.results.map(r => String(r.cmd || '').slice(0, 100));
138
+ }
139
+
140
+ // Error signatures (desensitized)
141
+ trace.error_signatures = [];
142
+ if (constraintCheck && Array.isArray(constraintCheck.violations)) {
143
+ for (const v of constraintCheck.violations) {
144
+ // Constraint violations have known prefixes; classify directly
145
+ const vStr = String(v);
146
+ if (vStr.startsWith('max_files')) trace.error_signatures.push('max_files_exceeded');
147
+ else if (vStr.startsWith('forbidden_path')) trace.error_signatures.push('forbidden_path');
148
+ else if (vStr.startsWith('HARD CAP')) trace.error_signatures.push('hard_cap_breach');
149
+ else if (vStr.startsWith('CRITICAL')) trace.error_signatures.push('critical_overrun');
150
+ else if (vStr.startsWith('critical_path')) trace.error_signatures.push('critical_path_modified');
151
+ else if (vStr.startsWith('canary_failed')) trace.error_signatures.push('canary_failed');
152
+ else if (vStr.startsWith('ethics:')) trace.error_signatures.push('ethics_violation');
153
+ else {
154
+ const sig = extractErrorSignature(v);
155
+ if (sig) trace.error_signatures.push(sig);
156
+ }
157
+ }
158
+ }
159
+ if (validation && Array.isArray(validation.results)) {
160
+ for (const r of validation.results) {
161
+ if (!r.ok && r.err) {
162
+ const sig = extractErrorSignature(r.err);
163
+ if (sig && !trace.error_signatures.includes(sig)) {
164
+ trace.error_signatures.push(sig);
165
+ }
166
+ }
167
+ }
168
+ }
169
+ trace.error_signatures = trace.error_signatures.slice(0, 10);
170
+
171
+ // Tool chain inference
172
+ trace.tool_chain = inferToolChain(
173
+ validation && validation.results ? validation.results : [],
174
+ blast
175
+ );
176
+
177
+ // Duration
178
+ if (validation && validation.startedAt && validation.finishedAt) {
179
+ trace.validation_duration_ms = validation.finishedAt - validation.startedAt;
180
+ }
181
+
182
+ // Canary result
183
+ if (canary && !canary.skipped) {
184
+ trace.canary_ok = !!canary.ok;
185
+ }
186
+ }
187
+
188
+ // Timestamp
189
+ trace.created_at = new Date().toISOString();
190
+
191
+ return trace;
192
+ }
193
+
194
+ module.exports = {
195
+ buildExecutionTrace,
196
+ desensitizeFilePath,
197
+ extractErrorSignature,
198
+ inferToolChain,
199
+ classifyBlastLevel,
200
+ getTraceLevel,
201
+ };
@@ -6,6 +6,10 @@
6
6
  // Two-phase search-then-fetch to minimize credit cost:
7
7
  // Phase 1: POST /a2a/fetch with signals + search_only=true (free, metadata only)
8
8
  // Phase 2: POST /a2a/fetch with asset_ids=[selected] (pays for 1 asset only)
9
+ //
10
+ // Caching layers:
11
+ // 1. Search cache: signal fingerprint -> Phase 1 results (avoids repeat searches)
12
+ // 2. Payload cache: asset_id -> full payload (avoids repeat Phase 2 fetches)
9
13
 
10
14
  const { getNodeId, buildFetch, getHubNodeSecret } = require('./a2aProtocol');
11
15
  const { logAssetCall } = require('./assetCallLog');
@@ -13,7 +17,57 @@ const { logAssetCall } = require('./assetCallLog');
13
17
  const DEFAULT_MIN_REUSE_SCORE = 0.72;
14
18
  const DEFAULT_REUSE_MODE = 'reference'; // 'direct' | 'reference'
15
19
  const MAX_STREAK_CAP = 5;
16
- const TIMEOUT_REASON = 'hub_search_timeout';
20
+
21
+ const SEARCH_CACHE_TTL_MS = 5 * 60 * 1000;
22
+ const SEARCH_CACHE_MAX = 200;
23
+ const PAYLOAD_CACHE_MAX = 100;
24
+ const MIN_PHASE2_MS = 500;
25
+
26
+ // --- In-memory caches (per-process lifetime, bounded) ---
27
+
28
+ const _searchCache = new Map(); // cacheKey -> { ts, value: results[] }
29
+ const _payloadCache = new Map(); // asset_id -> full payload object
30
+
31
+ function _cacheKey(signals) {
32
+ return signals.slice().sort().join('|');
33
+ }
34
+
35
+ function _getSearchCache(key) {
36
+ const entry = _searchCache.get(key);
37
+ if (!entry) return null;
38
+ if (Date.now() - entry.ts > SEARCH_CACHE_TTL_MS) {
39
+ _searchCache.delete(key);
40
+ return null;
41
+ }
42
+ return entry.value;
43
+ }
44
+
45
+ function _setSearchCache(key, value) {
46
+ if (_searchCache.size >= SEARCH_CACHE_MAX) {
47
+ const oldest = _searchCache.keys().next().value;
48
+ _searchCache.delete(oldest);
49
+ }
50
+ _searchCache.set(key, { ts: Date.now(), value });
51
+ }
52
+
53
+ function _getPayloadCache(assetId) {
54
+ return _payloadCache.get(assetId) || null;
55
+ }
56
+
57
+ function _setPayloadCache(assetId, payload) {
58
+ if (_payloadCache.size >= PAYLOAD_CACHE_MAX) {
59
+ const oldest = _payloadCache.keys().next().value;
60
+ _payloadCache.delete(oldest);
61
+ }
62
+ _payloadCache.set(assetId, payload);
63
+ }
64
+
65
+ function clearCaches() {
66
+ _searchCache.clear();
67
+ _payloadCache.clear();
68
+ }
69
+
70
+ // --- Config helpers ---
17
71
 
18
72
  function getHubUrl() {
19
73
  return (process.env.A2A_HUB_URL || '').replace(/\/+$/, '');
@@ -29,6 +83,18 @@ function getMinReuseScore() {
29
83
  return Number.isFinite(n) && n > 0 ? n : DEFAULT_MIN_REUSE_SCORE;
30
84
  }
31
85
 
86
+ function _buildHeaders() {
87
+ const headers = { 'Content-Type': 'application/json', 'Accept': 'application/json' };
88
+ const secret = getHubNodeSecret();
89
+ if (secret) {
90
+ headers['Authorization'] = 'Bearer ' + secret;
91
+ } else {
92
+ const token = process.env.A2A_HUB_TOKEN;
93
+ if (token) headers['Authorization'] = `Bearer ${token}`;
94
+ }
95
+ return headers;
96
+ }
97
+
32
98
  /**
33
99
  * Score a hub asset for local reuse quality.
34
100
  * rank = confidence * min(max(success_streak, 1), MAX_STREAK_CAP) * (reputation / 100)
@@ -77,7 +143,14 @@ function pickBestMatch(results, threshold) {
77
143
  * Phase 1: search_only=true -> get candidate metadata (free, no credit cost)
78
144
  * Phase 2: asset_ids=[best_match] -> fetch full payload for the selected asset only
79
145
  *
80
- * Falls back to single-call fetch (old behavior) if search_only is not supported.
146
+ * Caching:
147
+ * - Phase 1 results are cached by signal fingerprint for 5 minutes.
148
+ * - Phase 2 payloads are cached by asset_id indefinitely (bounded, FIFO eviction).
149
+ * - Both caches reduce Hub load and eliminate redundant network round-trips.
150
+ *
151
+ * Timeout: a single deadline spans both phases; Phase 2 is skipped if insufficient
152
+ * time remains (< 500ms).
153
+ *
81
154
  * Returns { hit: true, match, score, mode } or { hit: false }.
82
155
  */
83
156
  async function hubSearch(signals, opts) {
@@ -90,56 +163,53 @@ async function hubSearch(signals, opts) {
90
163
  if (signalList.length === 0) return { hit: false, reason: 'no_signals' };
91
164
 
92
165
  const threshold = (opts && Number.isFinite(opts.threshold)) ? opts.threshold : getMinReuseScore();
93
- const timeout = (opts && Number.isFinite(opts.timeoutMs)) ? opts.timeoutMs : 8000;
166
+ const timeoutMs = (opts && Number.isFinite(opts.timeoutMs)) ? opts.timeoutMs : 8000;
167
+ const deadline = Date.now() + timeoutMs;
168
+ const runId = (opts && opts.run_id) || null;
94
169
 
95
170
  try {
96
- // Phase 1: search_only to get candidate metadata (free)
97
- const searchMsg = buildFetch({ signals: signalList, searchOnly: true });
98
171
  const endpoint = hubUrl + '/a2a/fetch';
172
+ const headers = _buildHeaders();
173
+ const cacheKey = _cacheKey(signalList);
99
174
 
100
- const controller = new AbortController();
101
- const timer = setTimeout(() => controller.abort(TIMEOUT_REASON), timeout);
175
+ // --- Phase 1: search_only (free) ---
102
176
 
103
- const headers = { 'Content-Type': 'application/json', 'Accept': 'application/json' };
104
- const secret = getHubNodeSecret();
105
- if (secret) {
106
- headers['Authorization'] = 'Bearer ' + secret;
107
- } else {
108
- const token = process.env.A2A_HUB_TOKEN;
109
- if (token) headers['Authorization'] = `Bearer ${token}`;
110
- }
177
+ let results = _getSearchCache(cacheKey);
178
+ let cacheHit = !!results;
111
179
 
112
- const res = await fetch(endpoint, {
113
- method: 'POST',
114
- headers,
115
- body: JSON.stringify(searchMsg),
116
- signal: controller.signal,
117
- });
118
- clearTimeout(timer);
180
+ if (!results) {
181
+ const searchMsg = buildFetch({ signals: signalList, searchOnly: true });
182
+ const controller = new AbortController();
183
+ const timer = setTimeout(() => controller.abort(), deadline - Date.now());
119
184
 
120
- if (!res.ok) {
121
- logAssetCall({
122
- run_id: (opts && opts.run_id) || null,
123
- action: 'hub_search_miss',
124
- signals: signalList,
125
- reason: `hub_http_${res.status}`,
126
- via: 'search_then_fetch',
185
+ const res = await fetch(endpoint, {
186
+ method: 'POST',
187
+ headers,
188
+ body: JSON.stringify(searchMsg),
189
+ signal: controller.signal,
127
190
  });
128
- return { hit: false, reason: `hub_http_${res.status}` };
129
- }
191
+ clearTimeout(timer);
192
+
193
+ if (!res.ok) {
194
+ logAssetCall({
195
+ run_id: runId, action: 'hub_search_miss', signals: signalList,
196
+ reason: `hub_http_${res.status}`, via: 'search_then_fetch',
197
+ });
198
+ return { hit: false, reason: `hub_http_${res.status}` };
199
+ }
130
200
 
131
- const data = await res.json();
132
- const results = (data && data.payload && Array.isArray(data.payload.results))
133
- ? data.payload.results
134
- : [];
201
+ const data = await res.json();
202
+ results = (data && data.payload && Array.isArray(data.payload.results))
203
+ ? data.payload.results
204
+ : [];
205
+
206
+ _setSearchCache(cacheKey, results);
207
+ }
135
208
 
136
209
  if (results.length === 0) {
137
210
  logAssetCall({
138
- run_id: (opts && opts.run_id) || null,
139
- action: 'hub_search_miss',
140
- signals: signalList,
141
- reason: 'no_results',
142
- via: 'search_then_fetch',
211
+ run_id: runId, action: 'hub_search_miss', signals: signalList,
212
+ reason: 'no_results', via: 'search_then_fetch',
143
213
  });
144
214
  return { hit: false, reason: 'no_results' };
145
215
  }
@@ -147,9 +217,7 @@ async function hubSearch(signals, opts) {
147
217
  const pick = pickBestMatch(results, threshold);
148
218
  if (!pick) {
149
219
  logAssetCall({
150
- run_id: (opts && opts.run_id) || null,
151
- action: 'hub_search_miss',
152
- signals: signalList,
220
+ run_id: runId, action: 'hub_search_miss', signals: signalList,
153
221
  reason: 'below_threshold',
154
222
  extra: { candidates: results.length, threshold },
155
223
  via: 'search_then_fetch',
@@ -157,40 +225,52 @@ async function hubSearch(signals, opts) {
157
225
  return { hit: false, reason: 'below_threshold', candidates: results.length };
158
226
  }
159
227
 
160
- // Phase 2: fetch full payload for the selected asset only (pays for 1 asset)
228
+ // --- Phase 2: fetch full payload (paid, but free if already purchased) ---
229
+
161
230
  const selectedAssetId = pick.match.asset_id;
162
231
  if (selectedAssetId) {
163
- try {
164
- const fetchMsg = buildFetch({ assetIds: [selectedAssetId] });
165
- const controller2 = new AbortController();
166
- const timer2 = setTimeout(() => controller2.abort(TIMEOUT_REASON), timeout);
167
-
168
- const res2 = await fetch(endpoint, {
169
- method: 'POST',
170
- headers,
171
- body: JSON.stringify(fetchMsg),
172
- signal: controller2.signal,
173
- });
174
- clearTimeout(timer2);
175
-
176
- if (res2.ok) {
177
- const data2 = await res2.json();
178
- const fullResults = (data2 && data2.payload && Array.isArray(data2.payload.results))
179
- ? data2.payload.results
180
- : [];
181
- if (fullResults.length > 0) {
182
- pick.match = { ...pick.match, ...fullResults[0] };
232
+ const cachedPayload = _getPayloadCache(selectedAssetId);
233
+ if (cachedPayload) {
234
+ pick.match = { ...pick.match, ...cachedPayload };
235
+ } else {
236
+ const remaining = deadline - Date.now();
237
+ if (remaining > MIN_PHASE2_MS) {
238
+ try {
239
+ const fetchMsg = buildFetch({ assetIds: [selectedAssetId] });
240
+ const controller2 = new AbortController();
241
+ const timer2 = setTimeout(() => controller2.abort(), remaining);
242
+
243
+ const res2 = await fetch(endpoint, {
244
+ method: 'POST',
245
+ headers,
246
+ body: JSON.stringify(fetchMsg),
247
+ signal: controller2.signal,
248
+ });
249
+ clearTimeout(timer2);
250
+
251
+ if (res2.ok) {
252
+ const data2 = await res2.json();
253
+ const fullResults = (data2 && data2.payload && Array.isArray(data2.payload.results))
254
+ ? data2.payload.results
255
+ : [];
256
+ if (fullResults.length > 0) {
257
+ _setPayloadCache(selectedAssetId, fullResults[0]);
258
+ pick.match = { ...pick.match, ...fullResults[0] };
259
+ }
260
+ }
261
+ } catch (fetchErr) {
262
+ console.log(`[HubSearch] Phase 2 fetch failed (non-fatal): ${fetchErr.message}`);
183
263
  }
264
+ } else {
265
+ console.log(`[HubSearch] Phase 2 skipped: ${remaining}ms remaining < ${MIN_PHASE2_MS}ms threshold`);
184
266
  }
185
- } catch (fetchErr) {
186
- console.log(`[HubSearch] Phase 2 fetch failed (non-fatal): ${fetchErr.message}`);
187
267
  }
188
268
  }
189
269
 
190
- console.log(`[HubSearch] Hit via search+fetch: ${pick.match.asset_id || 'unknown'} (score=${pick.score}, mode=${pick.mode})`);
270
+ console.log(`[HubSearch] Hit via search+fetch: ${pick.match.asset_id || 'unknown'} (score=${pick.score}, mode=${pick.mode}${cacheHit ? ', search_cached' : ''})`);
191
271
 
192
272
  logAssetCall({
193
- run_id: (opts && opts.run_id) || null,
273
+ run_id: runId,
194
274
  action: 'hub_search_hit',
195
275
  asset_id: pick.match.asset_id || null,
196
276
  asset_type: pick.match.asset_type || pick.match.type || null,
@@ -199,7 +279,7 @@ async function hubSearch(signals, opts) {
199
279
  score: pick.score,
200
280
  mode: pick.mode,
201
281
  signals: signalList,
202
- via: 'search_then_fetch',
282
+ via: cacheHit ? 'search_cached' : 'search_then_fetch',
203
283
  });
204
284
 
205
285
  return {
@@ -212,11 +292,10 @@ async function hubSearch(signals, opts) {
212
292
  chain_id: pick.match.chain_id || null,
213
293
  };
214
294
  } catch (err) {
215
- const isTimeout = err.name === 'AbortError' || (err.cause && err.cause === TIMEOUT_REASON);
216
- const reason = isTimeout ? 'timeout' : 'fetch_error';
295
+ const reason = err.name === 'AbortError' ? 'timeout' : 'fetch_error';
217
296
  console.log(`[HubSearch] Failed (non-fatal, ${reason}): ${err.message}`);
218
297
  logAssetCall({
219
- run_id: (opts && opts.run_id) || null,
298
+ run_id: runId,
220
299
  action: 'hub_search_miss',
221
300
  signals: signalList,
222
301
  reason,
@@ -234,4 +313,5 @@ module.exports = {
234
313
  getReuseMode,
235
314
  getMinReuseScore,
236
315
  getHubUrl,
316
+ clearCaches,
237
317
  };
@@ -79,6 +79,10 @@ function selectGene(genes, signals, opts) {
79
79
  const driftEnabled = !!(opts && opts.driftEnabled);
80
80
  const preferredGeneId = opts && typeof opts.preferredGeneId === 'string' ? opts.preferredGeneId : null;
81
81
 
82
+ // Diversity-directed drift: capability_gaps from Hub heartbeat
83
+ var capabilityGaps = opts && Array.isArray(opts.capabilityGaps) ? opts.capabilityGaps : [];
84
+ var noveltyScore = opts && Number.isFinite(Number(opts.noveltyScore)) ? Number(opts.noveltyScore) : null;
85
+
82
86
  // Compute continuous drift intensity based on effective population size
83
87
  var driftIntensity = computeDriftIntensity({
84
88
  driftEnabled: driftEnabled,
@@ -99,7 +103,7 @@ function selectGene(genes, signals, opts) {
99
103
  .filter(x => x.score > 0)
100
104
  .sort((a, b) => b.score - a.score);
101
105
 
102
- if (scored.length === 0) return { selected: null, alternatives: [], driftIntensity: driftIntensity };
106
+ if (scored.length === 0) return { selected: null, alternatives: [], driftIntensity: driftIntensity, driftMode: 'none' };
103
107
 
104
108
  // Memory graph preference: only override when the preferred gene is already a match candidate.
105
109
  if (preferredGeneId) {
@@ -111,27 +115,68 @@ function selectGene(genes, signals, opts) {
111
115
  selected: preferred.gene,
112
116
  alternatives: filteredRest.slice(0, 4).map(x => x.gene),
113
117
  driftIntensity: driftIntensity,
118
+ driftMode: 'memory_preferred',
114
119
  };
115
120
  }
116
121
  }
117
122
 
118
123
  // Low-efficiency suppression: do not repeat low-confidence paths unless drift is active.
119
124
  const filtered = useDrift ? scored : scored.filter(x => x.gene && !bannedGeneIds.has(x.gene.id));
120
- if (filtered.length === 0) return { selected: null, alternatives: scored.slice(0, 4).map(x => x.gene), driftIntensity: driftIntensity };
125
+ if (filtered.length === 0) return { selected: null, alternatives: scored.slice(0, 4).map(x => x.gene), driftIntensity: driftIntensity, driftMode: 'none' };
121
126
 
122
- // Stochastic selection under drift: with probability proportional to driftIntensity,
123
- // pick a random gene from the top candidates instead of always picking the best.
127
+ // Diversity-directed drift: when capability gaps are available and the drift
128
+ // gate fires, prefer genes whose signals_match covers gap areas instead of
129
+ // pure random selection, steering exploration toward under-covered capabilities.
124
130
  var selectedIdx = 0;
131
+ var driftMode = 'selection';
125
132
  if (driftIntensity > 0 && filtered.length > 1 && Math.random() < driftIntensity) {
126
- // Weighted random selection from top candidates (favor higher-scoring but allow lower)
127
- var topN = Math.min(filtered.length, Math.max(2, Math.ceil(filtered.length * driftIntensity)));
128
- selectedIdx = Math.floor(Math.random() * topN);
133
+ if (capabilityGaps.length > 0) {
134
+ // Directed drift: score each candidate by how well its signals_match
135
+ // covers the capability gap dimensions
136
+ var gapScores = filtered.map(function(entry, idx) {
137
+ var g = entry.gene;
138
+ var patterns = Array.isArray(g.signals_match) ? g.signals_match : [];
139
+ var gapHits = 0;
140
+ for (var gi = 0; gi < capabilityGaps.length && gi < 5; gi++) {
141
+ var gapSignal = capabilityGaps[gi];
142
+ if (typeof gapSignal === 'string' && patterns.some(function(p) { return matchPatternToSignals(p, [gapSignal]); })) {
143
+ gapHits++;
144
+ }
145
+ }
146
+ return { idx: idx, gapHits: gapHits, baseScore: entry.score };
147
+ });
148
+
149
+ var hasGapHits = gapScores.some(function(gs) { return gs.gapHits > 0; });
150
+ if (hasGapHits) {
151
+ // Sort by gap coverage first, then by base score
152
+ gapScores.sort(function(a, b) {
153
+ return b.gapHits - a.gapHits || b.baseScore - a.baseScore;
154
+ });
155
+ selectedIdx = gapScores[0].idx;
156
+ driftMode = 'diversity_directed';
157
+ } else {
158
+ // No gap match: fall back to novelty-weighted random selection
159
+ var topN = Math.min(filtered.length, Math.max(2, Math.ceil(filtered.length * driftIntensity)));
160
+ // If novelty score is low (agent is too similar to others), increase exploration range
161
+ if (noveltyScore != null && noveltyScore < 0.3 && topN < filtered.length) {
162
+ topN = Math.min(filtered.length, topN + 1);
163
+ }
164
+ selectedIdx = Math.floor(Math.random() * topN);
165
+ driftMode = 'random_weighted';
166
+ }
167
+ } else {
168
+ // No capability gap data: original random drift behavior
169
+ var topN = Math.min(filtered.length, Math.max(2, Math.ceil(filtered.length * driftIntensity)));
170
+ selectedIdx = Math.floor(Math.random() * topN);
171
+ driftMode = 'random';
172
+ }
129
173
  }
130
174
 
131
175
  return {
132
176
  selected: filtered[selectedIdx].gene,
133
177
  alternatives: filtered.filter(function(_, i) { return i !== selectedIdx; }).slice(0, 4).map(x => x.gene),
134
178
  driftIntensity: driftIntensity,
179
+ driftMode: driftMode,
135
180
  };
136
181
  }
137
182
 
@@ -182,7 +227,7 @@ function banGenesFromFailedCapsules(failedCapsules, signals, existingBans) {
182
227
  return bans;
183
228
  }
184
229
 
185
- function selectGeneAndCapsule({ genes, capsules, signals, memoryAdvice, driftEnabled, failedCapsules }) {
230
+ function selectGeneAndCapsule({ genes, capsules, signals, memoryAdvice, driftEnabled, failedCapsules, capabilityGaps, noveltyScore }) {
186
231
  const bannedGeneIds =
187
232
  memoryAdvice && memoryAdvice.bannedGeneIds instanceof Set ? memoryAdvice.bannedGeneIds : new Set();
188
233
  const preferredGeneId = memoryAdvice && memoryAdvice.preferredGeneId ? memoryAdvice.preferredGeneId : null;
@@ -197,6 +242,8 @@ function selectGeneAndCapsule({ genes, capsules, signals, memoryAdvice, driftEna
197
242
  bannedGeneIds: effectiveBans,
198
243
  preferredGeneId,
199
244
  driftEnabled: !!driftEnabled,
245
+ capabilityGaps: Array.isArray(capabilityGaps) ? capabilityGaps : [],
246
+ noveltyScore: Number.isFinite(Number(noveltyScore)) ? Number(noveltyScore) : null,
200
247
  });
201
248
  const capsule = selectCapsule(capsules, signals);
202
249
  const selector = buildSelectorDecision({
@@ -20,6 +20,7 @@ const { buildValidationReport } = require('./validationReport');
20
20
  const { logAssetCall } = require('./assetCallLog');
21
21
  const { recordNarrative } = require('./narrativeMemory');
22
22
  const { isLlmReviewEnabled, runLlmReview } = require('./llmReview');
23
+ const { buildExecutionTrace } = require('./executionTrace');
23
24
 
24
25
  function nowIso() {
25
26
  return new Date().toISOString();
@@ -382,12 +383,12 @@ function readStateForSolidify() {
382
383
  }
383
384
 
384
385
  function writeStateForSolidify(state) {
385
- const memoryDir = getMemoryDir();
386
- const statePath = path.join(getEvolutionDir(), 'evolution_solidify_state.json');
386
+ const evolutionDir = getEvolutionDir();
387
+ const statePath = path.join(evolutionDir, 'evolution_solidify_state.json');
387
388
  try {
388
- if (!fs.existsSync(memoryDir)) fs.mkdirSync(memoryDir, { recursive: true });
389
+ if (!fs.existsSync(evolutionDir)) fs.mkdirSync(evolutionDir, { recursive: true });
389
390
  } catch (e) {
390
- console.warn('[evolver] writeStateForSolidify mkdir failed:', memoryDir, e && e.message || e);
391
+ console.warn('[evolver] writeStateForSolidify mkdir failed:', evolutionDir, e && e.message || e);
391
392
  }
392
393
  const tmp = `${statePath}.tmp`;
393
394
  fs.writeFileSync(tmp, JSON.stringify(state, null, 2) + '\n', 'utf8');
@@ -1225,6 +1226,22 @@ function solidify({ intent, summary, dryRun = false, rollbackOnFailure = true }
1225
1226
  memory_graph: memoryGraphPath(),
1226
1227
  },
1227
1228
  };
1229
+ // Build desensitized execution trace for cross-agent experience sharing
1230
+ const executionTrace = buildExecutionTrace({
1231
+ gene: geneUsed,
1232
+ mutation,
1233
+ signals,
1234
+ blast,
1235
+ constraintCheck,
1236
+ validation,
1237
+ canary,
1238
+ outcomeStatus,
1239
+ startedAt: validation.startedAt,
1240
+ });
1241
+ if (executionTrace) {
1242
+ event.execution_trace = executionTrace;
1243
+ }
1244
+
1228
1245
  event.asset_id = computeAssetId(event);
1229
1246
 
1230
1247
  let capsule = null;
@@ -1350,7 +1367,10 @@ function solidify({ intent, summary, dryRun = false, rollbackOnFailure = true }
1350
1367
  state.last_solidify = {
1351
1368
  run_id: runId, at: ts, event_id: event.id, capsule_id: capsuleId, outcome: event.outcome,
1352
1369
  };
1353
- if (!dryRun) writeStateForSolidify(state);
1370
+ if (!dryRun) {
1371
+ state.solidify_count = (state.solidify_count || 0) + 1;
1372
+ writeStateForSolidify(state);
1373
+ }
1354
1374
 
1355
1375
  if (!dryRun) {
1356
1376
  try {