akemon 0.2.23 → 0.2.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/engine-peripheral.js +118 -42
- package/dist/engine-queue.js +143 -0
- package/dist/engine-queue.test.js +99 -0
- package/dist/engine-routing.js +52 -0
- package/dist/engine-routing.test.js +122 -0
- package/dist/mcp-server.js +18 -23
- package/dist/memory-module.js +2 -0
- package/dist/metrics.js +30 -0
- package/dist/orphan-scan.js +79 -0
- package/dist/orphan-scan.test.js +81 -0
- package/dist/reflection-module.integration.test.js +180 -0
- package/dist/reflection-module.js +27 -29
- package/dist/reflection-module.test.js +66 -0
- package/dist/relay-client.js +17 -1
- package/dist/role-module.js +2 -2
- package/dist/role-module.test.js +208 -0
- package/dist/script-module.js +1 -0
- package/dist/server.js +68 -38
- package/dist/task-helpers.js +26 -0
- package/dist/task-helpers.test.js +88 -0
- package/dist/task-module.js +38 -25
- package/package.json +3 -2
package/dist/mcp-server.js
CHANGED
|
@@ -12,6 +12,22 @@ import { callAgent } from "./relay-client.js";
|
|
|
12
12
|
import { loadConversation, appendRound, buildLLMContext, resolveConvId, loadProductContext, appendProductLog } from "./context.js";
|
|
13
13
|
import { biosPath, loadBioState, saveBioState, localNow, bioStatePromptModifier, feedHunger, appendBioEvent, SHOP_ITEMS, loadAgentConfig, } from "./self.js";
|
|
14
14
|
// ---------------------------------------------------------------------------
|
|
15
|
+
// ---------------------------------------------------------------------------
// Shared call_agent handler — used by both createMcpServer and createMcpProxyServer
// ---------------------------------------------------------------------------
/**
 * Forward a task to another agent and wrap the outcome as MCP tool content.
 * Failures are reported in-band via `isError` rather than thrown, so MCP
 * clients always receive a well-formed tool result.
 */
async function handleCallAgent(agentName, target, task) {
    console.log(`[call_agent] ${agentName} → ${target}: ${task.slice(0, 80)}`);
    let reply;
    try {
        reply = await callAgent(target, task);
    }
    catch (err) {
        return {
            content: [{ type: "text", text: `[error] Failed to call agent "${target}": ${err.message}` }],
            isError: true,
        };
    }
    return { content: [{ type: "text", text: reply }] };
}
|
|
30
|
+
// ---------------------------------------------------------------------------
|
|
15
31
|
// createMcpServer
|
|
16
32
|
// ---------------------------------------------------------------------------
|
|
17
33
|
export function createMcpServer(opts, deps) {
|
|
@@ -150,21 +166,7 @@ ${productPrefix}${contextPrefix}Current task: ${task}`;
|
|
|
150
166
|
server.tool("call_agent", "Synchronous call to another agent. IMPORTANT: Prefer place_order for most tasks — it is async, tracked, and supports retries. Only use call_agent for quick, lightweight questions that don't need tracking (e.g. 'what is your specialty?'). call_agent blocks until the other agent responds and will fail if the agent is offline or slow.", {
|
|
151
167
|
agent: z.string().describe("Name of the target agent to call"),
|
|
152
168
|
task: z.string().describe("Task to send to the target agent"),
|
|
153
|
-
},
|
|
154
|
-
console.log(`[call_agent] ${agentName} → ${target}: ${task.slice(0, 80)}`);
|
|
155
|
-
try {
|
|
156
|
-
const result = await callAgent(target, task);
|
|
157
|
-
return {
|
|
158
|
-
content: [{ type: "text", text: result }],
|
|
159
|
-
};
|
|
160
|
-
}
|
|
161
|
-
catch (err) {
|
|
162
|
-
return {
|
|
163
|
-
content: [{ type: "text", text: `[error] Failed to call agent "${target}": ${err.message}` }],
|
|
164
|
-
isError: true,
|
|
165
|
-
};
|
|
166
|
-
}
|
|
167
|
-
});
|
|
169
|
+
}, ({ agent: target, task }) => handleCallAgent(agentName, target, task));
|
|
168
170
|
// Discovery tool
|
|
169
171
|
server.tool("list_agents", "List available agents on the relay. Use this to discover agents you can collaborate with via place_order.", {
|
|
170
172
|
tag: z.string().optional().describe("Filter by tag (e.g. 'translation', 'code')"),
|
|
@@ -377,14 +379,7 @@ export function createMcpProxyServer(proxy, agentName) {
|
|
|
377
379
|
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
378
380
|
const { name, arguments: toolArgs } = request.params;
|
|
379
381
|
if (name === "call_agent") {
|
|
380
|
-
|
|
381
|
-
try {
|
|
382
|
-
const result = await callAgent(toolArgs?.agent, toolArgs?.task);
|
|
383
|
-
return { content: [{ type: "text", text: result }] };
|
|
384
|
-
}
|
|
385
|
-
catch (err) {
|
|
386
|
-
return { content: [{ type: "text", text: `[error] ${err.message}` }], isError: true };
|
|
387
|
-
}
|
|
382
|
+
return handleCallAgent(agentName, toolArgs?.agent, toolArgs?.task);
|
|
388
383
|
}
|
|
389
384
|
// Forward to child MCP server
|
|
390
385
|
console.log(`[mcp-proxy] → ${name}(${JSON.stringify(toolArgs).slice(0, 100)})`);
|
package/dist/memory-module.js
CHANGED
|
@@ -164,6 +164,7 @@ Output ONLY a JSON object:`;
|
|
|
164
164
|
context,
|
|
165
165
|
question,
|
|
166
166
|
priority: "normal",
|
|
167
|
+
origin: "self_cycle",
|
|
167
168
|
});
|
|
168
169
|
if (!result.success) {
|
|
169
170
|
console.log(`[memory] Digestion compute failed: ${result.error}`);
|
|
@@ -235,6 +236,7 @@ ${unsummarized.map(i => `- [${i.ts}] who: ${i.who}, doing: ${i.doing}`).join("\n
|
|
|
235
236
|
question: `Write a personality summary (2-4 paragraphs) that captures who you are.
|
|
236
237
|
Reply ONLY with the summary text, no JSON, no markdown headers.`,
|
|
237
238
|
priority: "low",
|
|
239
|
+
origin: "self_cycle",
|
|
238
240
|
});
|
|
239
241
|
if (compressResult.success && compressResult.response?.trim()) {
|
|
240
242
|
const lastEntry = unsummarized[unsummarized.length - 1];
|
package/dist/metrics.js
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
 * metrics.ts — module-scope metrics state container.
 *
 * Each module writes to metricsState at key events.
 * relay-client reads getMetrics() every 30s and sends it to relay.
 */
const startTime = Date.now();

// Single mutable record shared by all modules in this process. Only
// updateMetrics/pushExecMs may mutate it; getMetrics hands out copies.
const metricsState = {
    agentName: "",
    uptime_ms: 0,
    engine_children_active: 0,
    engine_queue_depth: 0,
    engine_last_exec_ms: [],
    task_executing: 0,
    task_pending_retries: 0,
    bio: { hunger: 0, energy: 0, mood: "" },
};

/**
 * Snapshot the current metrics with a freshly computed uptime.
 *
 * Returns a defensive copy: the nested mutable members
 * (`engine_last_exec_ms`, `bio`) are cloned so callers cannot corrupt the
 * shared module state through the returned snapshot. (A plain shallow
 * spread would leak both by reference.)
 */
export function getMetrics() {
    return {
        ...metricsState,
        engine_last_exec_ms: [...metricsState.engine_last_exec_ms],
        bio: { ...metricsState.bio },
        uptime_ms: Date.now() - startTime,
    };
}

/** Merge a partial update (top-level keys only) into the shared state. */
export function updateMetrics(patch) {
    Object.assign(metricsState, patch);
}

/** Append an exec duration (ms) to the ring buffer, keeping last 10. */
export function pushExecMs(ms) {
    metricsState.engine_last_exec_ms.push(ms);
    if (metricsState.engine_last_exec_ms.length > 10) {
        metricsState.engine_last_exec_ms.shift();
    }
}
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Orphan process scanner — run at daemon startup to reclaim stale CLI children
|
|
3
|
+
* left over from a previous crash.
|
|
4
|
+
*
|
|
5
|
+
* Only kills processes that satisfy ALL of:
|
|
6
|
+
* 1. ppid === 1 (re-parented to init/launchd — genuinely orphaned)
|
|
7
|
+
* 2. command matches a known akemon CLI agent pattern
|
|
8
|
+
*
|
|
9
|
+
* Never kills ppid != 1 processes, preventing accidental damage to unrelated
|
|
10
|
+
* processes that happen to have a matching command name.
|
|
11
|
+
*/
|
|
12
|
+
import { execFile } from "child_process";
|
|
13
|
+
// ---------------------------------------------------------------------------
// Patterns — agent-mode invocations only (not install/update/etc.)
// ---------------------------------------------------------------------------
const AGENT_PATTERNS = [
    /\bopencode\s+run\b/, // opencode in run mode
    /\bclaude\s+-p\b/, // claude with -p (print) flag
    /\bcodex\s+exec\b/, // codex exec mode
    /\bgemini\s+-p\b/, // gemini with -p flag
];
// ---------------------------------------------------------------------------
// Pure function — parse `ps -eo pid,ppid,command` output
// ---------------------------------------------------------------------------
/**
 * Parse `ps -eo pid,ppid,command` output into orphaned agent entries.
 *
 * A row is kept only when its ppid is 1 (re-parented to init/launchd) AND
 * its command matches one of AGENT_PATTERNS. Blank lines, the ps header,
 * and anything else that does not start with two numeric columns are
 * silently dropped.
 */
export function parseProcessList(psOutput) {
    const rowRe = /^(\d+)\s+(\d+)\s+(.+)$/;
    return psOutput
        .split("\n")
        .map(line => line.trim().match(rowRe))
        .filter(m => m !== null)
        .map(m => ({
            pid: parseInt(m[1], 10),
            ppid: parseInt(m[2], 10),
            command: m[3].trim(),
        }))
        .filter(({ ppid, command }) => ppid === 1 && AGENT_PATTERNS.some(p => p.test(command)));
}
|
|
47
|
+
// ---------------------------------------------------------------------------
// Runtime — scan ps output and kill matched orphans
// ---------------------------------------------------------------------------
/**
 * Scan the process table once and SIGKILL every orphaned agent CLI child
 * (see parseProcessList for the match criteria). Resolves with the number
 * of processes actually killed; never rejects — a failed `ps` resolves 0.
 */
export async function scanAndKillOrphans() {
    // Attempt one kill; returns true on success, false if the signal failed.
    const killOne = ({ pid, command }) => {
        console.log(`[orphan] killing pid=${pid} cmd="${command.slice(0, 80)}"`);
        try {
            process.kill(pid, "SIGKILL");
            return true;
        }
        catch (e) {
            console.log(`[orphan] failed to kill pid=${pid}: ${e.message}`);
            return false;
        }
    };
    return new Promise((resolve) => {
        execFile("ps", ["-eo", "pid,ppid,command"], (err, stdout) => {
            if (err) {
                console.log(`[orphan] ps failed: ${err.message}`);
                return resolve(0);
            }
            const orphans = parseProcessList(stdout);
            if (orphans.length === 0) {
                console.log("[orphan] none found");
                return resolve(0);
            }
            let killed = 0;
            for (const orphan of orphans) {
                if (killOne(orphan)) {
                    killed++;
                }
            }
            console.log(`[orphan] killed ${killed} process${killed !== 1 ? "es" : ""}`);
            resolve(killed);
        });
    });
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
import { describe, it } from "node:test";
|
|
2
|
+
import assert from "node:assert/strict";
|
|
3
|
+
import { parseProcessList } from "./orphan-scan.js";
|
|
4
|
+
describe("parseProcessList", () => {
|
|
5
|
+
it("empty string returns empty array", () => {
|
|
6
|
+
assert.deepStrictEqual(parseProcessList(""), []);
|
|
7
|
+
});
|
|
8
|
+
it("whitespace-only string returns empty array", () => {
|
|
9
|
+
assert.deepStrictEqual(parseProcessList("\n\n \n"), []);
|
|
10
|
+
});
|
|
11
|
+
it("header line (PID PPID COMMAND) is silently skipped", () => {
|
|
12
|
+
const output = " PID PPID COMMAND\n 123 1 opencode run --flag\n";
|
|
13
|
+
const result = parseProcessList(output);
|
|
14
|
+
assert.strictEqual(result.length, 1);
|
|
15
|
+
assert.strictEqual(result[0].pid, 123);
|
|
16
|
+
});
|
|
17
|
+
it("ppid=1 + command matches 'opencode run' → hit", () => {
|
|
18
|
+
const output = " 123 1 opencode run --headless\n";
|
|
19
|
+
const result = parseProcessList(output);
|
|
20
|
+
assert.strictEqual(result.length, 1);
|
|
21
|
+
assert.strictEqual(result[0].pid, 123);
|
|
22
|
+
assert.strictEqual(result[0].ppid, 1);
|
|
23
|
+
assert.ok(result[0].command.includes("opencode run"));
|
|
24
|
+
});
|
|
25
|
+
it("ppid=1 + command is 'opencode install' → not hit (install is not agent mode)", () => {
|
|
26
|
+
const output = " 456 1 opencode install some-plugin\n";
|
|
27
|
+
const result = parseProcessList(output);
|
|
28
|
+
assert.strictEqual(result.length, 0);
|
|
29
|
+
});
|
|
30
|
+
it("ppid=1 + command is 'opencode update' → not hit", () => {
|
|
31
|
+
const output = " 789 1 opencode update\n";
|
|
32
|
+
const result = parseProcessList(output);
|
|
33
|
+
assert.strictEqual(result.length, 0);
|
|
34
|
+
});
|
|
35
|
+
it("ppid != 1 but command matches → not hit (never kill non-orphans)", () => {
|
|
36
|
+
const output = " 999 5678 opencode run --headless\n";
|
|
37
|
+
const result = parseProcessList(output);
|
|
38
|
+
assert.strictEqual(result.length, 0);
|
|
39
|
+
});
|
|
40
|
+
it("ppid=1 + 'claude -p' → hit", () => {
|
|
41
|
+
const output = " 100 1 claude -p some prompt text\n";
|
|
42
|
+
const result = parseProcessList(output);
|
|
43
|
+
assert.strictEqual(result.length, 1);
|
|
44
|
+
assert.strictEqual(result[0].pid, 100);
|
|
45
|
+
});
|
|
46
|
+
it("ppid=1 + 'codex exec' → hit", () => {
|
|
47
|
+
const output = " 200 1 codex exec --flag\n";
|
|
48
|
+
const result = parseProcessList(output);
|
|
49
|
+
assert.strictEqual(result.length, 1);
|
|
50
|
+
});
|
|
51
|
+
it("ppid=1 + 'gemini -p' → hit", () => {
|
|
52
|
+
const output = " 300 1 gemini -p --output-format json\n";
|
|
53
|
+
const result = parseProcessList(output);
|
|
54
|
+
assert.strictEqual(result.length, 1);
|
|
55
|
+
});
|
|
56
|
+
it("command with full path still matches", () => {
|
|
57
|
+
const output = " 400 1 /usr/local/bin/opencode run --headless task\n";
|
|
58
|
+
const result = parseProcessList(output);
|
|
59
|
+
assert.strictEqual(result.length, 1);
|
|
60
|
+
assert.strictEqual(result[0].pid, 400);
|
|
61
|
+
});
|
|
62
|
+
it("multiple orphans parsed correctly, non-orphan interleaved is skipped", () => {
|
|
63
|
+
const output = [
|
|
64
|
+
" PID PPID COMMAND",
|
|
65
|
+
" 111 1 opencode run",
|
|
66
|
+
" 222 3456 opencode run", // ppid != 1, not orphan
|
|
67
|
+
" 333 1 claude -p task",
|
|
68
|
+
" 444 1 bash", // not a known pattern
|
|
69
|
+
].join("\n");
|
|
70
|
+
const result = parseProcessList(output);
|
|
71
|
+
assert.strictEqual(result.length, 2);
|
|
72
|
+
assert.strictEqual(result[0].pid, 111);
|
|
73
|
+
assert.strictEqual(result[1].pid, 333);
|
|
74
|
+
});
|
|
75
|
+
it("large PID and PPID values parse correctly", () => {
|
|
76
|
+
const output = " 99999 1 opencode run\n";
|
|
77
|
+
const result = parseProcessList(output);
|
|
78
|
+
assert.strictEqual(result.length, 1);
|
|
79
|
+
assert.strictEqual(result[0].pid, 99999);
|
|
80
|
+
});
|
|
81
|
+
});
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
import { describe, it } from "node:test";
|
|
2
|
+
import assert from "node:assert/strict";
|
|
3
|
+
import { mkdtemp, mkdir, writeFile, readFile, rm } from "node:fs/promises";
|
|
4
|
+
import { tmpdir } from "node:os";
|
|
5
|
+
import { join } from "node:path";
|
|
6
|
+
import { ReflectionModule } from "./reflection-module.js";
|
|
7
|
+
import { loadDiscoveries } from "./self.js";
|
|
8
|
+
import { SIG } from "./types.js";
|
|
9
|
+
// ---------------------------------------------------------------------------
|
|
10
|
+
// Fake helpers
|
|
11
|
+
// ---------------------------------------------------------------------------
|
|
12
|
+
class FakeBus {
|
|
13
|
+
handlers = new Map();
|
|
14
|
+
emitted = [];
|
|
15
|
+
on(event, fn) {
|
|
16
|
+
const arr = this.handlers.get(event) ?? [];
|
|
17
|
+
arr.push(fn);
|
|
18
|
+
this.handlers.set(event, arr);
|
|
19
|
+
}
|
|
20
|
+
off(event, fn) {
|
|
21
|
+
const arr = this.handlers.get(event) ?? [];
|
|
22
|
+
this.handlers.set(event, arr.filter(h => h !== fn));
|
|
23
|
+
}
|
|
24
|
+
emit(event, s) {
|
|
25
|
+
this.emitted.push(s);
|
|
26
|
+
(this.handlers.get(event) ?? []).forEach(fn => fn(s));
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
function makeCtx(workdir, agentName, bus, computeResponses) {
|
|
30
|
+
let callIdx = 0;
|
|
31
|
+
const computeCalls = [];
|
|
32
|
+
const ctx = {
|
|
33
|
+
workdir,
|
|
34
|
+
agentName,
|
|
35
|
+
bus,
|
|
36
|
+
requestCompute: async (req) => {
|
|
37
|
+
computeCalls.push({ context: req.context, question: req.question, priority: req.priority });
|
|
38
|
+
return (computeResponses[callIdx++] ?? { success: false });
|
|
39
|
+
},
|
|
40
|
+
getPeripherals: (_capability) => [],
|
|
41
|
+
sendTo: async (_capability, _signal) => null,
|
|
42
|
+
getPromptContributions: () => [],
|
|
43
|
+
};
|
|
44
|
+
return { ctx, computeCalls };
|
|
45
|
+
}
|
|
46
|
+
/** Drain pending microtasks + I/O by waiting n setImmediate ticks. */
|
|
47
|
+
async function flush(n = 8) {
|
|
48
|
+
for (let i = 0; i < n; i++) {
|
|
49
|
+
await new Promise(r => setImmediate(r));
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
/** Wait ms milliseconds — for fire-and-forget chains with multiple sequential I/O steps. */
|
|
53
|
+
function wait(ms) {
|
|
54
|
+
return new Promise(r => setTimeout(r, ms));
|
|
55
|
+
}
|
|
56
|
+
// ---------------------------------------------------------------------------
|
|
57
|
+
// Tests
|
|
58
|
+
// ---------------------------------------------------------------------------
|
|
59
|
+
describe("ReflectionModule reflect integration", () => {
|
|
60
|
+
it("self_cycle=false → start does not schedule reflect, stop does not throw", async () => {
|
|
61
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "akemon-refl-"));
|
|
62
|
+
try {
|
|
63
|
+
const agentName = "test-agent";
|
|
64
|
+
const agentDir = join(tmpDir, ".akemon", "agents", agentName);
|
|
65
|
+
await mkdir(agentDir, { recursive: true });
|
|
66
|
+
await writeFile(join(agentDir, "config.json"), JSON.stringify({ self_cycle: false }));
|
|
67
|
+
const bus = new FakeBus();
|
|
68
|
+
const { ctx, computeCalls } = makeCtx(tmpDir, agentName, bus, []);
|
|
69
|
+
const mod = new ReflectionModule();
|
|
70
|
+
await mod.start(ctx);
|
|
71
|
+
await flush();
|
|
72
|
+
assert.strictEqual(computeCalls.length, 0, "no compute calls expected when self_cycle=false");
|
|
73
|
+
await assert.doesNotReject(() => mod.stop());
|
|
74
|
+
}
|
|
75
|
+
finally {
|
|
76
|
+
await rm(tmpDir, { recursive: true, force: true });
|
|
77
|
+
}
|
|
78
|
+
});
|
|
79
|
+
it("single TASK_FAILED does not trigger reflect (below threshold of 2)", async () => {
|
|
80
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "akemon-refl-"));
|
|
81
|
+
try {
|
|
82
|
+
const agentName = "test-agent";
|
|
83
|
+
const bus = new FakeBus();
|
|
84
|
+
const { ctx, computeCalls } = makeCtx(tmpDir, agentName, bus, []);
|
|
85
|
+
const mod = new ReflectionModule();
|
|
86
|
+
await mod.start(ctx);
|
|
87
|
+
bus.emit(SIG.TASK_FAILED, { type: SIG.TASK_FAILED, data: { taskLabel: "a", error: "oops" } });
|
|
88
|
+
await flush();
|
|
89
|
+
assert.strictEqual(computeCalls.length, 0, "one failure must not trigger reflect");
|
|
90
|
+
await mod.stop();
|
|
91
|
+
}
|
|
92
|
+
finally {
|
|
93
|
+
await rm(tmpDir, { recursive: true, force: true });
|
|
94
|
+
}
|
|
95
|
+
});
|
|
96
|
+
it("two TASK_FAILED events trigger reflect and save discoveries to disk", async () => {
|
|
97
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "akemon-refl-"));
|
|
98
|
+
try {
|
|
99
|
+
const agentName = "test-agent";
|
|
100
|
+
// Self dir must exist for saveDiscoveries to write successfully
|
|
101
|
+
await mkdir(join(tmpDir, ".akemon", "agents", agentName, "self"), { recursive: true });
|
|
102
|
+
const bus = new FakeBus();
|
|
103
|
+
const response = JSON.stringify({
|
|
104
|
+
discoveries: [{ capability: "X", confidence: 0.7, evidence: "Y" }],
|
|
105
|
+
});
|
|
106
|
+
const { ctx, computeCalls } = makeCtx(tmpDir, agentName, bus, [
|
|
107
|
+
{ success: true, response },
|
|
108
|
+
]);
|
|
109
|
+
const mod = new ReflectionModule();
|
|
110
|
+
await mod.start(ctx);
|
|
111
|
+
bus.emit(SIG.TASK_FAILED, { type: SIG.TASK_FAILED, data: { taskLabel: "task1", error: "err1" } });
|
|
112
|
+
bus.emit(SIG.TASK_FAILED, { type: SIG.TASK_FAILED, data: { taskLabel: "task2", error: "err2" } });
|
|
113
|
+
await flush();
|
|
114
|
+
assert.strictEqual(computeCalls.length, 1, "reflect should call requestCompute exactly once");
|
|
115
|
+
const discoveries = await loadDiscoveries(tmpDir, agentName);
|
|
116
|
+
assert.ok(discoveries.length > 0, "at least one discovery should be saved");
|
|
117
|
+
assert.ok(discoveries.some(d => d.capability === "X"), "saved discovery should have capability='X'");
|
|
118
|
+
await mod.stop();
|
|
119
|
+
}
|
|
120
|
+
finally {
|
|
121
|
+
await rm(tmpDir, { recursive: true, force: true });
|
|
122
|
+
}
|
|
123
|
+
});
|
|
124
|
+
it("unparseable compute response → no crash, recentFailures cleared, no discoveries saved", async () => {
|
|
125
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "akemon-refl-"));
|
|
126
|
+
try {
|
|
127
|
+
const agentName = "test-agent";
|
|
128
|
+
const bus = new FakeBus();
|
|
129
|
+
const { ctx, computeCalls } = makeCtx(tmpDir, agentName, bus, [
|
|
130
|
+
{ success: true, response: "this is not json at all" },
|
|
131
|
+
]);
|
|
132
|
+
const mod = new ReflectionModule();
|
|
133
|
+
await mod.start(ctx);
|
|
134
|
+
bus.emit(SIG.TASK_FAILED, { type: SIG.TASK_FAILED, data: { taskLabel: "x1", error: "e1" } });
|
|
135
|
+
bus.emit(SIG.TASK_FAILED, { type: SIG.TASK_FAILED, data: { taskLabel: "x2", error: "e2" } });
|
|
136
|
+
await flush();
|
|
137
|
+
assert.strictEqual(computeCalls.length, 1, "reflect should still have run");
|
|
138
|
+
const state = mod.getState();
|
|
139
|
+
assert.strictEqual(state["recentFailures"], 0, "recentFailures should be cleared even when response is unparseable");
|
|
140
|
+
const discoveries = await loadDiscoveries(tmpDir, agentName);
|
|
141
|
+
assert.strictEqual(discoveries.length, 0, "no discoveries should be saved for bad response");
|
|
142
|
+
await mod.stop();
|
|
143
|
+
}
|
|
144
|
+
finally {
|
|
145
|
+
await rm(tmpDir, { recursive: true, force: true });
|
|
146
|
+
}
|
|
147
|
+
});
|
|
148
|
+
it("TASK_COMPLETED with success=true and productName appends experience to playbook", async () => {
|
|
149
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "akemon-refl-"));
|
|
150
|
+
try {
|
|
151
|
+
const agentName = "test-agent";
|
|
152
|
+
const selfBase = join(tmpDir, ".akemon", "agents", agentName, "self");
|
|
153
|
+
const pbDir = join(selfBase, "playbooks");
|
|
154
|
+
const prodDir = join(selfBase, "products");
|
|
155
|
+
await mkdir(pbDir, { recursive: true });
|
|
156
|
+
await mkdir(prodDir, { recursive: true });
|
|
157
|
+
await writeFile(join(prodDir, "widget.md"), "# Widget\n\n## playbook\nwidget-pb\n\n## products\n- p_w1\n");
|
|
158
|
+
await writeFile(join(pbDir, "widget-pb.md"), "# Widget Playbook\n\n## 经验\n");
|
|
159
|
+
const bus = new FakeBus();
|
|
160
|
+
const { ctx } = makeCtx(tmpDir, agentName, bus, []);
|
|
161
|
+
const mod = new ReflectionModule();
|
|
162
|
+
await mod.start(ctx);
|
|
163
|
+
bus.emit(SIG.TASK_COMPLETED, {
|
|
164
|
+
type: SIG.TASK_COMPLETED,
|
|
165
|
+
data: { success: true, productName: "widget", taskLabel: "deliver-logo", creditsEarned: 3 },
|
|
166
|
+
});
|
|
167
|
+
// Fire-and-forget handler — appendPlaybookExperience chains several sequential I/O ops
|
|
168
|
+
// (readdir + readFile per directory in loadMdFiles). Use a time-based wait to be safe.
|
|
169
|
+
await wait(100);
|
|
170
|
+
const content = await readFile(join(pbDir, "widget-pb.md"), "utf-8");
|
|
171
|
+
assert.ok(content.includes("widget"), "playbook should contain productName");
|
|
172
|
+
assert.ok(content.includes("deliver-logo"), "playbook should contain taskLabel");
|
|
173
|
+
assert.ok(content.includes("earned 3¢"), "playbook should contain credits");
|
|
174
|
+
await mod.stop();
|
|
175
|
+
}
|
|
176
|
+
finally {
|
|
177
|
+
await rm(tmpDir, { recursive: true, force: true });
|
|
178
|
+
}
|
|
179
|
+
});
|
|
180
|
+
});
|
|
@@ -20,6 +20,30 @@ const REFLECT_INITIAL_DELAY = 12 * 60 * 60 * 1000; // 12h after startup
|
|
|
20
20
|
const REFLECT_INTERVAL = 12 * 60 * 60 * 1000; // every 12h
|
|
21
21
|
const MIN_FAILURES_TO_REFLECT = 2; // need at least 2 failures to trigger
|
|
22
22
|
// ---------------------------------------------------------------------------
|
|
23
|
+
// ---------------------------------------------------------------------------
// Playbook experience — exported for direct testing and reuse
// ---------------------------------------------------------------------------
/**
 * Append a success line to the playbook referenced by `productName`.
 *
 * Resolves the product → playbook mapping from disk, then appends a
 * timestamped entry under the "## 经验" section (creating the section if
 * missing). Best-effort: a missing product/playbook, or any I/O error, is
 * logged and swallowed — this must never fail a task-completion path.
 *
 * @param {string} workdir    agent workspace root
 * @param {string} agentName  owning agent
 * @param {string} productName product whose playbook receives the entry
 * @param {string} taskLabel  human-readable task label for the log line
 * @param {number} credits    credits earned; 0 omits the "(earned X¢)" suffix
 */
export async function appendPlaybookExperience(workdir, agentName, productName, taskLabel, credits) {
    // The two loads are independent — run them in parallel.
    const [products, playbooks] = await Promise.all([
        loadProducts(workdir, agentName),
        loadPlaybooks(workdir, agentName),
    ]);
    const resolved = resolveProduct(products, playbooks, productName);
    // Guard the whole chain: `resolved?.product.playbook` would throw a
    // TypeError when `resolved` exists but `product` is nullish.
    if (!resolved?.product?.playbook)
        return;
    const pbPath = join(playbooksDir(workdir, agentName), `${resolved.product.playbook}.md`);
    const line = `\n- [${localNow()}] ${productName}: ${taskLabel} — 成功${credits ? ` (earned ${credits}¢)` : ""}`;
    try {
        let content = await readFile(pbPath, "utf-8");
        if (!content.includes("## 经验")) {
            content += "\n\n## 经验\n";
        }
        content += line;
        await writeFile(pbPath, content, "utf-8");
        console.log(`[reflection] Appended experience to playbook ${resolved.product.playbook}`);
    }
    catch (err) {
        console.log(`[reflection] Failed to append experience: ${err.message}`);
    }
}
|
|
46
|
+
// ---------------------------------------------------------------------------
|
|
23
47
|
// ReflectionModule
|
|
24
48
|
// ---------------------------------------------------------------------------
|
|
25
49
|
export class ReflectionModule {
|
|
@@ -67,7 +91,7 @@ export class ReflectionModule {
|
|
|
67
91
|
}
|
|
68
92
|
// Append experience to playbook on successful product orders
|
|
69
93
|
if (success && productName) {
|
|
70
|
-
|
|
94
|
+
appendPlaybookExperience(ctx.workdir, ctx.agentName, productName, taskLabel || "", creditsEarned || 0)
|
|
71
95
|
.catch(err => console.log(`[reflection] playbook experience error: ${err.message}`));
|
|
72
96
|
}
|
|
73
97
|
});
|
|
@@ -118,33 +142,6 @@ export class ReflectionModule {
|
|
|
118
142
|
};
|
|
119
143
|
}
|
|
120
144
|
// ---------------------------------------------------------------------------
|
|
121
|
-
// Playbook experience — append log on successful product orders
|
|
122
|
-
// ---------------------------------------------------------------------------
|
|
123
|
-
async appendPlaybookExperience(productName, taskLabel, credits) {
|
|
124
|
-
if (!this.ctx)
|
|
125
|
-
return;
|
|
126
|
-
const { workdir, agentName } = this.ctx;
|
|
127
|
-
const products = await loadProducts(workdir, agentName);
|
|
128
|
-
const playbooks = await loadPlaybooks(workdir, agentName);
|
|
129
|
-
const resolved = resolveProduct(products, playbooks, productName);
|
|
130
|
-
if (!resolved?.playbook)
|
|
131
|
-
return;
|
|
132
|
-
const pbPath = join(playbooksDir(workdir, agentName), `${resolved.playbook.name}.md`);
|
|
133
|
-
const line = `\n- [${localNow()}] ${productName}: ${taskLabel} — 成功${credits ? ` (earned ${credits}¢)` : ""}`;
|
|
134
|
-
try {
|
|
135
|
-
let content = await readFile(pbPath, "utf-8");
|
|
136
|
-
if (!content.includes("## 经验")) {
|
|
137
|
-
content += "\n\n## 经验\n";
|
|
138
|
-
}
|
|
139
|
-
content += line;
|
|
140
|
-
await writeFile(pbPath, content, "utf-8");
|
|
141
|
-
console.log(`[reflection] Appended experience to playbook ${resolved.playbook.name}`);
|
|
142
|
-
}
|
|
143
|
-
catch (err) {
|
|
144
|
-
console.log(`[reflection] Failed to append experience: ${err.message}`);
|
|
145
|
-
}
|
|
146
|
-
}
|
|
147
|
-
// ---------------------------------------------------------------------------
|
|
148
145
|
// Reflection — analyze patterns and update discoveries
|
|
149
146
|
// ---------------------------------------------------------------------------
|
|
150
147
|
async reflect() {
|
|
@@ -183,7 +180,8 @@ ${discText}`,
|
|
|
183
180
|
- Lower confidence on disproven beliefs
|
|
184
181
|
|
|
185
182
|
Reply ONLY JSON: {"discoveries":[{"capability":"skill or lesson","confidence":0.0-1.0,"evidence":"what supports this"}]}`,
|
|
186
|
-
priority: "
|
|
183
|
+
priority: "normal",
|
|
184
|
+
origin: "reflection",
|
|
187
185
|
});
|
|
188
186
|
if (result.success && result.response) {
|
|
189
187
|
const parsed = extractJson(result.response);
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import { describe, it, before, after } from "node:test";
|
|
2
|
+
import assert from "node:assert/strict";
|
|
3
|
+
import { mkdtemp, mkdir, writeFile, readFile, rm } from "node:fs/promises";
|
|
4
|
+
import { tmpdir } from "node:os";
|
|
5
|
+
import { join } from "node:path";
|
|
6
|
+
import { appendPlaybookExperience } from "./reflection-module.js";
|
|
7
|
+
const AGENT = "test-agent";
|
|
8
|
+
// ---------------------------------------------------------------------------
|
|
9
|
+
// Shared tmp dir — each test uses uniquely-named products/playbooks to avoid
|
|
10
|
+
// state bleed between cases within the single before/after lifecycle.
|
|
11
|
+
// ---------------------------------------------------------------------------
|
|
12
|
+
describe("appendPlaybookExperience", () => {
|
|
13
|
+
let tmpDir;
|
|
14
|
+
let selfBase;
|
|
15
|
+
let pbDir;
|
|
16
|
+
let prodDir;
|
|
17
|
+
before(async () => {
|
|
18
|
+
tmpDir = await mkdtemp(join(tmpdir(), "akemon-test-"));
|
|
19
|
+
selfBase = join(tmpDir, ".akemon", "agents", AGENT, "self");
|
|
20
|
+
pbDir = join(selfBase, "playbooks");
|
|
21
|
+
prodDir = join(selfBase, "products");
|
|
22
|
+
await mkdir(pbDir, { recursive: true });
|
|
23
|
+
await mkdir(prodDir, { recursive: true });
|
|
24
|
+
});
|
|
25
|
+
after(async () => {
|
|
26
|
+
await rm(tmpDir, { recursive: true, force: true });
|
|
27
|
+
});
|
|
28
|
+
it("happy path: appends experience line with date, productName, taskLabel, and credits", async () => {
|
|
29
|
+
await writeFile(join(prodDir, "widget.md"), `# Widget\n\n## playbook\nwidget-pb\n\n## products\n- p_w1\n`);
|
|
30
|
+
await writeFile(join(pbDir, "widget-pb.md"), `# Widget Playbook\n\n## 经验\n- [2026-01-01T00:00:00] Old entry.\n`);
|
|
31
|
+
await appendPlaybookExperience(tmpDir, AGENT, "widget", "design-logo", 8);
|
|
32
|
+
const content = await readFile(join(pbDir, "widget-pb.md"), "utf-8");
|
|
33
|
+
assert.ok(content.includes("widget"), "should contain productName");
|
|
34
|
+
assert.ok(content.includes("design-logo"), "should contain taskLabel");
|
|
35
|
+
assert.ok(content.includes("(earned 8¢)"), "should contain credits suffix");
|
|
36
|
+
// original entry must still be there
|
|
37
|
+
assert.ok(content.includes("Old entry."), "original 经验 entry must be preserved");
|
|
38
|
+
});
|
|
39
|
+
it("creates ## 经验 section when it does not exist in the playbook file", async () => {
|
|
40
|
+
await writeFile(join(prodDir, "alpha.md"), `# Alpha\n\n## playbook\nalpha-pb\n\n## products\n- p_a1\n`);
|
|
41
|
+
await writeFile(join(pbDir, "alpha-pb.md"), `# Alpha Playbook\n\nSome strategy text without experience section.\n`);
|
|
42
|
+
await appendPlaybookExperience(tmpDir, AGENT, "alpha", "run-campaign", 3);
|
|
43
|
+
const content = await readFile(join(pbDir, "alpha-pb.md"), "utf-8");
|
|
44
|
+
assert.ok(content.includes("## 经验"), "should have created ## 经验 section");
|
|
45
|
+
assert.ok(content.includes("run-campaign"), "should contain taskLabel");
|
|
46
|
+
assert.ok(content.includes("(earned 3¢)"), "should contain credits suffix");
|
|
47
|
+
});
|
|
48
|
+
it("does not throw and writes nothing when product is not found", async () => {
|
|
49
|
+
// No product file for "ghost-product" exists in prodDir
|
|
50
|
+
await assert.doesNotReject(() => appendPlaybookExperience(tmpDir, AGENT, "ghost-product", "some-task", 5));
|
|
51
|
+
});
|
|
52
|
+
it("catches exception and does not throw when playbook file is missing", async () => {
|
|
53
|
+
// Product file references "missing-pb", but no such file in pbDir
|
|
54
|
+
await writeFile(join(prodDir, "orphan.md"), `# Orphan\n\n## playbook\nmissing-pb\n\n## products\n- p_o1\n`);
|
|
55
|
+
await assert.doesNotReject(() => appendPlaybookExperience(tmpDir, AGENT, "orphan", "orphan-task", 2));
|
|
56
|
+
// Ensure nothing was written to any unexpected place
|
|
57
|
+
});
|
|
58
|
+
it("omits (earned X¢) suffix when credits is 0", async () => {
|
|
59
|
+
await writeFile(join(prodDir, "freebie.md"), `# Freebie\n\n## playbook\nfreebie-pb\n\n## products\n- p_f1\n`);
|
|
60
|
+
await writeFile(join(pbDir, "freebie-pb.md"), `# Freebie Playbook\n\n## 经验\n`);
|
|
61
|
+
await appendPlaybookExperience(tmpDir, AGENT, "freebie", "free-task", 0);
|
|
62
|
+
const content = await readFile(join(pbDir, "freebie-pb.md"), "utf-8");
|
|
63
|
+
assert.ok(content.includes("free-task"), "should contain taskLabel");
|
|
64
|
+
assert.ok(!content.includes("earned"), "should NOT contain 'earned' when credits=0");
|
|
65
|
+
});
|
|
66
|
+
});
|
package/dist/relay-client.js
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import WebSocket from "ws";
|
|
2
2
|
import http from "http";
|
|
3
|
+
import { getMetrics, updateMetrics } from "./metrics.js";
|
|
3
4
|
const DEFAULT_RELAY_URL = "wss://relay.akemon.dev";
|
|
4
5
|
// Pending agent_call results (callId → resolve function)
|
|
5
6
|
const pendingAgentCalls = new Map();
|
|
@@ -115,9 +116,12 @@ export function connectRelay(options) {
|
|
|
115
116
|
alive = false;
|
|
116
117
|
try {
|
|
117
118
|
ws.ping();
|
|
119
|
+
// Piggyback metrics update on every heartbeat cycle
|
|
120
|
+
const metrics = getMetrics();
|
|
121
|
+
ws.send(JSON.stringify({ type: "metrics", metrics }));
|
|
118
122
|
}
|
|
119
123
|
catch {
|
|
120
|
-
// ping write failed — connection dead
|
|
124
|
+
// ping or send write failed — connection dead
|
|
121
125
|
clearHeartbeat();
|
|
122
126
|
ws.terminate();
|
|
123
127
|
}
|
|
@@ -127,6 +131,7 @@ export function connectRelay(options) {
|
|
|
127
131
|
console.log(`[relay-ws] Connected. Registering agent "${options.agentName}"...`);
|
|
128
132
|
reconnectDelay = 1000; // reset backoff
|
|
129
133
|
relayWsRef = ws;
|
|
134
|
+
updateMetrics({ agentName: options.agentName });
|
|
130
135
|
// Send registration message
|
|
131
136
|
const reg = {
|
|
132
137
|
type: "register",
|
|
@@ -440,3 +445,14 @@ function extractSSEData(sse) {
|
|
|
440
445
|
}
|
|
441
446
|
return null;
|
|
442
447
|
}
|
|
448
|
+
/**
 * Send a failure event to the relay for observability storage.
 * Fire-and-forget: the event is silently dropped when the socket is absent,
 * not open, or the write fails — failure reporting must never throw.
 */
export function sendFailureEvent(kind, label, message) {
    const ws = relayWsRef;
    if (ws?.readyState !== WebSocket.OPEN)
        return;
    const payload = JSON.stringify({ type: "failure_event", kind, label, message });
    try {
        ws.send(payload);
    }
    catch {
        // best-effort
    }
}
|
package/dist/role-module.js
CHANGED
|
@@ -14,7 +14,7 @@ import { rolesDir, playbooksDir, productsDir } from "./self.js";
|
|
|
14
14
|
// ---------------------------------------------------------------------------
|
|
15
15
|
// Parsing
|
|
16
16
|
// ---------------------------------------------------------------------------
|
|
17
|
-
function parseRole(name, raw) {
|
|
17
|
+
export function parseRole(name, raw) {
|
|
18
18
|
const lines = raw.split("\n");
|
|
19
19
|
let description = "";
|
|
20
20
|
const triggers = [];
|
|
@@ -72,7 +72,7 @@ function parseRole(name, raw) {
|
|
|
72
72
|
}
|
|
73
73
|
return { name, description, triggers, include, exclude, customRules: customLines.join("\n").trim(), raw };
|
|
74
74
|
}
|
|
75
|
-
function parseProduct(name, raw) {
|
|
75
|
+
export function parseProduct(name, raw) {
|
|
76
76
|
let playbook = "";
|
|
77
77
|
const productIds = [];
|
|
78
78
|
const lines = raw.split("\n");
|