metame-cli 1.4.34 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +146 -32
- package/index.js +148 -9
- package/package.json +6 -3
- package/scripts/daemon-admin-commands.js +254 -9
- package/scripts/daemon-agent-commands.js +64 -6
- package/scripts/daemon-agent-tools.js +26 -5
- package/scripts/daemon-bridges.js +110 -20
- package/scripts/daemon-claude-engine.js +698 -239
- package/scripts/daemon-command-router.js +24 -8
- package/scripts/daemon-default.yaml +28 -4
- package/scripts/daemon-engine-runtime.js +275 -0
- package/scripts/daemon-exec-commands.js +10 -4
- package/scripts/daemon-notify.js +37 -1
- package/scripts/daemon-runtime-lifecycle.js +2 -1
- package/scripts/daemon-session-commands.js +52 -4
- package/scripts/daemon-session-store.js +2 -1
- package/scripts/daemon-task-scheduler.js +68 -38
- package/scripts/daemon-user-acl.js +26 -9
- package/scripts/daemon.js +81 -17
- package/scripts/distill.js +323 -18
- package/scripts/docs/agent-guide.md +12 -0
- package/scripts/docs/maintenance-manual.md +119 -0
- package/scripts/docs/pointer-map.md +88 -0
- package/scripts/feishu-adapter.js +6 -1
- package/scripts/hooks/stop-session-capture.js +243 -0
- package/scripts/memory-extract.js +100 -5
- package/scripts/memory-nightly-reflect.js +196 -11
- package/scripts/memory.js +134 -3
- package/scripts/mentor-engine.js +405 -0
- package/scripts/platform.js +2 -0
- package/scripts/providers.js +169 -21
- package/scripts/schema.js +12 -0
- package/scripts/session-analytics.js +245 -12
- package/scripts/skill-changelog.js +245 -0
- package/scripts/skill-evolution.js +288 -5
- package/scripts/usage-classifier.js +1 -1
- package/scripts/daemon-admin-commands.test.js +0 -333
- package/scripts/daemon-task-envelope.test.js +0 -59
- package/scripts/daemon-task-scheduler.test.js +0 -106
- package/scripts/reliability-core.test.js +0 -280
- package/scripts/skill-evolution.test.js +0 -113
- package/scripts/task-board.test.js +0 -83
- package/scripts/test_daemon.js +0 -1407
- package/scripts/utils.test.js +0 -192
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* MetaMe Skill Changelog
|
|
5
|
+
*
|
|
6
|
+
* Structured log of all skill lifecycle events:
|
|
7
|
+
* installed, evolved, hot_detected, queue_resolved, sunset
|
|
8
|
+
*
|
|
9
|
+
* Data: ~/.metame/skill_changelog.jsonl (append-only, one JSON per line)
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
'use strict';
|
|
13
|
+
|
|
14
|
+
const fs = require('fs');
|
|
15
|
+
const path = require('path');
|
|
16
|
+
const os = require('os');
|
|
17
|
+
const yaml = require('js-yaml');
|
|
18
|
+
|
|
19
|
+
const METAME_DIR = path.join(os.homedir(), '.metame');
|
|
20
|
+
const CHANGELOG_FILE = path.join(METAME_DIR, 'skill_changelog.jsonl');
|
|
21
|
+
const LAST_SESSION_FILE = path.join(METAME_DIR, 'last_session_start.txt');
|
|
22
|
+
|
|
23
|
+
// Skill directories (same as skill-evolution.js)
|
|
24
|
+
const SKILL_DIRS = [
|
|
25
|
+
path.join(os.homedir(), '.claude', 'skills'),
|
|
26
|
+
path.join(os.homedir(), '.opencode', 'skills'),
|
|
27
|
+
];
|
|
28
|
+
|
|
29
|
+
/**
 * Map a changelog action name to a one-character display icon.
 * Unknown actions get the neutral dot.
 * @param {string} action - e.g. 'evolved', 'installed', 'hot_detected'
 * @returns {string} single display character
 */
function getActionIcon(action) {
  switch (action) {
    case 'evolved':
      return '↑';
    case 'installed':
      return '+';
    case 'hot_detected':
      return '!';
    default:
      return '·';
  }
}
|
|
34
|
+
|
|
35
|
+
/**
 * Walk every configured skill directory and collect installed skills.
 * A subdirectory counts as a skill only if it contains a SKILL.md file.
 * Names are deduplicated across directories; the first directory in
 * SKILL_DIRS wins on a collision.
 * @returns {Array<{name: string, skillDir: string}>}
 */
function _scanSkillDirs() {
  const found = [];
  const taken = new Set();
  for (const baseDir of SKILL_DIRS) {
    let entries;
    try {
      entries = fs.readdirSync(baseDir);
    } catch {
      continue; // directory missing or unreadable — skip silently
    }
    for (const entryName of entries) {
      if (taken.has(entryName)) continue;
      const candidateDir = path.join(baseDir, entryName);
      let hasManifest = true;
      try {
        fs.statSync(path.join(candidateDir, 'SKILL.md'));
      } catch {
        hasManifest = false; // no SKILL.md → not a skill
      }
      if (!hasManifest) continue;
      taken.add(entryName);
      found.push({ name: entryName, skillDir: candidateDir });
    }
  }
  return found;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Append one skill-lifecycle event to the changelog (JSONL, one object
 * per line). Failures are swallowed: changelog writes must never break
 * the caller.
 * @param {string} action - installed|evolved|hot_detected|queue_resolved|sunset
 * @param {string} skill - skill name (falsy → recorded as null)
 * @param {string} summary - one-line human summary
 * @param {string} [detail] - optional extra detail
 */
function appendChange(action, skill, summary, detail) {
  try {
    fs.mkdirSync(METAME_DIR, { mode: 0o700, recursive: true });
    const record = {
      ts: new Date().toISOString(),
      action,
      skill: skill || null,
      summary: summary || '',
      // Only include `detail` when it is truthy, matching the JSONL schema.
      ...(detail ? { detail } : {}),
    };
    fs.appendFileSync(CHANGELOG_FILE, `${JSON.stringify(record)}\n`, 'utf8');
  } catch { /* non-fatal: best-effort logging */ }
}
|
|
76
|
+
|
|
77
|
+
/**
 * Read changelog entries, optionally only those strictly newer than
 * `since`. Unparseable lines are skipped; a missing or unreadable file
 * yields an empty array.
 * @param {string} [since] - ISO timestamp; if omitted all entries return
 * @returns {Array<object>}
 */
function getRecentChanges(since) {
  let raw;
  try {
    raw = fs.readFileSync(CHANGELOG_FILE, 'utf8');
  } catch {
    return []; // file absent — no history yet
  }
  const entries = [];
  for (const line of raw.trim().split('\n')) {
    if (!line) continue;
    try {
      const parsed = JSON.parse(line);
      if (parsed) entries.push(parsed); // drop JSON null/false lines too
    } catch { /* skip corrupt line */ }
  }
  if (!since) return entries;
  const cutoffMs = new Date(since).getTime();
  return entries.filter(e => new Date(e.ts).getTime() > cutoffMs);
}
|
|
93
|
+
|
|
94
|
+
/**
 * Read the timestamp of the last recorded session start.
 * @returns {string|null} ISO timestamp, or null if the marker file is
 *   absent, empty, or unreadable
 */
function getLastSessionStart() {
  let stamp = null;
  try {
    stamp = fs.readFileSync(LAST_SESSION_FILE, 'utf8').trim() || null;
  } catch { /* file missing — treated as "never" */ }
  return stamp;
}
|
|
106
|
+
|
|
107
|
+
/**
 * Record "now" as the start of the current session.
 * Best-effort: directory-creation or write failures are ignored.
 */
function writeSessionStart() {
  const nowIso = new Date().toISOString();
  try {
    fs.mkdirSync(METAME_DIR, { mode: 0o700, recursive: true });
    fs.writeFileSync(LAST_SESSION_FILE, nowIso, 'utf8');
  } catch { /* non-fatal: session stamp is advisory */ }
}
|
|
116
|
+
|
|
117
|
+
/**
 * Count skills installed across all configured skill directories.
 * @returns {number}
 */
function countInstalledSkills() {
  const skills = _scanSkillDirs();
  return skills.length;
}
|
|
124
|
+
|
|
125
|
+
/**
 * Build dashboard statistics for all installed skills.
 * @returns {object} { installed, queuePending, recentChanges }
 *   installed:     [{name, hasEvolution, evolutionCount, lastEvolved}]
 *   queuePending:  evolution-queue items with status pending|notified
 *   recentChanges: changelog entries from the last 7 days
 */
function getSkillStats() {
  // Per-skill evolution info, read best-effort from each evolution.json.
  const installed = _scanSkillDirs().map(({ name, skillDir }) => {
    const info = { name, hasEvolution: false, evolutionCount: 0, lastEvolved: null };
    try {
      const evo = JSON.parse(fs.readFileSync(path.join(skillDir, 'evolution.json'), 'utf8'));
      const total =
        (evo.fixes || []).length +
        (evo.preferences || []).length +
        (evo.contexts || []).length;
      info.evolutionCount = total;
      info.hasEvolution = total > 0;
      info.lastEvolved = evo.last_updated || null;
    } catch { /* no evolution.json — keep defaults */ }
    return info;
  });

  // Pending/notified items from the evolution queue (best-effort read).
  let queuePending = [];
  try {
    const queuePath = path.join(METAME_DIR, 'evolution_queue.yaml');
    const parsed = yaml.load(fs.readFileSync(queuePath, 'utf8'));
    if (parsed && Array.isArray(parsed.items)) {
      queuePending = parsed.items.filter(
        i => i.status === 'pending' || i.status === 'notified'
      );
    }
  } catch { /* queue file absent */ }

  // Changelog entries from the trailing 7-day window.
  const weekAgoIso = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
  const recentChanges = getRecentChanges(weekAgoIso);

  return { installed, queuePending, recentChanges };
}
|
|
161
|
+
|
|
162
|
+
/**
 * Render the skill dashboard as a human-readable multi-line string:
 * installed skills (evolved ones first), pending signal-buffer size,
 * recent changelog entries, and unresolved queue items.
 * @returns {string}
 */
function formatDashboard() {
  const stats = getSkillStats();
  const out = [];

  out.push(`技能面板 (${stats.installed.length} installed)`);
  out.push('━'.repeat(42));

  // Skills with accumulated experience sort ahead of untouched ones;
  // ties break alphabetically.
  const ordered = [...stats.installed].sort((a, b) => {
    if (a.hasEvolution !== b.hasEvolution) return b.hasEvolution ? 1 : -1;
    return a.name.localeCompare(b.name);
  });

  for (const skill of ordered) {
    const status = skill.hasEvolution ? '活跃' : '待定';
    const evoInfo = skill.hasEvolution
      ? `经验: ${skill.evolutionCount} 条 | 最近演化: ${formatRelativeTime(skill.lastEvolved)}`
      : `经验: 0 条`;
    out.push(`[${status}] ${skill.name.padEnd(28)} | ${evoInfo}`);
  }

  // Undistilled signal-buffer size (best-effort; absent file → no section).
  try {
    const sigPath = path.join(METAME_DIR, 'skill_signals.jsonl');
    const pending = fs.readFileSync(sigPath, 'utf8').trim().split('\n').filter(Boolean).length;
    if (pending > 0) {
      out.push('', `信号缓冲: ${pending} 条待蒸馏`);
    }
  } catch { /* no signal file */ }

  // Last 10 changelog entries from the 7-day window.
  if (stats.recentChanges.length > 0) {
    out.push('', '最近变更 (7天内):');
    for (const c of stats.recentChanges.slice(-10)) {
      const monthDay = c.ts.substring(5, 10); // MM-DD
      out.push(` ${monthDay} ${(c.skill || 'system').padEnd(28)} ${getActionIcon(c.action)} ${c.summary}`);
    }
  }

  // Unresolved evolution-queue items, numbered 1..n.
  if (stats.queuePending.length > 0) {
    out.push('', '待处理队列:');
    stats.queuePending.forEach((q, idx) => {
      const evidenceStr = q.evidence_count ? `${q.evidence_count}条证据` : '';
      out.push(` ${idx + 1}. [${q.type}] ${q.reason || q.search_hint || '未知'} (${evidenceStr})`);
    });
  }

  return out.join('\n');
}
|
|
220
|
+
|
|
221
|
+
/**
 * Format an ISO timestamp as a short relative-time label (Chinese).
 * @param {string|null|undefined} isoStr - ISO timestamp; falsy or
 *   unparseable input yields '从未' ("never")
 * @returns {string} e.g. '刚才', '3h 前', '昨天', '5天前'
 */
function formatRelativeTime(isoStr) {
  if (!isoStr) return '从未';
  const thenMs = new Date(isoStr).getTime();
  // Fix: garbage timestamps previously fell through to the literal 'NaN天前'.
  if (Number.isNaN(thenMs)) return '从未';
  const diffH = Math.floor((Date.now() - thenMs) / 3600000);
  if (diffH < 1) return '刚才'; // also covers future timestamps
  if (diffH < 24) return `${diffH}h 前`;
  const diffD = Math.floor(diffH / 24);
  // diffH >= 24 here, so diffD >= 1 — the old `diffD === 0` branch was unreachable.
  if (diffD === 1) return '昨天';
  return `${diffD}天前`;
}
|
|
235
|
+
|
|
236
|
+
module.exports = {
|
|
237
|
+
appendChange,
|
|
238
|
+
getRecentChanges,
|
|
239
|
+
getLastSessionStart,
|
|
240
|
+
writeSessionStart,
|
|
241
|
+
countInstalledSkills,
|
|
242
|
+
getSkillStats,
|
|
243
|
+
formatDashboard,
|
|
244
|
+
getActionIcon,
|
|
245
|
+
};
|
|
@@ -31,6 +31,7 @@
|
|
|
31
31
|
const fs = require('fs');
|
|
32
32
|
const path = require('path');
|
|
33
33
|
const os = require('os');
|
|
34
|
+
const { appendChange } = require('./skill-changelog');
|
|
34
35
|
|
|
35
36
|
const HOME = os.homedir();
|
|
36
37
|
const METAME_DIR = path.join(HOME, '.metame');
|
|
@@ -39,8 +40,15 @@ const SKILL_SIGNAL_OVERFLOW_FILE = path.join(METAME_DIR, 'skill_signals.overflow
|
|
|
39
40
|
const SKILL_SIGNAL_LOCK_FILE = path.join(METAME_DIR, 'skill_signals.lock');
|
|
40
41
|
const EVOLUTION_QUEUE_FILE = path.join(METAME_DIR, 'evolution_queue.yaml');
|
|
41
42
|
const EVOLUTION_POLICY_FILE = path.join(METAME_DIR, 'evolution_policy.yaml');
|
|
43
|
+
const WORKFLOW_SKETCHES_FILE = path.join(METAME_DIR, 'workflow_sketches.yaml');
|
|
42
44
|
const BRAIN_FILE = path.join(HOME, '.claude_profile.yaml');
|
|
43
45
|
|
|
46
|
+
// Read-only exploration tools — excluded from workflow candidate detection
|
|
47
|
+
const READONLY_TOOLS = new Set([
|
|
48
|
+
'Read', 'Glob', 'Grep', 'ListDir', 'ListFiles',
|
|
49
|
+
'ReadFile', 'GrepSearch', 'SearchFiles',
|
|
50
|
+
]);
|
|
51
|
+
|
|
44
52
|
// Skill directories (check both locations)
|
|
45
53
|
const SKILL_DIRS = [
|
|
46
54
|
path.join(HOME, '.claude', 'skills'),
|
|
@@ -68,6 +76,14 @@ const DEFAULT_POLICY = {
|
|
|
68
76
|
max_updates_per_analysis: 3,
|
|
69
77
|
max_gaps_per_analysis: 2,
|
|
70
78
|
|
|
79
|
+
// Workflow discovery
|
|
80
|
+
workflow_discovery_interval: 2, // every N cold-path cycles
|
|
81
|
+
min_signals_for_workflow: 3, // minimum workflow_candidate signals to analyze
|
|
82
|
+
workflow_proposal_threshold: 4, // occurrence_count needed to propose
|
|
83
|
+
workflow_min_confidence: 0.7, // Haiku confidence threshold
|
|
84
|
+
workflow_max_sketches: 10, // max persisted sketches
|
|
85
|
+
workflow_stale_days: 14, // auto-purge after N days without new occurrence
|
|
86
|
+
|
|
71
87
|
// Self-evaluation
|
|
72
88
|
self_eval_interval: 5, // every N cold-path runs, evaluate policy effectiveness
|
|
73
89
|
cold_path_run_count: 0,
|
|
@@ -113,6 +129,38 @@ Respond with ONLY a JSON code block:
|
|
|
113
129
|
\\\`\\\`\\\`
|
|
114
130
|
|
|
115
131
|
If no actionable insights, respond with exactly: NO_EVOLUTION`,
|
|
132
|
+
|
|
133
|
+
workflow_prompt_template: `You are a workflow pattern analyzer for an AI assistant called MetaMe.
|
|
134
|
+
Analyze recent multi-tool interaction signals and cluster them into recurring workflow patterns.
|
|
135
|
+
|
|
136
|
+
KNOWN SKETCHES (existing pattern pool):
|
|
137
|
+
\${knownSketches}
|
|
138
|
+
|
|
139
|
+
NEW SIGNALS (workflow candidates):
|
|
140
|
+
\${workflowSignals}
|
|
141
|
+
|
|
142
|
+
CLUSTERING RULES (MUST follow):
|
|
143
|
+
1. EXISTING SKETCHES are your "known pattern pool". For each new signal, FIRST try to match it to an existing sketch.
|
|
144
|
+
2. If a signal matches an existing sketch: output that sketch's EXACT ID, increment occurrence_count by 1, append the signal's prompt to example_prompts.
|
|
145
|
+
3. Only create a NEW sketch (id: null) when the signal clearly represents a workflow NOT covered by any existing sketch.
|
|
146
|
+
4. A workflow must involve 2+ distinct ACTION steps (search→summarize→post). Pure exploration (read files, search code) is NOT a workflow.
|
|
147
|
+
5. Do NOT rephrase existing sketch patterns — preserve them exactly.
|
|
148
|
+
|
|
149
|
+
Respond with ONLY a JSON code block:
|
|
150
|
+
\\\`\\\`\\\`json
|
|
151
|
+
[
|
|
152
|
+
{
|
|
153
|
+
"id": "existing-sketch-id-or-null",
|
|
154
|
+
"pattern": "description of the workflow (Chinese preferred)",
|
|
155
|
+
"tools_signature": ["WebSearch", "Bash"],
|
|
156
|
+
"example_prompts": ["user prompt example"],
|
|
157
|
+
"occurrence_count": 1,
|
|
158
|
+
"confidence": 0.8
|
|
159
|
+
}
|
|
160
|
+
]
|
|
161
|
+
\\\`\\\`\\\`
|
|
162
|
+
|
|
163
|
+
If no meaningful workflows found, respond with exactly: NO_WORKFLOWS`,
|
|
116
164
|
};
|
|
117
165
|
|
|
118
166
|
function clampInt(value, fallback, min, max) {
|
|
@@ -152,6 +200,12 @@ function sanitizePolicy(input) {
|
|
|
152
200
|
min_evidence_for_gap: clampInt(merged.min_evidence_for_gap, DEFAULT_POLICY.min_evidence_for_gap, 1, 20),
|
|
153
201
|
max_updates_per_analysis: clampInt(merged.max_updates_per_analysis, DEFAULT_POLICY.max_updates_per_analysis, 1, 20),
|
|
154
202
|
max_gaps_per_analysis: clampInt(merged.max_gaps_per_analysis, DEFAULT_POLICY.max_gaps_per_analysis, 1, 20),
|
|
203
|
+
workflow_discovery_interval: clampInt(merged.workflow_discovery_interval, DEFAULT_POLICY.workflow_discovery_interval, 1, 100),
|
|
204
|
+
min_signals_for_workflow: clampInt(merged.min_signals_for_workflow, DEFAULT_POLICY.min_signals_for_workflow, 1, 100),
|
|
205
|
+
workflow_proposal_threshold: clampInt(merged.workflow_proposal_threshold, DEFAULT_POLICY.workflow_proposal_threshold, 2, 50),
|
|
206
|
+
workflow_min_confidence: Math.max(0.1, Math.min(1.0, Number(merged.workflow_min_confidence) || DEFAULT_POLICY.workflow_min_confidence)),
|
|
207
|
+
workflow_max_sketches: clampInt(merged.workflow_max_sketches, DEFAULT_POLICY.workflow_max_sketches, 1, 50),
|
|
208
|
+
workflow_stale_days: clampInt(merged.workflow_stale_days, DEFAULT_POLICY.workflow_stale_days, 1, 365),
|
|
155
209
|
self_eval_interval: clampInt(merged.self_eval_interval, DEFAULT_POLICY.self_eval_interval, 1, 1000),
|
|
156
210
|
cold_path_run_count: clampInt(merged.cold_path_run_count, DEFAULT_POLICY.cold_path_run_count, 0, 1000000),
|
|
157
211
|
complaint_patterns: sanitizePatternList(merged.complaint_patterns, DEFAULT_POLICY.complaint_patterns),
|
|
@@ -159,6 +213,9 @@ function sanitizePolicy(input) {
|
|
|
159
213
|
prompt_template: (typeof merged.prompt_template === 'string' && merged.prompt_template.trim())
|
|
160
214
|
? merged.prompt_template
|
|
161
215
|
: DEFAULT_POLICY.prompt_template,
|
|
216
|
+
workflow_prompt_template: (typeof merged.workflow_prompt_template === 'string' && merged.workflow_prompt_template.trim())
|
|
217
|
+
? merged.workflow_prompt_template
|
|
218
|
+
: DEFAULT_POLICY.workflow_prompt_template,
|
|
162
219
|
};
|
|
163
220
|
return policy;
|
|
164
221
|
}
|
|
@@ -226,8 +283,14 @@ function extractSkillSignal(prompt, output, error, files, cwd, toolUsageLog) {
|
|
|
226
283
|
const outputText = typeof output === 'string' ? output : '';
|
|
227
284
|
const hasToolFailure = /(?:failed|error|not found|not available|skill.{0,20}(?:missing|absent|not.{0,10}install))/i.test(outputText);
|
|
228
285
|
|
|
229
|
-
//
|
|
230
|
-
|
|
286
|
+
// Workflow candidate detection: multi-tool chain with at least 1 action tool
|
|
287
|
+
const toolNames = tools.map(t => t.name).filter(Boolean);
|
|
288
|
+
const hasActionTool = toolNames.some(n => !READONLY_TOOLS.has(n));
|
|
289
|
+
const isWorkflowCandidate = !hasSkills && !hasError && !hasToolFailure &&
|
|
290
|
+
hasActionTool && toolNames.length >= 2;
|
|
291
|
+
|
|
292
|
+
// Skip if no skill involvement, no failure, and not a workflow candidate
|
|
293
|
+
if (!hasSkills && !hasError && !hasToolFailure && !isWorkflowCandidate) return null;
|
|
231
294
|
|
|
232
295
|
return {
|
|
233
296
|
ts: new Date().toISOString(),
|
|
@@ -240,6 +303,7 @@ function extractSkillSignal(prompt, output, error, files, cwd, toolUsageLog) {
|
|
|
240
303
|
files_modified: (files || []).slice(0, 10),
|
|
241
304
|
cwd: cwd || null,
|
|
242
305
|
outcome: (hasError || hasToolFailure) ? 'error' : (outputText ? 'success' : 'empty'),
|
|
306
|
+
workflow_candidate: isWorkflowCandidate || undefined,
|
|
243
307
|
};
|
|
244
308
|
}
|
|
245
309
|
|
|
@@ -425,6 +489,13 @@ function checkHotEvolution(signal) {
|
|
|
425
489
|
|
|
426
490
|
saveEvolutionQueue(yaml, queue);
|
|
427
491
|
|
|
492
|
+
// Log hot detections to changelog
|
|
493
|
+
if (signal.error || signal.has_tool_failure) {
|
|
494
|
+
for (const sk of (signal.skills_invoked || [])) {
|
|
495
|
+
appendChange('hot_detected', sk, `failure detected: ${(signal.error || 'tool_failure').substring(0, 80)}`);
|
|
496
|
+
}
|
|
497
|
+
}
|
|
498
|
+
|
|
428
499
|
// Rule 4: Track insight outcomes (success/failure per skill)
|
|
429
500
|
if (signal.skills_invoked && signal.skills_invoked.length > 0) {
|
|
430
501
|
const isSuccess = signal.outcome === 'success' && !signal.error && !signal.has_tool_failure;
|
|
@@ -589,6 +660,8 @@ async function distillSkills() {
|
|
|
589
660
|
const result = await callHaiku(prompt, distillEnv, 90000);
|
|
590
661
|
|
|
591
662
|
if (result.includes('NO_EVOLUTION')) {
|
|
663
|
+
// Run workflow discovery before clearing signals
|
|
664
|
+
try { await discoverWorkflows(signals, distillEnv); } catch {}
|
|
592
665
|
clearSignals();
|
|
593
666
|
bumpRunCount(yaml, policy);
|
|
594
667
|
return { updates: [], missing_skills: [] };
|
|
@@ -637,6 +710,9 @@ async function distillSkills() {
|
|
|
637
710
|
// Log this run for self-evaluation
|
|
638
711
|
logEvolutionRun(yaml, policy, signals.length, updates.length, missingSkills.length);
|
|
639
712
|
|
|
713
|
+
// Run workflow discovery before clearing signals
|
|
714
|
+
try { await discoverWorkflows(signals, distillEnv); } catch {}
|
|
715
|
+
|
|
640
716
|
clearSignals();
|
|
641
717
|
|
|
642
718
|
// Self-evaluation: periodically let Haiku review and rewrite the policy
|
|
@@ -770,6 +846,190 @@ RULES:
|
|
|
770
846
|
}
|
|
771
847
|
}
|
|
772
848
|
|
|
849
|
+
// ─────────────────────────────────────────────
|
|
850
|
+
// Workflow Discovery (Cold Path extension)
|
|
851
|
+
// ─────────────────────────────────────────────
|
|
852
|
+
|
|
853
|
+
/**
 * Load persisted workflow sketches from disk.
 * Missing, unreadable, or malformed files fall back to an empty store.
 * @param {object} yaml - js-yaml module
 * @returns {{version: number, last_updated: string|null, sketches: Array<object>}}
 */
function loadWorkflowSketches(yaml) {
  const empty = { version: 1, last_updated: null, sketches: [] };
  try {
    if (!fs.existsSync(WORKFLOW_SKETCHES_FILE)) return empty;
    const parsed = yaml.load(fs.readFileSync(WORKFLOW_SKETCHES_FILE, 'utf8')) || {};
    return {
      version: parsed.version || 1,
      last_updated: parsed.last_updated || null,
      sketches: Array.isArray(parsed.sketches) ? parsed.sketches : [],
    };
  } catch {
    return empty; // corrupt YAML — start fresh
  }
}
|
|
867
|
+
|
|
868
|
+
/**
 * Persist workflow sketches to disk, stamping last_updated (mutates
 * `data`). Write failures are swallowed — sketches are a best-effort
 * cache, not critical state.
 * @param {object} yaml - js-yaml module
 * @param {object} data - sketch store ({version, last_updated, sketches})
 */
function saveWorkflowSketches(yaml, data) {
  try {
    data.last_updated = new Date().toISOString();
    const serialized = yaml.dump(data, { lineWidth: -1 });
    fs.writeFileSync(WORKFLOW_SKETCHES_FILE, serialized, 'utf8');
  } catch { /* non-fatal */ }
}
|
|
874
|
+
|
|
875
|
+
/**
 * Merge Haiku-clustered workflow results into the persisted sketch list.
 * - Entries whose `id` matches an existing sketch: bump occurrence_count,
 *   refresh last_seen/confidence, and append deduplicated example prompts
 *   without growing past 5.
 * - Entries with a null/unknown id: create a fresh sketch with a
 *   generated id.
 * The result is capped at `maxSketches`, keeping the most recently seen.
 * Inputs are not mutated; existing sketches are shallow-copied.
 * @param {Array<object>} existing - persisted sketches
 * @param {Array<object>} clustered - Haiku clustering output
 * @param {number} maxSketches - hard cap on persisted sketches
 * @returns {Array<object>} merged sketch array
 */
function mergeWorkflowSketches(existing, clustered, maxSketches) {
  const byId = new Map(existing.map(s => [s.id, { ...s }]));
  const nowIso = new Date().toISOString();

  for (const cluster of clustered) {
    const known = cluster.id ? byId.get(cluster.id) : undefined;
    if (known) {
      // Update path: one more observed occurrence of a known pattern.
      known.occurrence_count = (known.occurrence_count || 0) + 1;
      known.last_seen = nowIso;
      if (cluster.confidence != null) known.confidence = cluster.confidence;
      // Deduplicate example prompts, never growing beyond 5.
      const examples = new Set(known.example_prompts || []);
      for (const prompt of (cluster.example_prompts || [])) {
        if (!examples.has(prompt) && examples.size < 5) examples.add(prompt);
      }
      known.example_prompts = [...examples];
    } else {
      // Creation path: brand-new sketch with a generated unique id.
      const freshId = `wf-${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 8)}`;
      byId.set(freshId, {
        id: freshId,
        pattern: cluster.pattern || 'unknown',
        tools_signature: cluster.tools_signature || [],
        example_prompts: (cluster.example_prompts || []).slice(0, 5),
        occurrence_count: cluster.occurrence_count || 1,
        first_seen: nowIso,
        last_seen: nowIso,
        confidence: cluster.confidence || 0.5,
        proposed: false,
      });
    }
  }

  // Enforce the cap: keep only the most recently seen sketches.
  let merged = [...byId.values()];
  if (merged.length > maxSketches) {
    merged.sort((a, b) => new Date(b.last_seen || 0).getTime() - new Date(a.last_seen || 0).getTime());
    merged = merged.slice(0, maxSketches);
  }
  return merged;
}
|
|
926
|
+
|
|
927
|
+
/**
 * Cold-path workflow discovery: cluster workflow_candidate signals via
 * Haiku against the persisted sketch pool, purge stale sketches, and
 * promote sketches that cross the occurrence/confidence thresholds into
 * the evolution queue as workflow_proposal items.
 * All failures are non-fatal (logged and swallowed).
 * @param {Array<object>} signals - raw skill signals from the buffer
 * @param {object} distillEnv - environment passed through to callHaiku
 */
async function discoverWorkflows(signals, distillEnv) {
  let yaml;
  try { yaml = require('js-yaml'); } catch { return; }

  const policy = loadPolicy();

  // Gate 1: only run every workflow_discovery_interval cold-path cycles.
  const runCount = policy.cold_path_run_count || 0;
  if (runCount % policy.workflow_discovery_interval !== 0) return;

  // Gate 2: enough workflow-candidate signals to be worth clustering.
  const candidates = signals.filter(s => s.workflow_candidate);
  if (candidates.length < policy.min_signals_for_workflow) return;

  const sketchData = loadWorkflowSketches(yaml);

  // Render the known pattern pool for the prompt template.
  const knownSketches = sketchData.sketches.length > 0
    ? sketchData.sketches
        .map(s => `- id: "${s.id}" pattern: "${s.pattern}" tools: [${(s.tools_signature || []).join(',')}] count: ${s.occurrence_count}`)
        .join('\n')
    : '(none)';

  // Render the new candidate signals (prompt text truncated to 120 chars).
  const workflowSignals = candidates
    .map((s, i) => {
      const toolNames = (s.tools_used || []).map(t => t.name).filter(Boolean);
      return `${i + 1}. prompt="${(s.prompt || '').substring(0, 120)}" tools=[${toolNames.join(',')}]`;
    })
    .join('\n');

  const prompt = policy.workflow_prompt_template
    .replace(/\$\{knownSketches\}/g, knownSketches)
    .replace(/\$\{workflowSignals\}/g, workflowSignals);

  try {
    const result = await callHaiku(prompt, distillEnv, 60000);
    if (result.includes('NO_WORKFLOWS')) return;

    const jsonMatch = result.match(/```json\s*([\s\S]*?)```/);
    if (!jsonMatch) return;

    const clustered = JSON.parse(jsonMatch[1]);
    if (!Array.isArray(clustered) || clustered.length === 0) return;

    // Fold clustered results into the persisted pool (capped at max).
    sketchData.sketches = mergeWorkflowSketches(sketchData.sketches, clustered, policy.workflow_max_sketches);

    // Purge: drop unproposed sketches not seen recently, with a 90-day hard cap.
    const staleCutoff = Date.now() - policy.workflow_stale_days * 24 * 60 * 60 * 1000;
    const veryOldCutoff = Date.now() - 90 * 24 * 60 * 60 * 1000;
    sketchData.sketches = sketchData.sketches.filter(s => {
      if (s.proposed) return true; // keep proposed until dismissed
      const lastSeenMs = new Date(s.last_seen || s.first_seen || 0).getTime();
      if (lastSeenMs <= veryOldCutoff) return false; // 90 days hard cap
      return lastSeenMs > staleCutoff || (s.occurrence_count || 0) >= 2;
    });

    // Promote mature sketches (count + confidence thresholds) to the queue.
    const queue = loadEvolutionQueue(yaml);
    for (const sketch of sketchData.sketches) {
      if (sketch.proposed) continue;
      const matureEnough = (sketch.occurrence_count || 0) >= policy.workflow_proposal_threshold;
      const confidentEnough = (sketch.confidence || 0) >= policy.workflow_min_confidence;
      if (!matureEnough || !confidentEnough) continue;
      addToQueue(queue, {
        type: 'workflow_proposal',
        skill_name: null,
        reason: `检测到重复工作流: ${sketch.pattern}`,
        search_hint: sketch.pattern,
        evidence_count: sketch.occurrence_count,
        workflow_sketch_id: sketch.id,
        example_prompt: (sketch.example_prompts || [])[0] || '',
        tools_signature: sketch.tools_signature || [],
      });
      sketch.proposed = true; // avoid duplicate proposals next cycle
    }
    saveEvolutionQueue(yaml, queue);
    saveWorkflowSketches(yaml, sketchData);
  } catch (err) {
    try { console.log(`⚠️ Workflow discovery failed (non-fatal): ${err.message}`); } catch {}
  }
}
|
|
1013
|
+
|
|
1014
|
+
/**
 * Reset a workflow sketch after the user dismisses its proposal:
 * clears the proposed flag, occurrence count, and example prompts so
 * evidence can re-accumulate from scratch.
 * @param {string} sketchId - id of the sketch to reset
 * @returns {boolean} true if the sketch existed and was reset
 */
function resetWorkflowSketch(sketchId) {
  let yaml;
  try { yaml = require('js-yaml'); } catch { return false; }

  const store = loadWorkflowSketches(yaml);
  const target = store.sketches.find(s => s.id === sketchId);
  if (!target) return false;

  Object.assign(target, { proposed: false, occurrence_count: 0, example_prompts: [] });
  saveWorkflowSketches(yaml, store);
  return true;
}
|
|
1032
|
+
|
|
773
1033
|
// ─────────────────────────────────────────────
|
|
774
1034
|
// Evolution Queue Management
|
|
775
1035
|
// ─────────────────────────────────────────────
|
|
@@ -800,13 +1060,21 @@ function saveEvolutionQueue(yaml, queue) {
|
|
|
800
1060
|
} catch {}
|
|
801
1061
|
}
|
|
802
1062
|
|
|
1063
|
+
// Per-type extra dedup predicates: type → (queueItem, newEntry) → bool.
// Register a predicate here whenever a new queue type with its own
// dedup key is introduced; types without an entry dedup on the core
// key (type + skill_name + pending status) alone.
const QUEUE_DEDUP_MATCH = {
  skill_gap: (item, entry) =>
    (item.search_hint || '') === (entry.search_hint || ''),
  workflow_proposal: (item, entry) =>
    (item.workflow_sketch_id || '') === (entry.workflow_sketch_id || ''),
};
|
|
1069
|
+
|
|
803
1070
|
function addToQueue(queue, entry) {
|
|
804
|
-
// Dedup pending entries by core key
|
|
1071
|
+
// Dedup pending entries by core key, with per-type extra field matching.
|
|
1072
|
+
const dedupFn = QUEUE_DEDUP_MATCH[entry.type];
|
|
805
1073
|
const existing = queue.items.find(i =>
|
|
806
1074
|
i.type === entry.type &&
|
|
807
1075
|
i.skill_name === entry.skill_name &&
|
|
808
1076
|
i.status === 'pending' &&
|
|
809
|
-
(
|
|
1077
|
+
(!dedupFn || dedupFn(i, entry))
|
|
810
1078
|
);
|
|
811
1079
|
|
|
812
1080
|
if (existing) {
|
|
@@ -886,6 +1154,7 @@ function resolveQueueItem(type, skillName, resolution) {
|
|
|
886
1154
|
item.status = resolution; // 'installed' | 'dismissed'
|
|
887
1155
|
item.resolved_at = new Date().toISOString();
|
|
888
1156
|
saveEvolutionQueue(yaml, queue);
|
|
1157
|
+
appendChange('queue_resolved', skillName || item.search_hint || 'unknown', `${type} → ${resolution}`);
|
|
889
1158
|
}
|
|
890
1159
|
}
|
|
891
1160
|
|
|
@@ -918,7 +1187,9 @@ function listQueueItems({ status = null, limit = 20 } = {}) {
|
|
|
918
1187
|
try { yaml = require('js-yaml'); } catch { return []; }
|
|
919
1188
|
const queue = loadEvolutionQueue(yaml);
|
|
920
1189
|
const items = Array.isArray(queue.items) ? queue.items : [];
|
|
921
|
-
|
|
1190
|
+
// status can be a single string or an array of strings
|
|
1191
|
+
const statuses = Array.isArray(status) ? status : (status ? [status] : null);
|
|
1192
|
+
const filtered = statuses ? items.filter(i => statuses.includes(i.status)) : items;
|
|
922
1193
|
return filtered
|
|
923
1194
|
.slice()
|
|
924
1195
|
.sort((a, b) => new Date(b.last_seen || b.detected || 0).getTime() - new Date(a.last_seen || a.detected || 0).getTime())
|
|
@@ -948,6 +1219,16 @@ function mergeEvolution(skillDir, newData) {
|
|
|
948
1219
|
}
|
|
949
1220
|
|
|
950
1221
|
fs.writeFileSync(evoPath, JSON.stringify(current, null, 2), 'utf8');
|
|
1222
|
+
|
|
1223
|
+
// Log to changelog
|
|
1224
|
+
const skillName = path.basename(skillDir);
|
|
1225
|
+
const added = [];
|
|
1226
|
+
for (const key of ['preferences', 'fixes', 'contexts']) {
|
|
1227
|
+
if (newData[key] && newData[key].length) added.push(`+${newData[key].length} ${key}`);
|
|
1228
|
+
}
|
|
1229
|
+
if (added.length > 0) {
|
|
1230
|
+
appendChange('evolved', skillName, added.join(', '), (newData.fixes || newData.preferences || newData.contexts || [])[0]);
|
|
1231
|
+
}
|
|
951
1232
|
}
|
|
952
1233
|
|
|
953
1234
|
// ─────────────────────────────────────────────
|
|
@@ -1073,6 +1354,8 @@ module.exports = {
|
|
|
1073
1354
|
appendSkillSignal,
|
|
1074
1355
|
checkHotEvolution,
|
|
1075
1356
|
distillSkills,
|
|
1357
|
+
discoverWorkflows,
|
|
1358
|
+
resetWorkflowSketch,
|
|
1076
1359
|
checkEvolutionQueue,
|
|
1077
1360
|
resolveQueueItem,
|
|
1078
1361
|
resolveQueueItemById,
|
|
@@ -94,7 +94,7 @@ function classifyTaskUsage(task, context = {}, opts = {}) {
|
|
|
94
94
|
|
|
95
95
|
if (!joined) return fallbackCategory;
|
|
96
96
|
if (/\bteam[-_\s]?task\b|团队|协作|handoff|dispatch/.test(joined)) return 'team_task';
|
|
97
|
-
if (/\bskill[-_\s]?(?:evo|evolution|manager
|
|
97
|
+
if (/\bskill[-_\s]?(?:evo|evolution|manager)\b|技能演化/.test(joined)) return 'skill_evolution';
|
|
98
98
|
if (/\bmemory(?:-extract)?\b|记忆|facts?|recall|retriev|rag/.test(joined)) return 'memory';
|
|
99
99
|
if (/\bdistill\b|\bcognition\b|认知|反思|洞察/.test(joined)) return 'cognition';
|
|
100
100
|
if (/\bheartbeat\b|提醒|定时|cron|every\s+\d/.test(joined)) return 'heartbeat';
|