metame-cli 1.3.23 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +185 -26
- package/index.js +187 -141
- package/package.json +3 -3
- package/scripts/daemon-default.yaml +39 -1
- package/scripts/daemon.js +456 -104
- package/scripts/distill.js +40 -90
- package/scripts/feishu-adapter.js +61 -148
- package/scripts/memory-extract.js +263 -0
- package/scripts/memory-search.js +99 -0
- package/scripts/memory.js +439 -0
- package/scripts/providers.js +32 -0
- package/scripts/qmd-client.js +276 -0
- package/scripts/schema.js +37 -40
- package/scripts/session-analytics.js +64 -7
- package/scripts/session-summarize.js +118 -0
- package/scripts/skill-evolution.js +19 -16
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* qmd-client.js — QMD (Hybrid Search Engine) integration for MetaMe
|
|
5
|
+
*
|
|
6
|
+
* Optional dependency: https://github.com/tobi/qmd
|
|
7
|
+
* Install: bun install -g github:tobi/qmd
|
|
8
|
+
*
|
|
9
|
+
* When QMD is present:
|
|
10
|
+
* - Facts are written as markdown files to ~/.metame/facts-docs/
|
|
11
|
+
* - searchFacts() uses qmd_deep_search (BM25 + vector + rerank)
|
|
12
|
+
* - QMD HTTP daemon stays running for fast model reuse
|
|
13
|
+
*
|
|
14
|
+
* When QMD is absent: all calls are no-ops; caller falls back to FTS5.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
'use strict';
|
|
18
|
+
|
|
19
|
+
const { execSync, execFileSync, spawn } = require('child_process');
const fs = require('fs');
const os = require('os');
const path = require('path');
|
|
23
|
+
|
|
24
|
+
const HOME = os.homedir();
// Mirror directory: one markdown file per fact, named {fact.id}.md.
const FACTS_DOCS_DIR = path.join(HOME, '.metame', 'facts-docs');
// Default bind address of the QMD MCP HTTP daemon.
const QMD_URL = 'http://localhost:8181';
// Name under which FACTS_DOCS_DIR is registered as a QMD collection.
const COLLECTION = 'metame-facts';

// ── Availability ───────────────────────────────────────────────────────────
|
|
30
|
+
|
|
31
|
+
// Cached result of the qmd binary probe (null = not yet probed).
let _available = null;

/**
 * Check whether the `qmd` binary is on PATH.
 * The lookup runs once per process; the result is memoized.
 * @returns {boolean}
 */
function isAvailable() {
  if (_available === null) {
    try {
      execSync('which qmd', { stdio: 'pipe', timeout: 2000 });
      _available = true;
    } catch {
      _available = false;
    }
  }
  return _available;
}
|
|
42
|
+
|
|
43
|
+
/**
 * Probe the QMD HTTP daemon's /health endpoint.
 * @returns {Promise<boolean>} true when the daemon answers with a 2xx status
 *   within 800ms; false when qmd is absent, unreachable, or slow.
 */
async function isDaemonRunning() {
  if (!isAvailable()) return false;
  try {
    const response = await fetch(`${QMD_URL}/health`, { signal: AbortSignal.timeout(800) });
    return response.ok;
  } catch {
    // Connection refused / timeout — daemon is not up.
    return false;
  }
}
|
|
54
|
+
|
|
55
|
+
// ── Daemon lifecycle ───────────────────────────────────────────────────────
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Start QMD HTTP daemon if not already running.
|
|
59
|
+
* Returns true if daemon is up after this call.
|
|
60
|
+
*/
|
|
61
|
+
/**
 * Start the QMD HTTP daemon if it is not already running.
 * @returns {Promise<boolean>} true when the daemon is reachable afterwards.
 */
async function startDaemon() {
  if (!isAvailable()) return false;
  const alreadyUp = await isDaemonRunning();
  if (alreadyUp) return true;
  try {
    execSync('qmd mcp --http --daemon', { stdio: 'ignore', timeout: 8000 });
    // Give the server a moment to bind its port before re-probing.
    await new Promise((resolve) => setTimeout(resolve, 1000));
    return isDaemonRunning();
  } catch {
    return false;
  }
}
|
|
73
|
+
|
|
74
|
+
/**
|
|
75
|
+
* Stop QMD HTTP daemon. Non-fatal.
|
|
76
|
+
*/
|
|
77
|
+
/**
 * Stop the QMD HTTP daemon. Best-effort: failures are swallowed.
 */
function stopDaemon() {
  if (!isAvailable()) return;
  try {
    execSync('qmd mcp stop', { stdio: 'ignore', timeout: 3000 });
  } catch {
    // Daemon may not be running; nothing to do.
  }
}
|
|
83
|
+
|
|
84
|
+
// ── Collection setup ───────────────────────────────────────────────────────
|
|
85
|
+
|
|
86
|
+
/**
|
|
87
|
+
* Ensure facts-docs/ directory and QMD collection exist.
|
|
88
|
+
* Safe to call multiple times.
|
|
89
|
+
*/
|
|
90
|
+
/**
 * Make sure the facts-docs directory exists and is registered as a QMD
 * collection. Idempotent — safe to call repeatedly.
 */
function ensureCollection() {
  if (!isAvailable()) return;
  fs.mkdirSync(FACTS_DOCS_DIR, { recursive: true });
  // Registering the same named collection twice is a no-op in QMD.
  const registerCmd = `qmd collection add "${FACTS_DOCS_DIR}" --name "${COLLECTION}"`;
  try {
    execSync(registerCmd, { stdio: 'pipe', timeout: 5000 });
  } catch {
    // Collection already exists, or QMD errored — either way non-fatal.
  }
}
|
|
101
|
+
|
|
102
|
+
// ── Upsert ─────────────────────────────────────────────────────────────────
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Convert a fact object to markdown content.
|
|
106
|
+
* Filename = {id}.md, so the ID is recoverable from search results.
|
|
107
|
+
*/
|
|
108
|
+
/**
 * Render a fact object as a small markdown document.
 * The caller names the file `{id}.md`, so the fact ID is recoverable from
 * QMD search results via the filename.
 * @param {object} fact - expects relation, entity, value; optional tags,
 *   project, created_at (ISO string — only the date part is kept).
 * @returns {string} markdown text
 */
function factToMd(fact) {
  const tagLine = Array.isArray(fact.tags) ? fact.tags.join(', ') : '';
  // Prefer the fact's own timestamp; fall back to today's date.
  const isoDay = (fact.created_at || '').slice(0, 10) || new Date().toISOString().slice(0, 10);
  const lines = [
    `# [${fact.relation}] ${fact.entity}`,
    '',
    fact.value,
    '',
    `Tags: ${tagLine}`,
    `Project: ${fact.project || 'unknown'}`,
    `Date: ${isoDay}`,
  ];
  return lines.join('\n');
}
|
|
121
|
+
|
|
122
|
+
/**
|
|
123
|
+
* Write facts as markdown files and trigger async re-embed.
|
|
124
|
+
* facts must be the array returned by memory.saveFacts (with id field).
|
|
125
|
+
* Non-fatal: any error is silently ignored.
|
|
126
|
+
*/
|
|
127
|
+
/**
 * Persist facts as markdown files under FACTS_DOCS_DIR and kick off an
 * asynchronous `qmd embed` so the new documents get indexed.
 * Expects the array returned by memory.saveFacts (entries carry `id`).
 * Non-fatal: every failure is swallowed so memory writes never break.
 * @param {Array<object>} facts
 */
function upsertFacts(facts) {
  if (!isAvailable() || !Array.isArray(facts) || facts.length === 0) return;
  try {
    ensureCollection();
    for (const fact of facts) {
      if (!fact.id) continue;
      const target = path.join(FACTS_DOCS_DIR, `${fact.id}.md`);
      fs.writeFileSync(target, factToMd(fact), 'utf8');
    }
    // Re-embed incrementally in a detached child so the caller never blocks.
    const embedder = spawn('qmd', ['embed'], {
      detached: true,
      stdio: 'ignore',
      cwd: HOME,
    });
    embedder.unref();
  } catch {
    // Best-effort only.
  }
}
|
|
144
|
+
|
|
145
|
+
// ── Search ─────────────────────────────────────────────────────────────────
|
|
146
|
+
|
|
147
|
+
/**
|
|
148
|
+
* Search via QMD HTTP daemon (MCP JSON-RPC).
|
|
149
|
+
* Returns array of fact IDs, or null if unavailable.
|
|
150
|
+
*/
|
|
151
|
+
/**
 * Query the QMD HTTP daemon through its MCP JSON-RPC endpoint.
 * @param {string} query
 * @param {number} limit
 * @returns {Promise<string[]|null>} fact IDs, or null when unreachable/empty.
 */
async function searchViaHttp(query, limit) {
  const rpcBody = {
    jsonrpc: '2.0',
    id: 1,
    method: 'tools/call',
    params: {
      name: 'qmd_deep_search',
      arguments: { query, limit, min_score: 0.3 },
    },
  };
  try {
    const res = await fetch(`${QMD_URL}/mcp`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(rpcBody),
      signal: AbortSignal.timeout(3000),
    });
    if (!res.ok) return null;
    const data = await res.json();
    return parseSearchResult(data?.result?.content);
  } catch {
    // Network error or timeout — caller falls back to the CLI path.
    return null;
  }
}
|
|
174
|
+
|
|
175
|
+
/**
|
|
176
|
+
* Search via QMD CLI (slower; models may not be warm).
|
|
177
|
+
* Returns array of fact IDs, or null on failure.
|
|
178
|
+
*/
|
|
179
|
+
/**
 * Search through the QMD CLI (slower path; embedding models may be cold).
 * Tries hybrid `qmd query` (BM25 + vector + rerank) first, then falls back
 * to BM25-only `qmd search` (e.g. when models are not yet downloaded).
 *
 * Security fix: the query is passed via execFileSync's argument vector so
 * it is never interpreted by a shell. The previous execSync string form
 * embedded the query inside double quotes, where `$(...)` and backticks
 * would still be expanded by the shell.
 *
 * @param {string} query
 * @param {number} limit
 * @returns {string[]|null} fact IDs, or null on failure.
 */
function searchViaCli(query, limit) {
  if (!isAvailable()) return null;
  const commonArgs = ['-c', COLLECTION, '--json', '-n', String(limit)];
  try {
    let raw;
    try {
      raw = execFileSync('qmd', ['query', query, ...commonArgs], {
        timeout: 30000,
        encoding: 'utf8',
        stdio: 'pipe',
      });
    } catch {
      raw = execFileSync('qmd', ['search', query, ...commonArgs], {
        timeout: 8000,
        encoding: 'utf8',
        stdio: 'pipe',
      });
    }
    return parseSearchResult(raw);
  } catch {
    return null;
  }
}
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
* Parse QMD JSON output → array of fact IDs extracted from filenames.
|
|
204
|
+
*
|
|
205
|
+
* CLI output format: [{"docid": "#abc123", "score": 0.85, "file": "path/to/f-xxx.md"}, ...]
|
|
206
|
+
* MCP HTTP format: wrapped in data.result.content as stringified JSON
|
|
207
|
+
*/
|
|
208
|
+
// Pull the result array out of `text`: direct JSON parse first, then the
// last [...] block for MCP responses that wrap JSON inside a string.
// lastIndexOf avoids stopping at a ] inside titles like "[bug_lesson]".
function extractItems(text) {
  try {
    const parsed = JSON.parse(text);
    if (Array.isArray(parsed)) return parsed;
    if (Array.isArray(parsed?.result)) return parsed.result;
    return null;
  } catch {
    const start = text.lastIndexOf('[');
    const end = text.lastIndexOf(']');
    if (start === -1 || end <= start) return null;
    return JSON.parse(text.slice(start, end + 1));
  }
}

/**
 * Parse QMD JSON output into an array of fact IDs.
 *
 * CLI format: [{"docid": "#abc123", "score": 0.85, "file": ".../f-xxx.md"}, ...]
 * MCP format: the same array, possibly embedded as a string inside
 * data.result.content.
 *
 * @param {string|object} raw
 * @returns {string[]|null} IDs (basenames starting with "f-"), or null.
 */
function parseSearchResult(raw) {
  if (!raw) return null;
  try {
    const text = typeof raw === 'string' ? raw.trim() : JSON.stringify(raw);
    const items = extractItems(text);
    if (!Array.isArray(items) || items.length === 0) return null;

    const ids = [];
    for (const item of items) {
      // QMD returns { file: "qmd://metame-facts/f-abc12345-ts-rand.md", score, docid }
      const filePath = item.file || item.path || '';
      // Drop the qmd:// virtual-path prefix before taking the basename.
      const local = filePath.replace(/^qmd:\/\/[^/]+\//, '');
      const name = path.basename(local, '.md');
      if (name.startsWith('f-')) ids.push(name);
    }
    return ids.length ? ids : null;
  } catch {
    return null;
  }
}
|
|
248
|
+
|
|
249
|
+
/**
|
|
250
|
+
* Top-level search: try HTTP daemon → CLI → return null (caller uses FTS5).
|
|
251
|
+
* @param {string} query
|
|
252
|
+
* @param {number} limit
|
|
253
|
+
* @returns {Promise<string[]|null>} Array of fact IDs, or null
|
|
254
|
+
*/
|
|
255
|
+
/**
 * Top-level search entry point.
 * Order of preference: HTTP daemon (models warm, fast) → CLI → null, in
 * which case the caller falls back to FTS5.
 * @param {string} query
 * @param {number} [limit=5]
 * @returns {Promise<string[]|null>} fact IDs, or null.
 */
async function search(query, limit = 5) {
  if (!isAvailable()) return null;

  const daemonUp = await isDaemonRunning();
  if (daemonUp) {
    const viaHttp = await searchViaHttp(query, limit);
    if (viaHttp) return viaHttp;
  }

  return searchViaCli(query, limit);
}
|
|
267
|
+
|
|
268
|
+
// Public surface: availability probes, daemon lifecycle, collection setup,
// fact upsert, and the tiered search entry point.
module.exports = {
  isAvailable,
  isDaemonRunning,
  startDaemon,
  stopDaemon,
  ensureCollection,
  upsertFacts,
  search,
};
|
package/scripts/schema.js
CHANGED
|
@@ -17,67 +17,64 @@
|
|
|
17
17
|
|
|
18
18
|
// Profile field registry. Each key is a dotted path (with `*` wildcard
// support, e.g. 'core_values.*') mapped to a spec object:
//   tier     — memory tier label (T1 identity … T5 system-managed)
//   type     — 'string' | 'enum' | 'array' | 'number' | 'boolean'
//   locked   — presumably protects the field from automatic rewrites — TODO confirm against the validator
//   values   — allowed values for 'enum' fields (a null entry permits "unset")
//   maxChars / maxItems — size caps for strings / arrays
const SCHEMA = {
  // === T1: Identity (USER's identity, not agent's) ===
  'identity.role': { tier: 'T1', type: 'string', locked: false },
  'identity.locale': { tier: 'T1', type: 'string', locked: true },

  // === T2: Core Values / Traits ===
  'core_values.*': { tier: 'T2', type: 'string', locked: true },
  'core_traits.crisis_reflex': { tier: 'T2', type: 'enum', locked: true, values: ['Action', 'Analysis', 'Delegation', 'Freeze'] },
  'core_traits.flow_trigger': { tier: 'T2', type: 'enum', locked: true, values: ['Ideation', 'Execution', 'Teaching', 'Debugging'] },
  'core_traits.shadow_self': { tier: 'T2', type: 'string', locked: true, maxChars: 80 },
  'core_traits.learning_style': { tier: 'T2', type: 'enum', locked: true, values: ['Hands-on', 'Conceptual', 'Social', 'Reflective'] },
  'core_traits.north_star.aspiration': { tier: 'T2', type: 'string', locked: true, maxChars: 80 },
  'core_traits.north_star.realistic': { tier: 'T2', type: 'string', locked: true, maxChars: 80 },

  // === T3: Preferences ===
  'preferences.code_style': { tier: 'T3', type: 'enum', values: ['concise', 'verbose', 'documented'] },
  'preferences.communication': { tier: 'T3', type: 'enum', values: ['direct', 'gentle', 'socratic'] },
  'preferences.language_mix': { tier: 'T3', type: 'enum', values: ['zh-only', 'en-only', 'zh-main-en-term', 'code-switch'] },
  'preferences.tech_terms_language': { tier: 'T3', type: 'enum', values: ['zh', 'en'] },
  'preferences.code_comments_language': { tier: 'T3', type: 'enum', values: ['zh', 'en', null] },
  'preferences.explanation_depth': { tier: 'T3', type: 'enum', values: ['result_only', 'brief_rationale', 'deep_dive'] },
  'preferences.interaction_tempo': { tier: 'T3', type: 'enum', values: ['batch', 'incremental'] },
  'preferences.tools': { tier: 'T3', type: 'array', maxItems: 10 },

  // === T3b: Cognition ===
  'cognition.decision_style': { tier: 'T3', type: 'enum', values: ['intuitive', 'analytical', 'adaptive'] },
  'cognition.info_processing.entry_point': { tier: 'T3', type: 'enum', values: ['big_picture', 'details', 'examples'] },
  'cognition.info_processing.preferred_format': { tier: 'T3', type: 'enum', values: ['structured', 'narrative', 'visual_metaphor'] },
  'cognition.abstraction.default_level': { tier: 'T3', type: 'enum', values: ['strategic', 'architectural', 'implementation', 'operational'] },
  'cognition.abstraction.range': { tier: 'T3', type: 'enum', values: ['narrow', 'wide'] },
  'cognition.cognitive_load.chunk_size': { tier: 'T3', type: 'enum', values: ['small', 'medium', 'large'] },
  'cognition.cognitive_load.preferred_response_length': { tier: 'T3', type: 'enum', values: ['concise', 'moderate', 'comprehensive'] },
  'cognition.motivation.primary_driver': { tier: 'T3', type: 'enum', values: ['autonomy', 'competence', 'meaning', 'social_proof'] },
  'cognition.motivation.energy_source': { tier: 'T3', type: 'enum', values: ['creation', 'optimization', 'problem_solving', 'teaching'] },
  'cognition.metacognition.self_awareness': { tier: 'T3', type: 'enum', values: ['high', 'medium', 'low'] },
  'cognition.metacognition.receptive_to_challenge': { tier: 'T3', type: 'enum', values: ['yes', 'sometimes', 'no'] },

  // === T4: Context ===
  'context.focus': { tier: 'T4', type: 'string', maxChars: 80 },
  'context.focus_since': { tier: 'T4', type: 'string' },
  'context.active_projects': { tier: 'T4', type: 'array', maxItems: 5 },
  'context.blockers': { tier: 'T4', type: 'array', maxItems: 3 },
  'context.energy': { tier: 'T4', type: 'enum', values: ['high', 'medium', 'low', null] },
  'status.focus': { tier: 'T4', type: 'string', maxChars: 80 },
  'status.language': { tier: 'T4', type: 'string' },

  // === T5: Evolution (system-managed) ===
  'evolution.last_distill': { tier: 'T5', type: 'string' },
  'evolution.distill_count': { tier: 'T5', type: 'number' },
  'evolution.recent_changes': { tier: 'T5', type: 'array', maxItems: 5 },
  'evolution.auto_distill': { tier: 'T5', type: 'array', maxItems: 10 },

  // === T5: Growth (metacognition, system-managed) ===
  'growth.patterns': { tier: 'T5', type: 'array', maxItems: 3 },
  'growth.zone_history': { tier: 'T5', type: 'array', maxItems: 10 },
  'growth.reflections_answered': { tier: 'T5', type: 'number' },
  'growth.reflections_skipped': { tier: 'T5', type: 'number' },
  'growth.last_reflection': { tier: 'T5', type: 'string' },
  'growth.quiet_until': { tier: 'T5', type: 'string' },
  'growth.mirror_enabled': { tier: 'T5', type: 'boolean' },
};
|
|
82
79
|
|
|
83
80
|
/**
|
|
@@ -35,12 +35,15 @@ function loadState() {
|
|
|
35
35
|
* Save analytics state.
|
|
36
36
|
*/
|
|
37
37
|
function saveState(state) {
|
|
38
|
-
// Cap entries
|
|
39
|
-
const
|
|
40
|
-
|
|
41
|
-
const
|
|
42
|
-
|
|
43
|
-
|
|
38
|
+
// Cap entries for both tracking keys
|
|
39
|
+
for (const key of ['analyzed', 'facts_analyzed']) {
|
|
40
|
+
if (!state[key]) continue;
|
|
41
|
+
const keys = Object.keys(state[key]);
|
|
42
|
+
if (keys.length > MAX_STATE_ENTRIES) {
|
|
43
|
+
const sorted = keys.sort((a, b) => (state[key][a] || 0) - (state[key][b] || 0));
|
|
44
|
+
const toRemove = sorted.slice(0, keys.length - MAX_STATE_ENTRIES);
|
|
45
|
+
for (const k of toRemove) delete state[key][k];
|
|
46
|
+
}
|
|
44
47
|
}
|
|
45
48
|
const dir = path.dirname(STATE_FILE);
|
|
46
49
|
if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
|
|
@@ -307,7 +310,7 @@ function findAllUnanalyzedSessions(limit = 30) {
|
|
|
307
310
|
}
|
|
308
311
|
|
|
309
312
|
/**
|
|
310
|
-
* Mark a session as analyzed.
|
|
313
|
+
* Mark a session as analyzed (cognitive distill / pattern detection).
|
|
311
314
|
*/
|
|
312
315
|
function markAnalyzed(sessionId) {
|
|
313
316
|
const state = loadState();
|
|
@@ -315,6 +318,58 @@ function markAnalyzed(sessionId) {
|
|
|
315
318
|
saveState(state);
|
|
316
319
|
}
|
|
317
320
|
|
|
321
|
+
/**
|
|
322
|
+
* Find all sessions not yet processed by memory-extract (facts extraction).
|
|
323
|
+
* Uses a separate `facts_analyzed` key so distill and memory-extract don't interfere.
|
|
324
|
+
*/
|
|
325
|
+
/**
 * Find all sessions not yet processed by memory-extract (facts extraction).
 * Uses a separate `facts_analyzed` state key so distill (markAnalyzed /
 * `analyzed`) and memory-extract don't interfere.
 *
 * Fix: the session id is derived with path.basename(file, '.jsonl'), which
 * strips the suffix at the END of the name. The previous
 * file.replace('.jsonl', '') removed the FIRST occurrence and could corrupt
 * ids containing ".jsonl" mid-name.
 *
 * @param {number} [limit=30] max sessions to return.
 * @returns {Array<{path: string, session_id: string, mtime: number}>}
 *   newest-first, capped at `limit`; [] on any directory read error.
 */
function findAllUnextractedSessions(limit = 30) {
  const state = loadState();
  const factsAnalyzed = state.facts_analyzed || {};
  const results = [];

  try {
    const projectDirs = fs.readdirSync(PROJECTS_ROOT);
    for (const dir of projectDirs) {
      const fullDir = path.join(PROJECTS_ROOT, dir);
      let stat;
      try { stat = fs.statSync(fullDir); } catch { continue; }
      if (!stat.isDirectory()) continue;

      let files;
      try { files = fs.readdirSync(fullDir); } catch { continue; }

      for (const file of files) {
        if (!file.endsWith('.jsonl')) continue;
        // Suffix-anchored strip of ".jsonl" (see doc comment above).
        const sessionId = path.basename(file, '.jsonl');
        if (factsAnalyzed[sessionId]) continue;

        const fullPath = path.join(fullDir, file);
        let fstat;
        try { fstat = fs.statSync(fullPath); } catch { continue; }

        // Skip pathological sizes: huge transcripts and near-empty stubs.
        if (fstat.size > MAX_FILE_SIZE || fstat.size < MIN_FILE_SIZE) continue;

        results.push({ path: fullPath, session_id: sessionId, mtime: fstat.mtimeMs });
      }
    }
  } catch {
    return [];
  }

  // Newest sessions first.
  results.sort((a, b) => b.mtime - a.mtime);
  return results.slice(0, limit);
}
|
|
362
|
+
|
|
363
|
+
/**
|
|
364
|
+
* Mark a session as facts-extracted (used by memory-extract, independent of markAnalyzed).
|
|
365
|
+
*/
|
|
366
|
+
/**
 * Record that a session has had its facts extracted.
 * Independent of markAnalyzed: memory-extract and distill track progress
 * under different keys of the shared analytics state.
 * @param {string} sessionId
 */
function markFactsExtracted(sessionId) {
  const state = loadState();
  state.facts_analyzed = state.facts_analyzed || {};
  state.facts_analyzed[sessionId] = Date.now();
  // saveState() caps both the `analyzed` and `facts_analyzed` maps.
  saveState(state);
}
|
|
372
|
+
|
|
318
373
|
/**
|
|
319
374
|
* Read declared goals from the user's profile.
|
|
320
375
|
* Returns a compact string like "DECLARED_GOALS: focus1 | focus2" (~11 tokens).
|
|
@@ -410,11 +465,13 @@ function summarizeSession(skeleton, jsonlPath) {
|
|
|
410
465
|
// Session discovery, skeleton extraction/formatting, and the two
// independent progress markers (distill vs. facts extraction).
module.exports = {
  findLatestUnanalyzedSession,
  findAllUnanalyzedSessions,
  findAllUnextractedSessions,
  extractSkeleton,
  formatForPrompt,
  formatGoalContext,
  summarizeSession,
  markAnalyzed,
  markFactsExtracted,
};
|
|
419
476
|
|
|
420
477
|
// Direct execution for testing
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* session-summarize.js <chatId> <sessionId>
|
|
4
|
+
* Generates a 3-5 sentence summary for an idle session via Haiku,
|
|
5
|
+
* stores it in daemon_state.json for injection on next resume.
|
|
6
|
+
*
|
|
7
|
+
* Uses session-analytics.extractSkeleton() for robust JSONL parsing
|
|
8
|
+
* (handles tool_use, artifacts, empty chunks without crashing).
|
|
9
|
+
*/
|
|
10
|
+
'use strict';
|
|
11
|
+
|
|
12
|
+
const fs = require('fs');
|
|
13
|
+
const path = require('path');
|
|
14
|
+
const os = require('os');
|
|
15
|
+
|
|
16
|
+
// CLI arguments: target chat id and the Claude Code session to summarize.
const [,, chatId, sessionId] = process.argv;
if (!chatId || !sessionId) {
  console.error('Usage: session-summarize.js <chatId> <sessionId>');
  process.exit(1);
}

const HOME = os.homedir();
const METAME_DIR = path.join(HOME, '.metame');
// Shared daemon state; the summary written here is read on next resume.
const STATE_FILE = path.join(METAME_DIR, 'daemon_state.json');
// Root of Claude Code's per-project JSONL transcripts.
const CLAUDE_PROJECTS = path.join(HOME, '.claude', 'projects');
|
|
26
|
+
|
|
27
|
+
/**
 * Locate the JSONL transcript for a session id by scanning every project
 * directory under CLAUDE_PROJECTS.
 * @param {string} sid
 * @returns {string|null} absolute path, or null when not found.
 */
function findSessionFile(sid) {
  try {
    const projectDirs = fs.readdirSync(CLAUDE_PROJECTS);
    for (const dir of projectDirs) {
      const candidate = path.join(CLAUDE_PROJECTS, dir, `${sid}.jsonl`);
      if (fs.existsSync(candidate)) return candidate;
    }
  } catch {
    // Projects root missing/unreadable — treated as "not found".
  }
  return null;
}
|
|
36
|
+
|
|
37
|
+
/**
 * Read daemon state from disk; an absent or unreadable file yields {}.
 */
function loadState() {
  try {
    return JSON.parse(fs.readFileSync(STATE_FILE, 'utf8'));
  } catch {
    return {};
  }
}
|
|
40
|
+
|
|
41
|
+
/**
 * Persist daemon state as pretty-printed JSON. Write errors are ignored
 * (a failed write just loses this summary).
 */
function saveState(state) {
  try {
    fs.writeFileSync(STATE_FILE, JSON.stringify(state, null, 2), 'utf8');
  } catch {
    // Non-fatal.
  }
}
|
|
44
|
+
|
|
45
|
+
/**
 * Summarize the session transcript via Haiku and stash the result in
 * daemon_state.json under the chat's entry, for injection on next resume.
 *
 * Fix: the 35s watchdog timer used with Promise.race is now cleared in a
 * finally block. Previously the pending setTimeout kept the Node process
 * alive for up to 35 seconds after a successful summary.
 */
async function main() {
  const sessionFile = findSessionFile(sessionId);
  if (!sessionFile) {
    console.log(`[session-summarize] Session file not found for ${sessionId.slice(0, 8)}`);
    return;
  }

  // Use extractSkeleton for robust parsing — already battle-tested on 100+ sessions.
  // Handles tool_use blocks, artifacts, empty chunks, malformed lines gracefully.
  let skeleton;
  try {
    const analytics = require('./session-analytics');
    skeleton = analytics.extractSkeleton(sessionFile);
  } catch (e) {
    console.log(`[session-summarize] extractSkeleton failed: ${e.message}`);
    return;
  }

  const snippets = skeleton.user_snippets || [];
  if (snippets.length < 2) {
    console.log(`[session-summarize] Too few user messages (${snippets.length}), skipping`);
    return;
  }

  let callHaiku;
  try {
    callHaiku = require('./providers').callHaiku;
  } catch (e) {
    console.log(`[session-summarize] providers not available: ${e.message}`);
    return;
  }

  // Build compact context from skeleton (safe strings, already sliced to 100 chars each)
  const snippetText = snippets.join('\n- ');
  const meta = [
    skeleton.project ? `项目: ${skeleton.project}` : '',
    skeleton.intent ? `首要意图: ${skeleton.intent}` : '',
    skeleton.duration_min ? `时长: ${skeleton.duration_min}分钟` : '',
    skeleton.total_tool_calls ? `工具调用: ${skeleton.total_tool_calls}次` : '',
  ].filter(Boolean).join(',');

  const prompt = `请用2-4句话简洁总结以下会话的核心内容和关键结论。只说结果和决策,不列举过程。中文输出。

${meta}

用户主要说了什么:
- ${snippetText}`;

  let summary;
  let watchdog;
  try {
    summary = await Promise.race([
      callHaiku(prompt, {}, 30000),
      new Promise((_, reject) => {
        // Safety net in case callHaiku ignores its own 30s timeout.
        watchdog = setTimeout(() => reject(new Error('timeout')), 35000);
      }),
    ]);
    summary = (summary || '').trim().slice(0, 500);
  } catch (e) {
    console.log(`[session-summarize] Haiku call failed: ${e.message}`);
    return;
  } finally {
    // Always clear the watchdog so it can't keep the process alive.
    clearTimeout(watchdog);
  }

  if (!summary) return;

  const state = loadState();
  if (!state.sessions) state.sessions = {};
  if (!state.sessions[chatId]) state.sessions[chatId] = {};
  state.sessions[chatId].last_summary = summary;
  state.sessions[chatId].last_summary_at = Date.now();
  state.sessions[chatId].last_summary_session_id = sessionId;
  saveState(state);

  console.log(`[session-summarize] Saved for ${chatId} (${sessionId.slice(0, 8)}): ${summary.slice(0, 80)}...`);
}
|
|
117
|
+
|
|
118
|
+
main().catch(e => console.error(`[session-summarize] Fatal: ${e.message}`));
|
|
@@ -299,16 +299,7 @@ function checkHotEvolution(signal) {
|
|
|
299
299
|
* Every N runs, triggers self-evaluation to optimize the policy.
|
|
300
300
|
* Returns { updates, missing_skills } or null if nothing to process.
|
|
301
301
|
*/
|
|
302
|
-
|
|
303
|
-
const { execFile } = require('child_process');
|
|
304
|
-
return new Promise((resolve, reject) => {
|
|
305
|
-
const proc = execFile('claude', ['-p', '--model', 'haiku', '--no-session-persistence'],
|
|
306
|
-
{ env: { ...process.env, ...distillEnv }, timeout, maxBuffer: 10 * 1024 * 1024 },
|
|
307
|
-
(err, stdout) => { if (err) reject(err); else resolve(stdout.trim()); });
|
|
308
|
-
proc.stdin.write(input);
|
|
309
|
-
proc.stdin.end();
|
|
310
|
-
});
|
|
311
|
-
}
|
|
302
|
+
const { callHaiku, buildDistillEnv } = require('./providers');
|
|
312
303
|
|
|
313
304
|
async function distillSkills() {
|
|
314
305
|
let yaml;
|
|
@@ -368,12 +359,9 @@ async function distillSkills() {
|
|
|
368
359
|
|
|
369
360
|
try {
|
|
370
361
|
let distillEnv = {};
|
|
371
|
-
try {
|
|
372
|
-
const { buildDistillEnv } = require('./providers');
|
|
373
|
-
distillEnv = buildDistillEnv();
|
|
374
|
-
} catch {}
|
|
362
|
+
try { distillEnv = buildDistillEnv(); } catch {}
|
|
375
363
|
|
|
376
|
-
const result = await
|
|
364
|
+
const result = await callHaiku(prompt, distillEnv, 90000);
|
|
377
365
|
|
|
378
366
|
if (result.includes('NO_EVOLUTION')) {
|
|
379
367
|
clearSignals();
|
|
@@ -528,7 +516,7 @@ RULES:
|
|
|
528
516
|
- Be conservative: only change what the data clearly supports
|
|
529
517
|
- prompt_template changes should be surgical, not full rewrites`;
|
|
530
518
|
|
|
531
|
-
const result = await
|
|
519
|
+
const result = await callHaiku(evalPrompt, distillEnv, 30000);
|
|
532
520
|
|
|
533
521
|
if (result.includes('NO_CHANGE')) {
|
|
534
522
|
console.log('🧬 Policy self-eval: no changes needed.');
|
|
@@ -790,3 +778,18 @@ module.exports = {
|
|
|
790
778
|
smartStitch,
|
|
791
779
|
listInstalledSkills,
|
|
792
780
|
};
|
|
781
|
+
|
|
782
|
+
// Allow direct invocation (one-shot CLI) in addition to module use.
if (require.main === module) {
  distillSkills()
    .then((result) => {
      const count = result && result.updates ? result.updates.length : 0;
      if (count > 0) {
        console.log(`Skill evolution: ${count} update(s) applied`);
      } else {
        console.log('Skill evolution: no updates');
      }
    })
    .catch((err) => {
      console.error('Skill evolution error:', err.message);
      process.exit(1);
    });
}
|