metame-cli 1.1.3 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +134 -25
- package/index.js +236 -3
- package/package.json +6 -2
- package/scripts/distill.js +495 -0
- package/scripts/migrate-v2.js +112 -0
- package/scripts/pending-traits.js +144 -0
- package/scripts/schema.js +186 -0
- package/scripts/signal-capture.js +125 -0
- package/README/344/270/255/346/226/207/347/211/210.md +0 -175
- package/logo.png +0 -0
|
@@ -0,0 +1,495 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* MetaMe Passive Distiller
|
|
5
|
+
*
|
|
6
|
+
* Reads raw signal buffer, calls Claude (haiku, non-interactive)
|
|
7
|
+
* to extract persistent preferences/identity, merges into profile.
|
|
8
|
+
*
|
|
9
|
+
* Runs automatically before each MetaMe session launch.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
const fs = require('fs');
|
|
13
|
+
const path = require('path');
|
|
14
|
+
const os = require('os');
|
|
15
|
+
const { execSync } = require('child_process');
|
|
16
|
+
|
|
17
|
+
const HOME = os.homedir();
|
|
18
|
+
const BUFFER_FILE = path.join(HOME, '.metame', 'raw_signals.jsonl');
|
|
19
|
+
const BRAIN_FILE = path.join(HOME, '.claude_profile.yaml');
|
|
20
|
+
const LOCK_FILE = path.join(HOME, '.metame', 'distill.lock');
|
|
21
|
+
|
|
22
|
+
const { hasKey, isLocked, getTier, getAllowedKeysForPrompt, estimateTokens, TOKEN_BUDGET } = require('./schema');
|
|
23
|
+
const { loadPending, savePending, upsertPending, getPromotable, removePromoted } = require('./pending-traits');
|
|
24
|
+
|
|
25
|
+
/**
 * Main distillation process.
 *
 * Pipeline: read the raw signal buffer → take a lock → ask Claude (haiku,
 * non-interactive print mode) to extract persistent cognitive traits →
 * validate the answer against the schema → tier-aware merge into the
 * profile → enforce the token budget before writing.
 *
 * Returns { updated: boolean, summary: string }
 */
function distill() {
  // 1. Check if buffer exists and has content
  if (!fs.existsSync(BUFFER_FILE)) {
    return { updated: false, summary: 'No signals to process.' };
  }

  const raw = fs.readFileSync(BUFFER_FILE, 'utf8').trim();
  if (!raw) {
    return { updated: false, summary: 'Empty buffer.' };
  }

  const lines = raw.split('\n').filter(l => l.trim());
  if (lines.length === 0) {
    return { updated: false, summary: 'No signals to process.' };
  }

  // 2. Prevent concurrent distillation. A lock older than 2 minutes is
  // treated as stale (e.g. left behind by a crashed run) and removed.
  if (fs.existsSync(LOCK_FILE)) {
    const lockAge = Date.now() - fs.statSync(LOCK_FILE).mtimeMs;
    if (lockAge < 120000) { // 2 min timeout
      return { updated: false, summary: 'Distillation already in progress.' };
    }
    // Stale lock, remove it
    fs.unlinkSync(LOCK_FILE);
  }
  fs.writeFileSync(LOCK_FILE, process.pid.toString());

  try {
    // 3. Parse signals; malformed JSONL lines are skipped, not fatal.
    const signals = [];
    for (const line of lines) {
      try {
        const entry = JSON.parse(line);
        if (entry.prompt) {
          signals.push(entry.prompt);
        }
      } catch {
        // Skip malformed lines
      }
    }

    if (signals.length === 0) {
      cleanup();
      return { updated: false, summary: 'No valid signals.' };
    }

    // 4. Read the current profile ONCE. A missing profile is a normal
    // first-run state: fall back to empty text instead of letting the
    // merge step below crash (previously step 8 re-read the file
    // unconditionally, and the resulting throw was reported as
    // "Profile merge failed" while cleanup() destroyed the buffered
    // signals).
    let rawProfile = '';
    try {
      rawProfile = fs.readFileSync(BRAIN_FILE, 'utf8');
    } catch {
      rawProfile = '';
    }
    const currentProfile = rawProfile || '(empty profile)';

    // 5. Build distillation prompt
    const userMessages = signals
      .map((s, i) => `${i + 1}. "${s}"`)
      .join('\n');

    const allowedKeys = getAllowedKeysForPrompt();

    const distillPrompt = `You are a MetaMe cognitive profile distiller. Your job is to extract COGNITIVE TRAITS and PREFERENCES — how the user thinks, decides, and communicates. You are NOT a memory system. Do NOT store facts ("user lives in X"). Only store cognitive patterns and preferences.

CURRENT PROFILE:
\`\`\`yaml
${currentProfile}
\`\`\`

ALLOWED FIELDS (you may ONLY output keys from this list):
${allowedKeys}

RECENT USER MESSAGES:
${userMessages}

INSTRUCTIONS:
1. Extract ONLY cognitive traits, preferences, and behavioral patterns — NOT facts or events.
2. IGNORE task-specific messages (e.g., "fix this bug", "add a button").
3. Only extract things that should persist across ALL future sessions.
4. You may ONLY output fields from ALLOWED FIELDS. Any other key will be rejected.
5. Fields marked [LOCKED] must NEVER be changed (T1 and T2 tiers).
6. For enum fields, you MUST use one of the listed values.

COGNITIVE BIAS PREVENTION:
- A single observation is a STATE, not a TRAIT. Do NOT infer T3 cognition fields from one message.
- Never infer cognitive style from identity/demographics.
- If a new signal contradicts an existing profile value, do NOT output the field — contradictions need accumulation.
- Signal weight hierarchy:
  L1 Surface (word choice, tone) → low weight, needs 5+ observations
  L2 Behavior (question patterns, decision patterns) → medium weight, needs 3 observations
  L3 Self-declaration ("I prefer...", "以后一律...") → high weight, can write directly

CONFIDENCE TAGGING:
- If a message contains strong directives (以后一律/永远/always/never/记住/from now on), mark as HIGH.
- Add a _confidence block mapping field keys to "high" or "normal".
- Add a _source block mapping field keys to the quote that triggered the extraction.

OUTPUT FORMAT — respond with ONLY a YAML code block:
\`\`\`yaml
preferences:
  code_style: concise
context:
  focus: "API redesign"
_confidence:
  preferences.code_style: high
  context.focus: normal
_source:
  preferences.code_style: "以后代码一律简洁风格"
  context.focus: "我现在在做API重构"
\`\`\`

If nothing worth saving: respond with exactly NO_UPDATE
Do NOT repeat existing unchanged values. Only output NEW or CHANGED fields.`;

    // 6. Call Claude in print mode with haiku
    let result;
    try {
      result = execSync(
        `claude -p --model haiku`,
        {
          input: distillPrompt,
          encoding: 'utf8',
          timeout: 60000, // 60s — runs in background, no rush
          stdio: ['pipe', 'pipe', 'pipe']
        }
      ).trim();
    } catch (err) {
      // Don't cleanup buffer on API failure — retry next launch
      try { fs.unlinkSync(LOCK_FILE); } catch {}
      // execSync timeout kills the child: err.killed is set, signal SIGTERM.
      const isTimeout = err.killed || (err.signal === 'SIGTERM');
      if (isTimeout) {
        return { updated: false, summary: 'Skipped — API too slow. Will retry next launch.' };
      }
      return { updated: false, summary: 'Skipped — Claude not available. Will retry next launch.' };
    }

    // 7. Parse result
    if (!result || result.includes('NO_UPDATE')) {
      cleanup();
      return { updated: false, summary: `Analyzed ${signals.length} messages — no persistent insights found.` };
    }

    // Extract YAML block from response — require explicit code block, no fallback
    const yamlMatch = result.match(/```yaml\n([\s\S]*?)```/) || result.match(/```\n([\s\S]*?)```/);
    if (!yamlMatch) {
      cleanup();
      return { updated: false, summary: `Analyzed ${signals.length} messages — no persistent insights found.` };
    }
    const yamlContent = yamlMatch[1].trim();

    if (!yamlContent) {
      cleanup();
      return { updated: false, summary: 'Distiller returned empty result.' };
    }

    // 8. Validate against schema + merge into profile
    try {
      const yaml = require('js-yaml');
      const updates = yaml.load(yamlContent);
      if (!updates || typeof updates !== 'object') {
        cleanup();
        return { updated: false, summary: 'Distiller returned invalid data.' };
      }

      // Schema whitelist filter: drop any keys not in schema or locked
      const filtered = filterBySchema(updates);
      if (Object.keys(filtered).length === 0) {
        cleanup();
        return { updated: false, summary: `Analyzed ${signals.length} messages — all extracted fields rejected by schema.` };
      }

      // Reuse the profile text already read in step 4 — no second disk
      // read, and an absent profile simply merges into {}.
      const profile = yaml.load(rawProfile) || {};
      const lockedKeys = extractLockedKeys(rawProfile);
      const inlineComments = extractInlineComments(rawProfile);

      // Strategic merge: tier-aware upsert with pending traits
      const pendingTraits = loadPending();
      const confidenceMap = updates._confidence || {};
      const sourceMap = updates._source || {};
      const merged = strategicMerge(profile, filtered, lockedKeys, pendingTraits, confidenceMap, sourceMap);
      savePending(pendingTraits);

      // Add distillation log entry (keep last 10, compact format)
      if (!merged.evolution) merged.evolution = {};
      if (!merged.evolution.auto_distill) merged.evolution.auto_distill = [];
      merged.evolution.auto_distill.push({
        ts: new Date().toISOString(),
        signals: signals.length,
        fields: Object.keys(filtered).join(', ')
      });
      // Cap at 10 entries
      if (merged.evolution.auto_distill.length > 10) {
        merged.evolution.auto_distill = merged.evolution.auto_distill.slice(-10);
      }

      // Dump and restore comments (yaml.dump strips all comments)
      let dumped = yaml.dump(merged, { lineWidth: -1 });
      let restored = restoreComments(dumped, inlineComments);

      // A3: Token budget check — degrade gracefully if over budget
      let tokens = estimateTokens(restored);
      if (tokens > TOKEN_BUDGET) {
        // Step 1: Clear evolution.recent_changes
        if (merged.evolution.recent_changes) {
          merged.evolution.recent_changes = [];
        }
        dumped = yaml.dump(merged, { lineWidth: -1 });
        restored = restoreComments(dumped, inlineComments);
        tokens = estimateTokens(restored);
      }
      if (tokens > TOKEN_BUDGET) {
        // Step 2: Truncate all arrays to half
        truncateArrays(merged);
        dumped = yaml.dump(merged, { lineWidth: -1 });
        restored = restoreComments(dumped, inlineComments);
        tokens = estimateTokens(restored);
      }
      if (tokens > TOKEN_BUDGET) {
        // Step 3: Reject write entirely, keep previous version
        cleanup();
        return { updated: false, summary: `Profile too large (${tokens} tokens > ${TOKEN_BUDGET}). Write rejected to prevent bloat.` };
      }

      fs.writeFileSync(BRAIN_FILE, restored, 'utf8');

      cleanup();
      return {
        updated: true,
        summary: `${Object.keys(filtered).length} new trait${Object.keys(filtered).length > 1 ? 's' : ''} absorbed. (${tokens} tokens)`
      };

    } catch (err) {
      cleanup();
      return { updated: false, summary: `Profile merge failed: ${err.message}` };
    }

  } catch (err) {
    cleanup();
    return { updated: false, summary: `Distillation error: ${err.message}` };
  }
}
|
|
274
|
+
|
|
275
|
+
/**
 * Collect the keys of all lines carrying a "# [LOCKED]" marker.
 *
 * @param {string} rawYaml - raw profile text (YAML source with comments)
 * @returns {Set<string>} key names that must never be auto-written
 */
function extractLockedKeys(rawYaml) {
  const keys = rawYaml
    .split('\n')
    .filter((line) => line.includes('# [LOCKED]'))
    .map((line) => line.match(/^\s*([\w_]+)\s*:/))
    .filter((m) => m !== null)
    .map((m) => m[1]);
  return new Set(keys);
}
|
|
291
|
+
|
|
292
|
+
/**
 * Build a content → inline-comment map from the original YAML text, so
 * comments can be re-attached after yaml.dump() (which strips them).
 * e.g. "  nickname: 3D # [LOCKED]" yields "nickname: 3D" → "# [LOCKED]".
 *
 * @param {string} rawYaml - raw profile text
 * @returns {Map<string, string>} trimmed line content → trailing comment
 */
function extractInlineComments(rawYaml) {
  const comments = new Map();
  const valuedLine = /^(\s*[\w_]+\s*:.+?)\s+(#.+)$/; // "key: value  # comment"
  const bareKeyLine = /^(\s*[\w_]+\s*:)\s+(#.+)$/;   // "key:  # comment" (no value)
  for (const line of rawYaml.split('\n')) {
    const valued = valuedLine.exec(line);
    if (valued) {
      comments.set(valued[1].trim(), valued[2]);
    }
    const bare = bareKeyLine.exec(line);
    if (bare) {
      comments.set(bare[1].trim(), bare[2]);
    }
  }
  return comments;
}
|
|
316
|
+
|
|
317
|
+
/**
 * Re-attach inline comments (captured by extractInlineComments) to freshly
 * dumped YAML. A line receives the first matching comment, and only when
 * the line does not already contain a '#'.
 *
 * @param {string} dumpedYaml - output of yaml.dump (comments stripped)
 * @param {Map<string, string>} comments - trimmed content → comment
 * @returns {string} YAML text with comments restored
 */
function restoreComments(dumpedYaml, comments) {
  const annotate = (line) => {
    if (line.includes('#')) {
      return line; // already carries a comment (or a '#' in the value) — leave untouched
    }
    const trimmed = line.trim();
    for (const [content, comment] of comments) {
      // Exact match, or prefix match (dump may re-quote/reformat the value)
      if (trimmed === content || trimmed.startsWith(content)) {
        return `${line} ${comment}`;
      }
    }
    return line;
  };
  return dumpedYaml.split('\n').map(annotate).join('\n');
}
|
|
336
|
+
|
|
337
|
+
/**
 * Tier-aware merge of distilled updates into the profile.
 *
 * - T1/T2: never auto-written (locked tiers)
 * - T3: high-confidence signals write directly; normal ones accumulate in
 *   the pending-trait store until mature
 * - T4: direct overwrite (also stamps context.focus_since when the focus
 *   changes)
 * - T5: direct overwrite (system-managed)
 *
 * Mature pending traits (count >= 3 or high confidence) are promoted at
 * the end. pendingTraits is mutated in place; the input profile is not —
 * a deep-cloned merged copy is returned.
 */
function strategicMerge(profile, updates, lockedKeys, pendingTraits, confidenceMap, sourceMap) {
  const merged = JSON.parse(JSON.stringify(profile)); // deep clone — leave caller's object intact

  for (const [dotKey, value] of Object.entries(flattenObject(updates))) {
    if (dotKey.startsWith('_')) continue;               // internal metadata (_confidence/_source)
    if (lockedKeys.has(dotKey.split('.')[0])) continue; // profile-level [LOCKED] marker
    if (isLocked(dotKey)) continue;                     // schema-level lock (double-check after filterBySchema)
    if (value === null || value === '') continue;       // never erase existing values

    const tier = getTier(dotKey);
    if (!tier || tier === 'T1' || tier === 'T2') continue; // unknown key or never-auto-write tier

    if (tier === 'T3') {
      const confidence = confidenceMap[dotKey] || 'normal';
      if (confidence === 'high') {
        // Strong self-declaration — safe to write immediately.
        setNested(merged, dotKey, value);
      } else {
        // Weaker signal — accumulate until it recurs enough to promote.
        upsertPending(pendingTraits, dotKey, value, confidence, sourceMap[dotKey] || null);
      }
    } else {
      // T4 / T5: direct overwrite.
      setNested(merged, dotKey, value);
      if (tier === 'T4' && dotKey === 'context.focus') {
        // Record when the current focus began (YYYY-MM-DD).
        setNested(merged, 'context.focus_since', new Date().toISOString().slice(0, 10));
      }
    }
  }

  // Promote mature pending traits into the profile.
  const promotable = getPromotable(pendingTraits);
  for (const { key, value } of promotable) {
    setNested(merged, key, value);
  }
  removePromoted(pendingTraits, promotable.map(p => p.key));

  return merged;
}
|
|
405
|
+
|
|
406
|
+
/**
 * Flatten a nested plain object into dot-path keys. Arrays and null are
 * treated as leaf values (not recursed into).
 * { preferences: { code_style: 'concise' } } → { 'preferences.code_style': 'concise' }
 *
 * @param {object} obj - object to flatten
 * @param {string} [parentKey] - dot-path prefix (used by recursion)
 * @param {object} [result] - accumulator (used by recursion)
 * @returns {object} flat map of dot-path → leaf value
 */
function flattenObject(obj, parentKey = '', result = {}) {
  for (const [key, value] of Object.entries(obj)) {
    const dotPath = parentKey === '' ? key : `${parentKey}.${key}`;
    const isPlainObject =
      value !== null && typeof value === 'object' && !Array.isArray(value);
    if (isPlainObject) {
      flattenObject(value, dotPath, result);
    } else {
      result[dotPath] = value;
    }
  }
  return result;
}
|
|
422
|
+
|
|
423
|
+
/**
 * Assign a value at a dot-path location, creating intermediate plain
 * objects as needed (non-object intermediates are replaced).
 *
 * @param {object} obj - target object (mutated in place)
 * @param {string} dotPath - e.g. "context.focus"
 * @param {*} value - value to store at the path
 */
function setNested(obj, dotPath, value) {
  const segments = dotPath.split('.');
  const leaf = segments.pop();
  const parent = segments.reduce((node, segment) => {
    if (!node[segment] || typeof node[segment] !== 'object') {
      node[segment] = {};
    }
    return node[segment];
  }, obj);
  parent[leaf] = value;
}
|
|
437
|
+
|
|
438
|
+
/**
 * Schema whitelist filter: keep only leaves whose dot-path exists in the
 * schema and is not locked. Nested objects are filtered recursively and
 * dropped entirely when nothing inside them survives.
 *
 * @param {object} obj - candidate updates from the distiller
 * @param {string} [parentKey] - dot-path prefix (used by recursion)
 * @returns {object} filtered copy containing only allowed fields
 */
function filterBySchema(obj, parentKey = '') {
  const kept = {};
  for (const [key, value] of Object.entries(obj)) {
    const dotPath = parentKey ? `${parentKey}.${key}` : key;
    const isPlainObject =
      value !== null && typeof value === 'object' && !Array.isArray(value);
    if (isPlainObject) {
      const survivors = filterBySchema(value, dotPath);
      if (Object.keys(survivors).length > 0) {
        kept[key] = survivors;
      }
    } else if (hasKey(dotPath) && !isLocked(dotPath)) {
      // Leaf: allowed only if present in the schema and not locked.
      kept[key] = value;
    }
  }
  return kept;
}
|
|
462
|
+
|
|
463
|
+
/**
 * Emergency size reduction: in place, keep only the newest half (rounded
 * up) of every array with more than one element. Non-array objects (and
 * single-element arrays) are recursed into instead.
 *
 * @param {object} obj - profile object (mutated in place)
 */
function truncateArrays(obj) {
  for (const [key, value] of Object.entries(obj)) {
    if (Array.isArray(value) && value.length > 1) {
      const keep = Math.ceil(value.length / 2);
      obj[key] = value.slice(-keep); // keep the most recent entries
    } else if (value !== null && typeof value === 'object') {
      truncateArrays(value);
    }
  }
}
|
|
475
|
+
|
|
476
|
+
/**
 * Best-effort removal of the signal buffer and the lock file. Errors
 * (file already gone, permissions) are deliberately ignored.
 */
function cleanup() {
  for (const file of [BUFFER_FILE, LOCK_FILE]) {
    try {
      fs.unlinkSync(file);
    } catch {
      // already removed or never created — nothing to do
    }
  }
}
|
|
483
|
+
|
|
484
|
+
// Public API consumed by index.js.
module.exports = { distill };

// CLI entry point: run a distillation pass and report the outcome.
if (require.main === module) {
  const { updated, summary } = distill();
  console.log(updated ? `🧠 ${summary}` : `💤 ${summary}`);
}
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* MetaMe Profile Migration: v1 → v2
|
|
5
|
+
*
|
|
6
|
+
* Maps old structure to v2 schema:
|
|
7
|
+
* - status.focus → context.focus
|
|
8
|
+
* - status.language → preferences.language_mix (best guess)
|
|
9
|
+
* - Ensures all v2 sections exist with defaults
|
|
10
|
+
* - Preserves all existing data and LOCKED comments
|
|
11
|
+
*
|
|
12
|
+
* Usage: node migrate-v2.js [--dry-run]
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
const fs = require('fs');
|
|
16
|
+
const path = require('path');
|
|
17
|
+
const os = require('os');
|
|
18
|
+
|
|
19
|
+
const BRAIN_FILE = path.join(os.homedir(), '.claude_profile.yaml');
|
|
20
|
+
const BACKUP_SUFFIX = '.v1.backup';
|
|
21
|
+
const DRY_RUN = process.argv.includes('--dry-run');
|
|
22
|
+
|
|
23
|
+
/**
 * Migrate a v1 profile file to the v2 schema in place.
 *
 * Steps: back up the original, move status.focus → context.focus
 * (stamping focus_since), drop an emptied status section, ensure the v2
 * context / evolution / preferences sections exist with defaults, then
 * re-dump and re-attach any "# [LOCKED]" inline comments that yaml.dump
 * stripped. Honors --dry-run (prints the result instead of writing).
 */
function migrate() {
  if (!fs.existsSync(BRAIN_FILE)) {
    console.log('No profile found. Nothing to migrate.');
    return;
  }

  const yaml = require('js-yaml');
  const rawContent = fs.readFileSync(BRAIN_FILE, 'utf8');

  // Robustness: a hand-edited profile may be invalid YAML. Abort with a
  // clear message instead of crashing with a raw parser stack trace.
  let profile;
  try {
    profile = yaml.load(rawContent) || {};
  } catch (err) {
    console.error(`Profile is not valid YAML (${err.message}). Migration aborted; nothing was changed.`);
    return;
  }

  // Check if already v2 (has context section)
  if (profile.context && profile.context.focus !== undefined) {
    console.log('Profile already appears to be v2. Skipping migration.');
    return;
  }

  console.log('Migrating profile from v1 to v2...');

  // --- Backup --- (skipped in dry-run mode: nothing will be written)
  if (!DRY_RUN) {
    const backupPath = BRAIN_FILE + BACKUP_SUFFIX;
    fs.writeFileSync(backupPath, rawContent, 'utf8');
    console.log(` Backup saved to: ${backupPath}`);
  }

  // --- Migration rules ---

  // 1. status.focus → context.focus (and stamp focus_since with today)
  if (profile.status && profile.status.focus) {
    if (!profile.context) profile.context = {};
    profile.context.focus = profile.status.focus;
    profile.context.focus_since = new Date().toISOString().slice(0, 10);
    delete profile.status.focus;
  }

  // 2. status.language → status.language (keep, it's in schema)
  // No change needed, status.language is valid in v2

  // 3. Clean up empty status object
  if (profile.status && Object.keys(profile.status).length === 0) {
    delete profile.status;
  }

  // 4. Ensure context section exists with defaults
  if (!profile.context) profile.context = {};
  if (profile.context.focus === undefined) profile.context.focus = null;
  if (profile.context.focus_since === undefined) profile.context.focus_since = null;
  if (profile.context.active_projects === undefined) profile.context.active_projects = [];
  if (profile.context.blockers === undefined) profile.context.blockers = [];
  if (profile.context.energy === undefined) profile.context.energy = null;

  // 5. Ensure evolution section exists
  if (!profile.evolution) profile.evolution = {};
  if (profile.evolution.last_distill === undefined) profile.evolution.last_distill = null;
  if (profile.evolution.distill_count === undefined) profile.evolution.distill_count = 0;
  if (profile.evolution.recent_changes === undefined) profile.evolution.recent_changes = [];

  // 6. Ensure preferences section exists (don't overwrite existing values)
  if (!profile.preferences) profile.preferences = {};

  // --- Output ---
  const dumped = yaml.dump(profile, { lineWidth: -1 });

  // Restore LOCKED comments: yaml.dump strips all comments, so re-attach
  // each "# [LOCKED]" marker to the first dumped line with a matching key.
  const lockedLines = rawContent.split('\n').filter(l => l.includes('# [LOCKED]'));
  let restored = dumped;
  for (const lockedLine of lockedLines) {
    const match = lockedLine.match(/^\s*([\w_]+)\s*:\s*(.+?)\s+(#.+)$/);
    if (match) {
      const key = match[1];
      const comment = match[3];
      // key is [\w_]+ only, so it is safe to interpolate into a RegExp.
      restored = restored.replace(
        new RegExp(`^(\\s*${key}\\s*:.+)$`, 'm'),
        (line) => line.includes('#') ? line : `${line} ${comment}`
      );
    }
  }

  if (DRY_RUN) {
    console.log('\n--- DRY RUN (would write): ---');
    console.log(restored);
    console.log('--- END DRY RUN ---');
  } else {
    fs.writeFileSync(BRAIN_FILE, restored, 'utf8');
    console.log(' Migration complete. Profile is now v2.');
  }
}
|
|
111
|
+
|
|
112
|
+
// Run the migration immediately on load (`node migrate-v2.js [--dry-run]`).
migrate();
|