@geminilight/mindos 0.5.12 → 0.5.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/app/next-env.d.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  /// <reference types="next" />
2
2
  /// <reference types="next/image-types/global" />
3
- import "./.next/dev/types/routes.d.ts";
3
+ import "./.next/types/routes.d.ts";
4
4
 
5
5
  // NOTE: This file should not be edited
6
6
  // see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
package/app/package.json CHANGED
@@ -13,7 +13,6 @@
13
13
  "dependencies": {
14
14
  "@ai-sdk/anthropic": "^3.0.58",
15
15
  "@ai-sdk/openai": "^3.0.41",
16
- "@ai-sdk/react": "^3.0.118",
17
16
  "@base-ui/react": "^1.2.0",
18
17
  "@codemirror/lang-markdown": "^6.5.0",
19
18
  "@codemirror/state": "^6.5.4",
package/bin/lib/build.js CHANGED
@@ -38,9 +38,13 @@ export function cleanNextDir() {
38
38
  }
39
39
 
40
40
  function depsHash() {
41
- const lockPath = resolve(ROOT, 'app', 'package-lock.json');
41
+ // Use package.json (not package-lock.json) so we don't need to ship the
42
+ // 560kB lock file in the npm tarball. package.json changes whenever
43
+ // dependencies are added/removed/bumped, which is the only case that
44
+ // requires a fresh `npm install`.
45
+ const pkgPath = resolve(ROOT, 'app', 'package.json');
42
46
  try {
43
- const content = readFileSync(lockPath);
47
+ const content = readFileSync(pkgPath);
44
48
  return createHash('sha256').update(content).digest('hex').slice(0, 16);
45
49
  } catch {
46
50
  return null;
package/bin/lib/sync.js CHANGED
@@ -1,10 +1,19 @@
1
- import { execSync } from 'node:child_process';
2
- import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'node:fs';
1
+ import { execFileSync } from 'node:child_process';
2
+ import { existsSync, readFileSync, writeFileSync, mkdirSync, renameSync } from 'node:fs';
3
3
  import { resolve } from 'node:path';
4
4
  import { homedir } from 'node:os';
5
5
  import { CONFIG_PATH, MINDOS_DIR } from './constants.js';
6
6
  import { bold, dim, cyan, green, red, yellow } from './colors.js';
7
7
 
8
+ // ── Atomic write helper ────────────────────────────────────────────────────
9
+
10
+ function atomicWriteJSON(filePath, data) {
11
+ const content = JSON.stringify(data, null, 2) + '\n';
12
+ const tmp = filePath + '.tmp';
13
+ writeFileSync(tmp, content, 'utf-8');
14
+ renameSync(tmp, filePath);
15
+ }
16
+
8
17
  // ── Config helpers ──────────────────────────────────────────────────────────
9
18
 
10
19
  function loadSyncConfig() {
@@ -20,7 +29,7 @@ function saveSyncConfig(syncConfig) {
20
29
  let config = {};
21
30
  try { config = JSON.parse(readFileSync(CONFIG_PATH, 'utf-8')); } catch {}
22
31
  config.sync = syncConfig;
23
- writeFileSync(CONFIG_PATH, JSON.stringify(config, null, 2) + '\n', 'utf-8');
32
+ atomicWriteJSON(CONFIG_PATH, config);
24
33
  }
25
34
 
26
35
  function getMindRoot() {
@@ -44,7 +53,7 @@ function loadSyncState() {
44
53
 
45
54
  function saveSyncState(state) {
46
55
  if (!existsSync(MINDOS_DIR)) mkdirSync(MINDOS_DIR, { recursive: true });
47
- writeFileSync(SYNC_STATE_PATH, JSON.stringify(state, null, 2) + '\n', 'utf-8');
56
+ atomicWriteJSON(SYNC_STATE_PATH, state);
48
57
  }
49
58
 
50
59
  // ── Git helpers ─────────────────────────────────────────────────────────────
@@ -53,13 +62,13 @@ function isGitRepo(dir) {
53
62
  return existsSync(resolve(dir, '.git'));
54
63
  }
55
64
 
56
- function gitExec(cmd, cwd) {
57
- return execSync(cmd, { cwd, encoding: 'utf-8', stdio: 'pipe' }).trim();
65
+ function gitExec(args, cwd) {
66
+ return execFileSync('git', args, { cwd, encoding: 'utf-8', stdio: 'pipe' }).trim();
58
67
  }
59
68
 
60
69
  function getRemoteUrl(cwd) {
61
70
  try {
62
- return gitExec('git remote get-url origin', cwd);
71
+ return gitExec(['remote', 'get-url', 'origin'], cwd);
63
72
  } catch {
64
73
  return null;
65
74
  }
@@ -67,7 +76,7 @@ function getRemoteUrl(cwd) {
67
76
 
68
77
  function getBranch(cwd) {
69
78
  try {
70
- return gitExec('git rev-parse --abbrev-ref HEAD', cwd);
79
+ return gitExec(['rev-parse', '--abbrev-ref', 'HEAD'], cwd);
71
80
  } catch {
72
81
  return 'main';
73
82
  }
@@ -75,7 +84,7 @@ function getBranch(cwd) {
75
84
 
76
85
  function getUnpushedCount(cwd) {
77
86
  try {
78
- return gitExec('git rev-list --count @{u}..HEAD', cwd);
87
+ return gitExec(['rev-list', '--count', '@{u}..HEAD'], cwd);
79
88
  } catch {
80
89
  return '?';
81
90
  }
@@ -85,12 +94,12 @@ function getUnpushedCount(cwd) {
85
94
 
86
95
  function autoCommitAndPush(mindRoot) {
87
96
  try {
88
- execSync('git add -A', { cwd: mindRoot, stdio: 'pipe' });
89
- const status = gitExec('git status --porcelain', mindRoot);
97
+ execFileSync('git', ['add', '-A'], { cwd: mindRoot, stdio: 'pipe' });
98
+ const status = gitExec(['status', '--porcelain'], mindRoot);
90
99
  if (!status) return;
91
100
  const timestamp = new Date().toISOString().replace('T', ' ').slice(0, 19);
92
- execSync(`git commit -m "auto-sync: ${timestamp}"`, { cwd: mindRoot, stdio: 'pipe' });
93
- execSync('git push', { cwd: mindRoot, stdio: 'pipe' });
101
+ execFileSync('git', ['commit', '-m', `auto-sync: ${timestamp}`], { cwd: mindRoot, stdio: 'pipe' });
102
+ execFileSync('git', ['push', '-u', 'origin', 'HEAD'], { cwd: mindRoot, stdio: 'pipe' });
94
103
  saveSyncState({ ...loadSyncState(), lastSync: new Date().toISOString(), lastError: null });
95
104
  } catch (err) {
96
105
  saveSyncState({ ...loadSyncState(), lastError: err.message, lastErrorTime: new Date().toISOString() });
@@ -99,31 +108,34 @@ function autoCommitAndPush(mindRoot) {
99
108
 
100
109
  function autoPull(mindRoot) {
101
110
  try {
102
- execSync('git pull --rebase --autostash', { cwd: mindRoot, stdio: 'pipe' });
111
+ execFileSync('git', ['pull', '--rebase', '--autostash'], { cwd: mindRoot, stdio: 'pipe' });
103
112
  saveSyncState({ ...loadSyncState(), lastPull: new Date().toISOString() });
104
113
  } catch {
105
114
  // rebase conflict → abort → merge
106
- try { execSync('git rebase --abort', { cwd: mindRoot, stdio: 'pipe' }); } catch {}
115
+ try { execFileSync('git', ['rebase', '--abort'], { cwd: mindRoot, stdio: 'pipe' }); } catch {}
107
116
  try {
108
- execSync('git pull --no-rebase', { cwd: mindRoot, stdio: 'pipe' });
117
+ execFileSync('git', ['pull', '--no-rebase'], { cwd: mindRoot, stdio: 'pipe' });
109
118
  saveSyncState({ ...loadSyncState(), lastPull: new Date().toISOString() });
110
119
  } catch {
111
120
  // merge conflict → keep both versions
112
121
  try {
113
- const conflicts = gitExec('git diff --name-only --diff-filter=U', mindRoot).split('\n').filter(Boolean);
122
+ const conflicts = gitExec(['diff', '--name-only', '--diff-filter=U'], mindRoot).split('\n').filter(Boolean);
123
+ const conflictWarnings = [];
114
124
  for (const file of conflicts) {
115
125
  try {
116
- const theirs = execSync(`git show :3:${file}`, { cwd: mindRoot, encoding: 'utf-8' });
126
+ const theirs = execFileSync('git', ['show', `:3:${file}`], { cwd: mindRoot, encoding: 'utf-8' });
117
127
  writeFileSync(resolve(mindRoot, file + '.sync-conflict'), theirs, 'utf-8');
118
- } catch {}
119
- try { execSync(`git checkout --ours "${file}"`, { cwd: mindRoot, stdio: 'pipe' }); } catch {}
128
+ } catch {
129
+ conflictWarnings.push(file);
130
+ }
131
+ try { execFileSync('git', ['checkout', '--ours', file], { cwd: mindRoot, stdio: 'pipe' }); } catch {}
120
132
  }
121
- execSync('git add -A', { cwd: mindRoot, stdio: 'pipe' });
122
- execSync('git commit -m "auto-sync: resolved conflicts (kept both versions)"', { cwd: mindRoot, stdio: 'pipe' });
133
+ execFileSync('git', ['add', '-A'], { cwd: mindRoot, stdio: 'pipe' });
134
+ execFileSync('git', ['commit', '-m', 'auto-sync: resolved conflicts (kept both versions)'], { cwd: mindRoot, stdio: 'pipe' });
123
135
  saveSyncState({
124
136
  ...loadSyncState(),
125
137
  lastPull: new Date().toISOString(),
126
- conflicts: conflicts.map(f => ({ file: f, time: new Date().toISOString() })),
138
+ conflicts: conflicts.map(f => ({ file: f, time: new Date().toISOString(), noBackup: conflictWarnings.includes(f) })),
127
139
  });
128
140
  } catch (err) {
129
141
  saveSyncState({ ...loadSyncState(), lastError: err.message, lastErrorTime: new Date().toISOString() });
@@ -133,9 +145,9 @@ function autoPull(mindRoot) {
133
145
 
134
146
  // Retry any pending pushes (handles previous push failures)
135
147
  try {
136
- const unpushed = gitExec('git rev-list --count @{u}..HEAD', mindRoot);
148
+ const unpushed = gitExec(['rev-list', '--count', '@{u}..HEAD'], mindRoot);
137
149
  if (parseInt(unpushed) > 0) {
138
- execSync('git push', { cwd: mindRoot, stdio: 'pipe' });
150
+ execFileSync('git', ['push'], { cwd: mindRoot, stdio: 'pipe' });
139
151
  saveSyncState({ ...loadSyncState(), lastSync: new Date().toISOString(), lastError: null });
140
152
  }
141
153
  } catch {
@@ -196,8 +208,8 @@ export async function initSync(mindRoot, opts = {}) {
196
208
  // 1. Ensure git repo
197
209
  if (!isGitRepo(mindRoot)) {
198
210
  if (!nonInteractive) console.log(dim('Initializing git repository...'));
199
- execSync('git init', { cwd: mindRoot, stdio: 'pipe' });
200
- try { execSync('git checkout -b main', { cwd: mindRoot, stdio: 'pipe' }); } catch {}
211
+ execFileSync('git', ['init'], { cwd: mindRoot, stdio: 'pipe' });
212
+ try { execFileSync('git', ['checkout', '-b', 'main'], { cwd: mindRoot, stdio: 'pipe' }); } catch {}
201
213
  }
202
214
 
203
215
  // 1b. Ensure .gitignore exists
@@ -226,35 +238,61 @@ export async function initSync(mindRoot, opts = {}) {
226
238
  if (platform === 'darwin') helper = 'osxkeychain';
227
239
  else if (platform === 'win32') helper = 'manager';
228
240
  else helper = 'store';
229
- try { execSync(`git config credential.helper '${helper}'`, { cwd: mindRoot, stdio: 'pipe' }); } catch {}
230
- // Store the credential via git credential approve
241
+ try { execFileSync('git', ['config', 'credential.helper', helper], { cwd: mindRoot, stdio: 'pipe' }); } catch (e) {
242
+ console.error(`[sync] credential.helper setup failed: ${e.message}`);
243
+ }
244
+ // Store the credential via git credential approve, then verify it stuck
245
+ let credentialStored = false;
231
246
  try {
232
247
  const credInput = `protocol=${urlObj.protocol.replace(':', '')}\nhost=${urlObj.host}\nusername=oauth2\npassword=${token}\n\n`;
233
- execSync('git credential approve', { cwd: mindRoot, input: credInput, stdio: 'pipe' });
234
- } catch {}
248
+ execFileSync('git', ['credential', 'approve'], { cwd: mindRoot, input: credInput, stdio: 'pipe' });
249
+ // Verify: credential fill should return the password we just stored
250
+ try {
251
+ const fillInput = `protocol=${urlObj.protocol.replace(':', '')}\nhost=${urlObj.host}\nusername=oauth2\n\n`;
252
+ const fillResult = execFileSync('git', ['credential', 'fill'], {
253
+ cwd: mindRoot, input: fillInput, encoding: 'utf-8',
254
+ stdio: ['pipe', 'pipe', 'pipe'], timeout: 5000,
255
+ env: { ...process.env, GIT_TERMINAL_PROMPT: '0' },
256
+ });
257
+ credentialStored = fillResult.includes(`password=${token}`);
258
+ } catch {
259
+ credentialStored = false;
260
+ }
261
+ } catch (e) {
262
+ if (!nonInteractive) console.error(`[sync] credential approve failed: ${e.message}`);
263
+ }
264
+ // If credential helper didn't actually persist, embed token in URL
265
+ if (!credentialStored) {
266
+ if (!nonInteractive) console.log(dim('Credential helper unavailable, using inline token'));
267
+ const fallbackUrl = new URL(remoteUrl);
268
+ fallbackUrl.username = 'oauth2';
269
+ fallbackUrl.password = token;
270
+ remoteUrl = fallbackUrl.toString();
271
+ }
235
272
  // For 'store' helper, restrict file permissions AFTER credential file is created
236
273
  if (helper === 'store') {
237
274
  const credFile = resolve(process.env.HOME || homedir(), '.git-credentials');
238
- try { execSync(`chmod 600 "${credFile}"`, { stdio: 'pipe' }); } catch {}
275
+ try { execFileSync('chmod', ['600', credFile], { stdio: 'pipe' }); } catch {}
239
276
  }
240
277
  }
241
278
 
242
279
  // 4. Set remote
243
280
  try {
244
- execSync(`git remote add origin "${remoteUrl}"`, { cwd: mindRoot, stdio: 'pipe' });
281
+ execFileSync('git', ['remote', 'add', 'origin', remoteUrl], { cwd: mindRoot, stdio: 'pipe' });
245
282
  } catch {
246
- execSync(`git remote set-url origin "${remoteUrl}"`, { cwd: mindRoot, stdio: 'pipe' });
283
+ execFileSync('git', ['remote', 'set-url', 'origin', remoteUrl], { cwd: mindRoot, stdio: 'pipe' });
247
284
  }
248
285
 
249
286
  // 5. Test connection
250
287
  if (!nonInteractive) console.log(dim('Testing connection...'));
251
288
  try {
252
- execSync('git ls-remote --exit-code origin', { cwd: mindRoot, stdio: 'pipe', timeout: 15000 });
289
+ execFileSync('git', ['ls-remote', '--exit-code', 'origin'], { cwd: mindRoot, stdio: 'pipe', timeout: 15000 });
253
290
  if (!nonInteractive) console.log(green('✔ Connection successful'));
254
- } catch {
255
- const errMsg = 'Remote not reachable check URL and credentials';
291
+ } catch (lsErr) {
292
+ const detail = lsErr.stderr ? lsErr.stderr.toString().trim() : '';
293
+ const errMsg = `Remote not reachable${detail ? ': ' + detail : ''} — check URL and credentials`;
256
294
  if (nonInteractive) throw new Error(errMsg);
257
- console.error(red('✘ Could not connect to remote. Check your URL and credentials.'));
295
+ console.error(red(`✘ ${errMsg}`));
258
296
  process.exit(1);
259
297
  }
260
298
 
@@ -272,11 +310,11 @@ export async function initSync(mindRoot, opts = {}) {
272
310
 
273
311
  // 7. First sync: pull if remote has content, push otherwise
274
312
  try {
275
- const refs = gitExec('git ls-remote --heads origin', mindRoot);
313
+ const refs = gitExec(['ls-remote', '--heads', 'origin'], mindRoot);
276
314
  if (refs) {
277
315
  if (!nonInteractive) console.log(dim('Pulling from remote...'));
278
316
  try {
279
- execSync(`git pull origin ${syncConfig.branch} --allow-unrelated-histories`, { cwd: mindRoot, stdio: nonInteractive ? 'pipe' : 'inherit' });
317
+ execFileSync('git', ['pull', 'origin', syncConfig.branch, '--allow-unrelated-histories'], { cwd: mindRoot, stdio: nonInteractive ? 'pipe' : 'inherit' });
280
318
  } catch {
281
319
  if (!nonInteractive) console.log(yellow('Pull completed with warnings. Check for conflicts.'));
282
320
  }
@@ -321,7 +359,10 @@ export async function startSyncDaemon(mindRoot) {
321
359
  autoPull(mindRoot);
322
360
 
323
361
  // Graceful shutdown: flush pending changes before exit
362
+ let shutdownInProgress = false;
324
363
  const gracefulShutdown = () => {
364
+ if (shutdownInProgress) return;
365
+ shutdownInProgress = true;
325
366
  if (commitTimer) { clearTimeout(commitTimer); commitTimer = null; }
326
367
  try { autoCommitAndPush(mindRoot); } catch {}
327
368
  stopSyncDaemon();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@geminilight/mindos",
3
- "version": "0.5.12",
3
+ "version": "0.5.13",
4
4
  "description": "MindOS — Human-Agent Collaborative Mind System. Local-first knowledge base that syncs your mind to all AI Agents via MCP.",
5
5
  "keywords": [
6
6
  "mindos",
@@ -57,7 +57,8 @@
57
57
  "!assets/images",
58
58
  "!mcp/node_modules",
59
59
  "!mcp/dist",
60
- "!mcp/package-lock.json"
60
+ "!mcp/package-lock.json",
61
+ "!app/package-lock.json"
61
62
  ],
62
63
  "scripts": {
63
64
  "setup": "node scripts/setup.js",
package/scripts/setup.js CHANGED
@@ -1225,6 +1225,11 @@ function getLocalIP() {
1225
1225
  }
1226
1226
 
1227
1227
  async function finish(mindDir, startMode = 'start', mcpPort = 8781, authToken = '', installDaemon = false, needsRestart = false, oldPort = 3456) {
1228
+ // startMode 'daemon' stored in config is equivalent to installDaemon flag
1229
+ if (startMode === 'daemon') {
1230
+ installDaemon = true;
1231
+ startMode = 'start';
1232
+ }
1228
1233
  if (needsRestart) {
1229
1234
  const isRunning = await isSelfPort(oldPort);
1230
1235
  if (isRunning) {
@@ -110,172 +110,52 @@ Before any non-trivial write, confirm all checks:
110
110
 
111
111
  ## Execution Patterns
112
112
 
113
- ### Capture or update notes
114
-
115
- 1. Search existing docs.
116
- 2. Read target docs and local rules.
117
- 3. Apply minimal edit.
118
- 4. Keep references consistent when paths change.
119
-
120
- ### Distill cross-agent discussion
121
-
122
- 1. Ask user to confirm key decisions and conclusions.
123
- 2. Locate destination docs.
124
- 3. Structure content as problem, decision, rationale, caveats, next actions.
125
- 4. Write back with minimal invasive edits.
126
-
127
- Never imply access to private history from other agent sessions.
128
-
129
- ### Conversation retrospective and adaptive updates
130
-
131
- 1. Ask the user to confirm retrospective objective and scope for this conversation.
132
- 2. Extract reusable artifacts: decisions, rationale, pitfalls, unresolved questions, and next actions.
133
- 3. Route each artifact to the most appropriate existing file by searching and reading candidate docs.
134
- 4. If a matching file exists, update minimally at section/line level; if not, create a well-scoped new file near related docs.
135
- 5. Keep references/backlinks consistent and add a short trace note of what changed and why.
136
- 6. If confidence in file routing is low, present 1-2 candidate destinations and ask user to choose before writing.
137
-
138
- ### Execute or update workflow/SOP docs
139
-
140
- 1. Read workflow doc fully.
141
- 2. Execute stepwise and record outcomes.
142
- 3. If outdated, update only affected section and include rationale.
143
-
144
- ### CSV operations
145
-
146
- 1. Read header.
147
- 2. Validate field order, count, and type.
148
- 3. Append one row.
149
-
150
- ### Information collection and outreach
151
-
152
- 1. Locate authoritative contact/list sources.
153
- 2. Read relevant outreach/execution workflow docs.
154
- 3. Generate personalized outputs per target using profile tags, domain, and tone.
155
- 4. Write outcomes and next actions back for traceability.
156
-
157
- ### Project bootstrap with personal/team standards
158
-
159
- 1. Read preference/context docs such as stack, style, and constraints.
160
- 2. Read startup/engineering workflow docs.
161
- 3. Produce initial scaffold/configuration aligned with those standards.
162
- 4. Record key decisions and setup status for future handoff.
163
-
164
- ### Standards-aligned code review
165
-
166
- 1. Read applicable review and engineering standards.
167
- 2. Review naming, error handling, performance, security, and maintainability.
168
- 3. Output actionable findings with concrete file-level suggestions.
169
- 4. Keep tone and structure consistent with team review style.
170
-
171
- ### Cross-agent handoff continuity
172
-
173
- 1. Treat the shared knowledge base as handoff contract.
174
- 2. Before continuing work, read task state, decisions, and pending items.
175
- 3. Continue without re-discovery and preserve conventions/rationale.
176
- 4. Write back progress so later sessions can resume immediately.
177
-
178
- ### Relationship and follow-up management
179
-
180
- 1. Extract factual updates, intent, and sentiment from user-provided conversation notes.
181
- 2. Update relationship/contact records in structured form.
182
- 3. Generate next-step strategy, todo items, and suggested follow-up timing.
183
- 4. Store updates in reusable format for future session continuity.
184
-
185
- ### Structure-aware multi-file routing
186
-
187
- A single unstructured input (chat export, meeting notes, voice transcript, braindump) often contains information that belongs in multiple places. MindOS knows the full directory tree and inter-document relationships from bootstrap, so it can decompose the input and route each piece to the right file — in one pass, without the user manually specifying destinations.
188
-
189
- 1. Load structure context via `mindos_bootstrap` and `mindos_list_files` to understand the full knowledge topology.
190
- 2. Parse the input into distinct semantic units (facts, decisions, ideas, action items, relationship updates, new concepts).
191
- 3. For each unit, search and read candidate destination files to find the best match by topic, scope, and existing structure.
192
- 4. **Before writing, present the routing plan to the user for approval.** Show a clear summary table: what will be written, to which file, at which location. Only proceed after user confirms.
193
- 5. Apply minimal edits to each target file at the correct location (section, heading, or line level). Create new files only when no existing file is a reasonable fit.
194
- 6. If routing confidence is low for any unit, present candidate destinations and ask the user to choose.
195
- 7. After all writes, summarize what changed and where, so the user can audit the full update in one glance.
196
-
197
- This pattern is what makes MindOS fundamentally different from document-per-document tools: the structural prior means one input triggers coordinated updates across the knowledge base, and nothing falls through the cracks because the agent sees the whole graph.
198
-
199
- ### Knowledge conflict resolution
200
-
201
- When a decision, preference, or fact changes (e.g., "we switched from Redis to Memcached"), all documents referencing the old information must be updated consistently.
202
-
203
- 1. Search the entire knowledge base for references to the outdated information (use multiple search terms, including abbreviations and variants).
204
- 2. List all affected files and the specific lines/sections that reference the old decision.
205
- 3. Present the full change plan to the user: which files, what will change, and why.
206
- 4. After approval, update each file with minimal targeted edits.
207
- 5. Verify no inconsistent references remain after the update.
208
-
209
- ### Distill experience into new SOP
210
-
211
- When the user has just completed a complex task and wants to capture the process for reuse, create a structured, reusable workflow document — not just a one-time record.
212
-
213
- 1. Extract the procedure, decision points, prerequisites, and common pitfalls from the user's description.
214
- 2. Generalize: remove one-off specifics, keep the reusable pattern.
215
- 3. Find the appropriate location under Workflows/ or a similar directory. Check for existing SOP templates or sibling format.
216
- 4. Create the SOP with clear sections: prerequisites, step-by-step procedure, troubleshooting/pitfalls, and references to related docs.
217
- 5. Link the new SOP from relevant index files if applicable.
218
-
219
- ### Periodic review and summary
220
-
221
- When the user asks for a weekly review, monthly recap, or progress summary:
222
-
223
- 1. Use `mindos_get_recent` and/or `mindos_get_history` to identify files changed in the relevant time window.
224
- 2. Read changed files to understand what happened.
225
- 3. Categorize changes by area (projects, profile, workflows, etc.).
226
- 4. Produce a structured summary: key decisions made, progress on active projects, open items, and things that need attention.
227
- 5. Offer to write the summary back as a review note if the user wants to keep it.
228
-
229
- ### Context-aware question answering
230
-
231
- When the user asks a question about themselves, their projects, preferences, or stored knowledge:
232
-
233
- 1. Reason from the directory tree to identify which files likely contain the answer.
234
- 2. Read the relevant files — do not guess or assume from general knowledge.
235
- 3. Answer grounded in actual stored content, citing source files.
236
- 4. If the information is incomplete or missing, say so explicitly rather than fabricating.
237
-
238
- ### TODO and task list management
239
-
240
- When the user wants to add, complete, or modify tasks:
241
-
242
- 1. Locate the TODO file or task list (search or navigate from directory structure).
243
- 2. Read current content to understand existing format (checkboxes, priorities, categories).
244
- 3. Make minimal targeted edits: mark items, add items, or update status.
245
- 4. Preserve all other existing content and formatting.
246
- 5. Follow the existing format conventions exactly — do not introduce a new task format.
247
-
248
- ### Handoff document synthesis
249
-
250
- When the user needs to create a handoff or briefing document for someone else (new team member, collaborator, manager):
251
-
252
- 1. Identify all relevant source files across the knowledge base for the topic (project docs, decisions, status, tech stack, open items).
253
- 2. Read each source file to extract the relevant information.
254
- 3. Synthesize into a single coherent handoff document with sections: background, key decisions and rationale, current status, open items, and further reading (with links to source files).
255
- 4. Place the handoff document in the appropriate project directory, not the root.
256
-
257
- ### Structural rename and move
258
-
259
- When renaming or moving files/directories:
260
-
261
- 1. Use `mindos_get_backlinks` to find all files referencing the target path.
262
- 2. Report the full impact scope to the user: how many files, which ones, what references.
263
- 3. Ask for confirmation before proceeding.
264
- 4. Execute the rename/move.
265
- 5. Update all broken references in affected files.
266
- 6. Verify no orphaned links remain.
267
-
268
- ### Auto-sync READMEs after directory changes
269
-
270
- After any operation that affects directory structure (creating, deleting, moving, or renaming files or subdirectories):
271
-
272
- 1. Identify affected directories: the directory where the file was (source), the directory where it now is (destination), and their parent directories.
273
- 2. Read the README in each affected directory (if one exists).
274
- 3. Update file listings, indexes, and navigation in each README to accurately reflect the current directory contents.
275
- 4. If the README contains file descriptions or links, update paths and names accordingly.
276
- 5. If a directory has no README but sibling directories generally do, consider creating one for the new directory.
277
-
278
- This step is an automatic follow-up to all structural change operations — it does not require a separate user request.
113
+ Select the matching pattern below. All patterns share a common discipline: search → read → minimal edit → verify → summarize.
114
+
115
+ ### Core patterns (high-frequency)
116
+
117
+ #### Capture or update notes
118
+ Search → read target + local rules → apply minimal edit → keep references consistent.
119
+
120
+ #### Context-aware question answering
121
+ Reason from directory tree → read relevant files → answer grounded in stored content with file citations → if info is missing, say so explicitly.
122
+
123
+ #### Structure-aware multi-file routing
124
+ For unstructured inputs (meeting notes, braindumps, chat exports) that belong in multiple places:
125
+ 1. Parse input into semantic units (facts, decisions, action items, ideas).
126
+ 2. For each unit, search + read candidate destination files.
127
+ 3. **Present routing plan to user for approval** (table: what → which file → where).
128
+ 4. Apply minimal edits. Create new files only when no existing file fits.
129
+ 5. Summarize all changes for audit.
130
+
131
+ #### Conversation retrospective
132
+ 1. Confirm scope with user.
133
+ 2. Extract reusable artifacts: decisions, rationale, pitfalls, next actions.
134
+ 3. Route each to the best existing file (or create near related docs).
135
+ 4. Add trace note of what changed and why. Ask user when routing confidence is low.
136
+
137
+ ### Structural change patterns (always apply after file create/delete/move/rename)
138
+
139
+ - **Rename/move**: `get_backlinks` → report impact → confirm → execute → update all references → verify no orphans.
140
+ - **Auto-sync READMEs**: After any structural change, update README in affected directories + parent directories to reflect current contents. This is automatic — no separate user request needed.
141
+
142
+ ### Reference patterns (use when task matches)
143
+
144
+ | Pattern | Key steps |
145
+ |---------|-----------|
146
+ | CSV operations | Read header → validate fields → append row |
147
+ | TODO/task management | Locate list → read format → minimal edit preserving conventions |
148
+ | SOP/workflow execution | Read doc fully → execute stepwise → update only affected section |
149
+ | Cross-agent handoff | Read task state + decisions → continue without re-discovery → write back progress |
150
+ | Knowledge conflict resolution | Multi-term search for old info → list all affected files → present change plan → update after approval |
151
+ | Distill experience into SOP | Extract procedure → generalize → create under Workflows/ with prerequisites, steps, pitfalls |
152
+ | Periodic review/summary | `get_recent`/`get_history` → read changed files → categorize → structured summary |
153
+ | Handoff document synthesis | Identify sources → read → synthesize (background, decisions, status, open items) → place in project dir |
154
+ | Relationship management | Extract updates from notes → update contact records → generate next-step strategy |
155
+ | Information collection | Locate sources → read outreach docs → personalize per target → write back outcomes |
156
+ | Project bootstrap | Read preference/stack docs → scaffold aligned with standards → record decisions |
157
+ | Code review | Read review standards → check naming/security/performance → output actionable findings |
158
+ | Distill cross-agent discussion | Confirm decisions with user → structure as problem/decision/rationale/next-actions → minimal write-back |
279
159
 
280
160
  ## Interaction Rules
281
161
 
@@ -293,28 +173,12 @@ This step is an automatic follow-up to all structural change operations — it d
293
173
  - Never store secrets, tokens, or passwords.
294
174
  - Never delete or rewrite outside user intent.
295
175
 
296
- ## Continuous Evaluation Loop
297
-
298
- For important workflows, run a fast iterate loop:
299
-
300
- 1. Define 2-3 representative prompts for the current task type.
301
- 2. Run the workflow with this skill guidance.
302
- 3. Check result quality against user intent, local conventions, and safety rules.
303
- 4. Identify the smallest instruction change that would prevent observed failure modes.
304
- 5. Re-run prompts and keep only changes that improve consistency without overfitting.
305
-
306
176
  ## Quality Gates
307
177
 
308
- Before finishing, verify:
178
+ Before finishing any operation, verify:
309
179
 
310
180
  1. Result directly answers user intent.
311
181
  2. Updated content matches local style and structure.
312
182
  3. References/links remain valid after structural edits.
313
183
  4. No sensitive information was introduced.
314
184
  5. Summary to user is specific enough for quick audit.
315
-
316
- ## Style Rules
317
-
318
- - Follow repository-local style.
319
- - Keep language concise and execution-oriented.
320
- - Preserve useful structure like headings, checklists, tables, and references.