@xelth/eck-snapshot 5.9.0 → 6.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @xelth/eck-snapshot has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (35)
  1. package/README.md +267 -190
  2. package/package.json +15 -2
  3. package/scripts/mcp-eck-core.js +61 -13
  4. package/setup.json +119 -81
  5. package/src/cli/cli.js +235 -385
  6. package/src/cli/commands/createSnapshot.js +336 -122
  7. package/src/cli/commands/recon.js +244 -0
  8. package/src/cli/commands/setupMcp.js +278 -19
  9. package/src/cli/commands/trainTokens.js +42 -32
  10. package/src/cli/commands/updateSnapshot.js +128 -76
  11. package/src/core/depthConfig.js +54 -0
  12. package/src/core/skeletonizer.js +71 -18
  13. package/src/templates/architect-prompt.template.md +34 -0
  14. package/src/templates/multiAgent.md +43 -10
  15. package/src/templates/opencode/coder.template.md +44 -17
  16. package/src/templates/opencode/junior-architect.template.md +45 -15
  17. package/src/templates/skeleton-instruction.md +1 -1
  18. package/src/utils/aiHeader.js +57 -27
  19. package/src/utils/claudeMdGenerator.js +136 -78
  20. package/src/utils/fileUtils.js +1023 -1016
  21. package/src/utils/gitUtils.js +12 -8
  22. package/src/utils/opencodeAgentsGenerator.js +8 -2
  23. package/src/utils/projectDetector.js +66 -21
  24. package/src/utils/tokenEstimator.js +11 -7
  25. package/src/cli/commands/consilium.js +0 -86
  26. package/src/cli/commands/detectProfiles.js +0 -98
  27. package/src/cli/commands/envSync.js +0 -319
  28. package/src/cli/commands/generateProfileGuide.js +0 -144
  29. package/src/cli/commands/pruneSnapshot.js +0 -106
  30. package/src/cli/commands/restoreSnapshot.js +0 -173
  31. package/src/cli/commands/setupGemini.js +0 -149
  32. package/src/cli/commands/setupGemini.test.js +0 -115
  33. package/src/cli/commands/showFile.js +0 -39
  34. package/src/services/claudeCliService.js +0 -626
  35. package/src/services/claudeCliService.test.js +0 -267
@@ -1,38 +1,48 @@
1
- import { addTrainingPoint, showEstimationStats } from '../../utils/tokenEstimator.js';
1
+ import chalk from 'chalk';
2
+ import ora from 'ora';
3
+ import { addTrainingPoint, showEstimationStats, syncTokenWeights } from '../../utils/tokenEstimator.js';
2
4
 
3
- /**
4
- * Train token estimation with actual results
5
- * @param {string} projectType - Type of project (android, nodejs, etc.)
6
- * @param {string} fileSizeStr - File size in bytes
7
- * @param {string} estimatedStr - Estimated tokens
8
- * @param {string} actualStr - Actual tokens (from user input)
9
- */
10
- export async function trainTokens(projectType, fileSizeStr, estimatedStr, actualStr) {
5
+ export async function runTokenTools(payload) {
6
+ const toolName = payload.name;
7
+ const args = payload.arguments || {};
8
+
9
+ if (toolName === 'eck_train_tokens') {
10
+ await handleTrainTokens(args);
11
+ } else if (toolName === 'eck_token_stats') {
12
+ await handleTokenStats();
13
+ }
14
+ }
15
+
16
+ async function handleTrainTokens(args) {
17
+ const { projectType, fileSizeBytes, estimatedTokens, actualTokens } = args;
18
+
19
+ if (!projectType || fileSizeBytes === undefined || estimatedTokens === undefined || actualTokens === undefined) {
20
+ console.log(chalk.red('āŒ Error: Missing required arguments for eck_train_tokens.'));
21
+ console.log(chalk.yellow('Expected: { projectType, fileSizeBytes, estimatedTokens, actualTokens }'));
22
+ return;
23
+ }
24
+
25
+ const spinner = ora('Calibrating token estimation polynomial...').start();
11
26
  try {
12
- const fileSizeInBytes = parseInt(fileSizeStr, 10);
13
- const estimatedTokens = parseInt(estimatedStr, 10);
14
-
15
- // Parse actual tokens from user input (remove any text like "tokens", commas, etc.)
16
- const actualTokens = parseInt(actualStr.replace(/[^\d]/g, ''), 10);
17
-
18
- if (isNaN(fileSizeInBytes) || isNaN(estimatedTokens) || isNaN(actualTokens)) {
19
- throw new Error('Invalid numeric values provided');
20
- }
21
-
22
- await addTrainingPoint(projectType, fileSizeInBytes, estimatedTokens, actualTokens);
23
-
24
- console.log('\nšŸ“ˆ Updated polynomial coefficients for improved estimation.');
25
-
27
+ await addTrainingPoint(
28
+ projectType,
29
+ Number(fileSizeBytes),
30
+ Number(estimatedTokens),
31
+ Number(actualTokens)
32
+ );
33
+ spinner.succeed('Token estimation calibrated successfully.');
26
34
  } catch (error) {
27
- console.error(`āŒ Error training token estimation: ${error.message}`);
28
- console.error('Usage: eck-snapshot train-tokens <project-type> <file-size-bytes> <estimated-tokens> <actual-tokens>');
29
- process.exit(1);
35
+ spinner.fail(`Calibration failed: ${error.message}`);
30
36
  }
31
37
  }
32
38
 
33
- /**
34
- * Show token estimation statistics
35
- */
36
- export async function showTokenStats() {
37
- await showEstimationStats();
38
- }
39
+ async function handleTokenStats() {
40
+ const spinner = ora('Fetching latest token statistics and weights...').start();
41
+ try {
42
+ await syncTokenWeights(true);
43
+ spinner.stop();
44
+ await showEstimationStats();
45
+ } catch (error) {
46
+ spinner.fail(`Failed to fetch statistics: ${error.message}`);
47
+ }
48
+ }
@@ -1,15 +1,16 @@
1
- import fs from 'fs/promises';
2
- import path from 'path';
3
- import ora from 'ora';
4
- import chalk from 'chalk';
5
- import isBinaryPath from 'is-binary-path';
6
- import { getGitAnchor, getChangedFiles } from '../../utils/gitUtils.js';
7
- import { loadSetupConfig } from '../../config.js';
8
- import { readFileWithSizeCheck, parseSize, formatSize, matchesPattern, loadGitignore, generateTimestamp, getShortRepoName } from '../../utils/fileUtils.js';
9
- import { detectProjectType, getProjectSpecificFiltering } from '../../utils/projectDetector.js';
10
- import { execa } from 'execa';
11
- import { fileURLToPath } from 'url';
12
- import { pushTelemetry } from '../../utils/telemetry.js';
1
+ import fs from 'fs/promises';
2
+ import path from 'path';
3
+ import ora from 'ora';
4
+ import chalk from 'chalk';
5
+ import isBinaryPath from 'is-binary-path';
6
+ import { getGitAnchor, getChangedFiles } from '../../utils/gitUtils.js';
7
+ import { loadSetupConfig } from '../../config.js';
8
+ import { readFileWithSizeCheck, parseSize, formatSize, matchesPattern, loadGitignore, generateTimestamp, getShortRepoName } from '../../utils/fileUtils.js';
9
+ import { detectProjectType, getProjectSpecificFiltering } from '../../utils/projectDetector.js';
10
+ import { execa } from 'execa';
11
+ import { fileURLToPath } from 'url';
12
+ import { pushTelemetry } from '../../utils/telemetry.js';
13
+ import { syncTokenWeights } from '../../utils/tokenEstimator.js';
13
14
 
14
15
  // Mirror the same hidden-path guard used in createSnapshot.js
15
16
  function isHiddenPath(filePath) {
@@ -42,52 +43,95 @@ async function generateSnapshotContent(repoPath, changedFiles, anchor, config, g
42
43
  let includedCount = 0;
43
44
  const fileList = [];
44
45
 
45
- // Include Agent Report if it exists and hasn't been embedded yet
46
- let agentReport = null;
47
- const reportPath = path.join(repoPath, '.eck', 'lastsnapshot', 'AnswerToSA.md');
48
- try {
49
- const reportContent = await fs.readFile(reportPath, 'utf-8');
50
- if (!reportContent.includes('[SYSTEM: EMBEDDED]')) {
51
- agentReport = reportContent;
52
- await fs.appendFile(reportPath, '\n\n[SYSTEM: EMBEDDED]\n', 'utf-8');
53
- }
54
- } catch (e) { /* File not found or unreadable */ }
46
+ // Include Agent Report if it exists and hasn't been embedded yet
47
+ let agentReport = null;
48
+ const reportPath = path.join(repoPath, '.eck', 'lastsnapshot', 'AnswerToSA.md');
49
+ const lockPath = path.join(repoPath, '.eck', 'lastsnapshot', 'AnswerToSA.lock');
50
+ try {
51
+ // Use atomic directory creation as a lock to prevent race conditions
52
+ await fs.mkdir(lockPath);
53
+ const reportContent = await fs.readFile(reportPath, 'utf-8');
54
+
55
+ if (!reportContent.includes('[SYSTEM: EMBEDDED]')) {
56
+ agentReport = reportContent;
57
+
58
+ // Immediately mark as embedded to release the race window
59
+ await fs.appendFile(reportPath, '\n\n[SYSTEM: EMBEDDED]\n', 'utf-8');
60
+
61
+ // Auto-Journaling: prepend agent report to JOURNAL.md
62
+ const journalPath = path.join(repoPath, '.eck', 'JOURNAL.md');
63
+ try {
64
+ const dateStr = new Date().toISOString().split('T')[0];
65
+ const journalEntry = `## ${dateStr} — Agent Report\n\n${reportContent.trim()}\n`;
66
+
67
+ let existingJournal = '';
68
+ try {
69
+ existingJournal = await fs.readFile(journalPath, 'utf-8');
70
+ } catch (e) { /* might not exist */ }
71
+
72
+ const insertPos = existingJournal.indexOf('\n## ');
73
+ if (insertPos !== -1) {
74
+ const newJournal = existingJournal.slice(0, insertPos) + '\n\n' + journalEntry + existingJournal.slice(insertPos);
75
+ await fs.writeFile(journalPath, newJournal, 'utf-8');
76
+ } else {
77
+ await fs.writeFile(journalPath, (existingJournal ? existingJournal + '\n\n' : '') + journalEntry + '\n', 'utf-8');
78
+ }
79
+ } catch (je) {
80
+ console.warn('Could not auto-update JOURNAL.md', je.message);
81
+ }
82
+ }
83
+ await fs.rmdir(lockPath);
84
+ } catch (e) {
85
+ // File not found or locked by another process
86
+ try { await fs.rmdir(lockPath); } catch (_) {}
87
+ }
55
88
 
56
89
  const cleanDirsToIgnore = (config.dirsToIgnore || []).map(d => d.replace(/\/$/, ''));
57
90
 
58
- for (const filePath of changedFiles) {
59
- const normalizedPath = filePath.replace(/\\/g, '/');
60
-
61
- // Skip hidden paths (.idea/, .vscode/, etc.) — mirrors createSnapshot.js
62
- if (isHiddenPath(normalizedPath)) continue;
63
-
64
- // Skip binary files — mirrors createSnapshot.js
65
- if (isBinaryPath(filePath)) continue;
66
-
67
- const pathParts = normalizedPath.split('/');
68
- let isIgnoredDir = false;
69
- for (let i = 0; i < pathParts.length - 1; i++) {
70
- if (cleanDirsToIgnore.includes(pathParts[i])) {
71
- isIgnoredDir = true;
72
- break;
73
- }
74
- }
75
- if (isIgnoredDir) continue;
76
-
77
- const fileExt = path.extname(filePath);
78
- // Use matchesPattern (glob support) instead of exact includes() — mirrors createSnapshot.js
79
- if (config.filesToIgnore && matchesPattern(normalizedPath, config.filesToIgnore)) continue;
80
- if (fileExt && config.extensionsToIgnore?.includes(fileExt)) continue;
81
- if (gitignore.ignores(normalizedPath)) continue;
82
-
83
- try {
84
- const fullPath = path.join(repoPath, filePath);
85
- const content = await readFileWithSizeCheck(fullPath, parseSize(config.maxFileSize));
86
- contentOutput += `--- File: /${normalizedPath} ---\n\n${content}\n\n`;
87
- fileList.push(`- ${normalizedPath}`);
88
- includedCount++;
89
- } catch (e) { /* Skip */ }
90
- }
91
+ for (const filePath of changedFiles) {
92
+ const normalizedPath = filePath.replace(/\\/g, '/');
93
+
94
+ // Skip hidden paths (.idea/, .vscode/, etc.) — mirrors createSnapshot.js
95
+ if (isHiddenPath(normalizedPath)) continue;
96
+
97
+ // Skip binary files — mirrors createSnapshot.js
98
+ if (isBinaryPath(filePath)) continue;
99
+
100
+ const pathParts = normalizedPath.split('/');
101
+ let isIgnoredDir = false;
102
+ for (let i = 0; i < pathParts.length - 1; i++) {
103
+ if (cleanDirsToIgnore.includes(pathParts[i])) {
104
+ isIgnoredDir = true;
105
+ break;
106
+ }
107
+ }
108
+ if (isIgnoredDir) continue;
109
+
110
+ const fileExt = path.extname(filePath);
111
+ // Use matchesPattern (glob support) instead of exact includes() — mirrors createSnapshot.js
112
+ if (config.filesToIgnore && matchesPattern(normalizedPath, config.filesToIgnore)) continue;
113
+ if (fileExt && config.extensionsToIgnore?.includes(fileExt)) continue;
114
+ if (gitignore.ignores(normalizedPath)) continue;
115
+
116
+ try {
117
+ const fullPath = path.join(repoPath, filePath);
118
+
119
+ // Explicitly check if file was deleted
120
+ try {
121
+ await fs.access(fullPath);
122
+ } catch (accessErr) {
123
+ contentOutput += `--- File: /${normalizedPath} ---\n\n[FILE DELETED]\n\n`;
124
+ fileList.push(`- ${normalizedPath} (Deleted)`);
125
+ includedCount++;
126
+ continue;
127
+ }
128
+
129
+ const content = await readFileWithSizeCheck(fullPath, parseSize(config.maxFileSize));
130
+ contentOutput += `--- File: /${normalizedPath} ---\n\n${content}\n\n`;
131
+ fileList.push(`- ${normalizedPath} (Modified/Added)`);
132
+ includedCount++;
133
+ } catch (e) { /* Skip */ }
134
+ }
91
135
 
92
136
  // Load Template
93
137
  const templatePath = path.join(__dirname, '../../templates/update-prompt.template.md');
@@ -122,17 +166,22 @@ export async function updateSnapshot(repoPath, options) {
122
166
  }
123
167
 
124
168
  // Auto-commit any uncommitted changes so they appear in the diff
125
- const didCommit = await autoCommit(repoPath);
126
- if (didCommit) {
127
- spinner.info('Auto-committed uncommitted changes.');
128
- spinner.start('Generating update snapshot...');
169
+ let didCommit = false;
170
+ if (!options.fail) {
171
+ didCommit = await autoCommit(repoPath);
172
+ if (didCommit) {
173
+ spinner.info('Auto-committed uncommitted changes.');
174
+ }
175
+ } else {
176
+ spinner.info('Fail flag passed: skipping auto-commit.');
129
177
  }
178
+ spinner.start('Generating update snapshot...');
130
179
 
131
- const changedFiles = await getChangedFiles(repoPath, anchor);
132
- if (changedFiles.length === 0) {
133
- spinner.succeed('No changes detected since last full snapshot.');
134
- return;
135
- }
180
+ const changedFiles = await getChangedFiles(repoPath, anchor, options.fail);
181
+ if (changedFiles.length === 0) {
182
+ spinner.succeed('No changes detected since last full snapshot.');
183
+ return;
184
+ }
136
185
 
137
186
  const setupConfig = await loadSetupConfig();
138
187
  let config = { ...setupConfig.fileFiltering, ...setupConfig.performance, ...options };
@@ -216,7 +265,7 @@ export async function updateSnapshot(repoPath, options) {
216
265
  }
217
266
 
218
267
  // New Silent/JSON command for Agents
219
- export async function updateSnapshotJson(repoPath) {
268
+ export async function updateSnapshotJson(repoPath, options = {}) {
220
269
  try {
221
270
  const anchor = await getGitAnchor(repoPath);
222
271
  if (!anchor) {
@@ -225,9 +274,11 @@ export async function updateSnapshotJson(repoPath) {
225
274
  }
226
275
 
227
276
  // Auto-commit any uncommitted changes
228
- await autoCommit(repoPath);
277
+ if (!options.fail) {
278
+ await autoCommit(repoPath);
279
+ }
229
280
 
230
- const changedFiles = await getChangedFiles(repoPath, anchor);
281
+ const changedFiles = await getChangedFiles(repoPath, anchor, !!options.fail);
231
282
  if (changedFiles.length === 0) {
232
283
  console.log(JSON.stringify({ status: "no_changes", message: "No changes detected" }));
233
284
  return;
@@ -294,15 +345,16 @@ export async function updateSnapshotJson(repoPath) {
294
345
  }
295
346
  // --------------------------------------------
296
347
 
297
- console.log(JSON.stringify({
298
- status: "success",
299
- snapshot_file: `.eck/snapshots/${outputFilename}`,
300
- files_count: includedCount,
301
- timestamp: timestamp
302
- }));
303
-
304
- // Auto-push telemetry (fire and forget so it doesn't break JSON output)
305
- pushTelemetry(repoPath, true).catch(() => {});
348
+ console.log(JSON.stringify({
349
+ status: "success",
350
+ snapshot_file: `.eck/snapshots/${outputFilename}`,
351
+ files_count: includedCount,
352
+ timestamp: timestamp
353
+ }));
354
+
355
+ // Auto-push telemetry and sync weights (fire and forget so it doesn't break JSON output)
356
+ pushTelemetry(repoPath, true).catch(() => {});
357
+ syncTokenWeights(true).catch(() => {});
306
358
 
307
359
  } catch (error) {
308
360
  console.log(JSON.stringify({ status: "error", message: error.message }));
@@ -0,0 +1,54 @@
1
+ /**
2
+ * Shared depth configuration for link and scout commands.
3
+ * Scale: 0-9
4
+ *
5
+ * @param {number} depth - Depth level (0-9)
6
+ * @returns {object} Configuration object with mode settings
7
+ */
8
+ export function getDepthConfig(depth) {
9
+ const d = Math.max(0, Math.min(9, parseInt(depth, 10) || 0));
10
+
11
+ if (d === 0) {
12
+ return { mode: 'tree', skipContent: true };
13
+ }
14
+
15
+ if (d >= 1 && d <= 4) {
16
+ const linesMap = { 1: 10, 2: 30, 3: 60, 4: 100 };
17
+ return { mode: 'truncated', maxLinesPerFile: linesMap[d], skeleton: false };
18
+ }
19
+
20
+ if (d === 5) {
21
+ return { mode: 'skeleton', skeleton: true, preserveDocs: false, maxLinesPerFile: 0 };
22
+ }
23
+
24
+ if (d === 6) {
25
+ return { mode: 'skeleton+docs', skeleton: true, preserveDocs: true, maxLinesPerFile: 0 };
26
+ }
27
+
28
+ if (d === 7) {
29
+ return { mode: 'full', skeleton: false, maxLinesPerFile: 500 };
30
+ }
31
+
32
+ if (d === 8) {
33
+ return { mode: 'full', skeleton: false, maxLinesPerFile: 1000 };
34
+ }
35
+
36
+ // d === 9
37
+ return { mode: 'full', skeleton: false, maxLinesPerFile: 0 };
38
+ }
39
+
40
+ /**
41
+ * Human-readable depth scale table for documentation/headers.
42
+ */
43
+ export const DEPTH_SCALE = [
44
+ { depth: 0, mode: 'Tree only', description: 'Directory structure, no file contents' },
45
+ { depth: 1, mode: 'Truncated 10', description: '10 lines per file (imports/header)' },
46
+ { depth: 2, mode: 'Truncated 30', description: '30 lines per file' },
47
+ { depth: 3, mode: 'Truncated 60', description: '60 lines per file' },
48
+ { depth: 4, mode: 'Truncated 100', description: '100 lines per file' },
49
+ { depth: 5, mode: 'Skeleton', description: 'Function/class signatures only' },
50
+ { depth: 6, mode: 'Skeleton + docs', description: 'Signatures + docstrings/comments' },
51
+ { depth: 7, mode: 'Full (compact)', description: 'Full content, truncated at 500 lines' },
52
+ { depth: 8, mode: 'Full (standard)', description: 'Full content, truncated at 1000 lines' },
53
+ { depth: 9, mode: 'Full (unlimited)', description: 'Everything, no limits' },
54
+ ];
@@ -64,14 +64,17 @@ const languages = {
64
64
  * Strips implementation details from code.
65
65
  * @param {string} content - Full file content
66
66
  * @param {string} filePath - File path to determine language
67
+ * @param {object} [options] - Options
68
+ * @param {boolean} [options.preserveDocs=true] - Keep JSDoc/docstrings (depth 6) or strip them (depth 5)
67
69
  * @returns {Promise<string>} - Skeletonized code
68
70
  */
69
- export async function skeletonize(content, filePath) {
71
+ export async function skeletonize(content, filePath, options = {}) {
70
72
  if (!content) return content;
73
+ const preserveDocs = options.preserveDocs !== undefined ? options.preserveDocs : true;
71
74
 
72
75
  // 1. JS/TS Strategy (Babel is better for JS ecosystem)
73
76
  if (/\.(js|jsx|ts|tsx|mjs|cjs)$/.test(filePath)) {
74
- return skeletonizeJs(content);
77
+ return skeletonizeJs(content, preserveDocs);
75
78
  }
76
79
 
77
80
  // 2. Tree-sitter Strategy (Python, Java, Kotlin, C, Rust, Go)
@@ -83,7 +86,7 @@ export async function skeletonize(content, filePath) {
83
86
 
84
87
  // Only attempt tree-sitter if both the parser and the specific language module are ready
85
88
  if (available && Parser && langModule) {
86
- return skeletonizeTreeSitter(content, langModule, ext);
89
+ return skeletonizeTreeSitter(content, langModule, ext, preserveDocs);
87
90
  }
88
91
  return content; // Fallback: return original content if tree-sitter unavailable
89
92
  }
@@ -92,7 +95,7 @@ export async function skeletonize(content, filePath) {
92
95
  return content;
93
96
  }
94
97
 
95
- function skeletonizeJs(content) {
98
+ function skeletonizeJs(content, preserveDocs = true) {
96
99
  try {
97
100
  const ast = parse(content, {
98
101
  sourceType: 'module',
@@ -100,27 +103,27 @@ function skeletonizeJs(content) {
100
103
  errorRecovery: true
101
104
  });
102
105
 
103
- traverse(ast, {
104
- Function(path) {
105
- if (path.node.body && path.node.body.type === 'BlockStatement') {
106
- // Preserve leading comments (JSDoc) before emptying body
106
+ const emptyBody = (path) => {
107
+ if (path.node.body && path.node.body.type === 'BlockStatement') {
108
+ if (preserveDocs) {
109
+ // Keep leading comments (JSDoc) before emptying body
107
110
  const leadingComments = path.node.leadingComments || [];
108
111
  path.node.body.body = [];
109
112
  path.node.body.innerComments = leadingComments.length > 0
110
113
  ? leadingComments
111
114
  : [{ type: 'CommentBlock', value: ' ... ' }];
112
- }
113
- },
114
- ClassMethod(path) {
115
- if (path.node.body && path.node.body.type === 'BlockStatement') {
116
- // Preserve leading comments (JSDoc) before emptying body
117
- const leadingComments = path.node.leadingComments || [];
115
+ } else {
116
+ // Strip everything including docs
117
+ path.node.leadingComments = null;
118
118
  path.node.body.body = [];
119
- path.node.body.innerComments = leadingComments.length > 0
120
- ? leadingComments
121
- : [{ type: 'CommentBlock', value: ' ... ' }];
119
+ path.node.body.innerComments = [{ type: 'CommentBlock', value: ' ... ' }];
122
120
  }
123
121
  }
122
+ };
123
+
124
+ traverse(ast, {
125
+ Function: emptyBody,
126
+ ClassMethod: emptyBody
124
127
  });
125
128
 
126
129
  const output = generate(ast, {}, content);
@@ -130,7 +133,7 @@ function skeletonizeJs(content) {
130
133
  }
131
134
  }
132
135
 
133
- function skeletonizeTreeSitter(content, language, ext) {
136
+ function skeletonizeTreeSitter(content, language, ext, preserveDocs = true) {
134
137
  try {
135
138
  const parser = new Parser();
136
139
  parser.setLanguage(language);
@@ -172,6 +175,19 @@ function skeletonizeTreeSitter(content, language, ext) {
172
175
  }
173
176
 
174
177
  if (bodyNode) {
178
+ // For Python with preserveDocs: keep docstring as first statement
179
+ if (preserveDocs && ext === '.py' && bodyNode.childCount > 0) {
180
+ const docstring = extractPythonDocstring(bodyNode);
181
+ if (docstring) {
182
+ replacements.push({
183
+ start: bodyNode.startIndex,
184
+ end: bodyNode.endIndex,
185
+ text: docstring + '\n ...'
186
+ });
187
+ return;
188
+ }
189
+ }
190
+
175
191
  replacements.push({
176
192
  start: bodyNode.startIndex,
177
193
  end: bodyNode.endIndex,
@@ -181,6 +197,16 @@ function skeletonizeTreeSitter(content, language, ext) {
181
197
  }
182
198
  }
183
199
 
200
+ // If not preserveDocs, also strip standalone comment blocks
201
+ if (!preserveDocs && type === 'comment') {
202
+ replacements.push({
203
+ start: node.startIndex,
204
+ end: node.endIndex,
205
+ text: ''
206
+ });
207
+ return;
208
+ }
209
+
184
210
  for (let i = 0; i < node.childCount; i++) {
185
211
  visit(node.child(i));
186
212
  }
@@ -194,8 +220,35 @@ function skeletonizeTreeSitter(content, language, ext) {
194
220
  currentContent = currentContent.substring(0, rep.start) + rep.text + currentContent.substring(rep.end);
195
221
  }
196
222
 
223
+ // Clean up excessive blank lines from stripped comments
224
+ if (!preserveDocs) {
225
+ currentContent = currentContent.replace(/\n{3,}/g, '\n\n');
226
+ }
227
+
197
228
  return currentContent;
198
229
  } catch (e) {
199
230
  return content + `\n// [Skeleton error: ${e.message}]`;
200
231
  }
201
232
  }
233
+
234
+ /**
235
+ * Extract Python docstring from the first statement of a function body block.
236
+ */
237
+ function extractPythonDocstring(bodyNode) {
238
+ for (let i = 0; i < bodyNode.childCount; i++) {
239
+ const child = bodyNode.child(i);
240
+ // Python docstrings are expression_statement containing a string
241
+ if (child.type === 'expression_statement') {
242
+ const expr = child.child(0);
243
+ if (expr && expr.type === 'string') {
244
+ // Return the indented docstring text
245
+ return '\n ' + expr.text;
246
+ }
247
+ }
248
+ // Skip newline/indent tokens, but stop at first real statement
249
+ if (child.type !== 'newline' && child.type !== 'indent' && child.type !== 'NEWLINE' && child.type !== 'INDENT') {
250
+ if (child.type !== 'expression_statement') break;
251
+ }
252
+ }
253
+ return null;
254
+ }
@@ -19,6 +19,7 @@ You MUST wrap your ENTIRE response (Analysis + Changes + Metadata) in a single `
19
19
  ### Command Format (Wrapped)
20
20
 
21
21
  ````text
22
+ <eck_task id="{{repoName}}:short-task-description">
22
23
  # Analysis
23
24
 
24
25
  [Explain your reasoning: what you're doing and why.
@@ -48,6 +49,7 @@ async function example() {
48
49
  }
49
50
  }
50
51
  ```
52
+ </eck_task id="{{repoName}}:short-task-description">
51
53
  ````
52
54
 
53
55
  ### File Actions Reference
@@ -62,6 +64,7 @@ async function example() {
62
64
  ### Complete Example
63
65
 
64
66
  ````text
67
+ <eck_task id="{{repoName}}:add-user-validation">
65
68
  # Analysis
66
69
 
67
70
  The authentication module needs a null check to prevent crashes when
@@ -111,6 +114,7 @@ export function validateUser(user) {
111
114
  }
112
115
  }
113
116
  ```
117
+ </eck_task id="{{repoName}}:add-user-validation">
114
118
  ````
115
119
 
116
120
  ### Why Eck-Protocol v2?
@@ -133,6 +137,36 @@ To understand the project state, you can command the `eck-snapshot` tool directl
133
137
  - `eck-snapshot query "<question>"`: Search the codebase
134
138
  - `eck-snapshot detect`: Analyze the project structure
135
139
  - `eck-snapshot restore <snapshot_file> --include ...`: View specific files
140
+ - `eck-snapshot link <depth>`: Run inside a related repository to generate a read-only companion snapshot.
141
+
142
+ ## SCOUT & FETCH: Cross-Repository Intelligence
143
+
144
+ When working with multiple repositories, use `scout` and `fetch` to gather context:
145
+
146
+ **CRITICAL:** Both `scout` and `fetch` operate on the **current working directory only**. They scan files relative to `cwd` — absolute paths will NOT work.
147
+
148
+ **Single-repo fetch:**
149
+ ```bash
150
+ cd /path/to/project-a
151
+ eck-snapshot fetch "**/SyncManager.kt" "**/RelayClient.kt"
152
+ ```
153
+
154
+ **Multi-repo fetch (MUST be separate commands):**
155
+ ```bash
156
+ # Fetch from first project
157
+ cd /path/to/project-a
158
+ eck-snapshot fetch "**/SyncManager.kt"
159
+
160
+ # Fetch from second project
161
+ cd /path/to/project-b
162
+ eck-snapshot fetch "**/AppDatabase.kt"
163
+ ```
164
+
165
+ **Rules:**
166
+ - Always use **relative glob patterns**, never absolute paths
167
+ - Always specify which directory to `cd` into before each command
168
+ - Use `**/<filename>` to find files regardless of nesting depth
169
+ - One `fetch` command = one repo. For multiple repos, issue multiple commands.
136
170
 
137
171
  ## CAPABILITIES & DELEGATION PROTOCOL
138
172