@xelth/eck-snapshot 4.0.0 → 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +13 -3
- package/setup.json +57 -35
- package/src/cli/cli.js +81 -134
- package/src/cli/commands/autoDocs.js +1 -32
- package/src/cli/commands/createSnapshot.js +338 -198
- package/src/cli/commands/doctor.js +60 -0
- package/src/cli/commands/setupGemini.js +1 -1
- package/src/cli/commands/setupGemini.test.js +1 -1
- package/src/cli/commands/showFile.js +39 -0
- package/src/cli/commands/updateSnapshot.js +75 -0
- package/src/config.js +44 -0
- package/src/core/skeletonizer.js +201 -0
- package/src/services/claudeCliService.js +5 -0
- package/src/templates/agent-prompt.template.md +104 -7
- package/src/templates/architect-prompt.template.md +112 -23
- package/src/templates/multiAgent.md +40 -86
- package/src/templates/skeleton-instruction.md +16 -0
- package/src/templates/update-prompt.template.md +19 -0
- package/src/utils/aiHeader.js +373 -147
- package/src/utils/eckProtocolParser.js +221 -0
- package/src/utils/fileUtils.js +212 -175
- package/src/utils/gitUtils.js +44 -0
- package/src/utils/tokenEstimator.js +4 -1
- package/src/cli/commands/askGpt.js +0 -29
- package/src/services/authService.js +0 -20
- package/src/services/dispatcherService.js +0 -33
- package/src/services/gptService.js +0 -302
- package/src/services/gptService.test.js +0 -120
- package/src/templates/vectorMode.md +0 -22
package/src/utils/fileUtils.js
CHANGED
@@ -6,6 +6,61 @@ import { detectProjectType, getProjectSpecificFiltering } from './projectDetecto
 import { executePrompt as askClaude } from '../services/claudeCliService.js';
 import { getProfile, loadSetupConfig } from '../config.js';
 import micromatch from 'micromatch';
+import { minimatch } from 'minimatch';
+
+/**
+ * Scanner for detecting and redacting secrets (API keys, tokens)
+ */
+export const SecretScanner = {
+  patterns: [
+    // Service-specific patterns
+    { name: 'GitHub Token', regex: /gh[pous]_[a-zA-Z0-9]{36}/g },
+    { name: 'AWS Access Key', regex: /(?:AKIA|ASIA)[0-9A-Z]{16}/g },
+    { name: 'OpenAI API Key', regex: /sk-[a-zA-Z0-9]{32,}/g },
+    { name: 'Stripe Secret Key', regex: /sk_live_[0-9a-zA-Z]{24}/g },
+    { name: 'Google API Key', regex: /AIza[0-9A-Za-z\-_]{35}/g },
+    { name: 'Slack Token', regex: /xox[baprs]-[0-9a-zA-Z\-]{10,}/g },
+    { name: 'NPM Token', regex: /npm_[a-zA-Z0-9]{36}/g },
+    { name: 'Private Key', regex: /-----BEGIN (?:RSA |EC |OPENSSH )?PRIVATE KEY-----/g },
+    // Generic high-entropy patterns near sensitive keywords
+    {
+      name: 'Generic Secret',
+      regex: /(?:api[_-]?key|secret|password|token|auth|pwd|credential)\s*[:=]\s*["']([a-zA-Z0-9\-_.]{16,})["']/gi
+    }
+  ],
+
+  /**
+   * Scans content and replaces detected secrets with a placeholder
+   * @param {string} content - File content to scan
+   * @param {string} filePath - Path for logging context
+   * @returns {{content: string, found: string[]}} Redacted content and list of found secret types
+   */
+  redact(content, filePath) {
+    let redactedContent = content;
+    const foundSecrets = [];
+
+    for (const pattern of this.patterns) {
+      // Reset regex lastIndex for global patterns
+      pattern.regex.lastIndex = 0;
+
+      const matches = [...content.matchAll(pattern.regex)];
+      if (matches.length > 0) {
+        for (const match of matches) {
+          // For generic pattern, use captured group; for specific patterns, use full match
+          const secretValue = match[1] || match[0];
+          const placeholder = `[REDACTED_${pattern.name.replace(/\s+/g, '_').toUpperCase()}]`;
+          redactedContent = redactedContent.replace(secretValue, placeholder);
+          foundSecrets.push(pattern.name);
+        }
+      }
+    }
+
+    return {
+      content: redactedContent,
+      found: [...new Set(foundSecrets)]
+    };
+  }
+};
 
 export function parseSize(sizeStr) {
   const units = { B: 1, KB: 1024, MB: 1024 ** 2, GB: 1024 ** 3 };
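The new SecretScanner is a plain object with a pattern table and a redact() method. A minimal usage sketch follows (not part of the published diff; the import specifier and sample key are assumptions for illustration):

```js
// Hypothetical usage of SecretScanner.redact (illustrative only).
// Assumes SecretScanner is importable from the package's fileUtils module.
import { SecretScanner } from '@xelth/eck-snapshot/src/utils/fileUtils.js';

const source = 'const awsKey = "AKIAIOSFODNN7EXAMPLE";'; // sample key, not a real credential
const { content, found } = SecretScanner.redact(source, 'config/example.js');

console.log(found);   // e.g. [ 'AWS Access Key' ]
console.log(content); // key literal replaced with [REDACTED_AWS_ACCESS_KEY]
```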
@@ -40,12 +95,43 @@ export function matchesPattern(filePath, patterns) {
   });
 }
 
-
-
-
-
-
+/**
+ * Checks if a file matches confidential patterns using minimatch
+ * @param {string} fileName - The file name to check
+ * @param {array} patterns - Array of glob patterns to match against
+ * @returns {boolean} True if the file matches any pattern
+ */
+function matchesConfidentialPattern(fileName, patterns) {
+  return patterns.some(pattern => minimatch(fileName, pattern, { nocase: true }));
+}
+
+/**
+ * Applies smart filtering for files within the .eck directory.
+ * Includes documentation files while excluding confidential files.
+ * @param {string} fileName - The file name to check
+ * @param {object} eckConfig - The eckDirectoryFiltering config object
+ * @returns {object} { include: boolean, isConfidential: boolean }
+ */
+export function applyEckDirectoryFiltering(fileName, eckConfig) {
+  if (!eckConfig || !eckConfig.enabled) {
+    return { include: false, isConfidential: false }; // .eck filtering disabled, exclude all
+  }
+
+  const { confidentialPatterns = [], alwaysIncludePatterns = [] } = eckConfig;
+
+  // First check if file matches confidential patterns
+  const isConfidential = matchesConfidentialPattern(fileName, confidentialPatterns);
+  if (isConfidential) {
+    return { include: false, isConfidential: true };
+  }
+
+  // Check if file matches always-include patterns
+  if (matchesPattern(fileName, alwaysIncludePatterns)) {
+    return { include: true, isConfidential: false };
   }
+
+  // Default: exclude files not in the include list
+  return { include: false, isConfidential: false };
 }
 
 export async function checkGitRepository(repoPath) {
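applyEckDirectoryFiltering decides whether a file inside .eck/ is published in a snapshot. A small sketch of its behaviour for a hypothetical eckDirectoryFiltering config (the pattern values below are illustrative, not the defaults shipped in setup.json):

```js
// Illustrative config; real values come from setup.json's eckDirectoryFiltering block.
const eckConfig = {
  enabled: true,
  confidentialPatterns: ['*secret*', '*.key'],            // assumed example patterns
  alwaysIncludePatterns: ['CONTEXT.md', 'OPERATIONS.md']   // assumed example patterns
};

applyEckDirectoryFiltering('CONTEXT.md', eckConfig); // { include: true,  isConfidential: false }
applyEckDirectoryFiltering('api.key', eckConfig);    // { include: false, isConfidential: true }
applyEckDirectoryFiltering('notes.txt', eckConfig);  // { include: false, isConfidential: false } (default: exclude)
applyEckDirectoryFiltering('CONTEXT.md', { enabled: false }); // filtering disabled -> exclude all
```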
@@ -57,17 +143,18 @@ export async function checkGitRepository(repoPath) {
   }
 }
 
-export async function scanDirectoryRecursively(dirPath, config, relativeTo = dirPath, projectType = null) {
+export async function scanDirectoryRecursively(dirPath, config, relativeTo = dirPath, projectType = null, trackConfidential = false) {
   const files = [];
-
+  const confidentialFiles = [];
+
   // Get project-specific filtering if not provided
   if (!projectType) {
     const detection = await detectProjectType(relativeTo);
     projectType = detection.type;
   }
-
+
   const projectSpecific = await getProjectSpecificFiltering(projectType);
-
+
   // Merge project-specific filters with global config
   const effectiveConfig = {
     ...config,
@@ -75,42 +162,63 @@ export async function scanDirectoryRecursively(dirPath, config, relativeTo = dir
     filesToIgnore: [...(config.filesToIgnore || []), ...(projectSpecific.filesToIgnore || [])],
     extensionsToIgnore: [...(config.extensionsToIgnore || []), ...(projectSpecific.extensionsToIgnore || [])]
   };
-
+
   try {
     const entries = await fs.readdir(dirPath, { withFileTypes: true });
-
+
     for (const entry of entries) {
       const fullPath = path.join(dirPath, entry.name);
       const relativePath = path.relative(relativeTo, fullPath).replace(/\\/g, '/');
-
-
-
+
+      // Special handling for .eck directory - never ignore it when tracking confidential files
+      const isEckDirectory = entry.name === '.eck' && entry.isDirectory();
+      const isInsideEck = relativePath.startsWith('.eck/');
+
+      if (effectiveConfig.dirsToIgnore.some(dir =>
+        entry.name === dir.replace('/', '') ||
         relativePath.startsWith(dir)
-      )) {
+      ) && !isEckDirectory && !isInsideEck) {
         continue;
       }
-
-      if (!effectiveConfig.includeHidden && entry.name.startsWith('.')) {
+
+      if (!effectiveConfig.includeHidden && entry.name.startsWith('.') && !isEckDirectory && !isInsideEck) {
         continue;
       }
-
+
       if (entry.isDirectory()) {
-        const
-
+        const subResult = await scanDirectoryRecursively(fullPath, effectiveConfig, relativeTo, projectType, trackConfidential);
+        if (trackConfidential) {
+          files.push(...subResult.files);
+          confidentialFiles.push(...subResult.confidentialFiles);
+        } else {
+          files.push(...subResult);
+        }
       } else {
-
-
-
+        // Apply smart filtering for files inside .eck directory
+        if (isInsideEck) {
+          const eckConfig = effectiveConfig.eckDirectoryFiltering;
+          const filterResult = applyEckDirectoryFiltering(entry.name, eckConfig);
+
+          if (trackConfidential && filterResult.isConfidential) {
+            confidentialFiles.push(relativePath);
+          } else if (filterResult.include) {
+            files.push(relativePath);
+          }
+        } else {
+          // Normal filtering for non-.eck files
+          if (effectiveConfig.extensionsToIgnore.includes(path.extname(entry.name)) ||
+              matchesPattern(relativePath, effectiveConfig.filesToIgnore)) {
+            continue;
+          }
+          files.push(relativePath);
         }
-
-        files.push(relativePath);
       }
     }
   } catch (error) {
     console.warn(`⚠️ Warning: Could not read directory: ${dirPath} - ${error.message}`);
   }
-
-  return files;
+
+  return trackConfidential ? { files, confidentialFiles } : files;
 }
 
 export async function loadGitignore(repoPath) {
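With the new trackConfidential flag the scanner has two return shapes, as sketched below (repoPath and config are placeholders):

```js
// Backwards-compatible call: returns a flat array of relative paths, as before.
const files = await scanDirectoryRecursively(repoPath, config);

// New call with trackConfidential = true: returns both lists, so the caller can
// report .eck/ files that were withheld because they matched confidentialPatterns.
const { files: included, confidentialFiles } = await scanDirectoryRecursively(
  repoPath, config, repoPath, null, true
);
```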
@@ -153,6 +261,11 @@ export async function generateDirectoryTree(dir, prefix = '', allFiles, depth =
   const validEntries = [];
 
   for (const entry of sortedEntries) {
+    // Skip hidden directories and files (starting with '.')
+    // EXCEPT: show .eck as a placeholder at the first level
+    if (entry.name.startsWith('.')) {
+      continue;
+    }
     if (config.dirsToIgnore.some(d => entry.name.includes(d.replace('/', '')))) continue;
     const fullPath = path.join(dir, entry.name);
     const relativePath = path.relative(process.cwd(), fullPath).replace(/\\/g, '/');
@@ -164,10 +277,10 @@ export async function generateDirectoryTree(dir, prefix = '', allFiles, depth =
   for (let i = 0; i < validEntries.length; i++) {
     const { entry, fullPath, relativePath } = validEntries[i];
     const isLast = i === validEntries.length - 1;
-
+
     const connector = isLast ? '└── ' : '├── ';
     const nextPrefix = prefix + (isLast ? '    ' : '│   ');
-
+
     if (entry.isDirectory()) {
       tree += `${prefix}${connector}${entry.name}/\n`;
       tree += await generateDirectoryTree(fullPath, nextPrefix, allFiles, depth + 1, maxDepth, config);
@@ -175,7 +288,14 @@
       tree += `${prefix}${connector}${entry.name}\n`;
     }
   }
-
+
+  // Add .eck placeholder at root level
+  if (depth === 0) {
+    const isLast = validEntries.length === 0;
+    const connector = isLast ? '└── ' : '├── ';
+    tree += `${prefix}${connector}.eck/\n`;
+  }
+
   return tree;
 } catch (error) {
   console.warn(`⚠️ Warning: Could not read directory: ${dir}`);
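At depth 0 the tree now always ends with a single .eck/ placeholder entry, even though hidden entries are otherwise skipped. A sketch of the root-level call and the expected tail of its output (paths, allFiles, and config are placeholders; the listed entries are illustrative):

```js
// Hidden entries are filtered out of validEntries, then ".eck/" is appended once at the root.
const tree = await generateDirectoryTree(repoPath, '', allFiles, 0, maxDepth, config);
console.log(tree);
// ├── src/
// ├── package.json
// └── .eck/
```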
@@ -295,14 +415,6 @@ export function generateTimestamp() {
   return `${YYYY}-${MM}-${DD}_${hh}-${mm}-${ss}`;
 }
 
-export function sanitizeForFilename(text) {
-  return text
-    .toLowerCase()
-    .replace(/\s+/g, '-') // Replace spaces with hyphens
-    .replace(/[^a-z0-9-]/g, '') // Remove invalid characters
-    .substring(0, 50); // Truncate to a reasonable length
-}
-
 /**
  * Displays project detection information in a user-friendly format
  * @param {object} detection - Project detection result
@@ -734,164 +846,89 @@ export async function initializeEckManifest(projectPath) {
   const staticFactsJson = JSON.stringify(staticFacts, null, 2);
   // --- END NEW LOGIC ---
 
-  //
-  const
-  {
-
-
-
+  // 3. Define smarter templates and prompts with "STUB NOTICES"
+  const templateConfigs = {
+    'ENVIRONMENT.md': {
+      prompt: `Generate raw YAML for .eck/ENVIRONMENT.md based on these project facts:\n${staticFactsJson}\nInclude project_type, runtime, and agent_id: local_dev. NO markdown fences.`,
+      fallback: `project_type: ${staticFacts.type || 'unknown'}
+# [STUB: ENVIRONMENT.MD]
+# ARCHITECT: Task Coder to verify environment variables.
+# CODER: Scan project for .env files or config files and fill this. Remove this stub notice.
+agent_id: local_dev
+`
     },
-    {
-
-
-
+    'CONTEXT.md': {
+      prompt: `Analyze these project files and dependencies:\n${staticFactsJson}\nGenerate a professional # Project Overview in Markdown. Describe the actual architecture and purpose of this specific project. Be technical and concise. Start with '# Project Overview'.`,
+      fallback: `# [STUB: CONTEXT.MD]
+
+## 🚨 ATTENTION ARCHITECT & CODER
+**ARCHITECT:** This file is EMPTY. You MUST prioritize a task for the Coder to analyze the source code and define the Project Overview, Architecture, and Key Technologies.
+**CODER:** Use your tools to read the code, write the real content, and DELETE this entire stub header.
 
 ## Description
-
+(Placeholder: A ${staticFacts.type || 'project'} project)
 
 ## Architecture
-
-
-## Key Technologies
-- Technology 1
-- Technology 2
-- Technology 3
-
-## Important Notes
-Any crucial information that developers should know when working on this project.
-`
+(Placeholder: TBD)`
     },
-    {
-
-
-${staticFactsJson}
-
-Generate the raw Markdown content ONLY for a .eck/OPERATIONS.md file. DO NOT add conversational text. Your response MUST start *directly* with the '# Common Operations' heading. List commands for ## Development Setup, ## Running the Project, and ## Testing.`,
-      content: `# Common Operations
-
-## Development Setup
-```bash
-# Setup commands
-npm install
-# or yarn install
-```
-
-## Running the Project
-```bash
-# Development mode
-npm run dev
-
-# Production build
-npm run build
-```
-
-## Testing
-```bash
-# Run tests
-npm test
-
-# Run tests in watch mode
-npm run test:watch
-```
-
-## Deployment
-```bash
-# Deployment commands
-npm run deploy
-```
-
-## Troubleshooting
-Common issues and their solutions.
-`
-    },
-    {
-      name: 'JOURNAL.md',
-      content: `# Development Journal
-
-## Recent Changes
-Track significant changes, decisions, and progress here.
+    'OPERATIONS.md': {
+      prompt: `Look at the dependencies and files:\n${staticFactsJson}\nGenerate a Markdown guide for common operations (Setup, Run, Test, Build) using the correct commands for this tech stack. Start with '# Common Operations'.`,
+      fallback: `# [STUB: OPERATIONS.MD]
 
-
+## 🚨 ATTENTION
+**CODER:** Run \`npm run\`, check Makefile, or build files to identify REAL commands for Setup, Running, and Testing. Replace this stub with actual commands. Remove this notice.
 
-
-
-- Added basic structure
-`
+## Setup
+${staticFacts.type === 'nodejs' ? 'npm install' : 'TBD'}`
     },
-    {
-
-
-      content: `# Project Roadmap
-
-## Current Sprint/Phase
-- [ ] Feature 1
-- [ ] Feature 2
-- [ ] Bug fix 1
-
-## Next Phase
-- [ ] Future feature 1
-- [ ] Future feature 2
-
-## Long-term Goals
-- [ ] Major milestone 1
-- [ ] Major milestone 2
-
-## Completed
-- [x] Project initialization
-`
-    },
-    {
-      name: 'TECH_DEBT.md',
-      prompt: `Generate the raw Markdown content ONLY for a .eck/TECH_DEBT.md file. DO NOT add conversational text. Start *directly* with '# Technical Debt'. Propose 1-2 *common* placeholder items for ## Code Quality Issues and ## Refactoring Opportunities.`,
-      content: `# Technical Debt
-
-## Current Technical Debt
-Track technical debt, refactoring needs, and code quality issues.
+    'ROADMAP.md': {
+      prompt: `Based on the project type (${staticFacts.type}), propose a 3-step roadmap. Start with '# Project Roadmap'.`,
+      fallback: `# [STUB: ROADMAP.MD]
 
-
-
-
-
-
-- Opportunity 1: Description and impact
-- Opportunity 2: Description and impact
-
-### Performance Issues
-- Performance issue 1: Description and impact
-- Performance issue 2: Description and impact
+**ARCHITECT:** Set a real roadmap based on user goals. **CODER:** Remove this stub marker once a real goal is added.`
+    },
+    'TECH_DEBT.md': {
+      prompt: `Given this is a ${staticFacts.type} project, list 2-3 common technical debt items. Start with '# Technical Debt'.`,
+      fallback: `# [STUB: TECH_DEBT.MD]
 
-
-
-
+**CODER:** Scan for TODOs/FIXMEs or structural issues and list them here. Remove this stub marker.`
+    },
+    'JOURNAL.md': {
+      fallback: `# Development Journal
 
-##
-
-
+## Recent Changes
+---
+type: feat
+scope: project
+summary: Initial manifest generated (PENDING REVIEW)
+date: ${new Date().toISOString().split('T')[0]}
+---
+- NOTICE: Some .eck files are STUBS. They need manual or AI-assisted verification.`
     }
-
+  };
 
   // Create each template file (only if it doesn't exist)
-  for (const
-    const filePath = path.join(eckDir,
-
+  for (const [fileName, config] of Object.entries(templateConfigs)) {
+    const filePath = path.join(eckDir, fileName);
+
     // Skip if file already exists
     try {
       await fs.stat(filePath);
-      console.log(` ✅ ${
+      console.log(` ✅ ${fileName} already exists, skipping`);
       continue;
     } catch (error) {
       // File doesn't exist, create it
    }
-
-    let fileContent =
+
+    let fileContent = config.fallback; // Start with stub fallback
     let generatedByAI = false;
 
     // For files with a prompt, try to dynamically generate (only if enabled)
-    if (
+    if (config.prompt && aiGenerationEnabled) {
       try {
-        console.log(` 🧠 Attempting to auto-generate ${
-        const aiResponseObject = await askClaude(
-        const rawText = aiResponseObject.result;
+        console.log(` 🧠 Attempting to auto-generate ${fileName} via Claude...`);
+        const aiResponseObject = await askClaude(config.prompt);
+        const rawText = aiResponseObject.result;
 
         if (!rawText || typeof rawText.replace !== 'function') {
           throw new Error(`AI returned invalid content type: ${typeof rawText}`);
@@ -903,19 +940,19 @@ Track technical debt, refactoring needs, and code quality issues.
         if (cleanedResponse) {
           fileContent = cleanedResponse;
           generatedByAI = true;
-          console.log(` ✨ AI successfully generated ${
+          console.log(` ✨ AI successfully generated ${fileName}`);
         } else {
           throw new Error('AI returned empty content.');
         }
       } catch (error) {
-        console.warn(` ⚠️ AI generation failed for ${
-        // fileContent is already set to the fallback
+        console.warn(` ⚠️ AI generation failed for ${fileName}: ${error.message}. Using stub template.`);
+        // fileContent is already set to the stub fallback
       }
     }
-
+
     await fs.writeFile(filePath, fileContent);
     if (!generatedByAI) {
-      console.log(` ✅ Created ${
+      console.log(` ✅ Created ${fileName} (stub template)`);
     }
   }
 
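Taken together, the rewritten bootstrap keys templates by file name and always has a stub fallback. A hedged sketch of the overall behaviour (projectPath is a placeholder):

```js
// Creates any missing .eck/ manifest files: AI-generated content when a prompt is
// defined and AI generation is enabled, otherwise the "[STUB: ...]" fallback shown above.
// Files that already exist are left untouched.
await initializeEckManifest('/path/to/project');
// -> .eck/ENVIRONMENT.md, CONTEXT.md, OPERATIONS.md, ROADMAP.md, TECH_DEBT.md, JOURNAL.md
```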
package/src/utils/gitUtils.js
ADDED
@@ -0,0 +1,44 @@
+import fs from 'fs/promises';
+import path from 'path';
+import { execa } from 'execa';
+
+const ANCHOR_FILE = '.eck/anchor';
+
+export async function saveGitAnchor(repoPath) {
+  try {
+    const { stdout } = await execa('git', ['rev-parse', 'HEAD'], { cwd: repoPath });
+    const anchorPath = path.join(repoPath, ANCHOR_FILE);
+    await fs.mkdir(path.dirname(anchorPath), { recursive: true });
+    await fs.writeFile(anchorPath, stdout.trim());
+    // console.log(`⚓ Git anchor saved: ${stdout.trim().substring(0, 7)}`);
+  } catch (e) {
+    // Ignore if not a git repo
+  }
+}
+
+export async function getGitAnchor(repoPath) {
+  try {
+    const anchorPath = path.join(repoPath, ANCHOR_FILE);
+    return await fs.readFile(anchorPath, 'utf-8');
+  } catch (e) {
+    return null;
+  }
+}
+
+export async function getChangedFiles(repoPath, anchorHash) {
+  try {
+    const { stdout } = await execa('git', ['diff', '--name-only', anchorHash, 'HEAD'], { cwd: repoPath });
+    return stdout.split('\n').filter(Boolean);
+  } catch (e) {
+    throw new Error(`Failed to get git diff: ${e.message}`);
+  }
+}
+
+export async function getGitDiffOutput(repoPath, anchorHash) {
+  try {
+    const { stdout } = await execa('git', ['diff', anchorHash, 'HEAD'], { cwd: repoPath });
+    return stdout;
+  } catch (e) {
+    return '';
+  }
+}
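The new gitUtils module implements a simple "anchor" workflow: a snapshot records the current HEAD, and a later update asks git what changed since that commit. A usage sketch (repoPath is a placeholder):

```js
import { saveGitAnchor, getGitAnchor, getChangedFiles, getGitDiffOutput } from './src/utils/gitUtils.js';

const repoPath = '/path/to/project';

await saveGitAnchor(repoPath);               // writes the current HEAD hash to .eck/anchor

const anchor = await getGitAnchor(repoPath); // null when no anchor has been saved yet
if (anchor) {
  const changed = await getChangedFiles(repoPath, anchor); // names from `git diff --name-only <anchor> HEAD`
  const patch = await getGitDiffOutput(repoPath, anchor);  // full `git diff <anchor> HEAD` output
  console.log(`${changed.length} files changed since the last snapshot`);
}
```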
package/src/utils/tokenEstimator.js
CHANGED
@@ -8,7 +8,8 @@ import { fileURLToPath } from 'url';
 
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = path.dirname(__filename);
-const
+const ECK_DIR = path.join(__dirname, '..', '..', '.eck');
+const ESTIMATION_DATA_FILE = path.join(ECK_DIR, 'token-training.json');
 
 /**
  * Default coefficients for different project types (bytes to tokens ratio)
@@ -49,6 +50,8 @@ async function loadTrainingData() {
  * Save training data to file
  */
 async function saveTrainingData(data) {
+  // Ensure .eck directory exists
+  await fs.mkdir(ECK_DIR, { recursive: true });
   await fs.writeFile(ESTIMATION_DATA_FILE, JSON.stringify(data, null, 2));
 }
 
package/src/cli/commands/askGpt.js
DELETED
@@ -1,29 +0,0 @@
-import { ask } from '../../services/gptService.js';
-
-/**
- * CLI entry point for ask-gpt command.
- * @param {string} payload - JSON payload string.
- * @param {{ verbose?: boolean, model?: string, reasoning?: string }} options - CLI options.
- */
-export async function askGpt(payload, options = {}) {
-  const verbose = Boolean(options.verbose);
-  const model = options.model || 'gpt-5-codex';
-  const reasoning = options.reasoning || 'high';
-
-  if (!payload) {
-    console.error('ask-gpt requires a JSON payload argument.');
-    process.exitCode = 1;
-    return;
-  }
-
-  try {
-    const result = await ask(payload, { verbose, model, reasoning });
-    console.log(JSON.stringify(result, null, 2));
-  } catch (error) {
-    console.error(error.message);
-    if (verbose && error?.stack) {
-      console.error(error.stack);
-    }
-    process.exitCode = 1;
-  }
-}
package/src/services/authService.js
DELETED
@@ -1,20 +0,0 @@
-import ora from 'ora';
-import { execa } from 'execa';
-
-/**
- * Initiates the interactive login flow by spawning 'codex login'.
- * This will open a browser and wait for the user to complete authentication.
- * @returns {Promise<void>}
- */
-export async function initiateLogin() {
-  const spinner = ora('Authentication required. Please follow the browser instructions.').start();
-  try {
-    // Run `codex login` interactively, inheriting stdio to show user instructions.
-    await execa('codex', ['login'], { stdio: 'inherit' });
-    spinner.succeed('Login successful. Retrying original command...');
-  } catch (e) {
-    spinner.fail('Login process failed or was cancelled.');
-    // Re-throw to notify p-retry that the attempt failed.
-    throw new Error(`Login failed: ${e.message}`);
-  }
-}
package/src/services/dispatcherService.js
DELETED
@@ -1,33 +0,0 @@
-import { ask as askGpt } from './gptService.js';
-import { executePrompt as askClaude } from './claudeCliService.js';
-
-/**
- * Dispatches an analytical task to the most efficient AI model with a fallback.
- * Priority 1: Codex (GPT) with low reasoning for speed and cost.
- * Priority 2: Claude as a reliable fallback.
- * @param {string} prompt The JSON payload or prompt string for the task.
- * @returns {Promise<object>} The result from the successful AI agent.
- */
-export async function dispatchAnalysisTask(prompt) {
-  try {
-    console.log('🧠 Dispatcher: Attempting analysis with Codex (low reasoning)...');
-    const gptOptions = {
-      model: 'gpt-5-codex',
-      reasoning: 'low'
-    };
-    // The 'ask' function expects payload as first arg, and options as second.
-    // Since prompt is a string here, we wrap it in an object for consistency if needed,
-    // but for simple prompts it can often be passed directly.
-    const payload = (typeof prompt === 'string' && prompt.startsWith('{')) ? prompt : JSON.stringify({ objective: prompt });
-    return await askGpt(payload, { verbose: false, ...gptOptions });
-  } catch (gptError) {
-    console.warn(`⚠️ Codex (low reasoning) failed: ${gptError.message}`);
-    console.log('🔄 Failing over to Claude for analysis...');
-    try {
-      return await askClaude(prompt);
-    } catch (claudeError) {
-      console.error(`❌ Critical Failure: Both Codex and Claude failed for analysis task.`);
-      throw new Error(`Primary (Codex) Error: ${gptError.message}\nFallback (Claude) Error: ${claudeError.message}`);
-    }
-  }
-}