@xelth/eck-snapshot 5.9.0 → 6.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of @xelth/eck-snapshot might be problematic. Click here for more details.
- package/README.md +46 -165
- package/package.json +2 -2
- package/scripts/mcp-eck-core.js +61 -13
- package/setup.json +109 -64
- package/src/cli/cli.js +2 -0
- package/src/cli/commands/updateSnapshot.js +128 -76
- package/src/templates/opencode/coder.template.md +25 -16
- package/src/templates/opencode/junior-architect.template.md +28 -15
- package/src/utils/aiHeader.js +5 -4
- package/src/utils/claudeMdGenerator.js +84 -77
- package/src/utils/fileUtils.js +154 -89
- package/src/utils/gitUtils.js +12 -8
- package/src/utils/opencodeAgentsGenerator.js +8 -2
- package/src/utils/tokenEstimator.js +50 -46
package/src/utils/fileUtils.js
CHANGED
|
@@ -11,56 +11,86 @@ import { minimatch } from 'minimatch';
|
|
|
11
11
|
/**
|
|
12
12
|
* Scanner for detecting and redacting secrets (API keys, tokens)
|
|
13
13
|
*/
|
|
14
|
-
export const SecretScanner = {
|
|
15
|
-
patterns: [
|
|
16
|
-
// Service-specific patterns
|
|
17
|
-
{ name: 'GitHub Token', regex: /gh[pous]_[a-zA-Z0-9]{36}/g },
|
|
18
|
-
{ name: 'AWS Access Key', regex: /(?:AKIA|ASIA)[0-9A-Z]{16}/g },
|
|
19
|
-
{ name: 'OpenAI API Key', regex: /sk-[a-zA-Z0-9]{32,}/g },
|
|
20
|
-
{ name: 'Stripe Secret Key', regex: /sk_live_[0-9a-zA-Z]{24}/g },
|
|
21
|
-
{ name: 'Google API Key', regex: /AIza[0-9A-Za-z\-_]{35}/g },
|
|
22
|
-
{ name: 'Slack Token', regex: /xox[baprs]-[0-9a-zA-Z\-]{10,}/g },
|
|
23
|
-
{ name: 'NPM Token', regex: /npm_[a-zA-Z0-9]{36}/g },
|
|
24
|
-
{ name: 'Private Key', regex: /-----BEGIN (?:RSA |EC |OPENSSH )?PRIVATE KEY-----/g },
|
|
25
|
-
// Generic high-entropy patterns near sensitive keywords
|
|
26
|
-
{
|
|
27
|
-
name: 'Generic Secret',
|
|
28
|
-
regex: /(?:api[_-]?key|secret|password|token|auth|pwd|credential)\s*[:=]\s*["']([a-zA-Z0-9\-_.]{16,})["']/gi
|
|
29
|
-
}
|
|
30
|
-
],
|
|
31
|
-
|
|
32
|
-
/**
|
|
33
|
-
*
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
14
|
+
export const SecretScanner = {
  patterns: [
    // Service-specific patterns
    { name: 'GitHub Token', regex: /gh[pous]_[a-zA-Z0-9]{36}/g },
    { name: 'AWS Access Key', regex: /(?:AKIA|ASIA)[0-9A-Z]{16}/g },
    { name: 'OpenAI API Key', regex: /sk-[a-zA-Z0-9]{32,}/g },
    { name: 'Stripe Secret Key', regex: /sk_live_[0-9a-zA-Z]{24}/g },
    { name: 'Google API Key', regex: /AIza[0-9A-Za-z\-_]{35}/g },
    { name: 'Slack Token', regex: /xox[baprs]-[0-9a-zA-Z\-]{10,}/g },
    { name: 'NPM Token', regex: /npm_[a-zA-Z0-9]{36}/g },
    // FIX: previously this matched only the "-----BEGIN ... PRIVATE KEY-----" header
    // line, so redact() replaced the header and left the actual key material intact.
    // Now the whole BEGIN...END block is matched; the footer group is optional so a
    // truncated header-only fragment is still caught (backward compatible).
    { name: 'Private Key', regex: /-----BEGIN (?:RSA |EC |OPENSSH )?PRIVATE KEY-----(?:[\s\S]*?-----END (?:RSA |EC |OPENSSH )?PRIVATE KEY-----)?/g },
    // Generic high-entropy patterns near sensitive keywords
    {
      name: 'Generic Secret',
      regex: /(?:api[_-]?key|secret|password|token|auth|pwd|credential)\s*[:=]\s*["']([a-zA-Z0-9\-_.]{16,})["']/gi
    }
  ],

  /**
   * Calculates Shannon Entropy of a string
   * @param {string} str - Input string
   * @returns {number} Entropy in bits per character (0 for empty or uniform strings)
   */
  calculateEntropy(str) {
    const len = str.length;
    const frequencies = Array.from(str).reduce((freq, c) => {
      freq[c] = (freq[c] || 0) + 1;
      return freq;
    }, {});
    // Shannon entropy: -sum(p * log2(p)) over character frequencies.
    return Object.values(frequencies).reduce((sum, f) => {
      const p = f / len;
      return sum - (p * Math.log2(p));
    }, 0);
  },

  /**
   * Scans content and replaces detected secrets with a placeholder
   * @param {string} content - File content to scan
   * @param {string} filePath - Path for logging context (currently unused; reserved)
   * @returns {{content: string, found: string[]}} Redacted content and list of found secret types
   */
  redact(content, filePath) {
    let redactedContent = content;
    const foundSecrets = [];

    for (const pattern of this.patterns) {
      // Reset regex lastIndex for global patterns (shared regex objects are stateful)
      pattern.regex.lastIndex = 0;

      const matches = [...content.matchAll(pattern.regex)];
      if (matches.length > 0) {
        for (const match of matches) {
          // For generic pattern, use captured group; for specific patterns, use full match
          const secretValue = match[1] || match[0];
          const placeholder = `[REDACTED_${pattern.name.replace(/\s+/g, '_').toUpperCase()}]`;
          // String-pattern replace is literal (no regex metachars) and replaces the
          // first remaining occurrence; one replace per match covers duplicates.
          redactedContent = redactedContent.replace(secretValue, placeholder);
          foundSecrets.push(pattern.name);
        }
      }
    }

    // Second pass: Shannon Entropy check for arbitrary hardcoded secrets
    // Look for long strings assigned to variable names that might be keys
    const entropyRegex = /(?:const|let|var|set|export|define)\s+([A-Za-z0-9_]*(?:KEY|TOKEN|SECRET|PASSWORD)[A-Za-z0-9_]*)\s*=\s*["']([a-zA-Z0-9+/=_-]{20,128})["']/gi;
    const entropyMatches = [...redactedContent.matchAll(entropyRegex)];

    for (const match of entropyMatches) {
      const secretValue = match[2];
      // Check entropy - random base64 usually has entropy > 4.5
      if (this.calculateEntropy(secretValue) > 4.5 && !secretValue.includes('REDACTED')) {
        const placeholder = `[REDACTED_HIGH_ENTROPY_SECRET]`;
        redactedContent = redactedContent.replace(secretValue, placeholder);
        foundSecrets.push('High Entropy Secret');
      }
    }

    return {
      content: redactedContent,
      found: [...new Set(foundSecrets)]  // de-duplicate secret type names
    };
  }
};
|
|
64
94
|
|
|
65
95
|
export function parseSize(sizeStr) {
|
|
66
96
|
const units = { B: 1, KB: 1024, MB: 1024 ** 2, GB: 1024 ** 3 };
|
|
@@ -838,15 +868,16 @@ export async function applyProfileFilter(allFiles, profileString, repoPath) {
|
|
|
838
868
|
export async function initializeEckManifest(projectPath) {
|
|
839
869
|
const eckDir = path.join(projectPath, '.eck');
|
|
840
870
|
|
|
841
|
-
// Load setup configuration to check AI generation settings
|
|
842
|
-
let aiGenerationEnabled = false;
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
871
|
+
// Load setup configuration to check AI generation settings and project context
|
|
872
|
+
let aiGenerationEnabled = false;
|
|
873
|
+
let setupConfig = null;
|
|
874
|
+
try {
|
|
875
|
+
setupConfig = await loadSetupConfig();
|
|
876
|
+
aiGenerationEnabled = setupConfig?.aiInstructions?.manifestInitialization?.aiGenerationEnabled ?? false;
|
|
877
|
+
} catch (error) {
|
|
878
|
+
// If setup config fails to load, default to disabled
|
|
879
|
+
console.warn(` ⚠️ Could not load setup config: ${error.message}. AI generation disabled.`);
|
|
880
|
+
}
|
|
850
881
|
|
|
851
882
|
try {
|
|
852
883
|
// Check if .eck directory already exists and has all required files
|
|
@@ -894,33 +925,38 @@ export async function initializeEckManifest(projectPath) {
|
|
|
894
925
|
delete staticFacts.allDetections;
|
|
895
926
|
}
|
|
896
927
|
|
|
897
|
-
const staticFactsJson = JSON.stringify(staticFacts, null, 2);
|
|
898
|
-
// --- END NEW LOGIC ---
|
|
899
|
-
|
|
900
|
-
//
|
|
928
|
+
const staticFactsJson = JSON.stringify(staticFacts, null, 2);
|
|
929
|
+
// --- END NEW LOGIC ---
|
|
930
|
+
|
|
931
|
+
// Extract Context from setup.json if available
|
|
932
|
+
const projName = setupConfig?.projectContext?.name || staticFacts.type || 'project';
|
|
933
|
+
const projType = setupConfig?.projectContext?.type || staticFacts.type || 'unknown';
|
|
934
|
+
const projStack = setupConfig?.projectContext?.architecture?.stack?.join(', ') || 'TBD';
|
|
935
|
+
const projAi = setupConfig?.projectContext?.architecture?.aiIntegration || 'None';
|
|
936
|
+
|
|
937
|
+
// 3. Define smarter templates and prompts using setup.json context
|
|
901
938
|
const templateConfigs = {
|
|
902
|
-
'ENVIRONMENT.md': {
|
|
903
|
-
prompt: `Generate raw YAML for .eck/ENVIRONMENT.md based on these project facts:\n${staticFactsJson}\nInclude project_type, runtime, and agent_id: local_dev. NO markdown fences.`,
|
|
904
|
-
fallback: `project_type: ${
|
|
905
|
-
|
|
906
|
-
#
|
|
907
|
-
|
|
908
|
-
agent_id: local_dev
|
|
909
|
-
`
|
|
939
|
+
'ENVIRONMENT.md': {
|
|
940
|
+
prompt: `Generate raw YAML for .eck/ENVIRONMENT.md based on these project facts:\n${staticFactsJson}\nInclude project_type, runtime, and agent_id: local_dev. NO markdown fences.`,
|
|
941
|
+
fallback: `project_type: ${projType}
|
|
942
|
+
agent_id: local_dev
|
|
943
|
+
# Generated from setup.json
|
|
944
|
+
`
|
|
910
945
|
},
|
|
911
|
-
'CONTEXT.md': {
|
|
912
|
-
prompt: `Analyze these project files and dependencies:\n${staticFactsJson}\nGenerate a professional # Project Overview in Markdown. Describe the actual architecture and purpose of this specific project. Be technical and concise. Start with '# Project Overview'.`,
|
|
913
|
-
fallback: `#
|
|
914
|
-
|
|
915
|
-
##
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
(
|
|
946
|
+
'CONTEXT.md': {
|
|
947
|
+
prompt: `Analyze these project files and dependencies:\n${staticFactsJson}\nGenerate a professional # Project Overview in Markdown. Describe the actual architecture and purpose of this specific project. Be technical and concise. Start with '# Project Overview'.`,
|
|
948
|
+
fallback: `# Project Overview
|
|
949
|
+
|
|
950
|
+
## Description
|
|
951
|
+
Project name: ${projName}
|
|
952
|
+
Type: ${projType}
|
|
953
|
+
AI Integration: ${projAi}
|
|
954
|
+
|
|
955
|
+
## Architecture
|
|
956
|
+
Stack: ${projStack}
|
|
957
|
+
|
|
958
|
+
*(Auto-generated from setup.json)*
|
|
959
|
+
`
|
|
924
960
|
},
|
|
925
961
|
'OPERATIONS.md': {
|
|
926
962
|
prompt: `Look at the dependencies and files:\n${staticFactsJson}\nGenerate a Markdown guide for common operations (Setup, Run, Test, Build) using the correct commands for this tech stack. Start with '# Common Operations'.`,
|
|
@@ -938,11 +974,40 @@ ${staticFacts.type === 'nodejs' ? 'npm install' : 'TBD'}`
|
|
|
938
974
|
|
|
939
975
|
**ARCHITECT:** Set a real roadmap based on user goals. **CODER:** Remove this stub marker once a real goal is added.`
|
|
940
976
|
},
|
|
941
|
-
'TECH_DEBT.md': {
|
|
942
|
-
prompt: `Given this is a ${staticFacts.type} project, list 2-3 common technical debt items. Start with '# Technical Debt'.`,
|
|
943
|
-
fallback: `# [STUB: TECH_DEBT.MD]
|
|
944
|
-
|
|
945
|
-
**CODER:** Scan for TODOs/FIXMEs or structural issues and list them here. Remove this stub marker.`
|
|
977
|
+
'TECH_DEBT.md': {
|
|
978
|
+
prompt: `Given this is a ${staticFacts.type} project, list 2-3 common technical debt items. Start with '# Technical Debt'.`,
|
|
979
|
+
fallback: `# [STUB: TECH_DEBT.MD]
|
|
980
|
+
|
|
981
|
+
**CODER:** Scan for TODOs/FIXMEs or structural issues and list them here. Remove this stub marker.`
|
|
982
|
+
},
|
|
983
|
+
'DEPLOY_CHECKLIST.md': {
|
|
984
|
+
prompt: `Based on the project type (${staticFacts.type}), generate a pre-deployment checklist. Start with '# Deployment Checklist'.`,
|
|
985
|
+
fallback: `# [STUB: DEPLOY_CHECKLIST.MD]
|
|
986
|
+
|
|
987
|
+
## 🚨 ATTENTION CODER
|
|
988
|
+
Verify required build steps before deployment.
|
|
989
|
+
|
|
990
|
+
## Pre-Deployment Checklist
|
|
991
|
+
- [ ] Verify all tests pass
|
|
992
|
+
- [ ] Build assets (e.g., npm run build)
|
|
993
|
+
- [ ] Check environment variables
|
|
994
|
+
|
|
995
|
+
**CODER:** Update this checklist with actual build/deploy steps for this project.`
|
|
996
|
+
},
|
|
997
|
+
'RUNTIME_STATE.md': {
|
|
998
|
+
prompt: `Based on the project type (${staticFacts.type}), generate a template for RUNTIME_STATE.md. Start with '# Runtime State'.`,
|
|
999
|
+
fallback: `# Runtime State
|
|
1000
|
+
|
|
1001
|
+
## 🚨 ATTENTION CODER
|
|
1002
|
+
Always check this file and verify the actual runtime state (ports, running processes, env variables) BEFORE writing code. Update this file if ports or access methods change.
|
|
1003
|
+
|
|
1004
|
+
- **Server:** e.g., running on port 3210
|
|
1005
|
+
- **Services:** e.g., Scraper running on port 3211
|
|
1006
|
+
- **Auth:** e.g., admin@local / password
|
|
1007
|
+
- **Verification Commands:**
|
|
1008
|
+
- \`pm2 ls\`
|
|
1009
|
+
- \`curl http://localhost:3210/health\`
|
|
1010
|
+
`
|
|
946
1011
|
},
|
|
947
1012
|
'JOURNAL.md': {
|
|
948
1013
|
fallback: `# Development Journal
|
package/src/utils/gitUtils.js
CHANGED
|
@@ -25,14 +25,18 @@ export async function getGitAnchor(repoPath) {
|
|
|
25
25
|
}
|
|
26
26
|
}
|
|
27
27
|
|
|
28
|
-
export async function getChangedFiles(repoPath, anchorHash) {
|
|
29
|
-
try {
|
|
30
|
-
const
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
28
|
+
/**
 * List the files changed relative to a given anchor commit.
 *
 * @param {string} repoPath - Path to the git repository (used as cwd).
 * @param {string} anchorHash - Commit hash to diff against.
 * @param {boolean} [includeWorkingTree=false] - When true, diff the anchor against
 *   the working tree (includes uncommitted edits); when false, diff anchor..HEAD.
 * @returns {Promise<string[]>} Changed file paths, one per non-empty git output line.
 * @throws {Error} When the underlying git invocation fails.
 */
export async function getChangedFiles(repoPath, anchorHash, includeWorkingTree = false) {
  const baseArgs = ['diff', '--name-only', anchorHash];
  // Appending HEAD restricts the diff to committed changes only.
  const args = includeWorkingTree ? baseArgs : [...baseArgs, 'HEAD'];
  try {
    const { stdout } = await execa('git', args, { cwd: repoPath });
    return stdout.split('\n').filter(Boolean);
  } catch (e) {
    throw new Error(`Failed to get git diff: ${e.message}`);
  }
}
|
|
36
40
|
|
|
37
41
|
export async function getGitDiffOutput(repoPath, anchorHash, excludeFiles = []) {
|
|
38
42
|
try {
|
|
@@ -1,5 +1,9 @@
|
|
|
1
1
|
import fs from 'fs/promises';
|
|
2
2
|
import path from 'path';
|
|
3
|
+
import { fileURLToPath } from 'url';
|
|
4
|
+
|
|
5
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
6
|
+
const __dirname = path.dirname(__filename);
|
|
3
7
|
|
|
4
8
|
/**
|
|
5
9
|
* Generates AGENTS.md for OpenCode integration (GLM Z.AI ecosystem only)
|
|
@@ -30,11 +34,12 @@ export async function generateOpenCodeAgents(repoPath, mode, tree, confidentialF
|
|
|
30
34
|
color: '#10a37f'
|
|
31
35
|
};
|
|
32
36
|
|
|
33
|
-
const templatePath = path.join(
|
|
37
|
+
const templatePath = path.join(__dirname, '..', 'templates', 'opencode', 'junior-architect.template.md');
|
|
34
38
|
try {
|
|
35
39
|
let templateContent = await fs.readFile(templatePath, 'utf-8');
|
|
36
40
|
body = templateContent.replace('{{tree}}', tree);
|
|
37
41
|
} catch (error) {
|
|
42
|
+
console.warn(`⚠️ Could not load JAZ template from ${templatePath}: ${error.message}`);
|
|
38
43
|
body = `# 🧠 ROLE: Swarm Orchestrator (GLM-4.7)\n\nDirectory:\n\`\`\`\n${tree}\n\`\`\``;
|
|
39
44
|
}
|
|
40
45
|
} else {
|
|
@@ -48,10 +53,11 @@ export async function generateOpenCodeAgents(repoPath, mode, tree, confidentialF
|
|
|
48
53
|
color: '#44BA81'
|
|
49
54
|
};
|
|
50
55
|
|
|
51
|
-
const templatePath = path.join(
|
|
56
|
+
const templatePath = path.join(__dirname, '..', 'templates', 'opencode', 'coder.template.md');
|
|
52
57
|
try {
|
|
53
58
|
body = await fs.readFile(templatePath, 'utf-8');
|
|
54
59
|
} catch (error) {
|
|
60
|
+
console.warn(`⚠️ Could not load Coder template from ${templatePath}: ${error.message}`);
|
|
55
61
|
body = `# 🛠️ ROLE: Expert Developer`;
|
|
56
62
|
}
|
|
57
63
|
}
|
|
@@ -137,29 +137,29 @@ export async function addTrainingPoint(projectType, fileSizeInBytes, estimatedTo
|
|
|
137
137
|
}
|
|
138
138
|
}
|
|
139
139
|
|
|
140
|
-
/**
|
|
141
|
-
* Fetch global token weights from Telemetry Hub and merge them into local training data
|
|
142
|
-
*/
|
|
143
|
-
export async function syncTokenWeights() {
|
|
144
|
-
try {
|
|
145
|
-
console.log('Fetching global token weights from Telemetry Hub...');
|
|
146
|
-
const res = await fetch('https://xelth.com/T/tokens/weights');
|
|
147
|
-
if (!res.ok) throw new Error(res.statusText);
|
|
148
|
-
const data = await res.json();
|
|
149
|
-
|
|
150
|
-
if (data && data.coefficients && Object.keys(data.coefficients).length > 0) {
|
|
151
|
-
const localData = await loadTrainingData();
|
|
152
|
-
// Global coefficients override local ones
|
|
153
|
-
localData.coefficients = { ...localData.coefficients, ...data.coefficients };
|
|
154
|
-
await saveTrainingData(localData);
|
|
155
|
-
console.log('Global token weights synchronized successfully.');
|
|
156
|
-
} else {
|
|
157
|
-
console.log('No global weights available yet.');
|
|
158
|
-
}
|
|
159
|
-
} catch (e) {
|
|
160
|
-
console.log('Failed to sync token weights: ' + e.message);
|
|
161
|
-
}
|
|
162
|
-
}
|
|
140
|
+
/**
 * Fetch global token weights from Telemetry Hub and merge them into local training data
 *
 * Best-effort: failures are logged (unless silenced) and swallowed, never thrown.
 *
 * @param {boolean} [silent=false] - Suppress all console output when true.
 * @returns {Promise<void>}
 */
export async function syncTokenWeights(silent = false) {
  // Small helper so every log line respects the silent flag uniformly.
  const log = (msg) => {
    if (!silent) console.log(msg);
  };
  try {
    log('Fetching global token weights from Telemetry Hub...');
    const res = await fetch('https://xelth.com/T/tokens/weights');
    if (!res.ok) throw new Error(res.statusText);
    const data = await res.json();

    const hasWeights =
      Boolean(data && data.coefficients) && Object.keys(data.coefficients).length > 0;
    if (!hasWeights) {
      log('No global weights available yet.');
      return;
    }

    const localData = await loadTrainingData();
    // Global coefficients override local ones
    localData.coefficients = { ...localData.coefficients, ...data.coefficients };
    await saveTrainingData(localData);
    log('Global token weights synchronized successfully.');
  } catch (e) {
    log('Failed to sync token weights: ' + e.message);
  }
}
|
|
163
163
|
|
|
164
164
|
/**
|
|
165
165
|
* Update polynomial coefficients using least squares fitting
|
|
@@ -217,27 +217,31 @@ function updateCoefficients(data, projectType) {
|
|
|
217
217
|
];
|
|
218
218
|
}
|
|
219
219
|
|
|
220
|
-
/**
|
|
221
|
-
* Show current estimation statistics
|
|
222
|
-
*/
|
|
223
|
-
export async function showEstimationStats() {
|
|
224
|
-
const data = await loadTrainingData();
|
|
225
|
-
|
|
226
|
-
console.log('\n📊 Token Estimation Statistics:');
|
|
227
|
-
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
|
228
|
-
|
|
229
|
-
for (const [projectType, coefficients] of Object.entries(data.coefficients)) {
|
|
230
|
-
const points = data.trainingPoints[projectType] || [];
|
|
231
|
-
console.log(`\n🔸 ${projectType}:`);
|
|
232
|
-
console.log(` Coefficients: [${coefficients.map(c => c.toFixed(6)).join(', ')}]`);
|
|
233
|
-
console.log(` Training points: ${points.length}`);
|
|
234
|
-
|
|
235
|
-
if (points.length > 0) {
|
|
236
|
-
|
|
237
|
-
const
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
220
|
+
/**
 * Show current estimation statistics
 *
 * Prints, for every known project type: the fitted polynomial coefficients,
 * the number of stored training points, and the mean absolute error of the
 * current coefficients against those points.
 *
 * @returns {Promise<void>}
 */
export async function showEstimationStats() {
  const data = await loadTrainingData();
  const divider = '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━';

  console.log('\n📊 Token Estimation Statistics:');
  console.log(divider);

  for (const [projectType, coefficients] of Object.entries(data.coefficients)) {
    const points = data.trainingPoints[projectType] || [];
    console.log(`\n🔸 ${projectType}:`);
    console.log(`  Coefficients: [${coefficients.map((c) => c.toFixed(6)).join(', ')}]`);
    console.log(`  Training points: ${points.length}`);

    if (points.length > 0) {
      // Recalculate error against current coefficients, ignoring old stored estimate
      const totalError = points.reduce(
        (sum, p) =>
          sum + Math.abs(p.actualTokens - evaluatePolynomial(coefficients, p.fileSizeInBytes)),
        0
      );
      console.log(`  Average error: ${Math.round(totalError / points.length)} tokens`);
    }
  }

  console.log(divider);
}
|