chekk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/chekk.js ADDED
@@ -0,0 +1,19 @@
1
#!/usr/bin/env node

import { Command } from 'commander';
import { run } from '../src/index.js';

// CLI entry point: declare the flags and hand everything to the analyzer.
// `--no-upload` is commander's negated-option form: it arrives in the
// action handler as `opts.upload === false`.
const cli = new Command()
  .name('chekk')
  .description('See how you prompt. Analyze your AI coding workflow.')
  .version('0.1.0')
  .option('--offline', 'Skip prose generation and show raw metrics only')
  .option('--json', 'Output raw metrics as JSON')
  .option('--no-upload', 'Skip the claim/upload prompt')
  .action(async (opts) => {
    await run(opts);
  });

cli.parse();
package/package.json ADDED
@@ -0,0 +1,44 @@
1
+ {
2
+ "name": "chekk",
3
+ "version": "0.1.0",
4
+ "description": "See how you prompt. Chekk analyzes your AI coding workflow and tells you what kind of engineer you are.",
5
+ "bin": {
6
+ "chekk": "./bin/chekk.js"
7
+ },
8
+ "type": "module",
9
+ "files": [
10
+ "bin/",
11
+ "src/"
12
+ ],
13
+ "scripts": {
14
+ "start": "node bin/chekk.js"
15
+ },
16
+ "keywords": [
17
+ "ai",
18
+ "coding",
19
+ "claude",
20
+ "cursor",
21
+ "copilot",
22
+ "developer",
23
+ "analysis",
24
+ "prompt",
25
+ "engineering",
26
+ "vibe-coding",
27
+ "workflow"
28
+ ],
29
+ "author": "Chekk <timi@chekk.dev>",
30
+ "license": "MIT",
31
+ "repository": {
32
+ "type": "git",
33
+ "url": "https://github.com/omarionnn/chekk"
34
+ },
35
+ "homepage": "https://chekk.dev",
36
+ "engines": {
37
+ "node": ">=18.0.0"
38
+ },
39
+ "dependencies": {
40
+ "chalk": "^5.3.0",
41
+ "commander": "^12.1.0",
42
+ "ora": "^8.0.1"
43
+ }
44
+ }
package/src/detect.js ADDED
@@ -0,0 +1,94 @@
1
+ import { existsSync, readdirSync, statSync } from 'fs';
2
+ import { join } from 'path';
3
+ import { homedir } from 'os';
4
+
5
/**
 * Detect installed AI coding tools and count available sessions.
 *
 * Scans well-known on-disk locations for Claude Code, Cursor, and Codex.
 *
 * @returns {Array<object>} One entry per detected tool:
 *   { tool, sessions, projects, estimatedPrompts, basePath } — and, for
 *   tools that are detected but not yet analyzable, additionally
 *   { status: 'detected_not_supported', message }.
 */
export function detectTools() {
  const home = homedir();
  const results = [];

  // Claude Code: ~/.claude/projects/
  const claudeProjectsDir = join(home, '.claude', 'projects');
  if (existsSync(claudeProjectsDir)) {
    const projects = readdirSync(claudeProjectsDir).filter(f => {
      const full = join(claudeProjectsDir, f);
      try {
        return statSync(full).isDirectory();
      } catch {
        // FIX: statSync can throw on broken symlinks / permission errors;
        // skip the entry instead of crashing all detection.
        return false;
      }
    });

    let totalSessions = 0;
    let totalLines = 0;
    const projectDetails = [];

    for (const project of projects) {
      const projectDir = join(claudeProjectsDir, project);
      let jsonlFiles = [];
      try {
        jsonlFiles = readdirSync(projectDir).filter(f =>
          f.endsWith('.jsonl') && !f.startsWith('agent-')
        );
      } catch {
        // FIX: unreadable project directory — ignore it, keep scanning.
        continue;
      }

      if (jsonlFiles.length > 0) {
        totalSessions += jsonlFiles.length;
        // Rough line count for display (~500 bytes per JSONL line).
        for (const file of jsonlFiles) {
          try {
            const stat = statSync(join(projectDir, file));
            totalLines += Math.floor(stat.size / 500); // rough estimate
          } catch {}
        }
        projectDetails.push({
          // Claude Code encodes the project path with dashes; undo that
          // for a friendlier display name.
          name: project.replace(/-/g, '/').replace(/^\//, ''),
          sessions: jsonlFiles.length,
          path: projectDir,
        });
      }
    }

    if (totalSessions > 0) {
      results.push({
        tool: 'Claude Code',
        sessions: totalSessions,
        projects: projectDetails,
        estimatedPrompts: totalLines,
        basePath: claudeProjectsDir,
      });
    }
  }

  // Cursor: platform-specific workspaceStorage locations.
  const cursorPaths = [
    join(home, 'Library', 'Application Support', 'Cursor', 'User', 'workspaceStorage'), // macOS
    join(home, '.config', 'Cursor', 'User', 'workspaceStorage'), // Linux
    join(home, 'AppData', 'Roaming', 'Cursor', 'User', 'workspaceStorage'), // Windows
  ];
  for (const cursorPath of cursorPaths) {
    if (existsSync(cursorPath)) {
      results.push({
        tool: 'Cursor',
        sessions: 0,
        projects: [],
        estimatedPrompts: 0,
        basePath: cursorPath,
        status: 'detected_not_supported',
        message: 'Cursor support coming in V2',
      });
      break;
    }
  }

  // Codex: ~/.codex/
  const codexPath = join(home, '.codex');
  if (existsSync(codexPath)) {
    results.push({
      tool: 'Codex',
      sessions: 0,
      projects: [],
      estimatedPrompts: 0,
      basePath: codexPath,
      status: 'detected_not_supported',
      message: 'Codex support coming in V2',
    });
  }

  return results;
}
package/src/display.js ADDED
@@ -0,0 +1,137 @@
1
+ import chalk from 'chalk';
2
+
3
+ /**
4
+ * Display detection results.
5
+ */
6
/**
 * Print one line per detected tool: supported tools show session/project
 * counts, unsupported ones show their "coming soon" message.
 */
export function displayDetection(tools) {
  console.log();

  tools.forEach((entry) => {
    if (entry.status === 'detected_not_supported') {
      console.log(chalk.dim(` Found ${entry.tool} — ${entry.message}`));
    } else {
      const counts = chalk.dim(` (${entry.sessions} sessions across ${entry.projects.length} projects)`);
      console.log(chalk.green(` Found ${entry.tool}`) + counts);
    }
  });
  console.log();
}
21
+
22
+ /**
23
+ * Display a progress bar.
24
+ */
25
/**
 * Render a colored progress bar for a 0-100 score.
 *
 * @param {number} score - Score in the 0-100 range (clamped defensively).
 * @param {number} [width=20] - Bar width in characters.
 * @returns {string} Filled blocks colored by score band, dim shade for the rest.
 */
function progressBar(score, width = 20) {
  // FIX: clamp so an out-of-range score can't yield a negative `empty`,
  // which would make ''.repeat() throw a RangeError.
  const clamped = Math.max(0, Math.min(100, score));
  const filled = Math.round((clamped / 100) * width);
  const empty = width - filled;

  const color = clamped >= 80 ? chalk.green :
                clamped >= 60 ? chalk.yellow :
                clamped >= 40 ? chalk.hex('#FFA500') :
                chalk.red;

  return color('\u2588'.repeat(filled)) + chalk.dim('\u2591'.repeat(empty));
}
36
+
37
+ /**
38
+ * Display the final Chekk results with prose from the API.
39
+ */
40
/**
 * Display the final Chekk results, including prose sections generated by
 * the API: quoted section blurbs, then archetype, tagline, and tier bar.
 */
export function displayResults(result, prose) {
  const { overall, archetype, tier } = result;

  const tierColor =
    tier === 'LEGENDARY' ? chalk.hex('#FFD700')
    : tier === 'RARE' ? chalk.hex('#A855F7')
    : tier === 'UNCOMMON' ? chalk.hex('#3B82F6')
    : chalk.dim;

  const divider = chalk.dim('\u2500'.repeat(50));

  console.log(divider);
  console.log();

  // Prose sections from the API (each wrapped to the display width)
  if (prose?.sections) {
    for (const section of prose.sections) {
      console.log(` ${section.emoji} ${chalk.bold(section.title)}`);
      for (const line of wordWrap(section.description, 45)) {
        console.log(chalk.italic(` ${chalk.dim('"')}${line}${chalk.dim('"')}`));
      }
      console.log();
    }
  }

  console.log(divider);
  console.log();

  // Archetype + tier
  console.log(` ${chalk.bold('\u2192')} You're ${chalk.bold.white(archetype.name)}`);
  if (prose?.tagline) {
    console.log(` ${chalk.dim('"' + prose.tagline + '"')}`);
  }
  console.log();
  console.log(` ${chalk.bold('\u2192')} Tier: ${tierColor(tier)} ${progressBar(overall)} ${chalk.bold(overall)}`);
  console.log();
}
76
+
77
+ /**
78
+ * Display offline/raw metric results (no API call).
79
+ */
80
/**
 * Render the four raw metric bars plus archetype/tier, without any
 * API-generated prose (used for --offline and as the network-failure
 * fallback).
 */
export function displayOfflineResults(result) {
  const { overall, scores, archetype, tier } = result;

  const tierColor =
    tier === 'LEGENDARY' ? chalk.hex('#FFD700')
    : tier === 'RARE' ? chalk.hex('#A855F7')
    : tier === 'UNCOMMON' ? chalk.hex('#3B82F6')
    : chalk.dim;

  const divider = chalk.dim('\u2500'.repeat(50));

  // Label/value pairs in display order.
  const rows = [
    ['Decomposition', scores.decomposition],
    ['Debug Efficiency', scores.debugCycles],
    ['AI Leverage', scores.aiLeverage],
    ['Workflow', scores.sessionStructure],
  ];

  console.log(divider);
  console.log();
  for (const [label, value] of rows) {
    console.log(` ${chalk.bold(label)} ${progressBar(value)} ${value}`);
  }
  console.log();
  console.log(divider);
  console.log();

  console.log(` ${chalk.bold('\u2192')} You're ${chalk.bold.white(archetype.name)}`);
  console.log(` ${chalk.bold('\u2192')} Tier: ${tierColor(tier)} ${progressBar(overall)} ${chalk.bold(overall)}`);
  console.log();
  console.log(chalk.dim(' Run without --offline for personalized AI-generated insights'));
  console.log();
}
105
+
106
+ /**
107
+ * Display the claim prompt.
108
+ */
109
/**
 * Show the claim URL plus a short pitch for connecting accounts.
 */
export function displayClaimPrompt(claimUrl) {
  const lines = [
    chalk.dim('\u2500'.repeat(50)),
    '',
    ` ${chalk.bold('Claim your profile:')} ${chalk.cyan.underline(claimUrl)}`,
    '',
    chalk.dim(' Connect GitHub + LinkedIn to make your profile'),
    chalk.dim(' visible to companies hiring AI-native engineers.'),
    '',
  ];
  for (const line of lines) {
    console.log(line);
  }
}
118
+
119
+ /**
120
+ * Word wrap text to a max width.
121
+ */
122
/**
 * Greedily wrap text at word boundaries so no line exceeds maxWidth.
 * Single words longer than maxWidth still get their own line.
 *
 * @param {string} text
 * @param {number} maxWidth
 * @returns {string[]} Wrapped lines (empty array for empty text).
 */
function wordWrap(text, maxWidth) {
  const lines = [];
  let line = '';

  for (const word of text.split(' ')) {
    const wouldOverflow = line.length > 0 && line.length + word.length + 1 > maxWidth;
    if (wouldOverflow) {
      lines.push(line);
      line = word;
    } else {
      line = line.length > 0 ? `${line} ${word}` : word;
    }
  }
  if (line) lines.push(line);
  return lines;
}
package/src/index.js ADDED
@@ -0,0 +1,154 @@
1
+ import chalk from 'chalk';
2
+ import ora from 'ora';
3
+ import { detectTools } from './detect.js';
4
+ import { parseAllProjects } from './parsers/claude-code.js';
5
+ import { computeDecomposition } from './metrics/decomposition.js';
6
+ import { computeDebugCycles } from './metrics/debug-cycles.js';
7
+ import { computeAILeverage } from './metrics/ai-leverage.js';
8
+ import { computeSessionStructure } from './metrics/session-structure.js';
9
+ import { computeOverallScore } from './scorer.js';
10
+ import { displayDetection, displayResults, displayOfflineResults, displayClaimPrompt } from './display.js';
11
+ import { generateProse, askClaim, uploadAndClaim } from './upload.js';
12
+
13
/**
 * Main CLI flow: detect tools, parse session history, score the workflow,
 * then (optionally) generate prose via the API and offer to claim a profile.
 *
 * @param {object} [options] - Commander flags: { offline, json, upload }.
 *   `--no-upload` arrives here as `options.upload === false`.
 */
export async function run(options = {}) {
  console.log();
  console.log(chalk.bold(' chekk') + chalk.dim(' — see how you prompt'));
  console.log();

  // Step 1: Detect tools
  const detectSpinner = ora({ text: 'Scanning for AI coding tools...', indent: 3 }).start();
  const tools = detectTools();

  if (tools.length === 0) {
    detectSpinner.fail('No AI coding tools detected');
    console.log();
    console.log(chalk.dim(' Chekk currently supports Claude Code.'));
    console.log(chalk.dim(' Cursor and Codex support coming in V2.'));
    console.log();
    process.exit(1);
  }

  const supported = tools.filter((t) => t.status !== 'detected_not_supported');
  if (supported.length === 0) {
    detectSpinner.fail('No supported AI tools found');
    console.log();
    for (const t of tools) {
      console.log(chalk.dim(` Found ${t.tool} — ${t.message}`));
    }
    console.log();
    process.exit(1);
  }

  detectSpinner.succeed('Found AI coding tools');
  displayDetection(tools);

  // Step 2: Parse sessions
  const parseSpinner = ora({ text: 'Parsing session history...', indent: 3 }).start();

  const allSessions = [];
  for (const tool of supported) {
    if (tool.tool === 'Claude Code') {
      allSessions.push(...parseAllProjects(tool.basePath));
    }
  }

  if (allSessions.length === 0) {
    parseSpinner.fail('No sessions found to analyze');
    console.log();
    process.exit(1);
  }

  // Aggregate stats for display and upload
  const totalExchanges = allSessions.reduce((sum, s) => sum + s.exchangeCount, 0);
  const projects = [...new Set(allSessions.map((s) => s.project))];

  // Date range across all sessions (oldest → newest)
  const timestamps = allSessions
    .map((s) => s.startTime)
    .filter(Boolean)
    .map((t) => new Date(t))
    .sort((a, b) => a - b);
  const dateRange = timestamps.length >= 2
    ? `${timestamps[0].toLocaleDateString()} to ${timestamps[timestamps.length - 1].toLocaleDateString()}`
    : 'unknown';

  parseSpinner.succeed(
    `Parsed ${chalk.bold(allSessions.length)} sessions, ${chalk.bold(totalExchanges)} exchanges`
  );
  console.log(chalk.dim(` ${projects.length} projects — ${dateRange}`));
  console.log();

  // Step 3: Compute metrics
  const metricsSpinner = ora({ text: 'Analyzing your workflow patterns...', indent: 3 }).start();

  const metrics = {
    decomposition: computeDecomposition(allSessions),
    debugCycles: computeDebugCycles(allSessions),
    aiLeverage: computeAILeverage(allSessions),
    sessionStructure: computeSessionStructure(allSessions),
  };

  const result = computeOverallScore(metrics);

  metricsSpinner.succeed('Analysis complete');
  console.log();

  const sessionStats = {
    totalSessions: allSessions.length,
    totalExchanges,
    projectCount: projects.length,
    dateRange,
    tools: supported.map((t) => t.tool),
  };

  // JSON output mode short-circuits all interactive output
  if (options.json) {
    console.log(JSON.stringify({ metrics, result, sessionStats }, null, 2));
    return;
  }

  // Step 4: Generate prose (unless offline)
  if (options.offline) {
    displayOfflineResults(result);
  } else {
    const proseSpinner = ora({ text: 'Generating your personalized profile...', indent: 3 }).start();
    try {
      const proseResponse = await generateProse(metrics, result, sessionStats);
      proseSpinner.succeed('Profile generated');
      console.log();
      displayResults(result, proseResponse);
    } catch {
      // Network failure is non-fatal: fall back to the local raw scores.
      proseSpinner.warn('Could not reach chekk.dev — showing raw scores');
      console.log();
      displayOfflineResults(result);
    }
  }

  // Step 5: Claim prompt
  if (options.upload !== false) {
    try {
      const wantsClaim = await askClaim();
      if (!wantsClaim) {
        console.log();
        console.log(chalk.dim(' No worries. Run `npx chekk` again anytime to claim your score.'));
        console.log();
      } else {
        const claimSpinner = ora({ text: 'Creating your profile...', indent: 3 }).start();
        try {
          const claimResult = await uploadAndClaim(metrics, result, sessionStats);
          claimSpinner.succeed('Profile created');
          displayClaimPrompt(claimResult.claimUrl || 'https://chekk.dev/claim');
        } catch (err) {
          claimSpinner.fail('Could not create profile — try again later');
          console.log(chalk.dim(` ${err.message}`));
          console.log();
        }
      }
    } catch {
      // stdin not available (piped), skip claim
      console.log();
    }
  }
}
@@ -0,0 +1,124 @@
1
/**
 * AI Leverage
 *
 * Measures whether the engineer uses AI for high-level design and architecture
 * or mostly for boilerplate and simple tasks.
 *
 * Signals:
 *  - Architectural/design prompts vs boilerplate/CRUD prompts
 *  - Complexity of requested tasks
 *  - Usage of AI for planning, design review, code review
 *  - Diversity of tool usage (not just "write code" but also explore, analyze, test)
 *
 * @param {Array<{exchanges: Array<{userPrompt?: string, toolCalls?: Array}>}>} sessions
 * @returns {{score: number, details: object}} score clamped to 0-100.
 */

const architecturalPatterns = /\b(architect|design|refactor|redesign|restructure|system design|data model|schema|api design|interface|abstract|pattern|trade-?off|scalab|approach|strategy|migration|infrastructure)\b/i;
const planningPatterns = /\b(plan|breakdown|break down|think through|help me think|what('?s| is) the best (way|approach)|how should (i|we)|pros and cons|options for|compare|evaluate|review my|code review|audit)\b/i;
const exploratoryPatterns = /\b(explain|understand|how does|what does|why does|walk me through|investigate|diagnose|analyze|explore|deep dive|look into)\b/i;
const boilerplatePatterns = /\b(add a (button|field|column|route|endpoint|page|component)|create a (form|modal|table|list|card)|simple (function|class|component)|CRUD|boilerplate|scaffold|template|generate (a |the )?(basic|simple))\b/i;
const testingPatterns = /\b(test|spec|unit test|integration test|e2e|coverage|assert|expect|mock|fixture)\b/i;

const highLeverageTools = ['Task', 'WebSearch', 'WebFetch', 'Grep', 'Glob', 'Read'];
const codingTools = ['Write', 'Edit', 'Bash', 'NotebookEdit'];

export function computeAILeverage(sessions) {
  if (sessions.length === 0) return { score: 50, details: {} };

  let totalPrompts = 0;
  let architecturalPrompts = 0;
  let planningPrompts = 0;
  let exploratoryPrompts = 0;
  let boilerplatePrompts = 0;
  let testingPrompts = 0;
  let highLeverageToolUses = 0;
  let codingToolUses = 0;
  let totalToolUses = 0;

  // Track prompt complexity via length and structure
  let complexPrompts = 0; // > 200 chars with multiple sentences
  let trivialPrompts = 0; // < 50 chars, simple commands

  for (const session of sessions) {
    for (const exchange of session.exchanges) {
      const prompt = exchange.userPrompt || '';
      totalPrompts++;

      // Categorize prompt type (categories are not mutually exclusive)
      if (architecturalPatterns.test(prompt)) architecturalPrompts++;
      if (planningPatterns.test(prompt)) planningPrompts++;
      if (exploratoryPatterns.test(prompt)) exploratoryPrompts++;
      if (boilerplatePatterns.test(prompt)) boilerplatePrompts++;
      if (testingPatterns.test(prompt)) testingPrompts++;

      // Complexity
      const sentences = prompt.split(/[.!?]+/).filter(s => s.trim().length > 10);
      if (prompt.length > 200 && sentences.length >= 2) {
        complexPrompts++;
      } else if (prompt.length < 50) {
        trivialPrompts++;
      }

      // Tool usage from assistant responses.
      // FIX: guard against exchanges without a toolCalls array (other
      // metrics tolerate missing fields; this one crashed).
      for (const tool of exchange.toolCalls || []) {
        totalToolUses++;
        const toolName = tool.tool || '';
        if (highLeverageTools.some(t => toolName.includes(t))) {
          highLeverageToolUses++;
        }
        if (codingTools.some(t => toolName.includes(t))) {
          codingToolUses++;
        }
      }
    }
  }

  // Score components

  // High-level thinking ratio (architectural + planning + exploratory vs total)
  const highLevelPrompts = architecturalPrompts + planningPrompts + exploratoryPrompts;
  const highLevelRatio = totalPrompts > 0 ? highLevelPrompts / totalPrompts : 0;
  const highLevelScore = Math.min(100, highLevelRatio * 250); // 40%+ = 100

  // Boilerplate ratio (lower is better)
  const boilerplateRatio = totalPrompts > 0 ? boilerplatePrompts / totalPrompts : 0;
  const boilerplatePenalty = boilerplateRatio * 60;

  // Complexity ratio
  const complexRatio = totalPrompts > 0 ? complexPrompts / totalPrompts : 0;
  const complexScore = Math.min(100, complexRatio * 300);

  // Tool diversity - using AI for research/exploration not just coding
  const researchToolRatio = totalToolUses > 0 ? highLeverageToolUses / totalToolUses : 0.5;
  const toolDiversityScore = Math.min(100, researchToolRatio * 200);

  // Testing awareness
  const testingRatio = totalPrompts > 0 ? testingPrompts / totalPrompts : 0;
  const testingBonus = Math.min(20, testingRatio * 200);

  // Weights sum to 1.0. FIX: removed the dead `50 * 0 // baseline filler` term.
  const score = Math.round(
    highLevelScore * 0.35 +
    (100 - boilerplatePenalty) * 0.2 +
    complexScore * 0.2 +
    toolDiversityScore * 0.15 +
    testingBonus * 0.1
  );

  return {
    score: Math.max(0, Math.min(100, score)),
    details: {
      totalPrompts,
      architecturalPrompts,
      planningPrompts,
      exploratoryPrompts,
      boilerplatePrompts,
      testingPrompts,
      highLevelRatio: Math.round(highLevelRatio * 100),
      complexPromptRatio: Math.round(complexRatio * 100),
      toolDiversity: {
        total: totalToolUses,
        research: highLeverageToolUses,
        coding: codingToolUses,
      },
    },
  };
}
@@ -0,0 +1,124 @@
1
/**
 * Debug Cycle Efficiency
 *
 * Measures how effectively the engineer resolves issues with AI assistance.
 *
 * Signals:
 *  - Error/fix loops: user reports error → assistant tries fix → user reports same error
 *  - Number of turns to resolution
 *  - Quality of error context provided (stack traces, specific error messages)
 *  - "it's still broken" vs targeted debug prompts
 *
 * @param {Array<{exchanges: Array<{userPrompt?: string}>}>} sessions
 * @returns {{score: number, details: object}} score clamped to 0-100.
 */

const errorPatterns = /\b(error|bug|broken|crash|fail|exception|traceback|stack trace|doesn'?t work|not working|issue|problem|wrong)\b/i;
const vaguePhrases = /^(it'?s? (?:still )?(?:not working|broken|wrong|failing))|^(fix it|try again|still (?:the same|broken|failing|not working))|^(same (?:error|issue|problem|thing))/i;
const specificDebugPatterns = /\b(line \d+|TypeError|SyntaxError|ImportError|ReferenceError|ValueError|KeyError|AttributeError|NoneType|undefined is not|cannot read prop|stack trace|traceback|\.py:\d+|\.ts:\d+|\.js:\d+|status (?:code )?\d{3}|HTTP \d{3}|ENOENT|EACCES|CORS|404|500|502|503)\b/i;
const resolutionPatterns = /\b(works|working|fixed|solved|resolved|perfect|great|thanks|nice|awesome|that did it|looks good|ship it)\b/i;

export function computeDebugCycles(sessions) {
  if (sessions.length === 0) return { score: 50, details: {} };

  let totalDebugSequences = 0;
  let totalTurnsToResolve = 0;
  let vagueReports = 0;
  let specificReports = 0;
  let unresolvedSequences = 0;
  let quickFixes = 0; // resolved in 1-2 turns
  let longLoops = 0; // > 5 turns to resolve

  for (const session of sessions) {
    const { exchanges } = session;
    let inDebugMode = false;
    let debugTurnCount = 0;

    for (let i = 0; i < exchanges.length; i++) {
      const prompt = exchanges[i].userPrompt || '';

      if (errorPatterns.test(prompt)) {
        if (!inDebugMode) {
          // Starting a new debug sequence
          inDebugMode = true;
          debugTurnCount = 1;
          totalDebugSequences++;
        } else {
          debugTurnCount++;
        }

        // Check quality of error report
        if (vaguePhrases.test(prompt)) {
          vagueReports++;
        }
        if (specificDebugPatterns.test(prompt) || prompt.length > 200) {
          specificReports++;
        }
      } else if (inDebugMode) {
        // The first non-error prompt ends the sequence: either an explicit
        // "it works" resolution, or the user simply moved on.
        totalTurnsToResolve += debugTurnCount;
        if (resolutionPatterns.test(prompt)) {
          if (debugTurnCount <= 2) quickFixes++;
          if (debugTurnCount > 5) longLoops++;
        } else {
          // Moved on without explicit resolution
          unresolvedSequences++;
        }
        inDebugMode = false;
        debugTurnCount = 0;
      }
    }

    // Handle session ending mid-debug.
    if (inDebugMode) {
      totalTurnsToResolve += debugTurnCount;
      // FIX: a session ending mid-debug is unresolved; this counter was
      // previously written in only one branch and never read at all.
      unresolvedSequences++;
      if (debugTurnCount > 5) longLoops++;
    }
  }

  const avgTurnsToResolve = totalDebugSequences > 0
    ? totalTurnsToResolve / totalDebugSequences
    : 0;

  const totalReports = vagueReports + specificReports;
  const specificRatio = totalReports > 0 ? specificReports / totalReports : 0.5;

  // Score components
  // Fewer turns to resolve = better
  const turnsScore = avgTurnsToResolve === 0 ? 70 :
                     avgTurnsToResolve <= 2 ? 95 :
                     avgTurnsToResolve <= 3 ? 85 :
                     avgTurnsToResolve <= 5 ? 65 :
                     avgTurnsToResolve <= 8 ? 40 : 20;

  // More specific reports = better
  const specificScore = specificRatio * 100;

  // Quick fix ratio
  const quickFixRatio = totalDebugSequences > 0 ? quickFixes / totalDebugSequences : 0.5;
  const quickFixScore = quickFixRatio * 100;

  // Long loop penalty
  const longLoopRatio = totalDebugSequences > 0 ? longLoops / totalDebugSequences : 0;
  const longLoopPenalty = longLoopRatio * 50;

  const score = Math.round(
    turnsScore * 0.35 +
    specificScore * 0.25 +
    quickFixScore * 0.25 +
    (100 - longLoopPenalty) * 0.15
  );

  return {
    score: Math.max(0, Math.min(100, score)),
    details: {
      totalDebugSequences,
      avgTurnsToResolve: Math.round(avgTurnsToResolve * 10) / 10,
      quickFixes,
      longLoops,
      // FIX: surface the previously-dead counter so callers can see it.
      unresolvedSequences,
      specificReportRatio: Math.round(specificRatio * 100),
      vagueReports,
      specificReports,
    },
  };
}
@@ -0,0 +1,98 @@
1
/**
 * Decomposition Quality
 *
 * Measures whether the engineer breaks complex tasks into subtasks
 * or dumps everything in single mega-prompts.
 *
 * Signals:
 *  - Multi-step sessions (several exchanges building on each other) → higher
 *  - Single mega-prompt sessions → lower
 *  - Prompt length distribution (long single prompts = less decomposition)
 *  - Follow-up prompts that reference or build on previous context
 *
 * @param {Array<{exchanges: Array<{userPrompt?: string}>}>} sessions
 * @returns {{score: number, details: object}} score clamped to 0-100.
 */

export function computeDecomposition(sessions) {
  if (sessions.length === 0) return { score: 50, details: {} };

  // Prompts that continue a thread ("now", "next", ...) or refine prior output.
  const followupPatterns = /^(now |next |then |also |and |ok |okay |great |good |perfect |after that|building on|following up|continuing)/i;
  const refinementPatterns = /^(actually |wait |hmm |instead |change |modify |update |tweak |adjust |fix |but )/i;

  let totalExchanges = 0;
  let multiStepSessions = 0;   // 4+ exchanges
  let singleShotSessions = 0;  // exactly 1 exchange
  let promptCount = 0;
  let totalPromptChars = 0;
  let longPromptCount = 0;     // > 500 chars
  let shortPromptCount = 0;    // < 100 chars
  let contextualFollowups = 0;

  for (const { exchanges } of sessions) {
    totalExchanges += exchanges.length;

    if (exchanges.length >= 4) {
      multiStepSessions++;
    } else if (exchanges.length === 1) {
      singleShotSessions++;
    }

    exchanges.forEach((exchange, idx) => {
      const prompt = exchange.userPrompt || '';
      promptCount++;
      totalPromptChars += prompt.length;

      if (prompt.length > 500) longPromptCount++;
      if (prompt.length < 100) shortPromptCount++;

      // Only follow-ups after the opening prompt count as contextual.
      if (idx > 0 && (followupPatterns.test(prompt) || refinementPatterns.test(prompt))) {
        contextualFollowups++;
      }
    });
  }

  const avgPromptLength = promptCount > 0 ? totalPromptChars / promptCount : 0;

  // Score components (each 0-100)
  const multiStepRatio = multiStepSessions / sessions.length;
  const multiStepScore = Math.min(100, multiStepRatio * 150); // bonus for > 66%

  const singleShotRatio = singleShotSessions / sessions.length;
  const singleShotPenalty = singleShotRatio * 40; // up to -40

  // Moderate prompt length is good (not too mega, not too terse)
  let lengthScore;
  if (avgPromptLength > 50 && avgPromptLength < 400) {
    lengthScore = 80;
  } else if (avgPromptLength >= 400 && avgPromptLength < 800) {
    lengthScore = 60;
  } else if (avgPromptLength >= 800) {
    lengthScore = 30;
  } else {
    lengthScore = 50;
  }

  // Contextual followups show iterative thinking
  const followupRatio = promptCount > 0 ? contextualFollowups / promptCount : 0;
  const followupScore = Math.min(100, followupRatio * 300);

  const avgExchangesPerSession = totalExchanges / sessions.length;
  const depthScore = Math.min(100, avgExchangesPerSession * 8); // 12+ exchanges = 100

  const score = Math.round(
    multiStepScore * 0.25 +
    (100 - singleShotPenalty) * 0.15 +
    lengthScore * 0.2 +
    followupScore * 0.2 +
    depthScore * 0.2
  );

  return {
    score: Math.max(0, Math.min(100, score)),
    details: {
      totalSessions: sessions.length,
      multiStepSessions,
      singleShotSessions,
      avgExchangesPerSession: Math.round(avgExchangesPerSession * 10) / 10,
      avgPromptLength: Math.round(avgPromptLength),
      longPromptRatio: promptCount > 0 ? Math.round(longPromptCount / promptCount * 100) : 0,
      contextualFollowupRatio: promptCount > 0 ? Math.round(followupRatio * 100) : 0,
    },
  };
}
@@ -0,0 +1,134 @@
1
/**
 * Session Structure / Workflow Quality
 *
 * Measures how deliberate and structured the engineer's workflow is.
 *
 * Signals:
 *  - Do sessions start with context-setting? (explaining what needs to happen)
 *  - Do they plan before diving into code?
 *  - Is there a review/validation step at the end?
 *  - Session duration distribution (very short = throwaway, very long = unfocused)
 *  - Modification rate of AI output (shows critical review)
 *
 * @param {Array<{exchanges: Array, durationMinutes?: number}>} sessions
 * @returns {{score: number, details: object}} score clamped to 0-100.
 */

const contextSettingPatterns = /^(i('?m| am) (working on|building|trying to|looking at)|we need to|the goal is|here'?s (the|what)|context:|background:|i have a|there'?s a|i want to|let me explain)/i;
const planningStartPatterns = /^(let'?s (plan|think|figure|start by)|first,? (let'?s|we should)|before we (start|begin|code)|the plan is|step 1|here'?s (my|the) plan)/i;
const reviewPatterns = /\b(looks good|ship it|deploy|push it|commit|merge|let'?s go|lgtm|approved|test it|run (the )?tests|build it|does this look|review this|check this)\b/i;
const refinementPatterns = /\b(actually|wait|hmm|instead|change|modify|tweak|adjust|no,? |not quite|close but|almost|that'?s not)\b/i;

export function computeSessionStructure(sessions) {
  if (sessions.length === 0) return { score: 50, details: {} };

  let withContext = 0;     // sessions opening with context-setting
  let withPlan = 0;        // sessions opening with an explicit plan
  let withReview = 0;      // sessions ending on a review/validation prompt
  let refinementCount = 0;
  let totalExchanges = 0;

  // Duration distribution
  let shortSessions = 0;   // < 5 min
  let mediumSessions = 0;  // 5-60 min
  let longSessions = 0;    // > 60 min
  let focusedSessions = 0; // 10-45 min (sweet spot)

  // Longer opening prompts suggest more deliberate setup.
  let firstPromptTotalLength = 0;

  for (const { exchanges, durationMinutes } of sessions) {
    if (exchanges.length === 0) continue;

    totalExchanges += exchanges.length;

    const openingPrompt = exchanges[0].userPrompt || '';
    firstPromptTotalLength += openingPrompt.length;

    if (contextSettingPatterns.test(openingPrompt) || openingPrompt.length > 200) {
      withContext++;
    }
    if (planningStartPatterns.test(openingPrompt)) {
      withPlan++;
    }

    // A review signal in either of the final two prompts counts as a
    // review-ended session (second-to-last only when 3+ exchanges exist).
    if (exchanges.length >= 2) {
      const closingPrompt = exchanges[exchanges.length - 1].userPrompt || '';
      const priorPrompt = exchanges.length >= 3
        ? exchanges[exchanges.length - 2].userPrompt || ''
        : '';
      if (reviewPatterns.test(closingPrompt) || reviewPatterns.test(priorPrompt)) {
        withReview++;
      }
    }

    // Refinements after the opening prompt show critical evaluation.
    for (let i = 1; i < exchanges.length; i++) {
      if (refinementPatterns.test(exchanges[i].userPrompt || '')) {
        refinementCount++;
      }
    }

    // Duration buckets
    if (durationMinutes !== undefined && durationMinutes !== null) {
      if (durationMinutes < 5) shortSessions++;
      else if (durationMinutes <= 60) mediumSessions++;
      else longSessions++;

      if (durationMinutes >= 10 && durationMinutes <= 45) focusedSessions++;
    }
  }

  const sessionsWithExchanges = sessions.filter(s => s.exchanges.length > 0).length;

  // Score components
  const contextRatio = sessionsWithExchanges > 0 ? withContext / sessionsWithExchanges : 0;
  const contextScore = Math.min(100, contextRatio * 170);

  const planRatio = sessionsWithExchanges > 0 ? withPlan / sessionsWithExchanges : 0;
  const planScore = Math.min(100, planRatio * 300);

  const reviewRatio = sessionsWithExchanges > 0 ? withReview / sessionsWithExchanges : 0;
  const reviewScore = Math.min(100, reviewRatio * 200);

  // Refinement shows critical thinking
  const refinementRatio = totalExchanges > 0 ? refinementCount / totalExchanges : 0;
  const refinementScore = Math.min(100, refinementRatio * 400);

  // Duration focus (medium/focused sessions are ideal)
  const totalWithDuration = shortSessions + mediumSessions + longSessions;
  const focusedRatio = totalWithDuration > 0 ? focusedSessions / totalWithDuration : 0.5;
  const focusScore = Math.min(100, focusedRatio * 170);

  // Average first prompt length (longer = more thoughtful setup)
  const avgFirstPromptLength = sessionsWithExchanges > 0
    ? firstPromptTotalLength / sessionsWithExchanges
    : 0;
  const firstPromptScore = avgFirstPromptLength > 300 ? 90 :
                           avgFirstPromptLength > 150 ? 75 :
                           avgFirstPromptLength > 50 ? 55 : 35;

  const score = Math.round(
    contextScore * 0.2 +
    planScore * 0.15 +
    reviewScore * 0.15 +
    refinementScore * 0.2 +
    focusScore * 0.15 +
    firstPromptScore * 0.15
  );

  return {
    score: Math.max(0, Math.min(100, score)),
    details: {
      contextSetRatio: Math.round(contextRatio * 100),
      planBeforeCodeRatio: Math.round(planRatio * 100),
      reviewEndRatio: Math.round(reviewRatio * 100),
      refinementRatio: Math.round(refinementRatio * 100),
      avgFirstPromptLength: Math.round(avgFirstPromptLength),
      durationDistribution: {
        short: shortSessions,
        medium: mediumSessions,
        long: longSessions,
        focused: focusedSessions,
      },
    },
  };
}
@@ -0,0 +1,204 @@
1
+ import { readFileSync, readdirSync, statSync } from 'fs';
2
+ import { join } from 'path';
3
+
4
+ /**
5
+ * Parse Claude Code JSONL session files into normalized format.
6
+ *
7
+ * Each JSONL line is one of:
8
+ * - type: "user" → human prompt
9
+ * - type: "assistant" → AI response (may contain text, thinking, tool_use)
10
+ * - type: "summary" → session summary (skip)
11
+ * - type: "file-history-snapshot" → file state (skip)
12
+ *
13
+ * We normalize into sessions, each with turns.
14
+ */
15
+
16
/**
 * Safely parse one line of a JSONL file.
 * @param {string} line - Raw line text.
 * @returns {object|null} The parsed value, or null when the line is not valid JSON.
 */
function parseJsonlLine(line) {
  let parsed = null;
  try {
    parsed = JSON.parse(line);
  } catch {
    // Malformed lines are tolerated; callers skip null entries.
  }
  return parsed;
}
23
+
24
/**
 * Collect tool_use blocks from an assistant message's content array.
 * @param {Array|*} content - Message content (array of typed blocks, or anything else).
 * @returns {Array<{tool: string, input: object}>} Normalized tool calls; empty when content is not an array.
 */
function extractToolCalls(content) {
  if (!Array.isArray(content)) return [];
  const calls = [];
  for (const block of content) {
    if (block.type !== 'tool_use') continue;
    calls.push({
      // Fall back to the input description, then a sentinel, when the block is unnamed.
      tool: block.name || block.input?.description || 'unknown',
      input: block.input || {},
    });
  }
  return calls;
}
33
+
34
/**
 * Flatten message content into plain text.
 * String content passes through unchanged; array content yields the
 * newline-joined text of its `text` blocks; anything else yields ''.
 * @param {string|Array|*} content - Message content.
 * @returns {string} Concatenated text.
 */
function extractTextContent(content) {
  if (typeof content === 'string') return content;
  if (!Array.isArray(content)) return '';
  const parts = [];
  for (const block of content) {
    if (block.type === 'text') parts.push(block.text || '');
  }
  return parts.join('\n');
}
42
+
43
/**
 * Concatenate the `thinking` blocks of an assistant message's content array.
 * @param {Array|*} content - Message content.
 * @returns {string} Newline-joined thinking text; '' when content is not an array.
 */
function extractThinking(content) {
  if (!Array.isArray(content)) return '';
  const parts = [];
  for (const block of content) {
    if (block.type === 'thinking') parts.push(block.thinking || '');
  }
  return parts.join('\n');
}
50
+
51
/**
 * Report whether message content contains at least one tool_result block
 * (used to detect system-injected "user" messages that answer tool calls).
 * @param {Array|*} content - Message content.
 * @returns {boolean}
 */
function hasToolResults(content) {
  if (!Array.isArray(content)) return false;
  for (const block of content) {
    if (block.type === 'tool_result') return true;
  }
  return false;
}
55
+
56
+ /**
57
+ * Parse a single JSONL file into a list of turns.
58
+ */
59
/**
 * Parse a single Claude Code JSONL session file into an ordered list of turns.
 * Unreadable files yield an empty list; malformed or non-conversation lines
 * are skipped silently.
 * @param {string} filePath - Path to the .jsonl session file.
 * @returns {Array<object>} Turns with role, text, thinking, toolCalls, timestamp, uuid, parentUuid, model.
 */
function parseSessionFile(filePath) {
  let contents;
  try {
    contents = readFileSync(filePath, 'utf-8');
  } catch {
    // Unreadable file (permissions, race with deletion) — treat as no session data.
    return [];
  }

  const turns = [];

  for (const rawLine of contents.split('\n')) {
    if (!rawLine.trim()) continue;

    const entry = parseJsonlLine(rawLine);
    if (!entry) continue;

    // Record types that carry no conversation data.
    if (entry.type === 'summary' || entry.type === 'file-history-snapshot') continue;
    if (!entry.message) continue;

    const role = entry.message.role || entry.type;
    const isUser = role === 'user';
    const isAssistant = role === 'assistant';
    if (!isUser && !isAssistant) continue;

    // "user" messages holding tool_result blocks are system-injected
    // responses to tool calls, not human prompts.
    if (isUser && hasToolResults(entry.message.content)) continue;

    const text = extractTextContent(entry.message.content);
    const thinking = isAssistant ? extractThinking(entry.message.content) : '';
    const toolCalls = isAssistant ? extractToolCalls(entry.message.content) : [];

    // Assistant entries with no text, no thinking, and no tool calls are
    // tool-call continuation noise — drop them.
    if (isAssistant && !text && !thinking && toolCalls.length === 0) continue;

    turns.push({
      role,
      text,
      thinking,
      toolCalls,
      timestamp: entry.timestamp || null,
      uuid: entry.uuid || null,
      parentUuid: entry.parentUuid || null,
      model: entry.message.model || null,
    });
  }

  return turns;
}
105
+
106
+ /**
107
+ * Group consecutive turns into logical conversation exchanges.
108
+ * A "exchange" is one user prompt followed by all assistant responses until the next user prompt.
109
+ */
110
/**
 * Group an ordered turn list into exchanges: one user prompt plus every
 * assistant turn that follows it, up to (not including) the next user prompt.
 * Assistant turns appearing before any user prompt are discarded.
 * @param {Array<object>} turns - Turns from parseSessionFile, in file order.
 * @returns {Array<object>} Exchanges with userPrompt, userTimestamp, assistantResponses, toolCalls, thinkingContent.
 */
function groupIntoExchanges(turns) {
  const exchanges = [];
  let open = null;

  const flush = () => {
    if (open) exchanges.push(open);
    open = null;
  };

  for (const turn of turns) {
    if (turn.role === 'user') {
      flush();
      open = {
        userPrompt: turn.text,
        userTimestamp: turn.timestamp,
        assistantResponses: [],
        toolCalls: [],
        thinkingContent: [],
      };
      continue;
    }
    if (turn.role !== 'assistant' || !open) continue;
    if (turn.text) open.assistantResponses.push(turn.text);
    if (turn.thinking) open.thinkingContent.push(turn.thinking);
    for (const call of turn.toolCalls) open.toolCalls.push(call);
  }

  flush();
  return exchanges;
}
134
+
135
+ /**
136
+ * Parse all Claude Code sessions from a project directory.
137
+ */
138
/**
 * Parse all Claude Code sessions found in one project directory.
 * Agent transcript files (agent-*.jsonl) are excluded, and sessions with no
 * parsable turns or exchanges are dropped.
 * @param {string} projectPath - Directory containing *.jsonl session files.
 * @returns {Array<object>} Session summaries: id, file, exchanges, counts, time range, duration.
 */
export function parseProject(projectPath) {
  const files = readdirSync(projectPath).filter(f =>
    f.endsWith('.jsonl') && !f.startsWith('agent-')
  );

  const sessions = [];

  for (const file of files) {
    const filePath = join(projectPath, file);

    const turns = parseSessionFile(filePath);
    if (turns.length === 0) continue;

    const exchanges = groupIntoExchanges(turns);
    if (exchanges.length === 0) continue;

    // Session time range. Sort numerically (Array#sort's default comparator
    // is lexicographic, which misorders numbers of differing magnitude) and
    // drop timestamps that fail to parse (NaN from new Date(...)).
    const timestamps = turns
      .map(t => t.timestamp)
      .filter(Boolean)
      .map(t => new Date(t).getTime())
      .filter(Number.isFinite)
      .sort((a, b) => a - b);

    const first = timestamps.length > 0 ? timestamps[0] : null;
    const last = timestamps.length > 0 ? timestamps[timestamps.length - 1] : null;

    sessions.push({
      id: file.replace('.jsonl', ''),
      file,
      exchanges,
      turnCount: turns.length,
      exchangeCount: exchanges.length,
      startTime: first !== null ? new Date(first).toISOString() : null,
      endTime: last !== null ? new Date(last).toISOString() : null,
      durationMinutes: timestamps.length >= 2
        ? Math.round((last - first) / 60000)
        : 0,
    });
  }

  return sessions;
}
178
+
179
+ /**
180
+ * Parse all Claude Code projects.
181
+ */
182
/**
 * Parse sessions from every project directory under the Claude Code base path.
 * Each session is tagged with a decoded project label (dashes → slashes,
 * leading slash stripped) as `session.project`.
 * @param {string} basePath - Root directory holding one subdirectory per project.
 * @returns {Array<object>} All sessions across all projects.
 */
export function parseAllProjects(basePath) {
  const isDirectory = (name) => {
    try {
      return statSync(join(basePath, name)).isDirectory();
    } catch {
      return false;
    }
  };

  const allSessions = [];

  for (const entry of readdirSync(basePath)) {
    if (!isDirectory(entry)) continue;

    // Claude Code encodes the project path with dashes; decode to slashes
    // and strip the leading separator.
    const label = entry.replace(/-/g, '/').replace(/^\//, '');

    for (const session of parseProject(join(basePath, entry))) {
      session.project = label;
      allSessions.push(session);
    }
  }

  return allSessions;
}
package/src/scorer.js ADDED
@@ -0,0 +1,109 @@
1
+ /**
2
+ * Combine individual metric scores into composite scores and assign archetype.
3
+ */
4
+
5
/**
 * Archetype catalog. The scorer sorts by ascending `priority` and assigns the
 * first archetype whose `match` predicate accepts the score set; THE_NEWCOMER
 * (priority 99, always-true match) is the catch-all fallback.
 * Predicates receive { decomposition, debugCycles, aiLeverage, sessionStructure }.
 */
const ARCHETYPES = [
  {
    id: 'THE_ARCHITECT',
    name: 'THE ARCHITECT',
    priority: 1,
    // Strong decomposition combined with solid planning and AI leverage.
    match: (s) => s.decomposition >= 70 && s.sessionStructure >= 65 && s.aiLeverage >= 65,
  },
  {
    id: 'THE_SURGEON',
    name: 'THE SURGEON',
    priority: 2,
    // Excellent debug efficiency backed by good AI leverage.
    match: (s) => s.debugCycles >= 75 && s.aiLeverage >= 60,
  },
  {
    id: 'THE_BUILDER',
    name: 'THE BUILDER',
    priority: 3,
    // Well-rounded: high average with no weak pillar.
    match: (s) => {
      const pillars = [s.decomposition, s.debugCycles, s.aiLeverage, s.sessionStructure];
      const mean = pillars.reduce((sum, v) => sum + v, 0) / pillars.length;
      return mean >= 65 && Math.min(...pillars) >= 55;
    },
  },
  {
    id: 'THE_EXPLORER',
    name: 'THE EXPLORER',
    priority: 4,
    // High leverage plus high decomposition — loves research.
    match: (s) => s.aiLeverage >= 70 && s.decomposition >= 60,
  },
  {
    id: 'THE_SPEEDRUNNER',
    name: 'THE SPEEDRUNNER',
    priority: 5,
    // Quick debug cycles, less structured but efficient.
    match: (s) => s.debugCycles >= 70 && s.sessionStructure < 60,
  },
  {
    id: 'THE_APPRENTICE',
    name: 'THE APPRENTICE',
    priority: 6,
    // Middling average — learning patterns with growth potential.
    match: (s) => {
      const mean = (s.decomposition + s.debugCycles + s.aiLeverage + s.sessionStructure) / 4;
      return mean >= 40 && mean < 65;
    },
  },
  {
    id: 'THE_MAVERICK',
    name: 'THE MAVERICK',
    priority: 7,
    // Low structure but decent decomposition or leverage — gets things done anyway.
    match: (s) => s.sessionStructure < 50 && (s.decomposition >= 55 || s.aiLeverage >= 55),
  },
  {
    id: 'THE_NEWCOMER',
    name: 'THE NEWCOMER',
    priority: 99,
    // Default fallback; always matches.
    match: () => true,
  },
];
69
+
70
/** Rarity tiers, highest threshold first; the first tier whose `min` the overall score meets wins. */
const TIERS = [
  ['LEGENDARY', 90],
  ['RARE', 75],
  ['UNCOMMON', 55],
  ['COMMON', 0],
].map(([name, min]) => ({ name, min }));
76
+
77
/**
 * Combine the four metric scores into an overall score, archetype, and tier.
 * @param {object} metrics - decomposition/debugCycles/aiLeverage/sessionStructure, each with a numeric `.score`.
 * @returns {{overall: number, scores: object, archetype: {id: string, name: string}, tier: string}}
 */
export function computeOverallScore(metrics) {
  const scores = {
    decomposition: metrics.decomposition.score,
    debugCycles: metrics.debugCycles.score,
    aiLeverage: metrics.aiLeverage.score,
    sessionStructure: metrics.sessionStructure.score,
  };

  // Weighted blend; weights sum to 1.0 with AI leverage counted heaviest.
  const weights = {
    decomposition: 0.25,
    debugCycles: 0.25,
    aiLeverage: 0.3,
    sessionStructure: 0.2,
  };
  const overall = Math.round(
    Object.entries(weights).reduce((sum, [key, w]) => sum + scores[key] * w, 0)
  );

  // First matching archetype in priority order; THE_NEWCOMER always matches,
  // so the || fallback is a belt-and-suspenders default.
  const ranked = [...ARCHETYPES].sort((a, b) => a.priority - b.priority);
  const archetype = ranked.find(a => a.match(scores)) || ARCHETYPES[ARCHETYPES.length - 1];

  // First tier whose threshold the overall score meets.
  const tier = TIERS.find(t => overall >= t.min) || TIERS[TIERS.length - 1];

  return {
    overall,
    scores,
    archetype: {
      id: archetype.id,
      name: archetype.name,
    },
    tier: tier.name,
  };
}
package/src/upload.js ADDED
@@ -0,0 +1,100 @@
1
+ import { createInterface } from 'readline';
2
+
3
+ const API_BASE = 'https://chekk-production.up.railway.app/api/v1';
4
+
5
+ /**
6
+ * Call the Chekk API to generate personalized prose from metrics.
7
+ */
8
/**
 * Call the Chekk API to turn raw metrics into personalized prose.
 * Only aggregate scores and details are sent — never raw prompt text.
 * @param {object} metrics - Per-metric { score, details } objects.
 * @param {object} result - Output of computeOverallScore.
 * @param {object} sessionStats - Aggregate session statistics.
 * @returns {Promise<object>} Parsed JSON body of the API response.
 * @throws {Error} When the API responds with a non-2xx status.
 */
export async function generateProse(metrics, result, sessionStats) {
  const metricKeys = ['decomposition', 'debugCycles', 'aiLeverage', 'sessionStructure'];
  const metricPayload = Object.fromEntries(
    metricKeys.map(key => [key, {
      score: metrics[key].score,
      details: metrics[key].details,
    }])
  );

  const payload = {
    metrics: metricPayload,
    result: {
      overall: result.overall,
      scores: result.scores,
      archetype: result.archetype,
      tier: result.tier,
    },
    sessionStats,
  };

  const response = await fetch(`${API_BASE}/public/cli/analyze`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`API returned ${response.status}`);
  }

  return response.json();
}
49
+
50
+ /**
51
+ * Ask user if they want to claim their profile.
52
+ */
53
/**
 * Ask the user (y/n) whether to push their score to chekk.dev.
 * @returns {Promise<boolean>} true when the answer starts with "y" (case-insensitive).
 */
export function askClaim() {
  return new Promise((resolve) => {
    const rl = createInterface({
      input: process.stdin,
      output: process.stdout,
    });

    const prompt = ' Push your score to chekk.dev? Your raw prompts never leave your machine. (y/n) ';
    rl.question(prompt, (answer) => {
      rl.close();
      resolve(answer.toLowerCase().startsWith('y'));
    });
  });
}
66
+
67
+ /**
68
+ * Upload score and get a claim URL.
69
+ */
70
/**
 * Upload the computed score and prose to the Chekk API and return the claim
 * response (expected to contain the claim URL).
 * Only numeric per-metric scores are uploaded — no per-session details.
 * @param {object} metrics - Per-metric { score } objects.
 * @param {object} result - Output of computeOverallScore.
 * @param {object} sessionStats - Aggregate session statistics (including `.tools`).
 * @param {object} prose - Generated prose from generateProse.
 * @returns {Promise<object>} Parsed JSON body of the claim response.
 * @throws {Error} When the claim endpoint responds with a non-2xx status.
 */
export async function uploadAndClaim(metrics, result, sessionStats, prose) {
  const { decomposition, debugCycles, aiLeverage, sessionStructure } = metrics;
  const { overall, archetype, tier } = result;

  const payload = {
    metrics: {
      decomposition: decomposition.score,
      debugCycles: debugCycles.score,
      aiLeverage: aiLeverage.score,
      sessionStructure: sessionStructure.score,
    },
    result: { overall, archetype, tier },
    prose,
    sessionStats,
    tools: sessionStats.tools,
    claimedAt: new Date().toISOString(),
  };

  const response = await fetch(`${API_BASE}/public/cli/claim`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    throw new Error(`Claim API returned ${response.status}`);
  }

  return response.json();
}