@iservu-inc/adf-cli 0.1.5 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.project/chats/current/2025-10-03_ADF-CLI-QUALITY-BASED-PROGRESS-AND-RESUME.md +399 -0
- package/.project/docs/architecture/SYSTEM-DESIGN.md +369 -0
- package/.project/docs/frameworks/FRAMEWORK-METHODOLOGIES.md +449 -0
- package/.project/docs/goals/PROJECT-VISION.md +112 -0
- package/.project/docs/tool-integrations/IDE-CUSTOMIZATIONS.md +578 -0
- package/CHANGELOG.md +253 -0
- package/README.md +50 -11
- package/jest.config.js +20 -0
- package/lib/commands/init.js +41 -111
- package/lib/frameworks/answer-quality-analyzer.js +216 -0
- package/lib/frameworks/interviewer.js +447 -0
- package/lib/frameworks/output-generators.js +345 -0
- package/lib/frameworks/progress-tracker.js +239 -0
- package/lib/frameworks/questions.js +664 -0
- package/lib/frameworks/session-manager.js +100 -0
- package/package.json +10 -5
- package/test-scenarios.sh +134 -0
- package/tests/answer-quality-analyzer.test.js +173 -0
- package/tests/progress-tracker.test.js +205 -0
- package/tests/session-manager.test.js +162 -0
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
const fs = require('fs-extra');
const path = require('path');
const chalk = require('chalk');
const inquirer = require('inquirer');

/**
 * Session Manager
 * Manages interview sessions - list, resume, delete
 *
 * Sessions are stored under `<projectPath>/.adf/sessions/<sessionId>/`;
 * a directory entry is treated as a session only if it contains a
 * `_progress.json` file.
 */
class SessionManager {
  /**
   * @param {string} projectPath - Root of the project the sessions belong to.
   */
  constructor(projectPath) {
    this.projectPath = projectPath;
    this.sessionsDir = path.join(projectPath, '.adf', 'sessions');
  }

  /**
   * List every session that has a readable `_progress.json`.
   * A corrupt or unreadable progress file no longer aborts the whole
   * listing: that session is skipped and the rest are still returned.
   * @returns {Promise<Array<{sessionId: string, sessionPath: string, progress: Object}>>}
   */
  async listSessions() {
    await fs.ensureDir(this.sessionsDir);
    const sessions = await fs.readdir(this.sessionsDir);

    const sessionDetails = [];

    for (const sessionId of sessions) {
      const sessionPath = path.join(this.sessionsDir, sessionId);
      const progressFile = path.join(sessionPath, '_progress.json');

      if (await fs.pathExists(progressFile)) {
        try {
          const progress = await fs.readJson(progressFile);
          sessionDetails.push({
            sessionId,
            sessionPath,
            progress
          });
        } catch (err) {
          // Corrupt _progress.json (e.g. interrupted write): skip this
          // session rather than crashing every command that lists sessions.
          // The ProgressTracker keeps a backup copy for recovery.
        }
      }
    }

    return sessionDetails;
  }

  /**
   * Sessions that are still in progress and flagged as resumable.
   * @returns {Promise<Array>} Subset of listSessions().
   */
  async getResumableSessions() {
    const sessions = await this.listSessions();
    return sessions.filter(s => s.progress.status === 'in-progress' && s.progress.canResume);
  }

  /**
   * Interactively offer to resume a previous session.
   * @returns {Promise<Object|null>} The chosen session object, or null when
   *   there is nothing to resume or the user chose to start a new interview.
   */
  async promptToResume() {
    const resumableSessions = await this.getResumableSessions();

    if (resumableSessions.length === 0) {
      return null; // No sessions to resume
    }

    console.log(chalk.cyan.bold('\nš Found Previous Interview Sessions\n'));

    const choices = resumableSessions.map(s => {
      // Session ids end with `_<framework>` by convention.
      const framework = s.sessionId.split('_').pop();
      const date = new Date(s.progress.lastUpdated).toLocaleString();
      // Guard against missing fields / zero totalBlocks in older or
      // partially written progress files (would otherwise render "NaN%").
      const totalBlocks = s.progress.totalBlocks || 0;
      const completedCount = (s.progress.completedBlocks || []).length;
      const completion = totalBlocks > 0
        ? Math.round((completedCount / totalBlocks) * 100)
        : 0;

      return {
        name: `${framework.toUpperCase()} | ${date} | ${completion}% complete (${s.progress.totalQuestionsAnswered} questions)`,
        value: s.sessionId,
        short: s.sessionId
      };
    });

    choices.push({
      name: chalk.gray('Start a new interview'),
      value: 'new',
      short: 'New interview'
    });

    const { choice } = await inquirer.prompt([
      {
        type: 'list',
        name: 'choice',
        message: 'Resume previous interview or start new?',
        choices
      }
    ]);

    if (choice === 'new') {
      return null;
    }

    const session = resumableSessions.find(s => s.sessionId === choice);
    return session;
  }

  /**
   * Delete one session directory (no-op if it does not exist).
   * @param {string} sessionId
   */
  async deleteSession(sessionId) {
    const sessionPath = path.join(this.sessionsDir, sessionId);
    await fs.remove(sessionPath);
  }

  /**
   * Delete every session but keep the (now empty) sessions directory.
   */
  async deleteAllSessions() {
    await fs.remove(this.sessionsDir);
    await fs.ensureDir(this.sessionsDir);
  }
}

module.exports = SessionManager;
|
package/package.json
CHANGED
|
@@ -1,13 +1,14 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@iservu-inc/adf-cli",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.2.0",
|
|
4
4
|
"description": "CLI tool for AgentDevFramework - AI-assisted development framework",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"bin": {
|
|
7
7
|
"adf": "bin/adf.js"
|
|
8
8
|
},
|
|
9
9
|
"scripts": {
|
|
10
|
-
"test": "
|
|
10
|
+
"test": "jest --coverage",
|
|
11
|
+
"test:watch": "jest --watch"
|
|
11
12
|
},
|
|
12
13
|
"keywords": [
|
|
13
14
|
"cli",
|
|
@@ -28,11 +29,11 @@
|
|
|
28
29
|
"author": "iServU",
|
|
29
30
|
"license": "MIT",
|
|
30
31
|
"dependencies": {
|
|
31
|
-
"inquirer": "^8.2.5",
|
|
32
32
|
"chalk": "^4.1.2",
|
|
33
|
-
"
|
|
33
|
+
"commander": "^11.1.0",
|
|
34
34
|
"fs-extra": "^11.2.0",
|
|
35
|
-
"
|
|
35
|
+
"inquirer": "^8.2.5",
|
|
36
|
+
"ora": "^5.4.1"
|
|
36
37
|
},
|
|
37
38
|
"engines": {
|
|
38
39
|
"node": ">=18.0.0",
|
|
@@ -48,5 +49,9 @@
|
|
|
48
49
|
"homepage": "https://github.com/iservu/adf-cli#readme",
|
|
49
50
|
"publishConfig": {
|
|
50
51
|
"access": "public"
|
|
52
|
+
},
|
|
53
|
+
"devDependencies": {
|
|
54
|
+
"@types/node": "^24.6.2",
|
|
55
|
+
"jest": "^30.2.0"
|
|
51
56
|
}
|
|
52
57
|
}
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
#!/bin/bash

# Comprehensive Test Script for @iservu-inc/adf-cli
# Tests all major scenarios and validates outputs
#
# NOTE: `set -e` is intentionally NOT used here. This harness is designed to
# record failures via test_result and print a summary at the end; `set -e`
# would abort on the first failing `adf` command before test_result could
# record it, and even aborted on the FIRST PASSING test because
# `((PASS_COUNT++))` evaluates to 0 (exit status 1) on the first increment.

echo "=================================="
echo "ADF CLI Comprehensive Test Suite"
echo "=================================="
echo ""

# Colors
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Scratch directory; can be overridden from the environment.
TEST_BASE_DIR="${TEST_BASE_DIR:-D:/Documents/GitHub/adf-cli-tests}"
PASS_COUNT=0
FAIL_COUNT=0

# Cleanup function: wipe and recreate the scratch directory.
cleanup() {
    rm -rf "$TEST_BASE_DIR"
    mkdir -p "$TEST_BASE_DIR"
}

# Test result function: $1 = exit status, $2 = description.
# Uses POSIX arithmetic assignment instead of ((VAR++)) so the increment
# never produces a non-zero exit status.
test_result() {
    if [ "$1" -eq 0 ]; then
        echo -e "${GREEN}✓ PASS${NC}: $2"
        PASS_COUNT=$((PASS_COUNT + 1))
    else
        echo -e "${RED}✗ FAIL${NC}: $2"
        FAIL_COUNT=$((FAIL_COUNT + 1))
    fi
}

# Setup
cleanup

echo "=== Test 1: Rapid Workflow with Tool ===="
cd "$TEST_BASE_DIR" || exit 1
mkdir test-rapid && cd test-rapid || exit 1
echo "# Test" > README.md
printf "n\nn\n" | adf init --rapid --tool windsurf > /dev/null 2>&1
test_result $? "Rapid workflow initialization"
[ -f ".adf/context.json" ] && test_result 0 "context.json created" || test_result 1 "context.json missing"
[ -f ".env.template" ] && test_result 0 ".env.template created" || test_result 1 ".env.template missing"
[ -f ".windsurfrules" ] && test_result 0 ".windsurfrules created" || test_result 1 ".windsurfrules missing"
[ -f ".framework/agents/dev.md" ] && test_result 0 "dev agent deployed" || test_result 1 "dev agent missing"
[ -f ".framework/agents/qa.md" ] && test_result 0 "qa agent deployed" || test_result 1 "qa agent missing"
WORKFLOW=$(cat .adf/context.json | grep -o '"workflow": "[^"]*"' | cut -d'"' -f4)
[ "$WORKFLOW" = "rapid" ] && test_result 0 "Workflow is rapid" || test_result 1 "Workflow incorrect: $WORKFLOW"
echo ""

echo "=== Test 2: Balanced Workflow ===="
cd "$TEST_BASE_DIR" || exit 1
mkdir test-balanced && cd test-balanced || exit 1
echo "# Test" > README.md
printf "n\nn\n" | adf init --balanced --tool cursor > /dev/null 2>&1
test_result $? "Balanced workflow initialization"
[ -f ".cursorrules" ] && test_result 0 ".cursorrules created" || test_result 1 ".cursorrules missing"
[ -f ".framework/agents/analyst.md" ] && test_result 0 "analyst agent deployed" || test_result 1 "analyst agent missing"
[ -f ".framework/agents/pm.md" ] && test_result 0 "pm agent deployed" || test_result 1 "pm agent missing"
WORKFLOW=$(cat .adf/context.json | grep -o '"workflow": "[^"]*"' | cut -d'"' -f4)
[ "$WORKFLOW" = "balanced" ] && test_result 0 "Workflow is balanced" || test_result 1 "Workflow incorrect: $WORKFLOW"
echo ""

echo "=== Test 3: Comprehensive Workflow ===="
cd "$TEST_BASE_DIR" || exit 1
mkdir test-comprehensive && cd test-comprehensive || exit 1
echo "# Test" > README.md
printf "n\nn\n" | adf init --comprehensive --tool vscode > /dev/null 2>&1
test_result $? "Comprehensive workflow initialization"
[ -d ".vscode" ] && test_result 0 ".vscode directory created" || test_result 1 ".vscode missing"
[ -f ".framework/agents/architect.md" ] && test_result 0 "architect agent deployed" || test_result 1 "architect agent missing"
[ -f ".framework/agents/sm.md" ] && test_result 0 "sm agent deployed" || test_result 1 "sm agent missing"
WORKFLOW=$(cat .adf/context.json | grep -o '"workflow": "[^"]*"' | cut -d'"' -f4)
[ "$WORKFLOW" = "comprehensive" ] && test_result 0 "Workflow is comprehensive" || test_result 1 "Workflow incorrect: $WORKFLOW"
echo ""

echo "=== Test 4: Deploy Command ===="
cd "$TEST_BASE_DIR/test-rapid" || exit 1
adf deploy cursor > /dev/null 2>&1
test_result $? "Deploy to cursor"
[ -f ".cursorrules" ] && test_result 0 "cursor config created" || test_result 1 "cursor config missing"
echo ""

echo "=== Test 5: Deploy List ===="
OUTPUT=$(adf deploy --list 2>&1)
echo "$OUTPUT" | grep -q "windsurf" && test_result 0 "windsurf in deploy list" || test_result 1 "windsurf not found"
echo "$OUTPUT" | grep -q "cursor" && test_result 0 "cursor in deploy list" || test_result 1 "cursor not found"
echo "$OUTPUT" | grep -q "claude-code" && test_result 0 "claude-code in deploy list" || test_result 1 "claude-code not found"
echo ""

echo "=== Test 6: Version Check ===="
VERSION=$(adf --version 2>&1)
test_result $? "Version command works"
echo "Current version: $VERSION"
echo ""

echo "=== Test 7: Update Check ===="
adf update --check > /dev/null 2>&1
test_result $? "Update check command"
echo ""

echo "=== Test 8: Context.json Structure ===="
cd "$TEST_BASE_DIR/test-rapid" || exit 1
cat .adf/context.json | grep -q '"version"' && test_result 0 "version field exists" || test_result 1 "version field missing"
cat .adf/context.json | grep -q '"workflow"' && test_result 0 "workflow field exists" || test_result 1 "workflow field missing"
cat .adf/context.json | grep -q '"documentationUrls"' && test_result 0 "documentationUrls field exists" || test_result 1 "documentationUrls field missing"
cat .adf/context.json | grep -q '"documentationFiles"' && test_result 0 "documentationFiles field exists" || test_result 1 "documentationFiles field missing"
cat .adf/context.json | grep -q '"agents"' && test_result 0 "agents field exists" || test_result 1 "agents field missing"
echo ""

echo "==================================="
echo "Test Summary"
echo "==================================="
echo -e "${GREEN}Passed:${NC} $PASS_COUNT"
echo -e "${RED}Failed:${NC} $FAIL_COUNT"
echo ""

# Cleanup
cleanup

if [ "$FAIL_COUNT" -eq 0 ]; then
    echo -e "${GREEN}All tests passed!${NC}"
    exit 0
else
    echo -e "${RED}Some tests failed${NC}"
    exit 1
fi
|
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
// Unit tests for AnswerQualityAnalyzer (lib/frameworks/answer-quality-analyzer).
// NOTE(review): several expectations pin exact score thresholds that depend on
// the precise byte content of the answer strings below — do not reflow or
// re-space those strings when editing.
const AnswerQualityAnalyzer = require('../lib/frameworks/answer-quality-analyzer');

describe('AnswerQualityAnalyzer', () => {
  // End-to-end scoring of a full answer against a question's keywords and
  // required elements.
  describe('analyze', () => {
    it('should score high-quality comprehensive answer highly', () => {
      const question = {
        keywords: ['react', 'typescript', 'web'],
        requiredElements: ['platform', 'technology']
      };

      // Long, detailed answer covering platform, tech stack, features,
      // file structure, and components — expected to score > 80.
      const answer = 'I am building a React 18 web dashboard using TypeScript and Node.js. ' +
        'The platform is web-based, responsive design for desktop and mobile. ' +
        'It will display real-time analytics data fetched from a PostgreSQL database. ' +
        'Users can filter data by date range, export to CSV, and create custom views. ' +
        'The tech stack includes Next.js 14 for the frontend, Express.js for the API, ' +
        'and we will use Chart.js for data visualization. File structure will be ' +
        'src/components/, src/api/, src/utils/. The main components are Dashboard, ' +
        'DataTable, ChartView, and FilterPanel.';

      const metrics = AnswerQualityAnalyzer.analyze(answer, question);

      expect(metrics.qualityScore).toBeGreaterThan(80);
      expect(metrics.isComprehensive).toBe(true);
      expect(metrics.wordCount).toBeGreaterThan(50);
      expect(metrics.hasKeywords.matched).toContain('react');
      expect(metrics.hasKeywords.matched).toContain('typescript');
      expect(metrics.hasRequiredElements.detected).toContain('platform');
      expect(metrics.hasRequiredElements.detected).toContain('technology');
    });

    it('should score low-quality vague answer poorly', () => {
      const question = {
        keywords: ['react', 'typescript', 'nextjs'],
        requiredElements: ['platform', 'technology']
      };

      // Two-word answer: should fail every quality signal.
      const answer = 'An app';

      const metrics = AnswerQualityAnalyzer.analyze(answer, question);

      expect(metrics.qualityScore).toBeLessThan(30);
      expect(metrics.isComprehensive).toBe(false);
      expect(metrics.wordCount).toBeLessThan(5);
      expect(metrics.hasKeywords.matched.length).toBe(0);
    });

    it('should detect bullet points and examples', () => {
      const question = { keywords: [], requiredElements: [] };

      // "For example" phrasing should trip the hasExamples detector.
      const answer = 'The features include user authentication, dashboard, and export. ' +
        'This allows for better security. ' +
        'For example, users can click the export button to download CSV files.';

      const metrics = AnswerQualityAnalyzer.analyze(answer, question);

      expect(metrics.isDetailed.hasMultipleSentences).toBe(true);
      expect(metrics.isDetailed.hasExamples).toBe(true);
    });

    it('should detect technical depth', () => {
      const question = { keywords: [], requiredElements: [] };

      // Version numbers + named tools should set all technical-depth flags.
      const answer = 'Using React 18.2.0 with TypeScript. Will deploy to AWS using Docker containers. ' +
        'Database is PostgreSQL 15. API endpoints follow REST conventions.';

      const metrics = AnswerQualityAnalyzer.analyze(answer, question);

      expect(metrics.hasTechnicalDepth.hasTechStack).toBe(true);
      expect(metrics.hasTechnicalDepth.hasVersions).toBe(true);
      expect(metrics.hasTechnicalDepth.hasSpecificTools).toBe(true);
    });

    it('should allow skipping follow-ups for excellent answers', () => {
      const question = {
        keywords: ['react', 'web', 'api'],
        requiredElements: ['platform', 'technology', 'user-interaction']
      };

      // Hits all keywords and required elements with concrete detail —
      // should cross the canSkipFollowUps threshold (score >= 85).
      const answer = 'Building a React web application with TypeScript and Next.js 14. ' +
        'The platform is web-based, responsive design. Users can click buttons to submit forms, ' +
        'search and filter data, and view real-time updates. Technology stack includes React 18, ' +
        'TypeScript 5, Node.js 20, PostgreSQL 15, and Docker for deployment. ' +
        'API endpoints: GET /api/users, POST /api/users, PUT /api/users/:id. ' +
        'File structure: src/components/, src/pages/, src/api/, src/utils/. ' +
        'Main components: UserList, UserForm, SearchBar, FilterPanel.';

      const metrics = AnswerQualityAnalyzer.analyze(answer, question);

      expect(metrics.canSkipFollowUps).toBe(true);
      expect(metrics.qualityScore).toBeGreaterThanOrEqual(85);
    });
  });

  // Feedback message selection by score band: >=90 excellent, >=70 good,
  // below that no feedback (null).
  describe('getFeedback', () => {
    it('should return excellent feedback for score >= 90', () => {
      const metrics = { qualityScore: 95 };
      const feedback = AnswerQualityAnalyzer.getFeedback(metrics);
      expect(feedback).toContain('Excellent');
    });

    it('should return good feedback for score >= 70', () => {
      const metrics = { qualityScore: 75 };
      const feedback = AnswerQualityAnalyzer.getFeedback(metrics);
      expect(feedback).toContain('Great');
    });

    it('should return null for low scores', () => {
      const metrics = { qualityScore: 45 };
      const feedback = AnswerQualityAnalyzer.getFeedback(metrics);
      expect(feedback).toBeNull();
    });
  });

  describe('getWordCount', () => {
    it('should count words correctly', () => {
      expect(AnswerQualityAnalyzer.getWordCount('Hello world')).toBe(2);
      expect(AnswerQualityAnalyzer.getWordCount('This is a test sentence.')).toBe(5);
      // Leading/trailing/multiple internal spaces must not inflate the count.
      expect(AnswerQualityAnalyzer.getWordCount('  Multiple   spaces   here  ')).toBe(3);
    });
  });

  describe('checkKeywords', () => {
    it('should match keywords case-insensitively', () => {
      // 'React'/'TypeScript' in the text match lowercase keyword entries;
      // 'node' is absent, so count is 2 of 3.
      const result = AnswerQualityAnalyzer.checkKeywords(
        'Using React and TypeScript',
        ['react', 'typescript', 'node']
      );

      expect(result.matched).toContain('react');
      expect(result.matched).toContain('typescript');
      expect(result.count).toBe(2);
      expect(result.total).toBe(3);
    });
  });

  // Each required element has its own detection heuristic; one test per kind.
  describe('checkRequiredElements', () => {
    it('should detect platform element', () => {
      const result = AnswerQualityAnalyzer.checkRequiredElements(
        'This is a web application for mobile users',
        ['platform']
      );

      expect(result.detected).toContain('platform');
    });

    it('should detect technology element', () => {
      const result = AnswerQualityAnalyzer.checkRequiredElements(
        'Built with React and Node.js',
        ['technology']
      );

      expect(result.detected).toContain('technology');
    });

    it('should detect API endpoints', () => {
      const result = AnswerQualityAnalyzer.checkRequiredElements(
        'API has GET /api/users and POST /api/users endpoints',
        ['api-endpoints']
      );

      expect(result.detected).toContain('api-endpoints');
    });

    it('should detect file paths', () => {
      const result = AnswerQualityAnalyzer.checkRequiredElements(
        'Files are in src/components/ and app/utils/helper.ts',
        ['file-paths']
      );

      expect(result.detected).toContain('file-paths');
    });
  });
});
|
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
// Unit tests for ProgressTracker (lib/frameworks/progress-tracker).
// Each test works against a throwaway session directory that is recreated
// before, and removed after, every test.
const fs = require('fs-extra');
const path = require('path');
const ProgressTracker = require('../lib/frameworks/progress-tracker');

// Scratch session directory next to the test file.
const TEST_SESSION_PATH = path.join(__dirname, 'test-session');

describe('ProgressTracker', () => {
  beforeEach(async () => {
    // Clean up test session directory
    await fs.remove(TEST_SESSION_PATH);
    await fs.ensureDir(TEST_SESSION_PATH);
  });

  afterEach(async () => {
    // Clean up after tests
    await fs.remove(TEST_SESSION_PATH);
  });

  describe('initialize', () => {
    it('should create new progress file for new session', async () => {
      // Fresh directory: initialize() should report "not resumable" and
      // write both the JSON progress file and the markdown log.
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      const isResumable = await tracker.initialize();

      expect(isResumable).toBe(false);
      expect(await fs.pathExists(path.join(TEST_SESSION_PATH, '_progress.json'))).toBe(true);
      expect(await fs.pathExists(path.join(TEST_SESSION_PATH, '_progress-log.md'))).toBe(true);
    });

    it('should load existing progress for resumable session', async () => {
      // Create initial session
      const tracker1 = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker1.initialize();
      await tracker1.startBlock(1, 'Test Block');

      // Resume session: a second tracker over the same directory should
      // detect the saved state and pick up the current block.
      const tracker2 = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      const isResumable = await tracker2.initialize();

      expect(isResumable).toBe(true);
      expect(tracker2.getProgress().currentBlock).toBe(1);
    });
  });

  describe('answerQuestion', () => {
    it('should save answer with quality metrics', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      const qualityMetrics = {
        wordCount: 50,
        qualityScore: 85,
        isComprehensive: true,
        hasKeywords: { matched: ['react', 'web'], count: 2 },
        hasRequiredElements: { detected: ['platform'], count: 1 }
      };

      await tracker.answerQuestion('q1', 'What are you building?', 'A React web app', qualityMetrics);

      // Answer text, per-answer quality, and the running aggregates
      // (word count, comprehensive-answer count) must all be recorded.
      const progress = tracker.getProgress();
      expect(progress.answers['q1']).toBeDefined();
      expect(progress.answers['q1'].text).toBe('A React web app');
      expect(progress.answers['q1'].quality.qualityScore).toBe(85);
      expect(progress.totalWordCount).toBe(50);
      expect(progress.comprehensiveAnswers).toBe(1);
    });

    it('should calculate average answer quality correctly', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      await tracker.answerQuestion('q1', 'Q1', 'Answer 1', { qualityScore: 80, wordCount: 20, isComprehensive: true });
      await tracker.answerQuestion('q2', 'Q2', 'Answer 2', { qualityScore: 90, wordCount: 30, isComprehensive: true });
      await tracker.answerQuestion('q3', 'Q3', 'Answer 3', { qualityScore: 70, wordCount: 25, isComprehensive: true });

      const progress = tracker.getProgress();
      expect(progress.averageAnswerQuality).toBe(80); // (80+90+70)/3 = 80
    });

    it('should calculate information richness based on quality and completion', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      // Complete 2 of 5 blocks (40% completion)
      await tracker.completeBlock(1, 'Block 1', 3);
      await tracker.completeBlock(2, 'Block 2', 2);

      // Add high-quality answers (90% quality)
      await tracker.answerQuestion('q1', 'Q1', 'A1', { qualityScore: 90, wordCount: 50, isComprehensive: true });
      await tracker.answerQuestion('q2', 'Q2', 'A2', { qualityScore: 90, wordCount: 50, isComprehensive: true });

      const progress = tracker.getProgress();
      // informationRichness = (0.4 * completionFactor) + (0.6 * qualityFactor)
      // = (0.4 * 0.4) + (0.6 * 0.9) = 0.16 + 0.54 = 0.70 = 70%
      expect(progress.informationRichness).toBeCloseTo(70, 0);
    });
  });

  describe('saveWithBackup', () => {
    it('should create triple-redundant saves', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      await tracker.answerQuestion('q1', 'Q1', 'Answer', { qualityScore: 75, wordCount: 20, isComprehensive: true });

      // Check all three save locations exist
      expect(await fs.pathExists(path.join(TEST_SESSION_PATH, '_progress.json'))).toBe(true);
      expect(await fs.pathExists(path.join(TEST_SESSION_PATH, '_progress.backup.json'))).toBe(true);
      expect(await fs.pathExists(path.join(TEST_SESSION_PATH, '_progress-log.md'))).toBe(true);

      // Verify main and backup have same content
      const main = await fs.readJson(path.join(TEST_SESSION_PATH, '_progress.json'));
      const backup = await fs.readJson(path.join(TEST_SESSION_PATH, '_progress.backup.json'));
      expect(main).toEqual(backup);
    });

    it('should create emergency save on error', async () => {
      // NOTE(review): relies on chmod making the file unwritable, which is
      // not enforced for the owner on Windows (and for root on POSIX) —
      // this test may be environment-sensitive; confirm in CI.
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      // Make main file read-only to simulate save error
      const mainFile = path.join(TEST_SESSION_PATH, '_progress.json');
      await fs.chmod(mainFile, 0o444);

      try {
        await tracker.answerQuestion('q1', 'Q1', 'Answer', { qualityScore: 75, wordCount: 20, isComprehensive: true });
      } catch (error) {
        // May throw, but should still create emergency file
      }

      // Check for emergency file
      const files = await fs.readdir(TEST_SESSION_PATH);
      const emergencyFiles = files.filter(f => f.startsWith('_emergency-'));

      // Restore permissions for cleanup
      await fs.chmod(mainFile, 0o666);

      // Emergency file should exist if save failed
      expect(emergencyFiles.length).toBeGreaterThan(0);
    });
  });

  describe('block tracking', () => {
    it('should track block start, complete, and skip', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      await tracker.startBlock(1, 'Block 1');
      await tracker.completeBlock(1, 'Block 1', 3);
      await tracker.skipBlock(2, 'Block 2');

      // Completed and skipped blocks are tracked separately, each with
      // the block number; completed blocks also record questionsAnswered.
      const progress = tracker.getProgress();
      expect(progress.currentBlock).toBe(1);
      expect(progress.completedBlocks.length).toBe(1);
      expect(progress.completedBlocks[0].number).toBe(1);
      expect(progress.completedBlocks[0].questionsAnswered).toBe(3);
      expect(progress.skippedBlocks.length).toBe(1);
      expect(progress.skippedBlocks[0].number).toBe(2);
    });
  });

  describe('complete', () => {
    it('should mark session as completed', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      await tracker.complete();

      // A completed session records a timestamp and is no longer resumable.
      const progress = tracker.getProgress();
      expect(progress.status).toBe('completed');
      expect(progress.completedAt).toBeDefined();
      expect(progress.canResume).toBe(false);
    });
  });

  describe('canResume', () => {
    it('should return true for in-progress sessions', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();

      expect(tracker.canResume()).toBe(true);
    });

    it('should return false for completed sessions', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();
      await tracker.complete();

      expect(tracker.canResume()).toBe(false);
    });
  });

  describe('getResumeInfo', () => {
    it('should return resume information', async () => {
      const tracker = new ProgressTracker(TEST_SESSION_PATH, 5, 'rapid');
      await tracker.initialize();
      await tracker.completeBlock(1, 'Block 1', 3); // This adds 3 to totalQuestionsAnswered
      await tracker.answerQuestion('q1', 'Q1', 'A1', { qualityScore: 80, wordCount: 20, isComprehensive: true });

      const info = tracker.getResumeInfo();
      expect(info.completedBlocks).toBe(1);
      expect(info.totalBlocks).toBe(5);
      expect(info.totalQuestionsAnswered).toBe(3); // completeBlock added 3
    });
  });
});
|