fraim-framework 2.0.37 → 2.0.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -0
- package/dist/src/ai-manager/ai-manager.js +162 -0
- package/dist/src/cli/commands/init-project.js +74 -0
- package/dist/src/cli/commands/setup.js +176 -0
- package/dist/src/cli/commands/test-mcp.js +135 -0
- package/dist/src/cli/fraim.js +6 -0
- package/dist/src/cli/setup/auto-mcp-setup.js +367 -0
- package/dist/src/cli/setup/ide-detector.js +165 -0
- package/dist/src/cli/setup/mcp-config-generator.js +144 -0
- package/dist/src/cli/setup/token-validator.js +49 -0
- package/dist/src/fraim-mcp-server.js +198 -0
- package/dist/tests/debug-tools.js +2 -2
- package/dist/tests/shared-server-utils.js +57 -0
- package/dist/tests/test-ai-manager.js +113 -0
- package/dist/tests/test-client-scripts-validation.js +27 -5
- package/dist/tests/test-complete-setup-flow.js +110 -0
- package/dist/tests/test-ide-detector.js +46 -0
- package/dist/tests/test-improved-setup.js +121 -0
- package/dist/tests/test-mcp-config-generator.js +99 -0
- package/dist/tests/test-mcp-connection.js +58 -117
- package/dist/tests/test-mcp-issue-integration.js +2 -2
- package/dist/tests/test-mcp-lifecycle-methods.js +34 -100
- package/dist/tests/test-mcp-shared-server.js +308 -0
- package/dist/tests/test-package-size.js +21 -8
- package/dist/tests/test-script-location-independence.js +39 -62
- package/dist/tests/test-server-utils.js +32 -0
- package/dist/tests/test-session-rehydration.js +2 -2
- package/dist/tests/test-setup-integration.js +98 -0
- package/dist/tests/test-standalone.js +2 -2
- package/dist/tests/test-stub-registry.js +23 -7
- package/dist/tests/test-telemetry.js +2 -2
- package/dist/tests/test-token-validator.js +30 -0
- package/dist/tests/test-user-journey.js +2 -1
- package/package.json +3 -2
- package/registry/scripts/code-quality-check.sh +566 -559
- package/registry/scripts/prep-issue.sh +7 -0
- package/registry/scripts/verify-pr-comments.sh +74 -70
- /package/registry/stubs/workflows/{convert-to-pdf.md → marketing/convert-to-pdf.md} +0 -0
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.isValidTokenFormat = exports.validateGitHubToken = exports.validateFraimKey = void 0;
|
|
7
|
+
const chalk_1 = __importDefault(require("chalk"));
|
|
8
|
+
const validateFraimKey = async (key) => {
|
|
9
|
+
// Basic format validation
|
|
10
|
+
if (!key || !key.startsWith('fraim_')) {
|
|
11
|
+
return false;
|
|
12
|
+
}
|
|
13
|
+
// TODO: Add actual API validation when FRAIM server is available
|
|
14
|
+
// For now, just validate format
|
|
15
|
+
return key.length > 15; // More reasonable minimum length
|
|
16
|
+
};
|
|
17
|
+
exports.validateFraimKey = validateFraimKey;
|
|
18
|
+
const validateGitHubToken = async (token) => {
|
|
19
|
+
if (!token)
|
|
20
|
+
return false;
|
|
21
|
+
// Validate token format
|
|
22
|
+
if (!token.startsWith('ghp_') && !token.startsWith('github_pat_')) {
|
|
23
|
+
return false;
|
|
24
|
+
}
|
|
25
|
+
try {
|
|
26
|
+
const response = await fetch('https://api.github.com/user', {
|
|
27
|
+
headers: {
|
|
28
|
+
Authorization: `Bearer ${token}`,
|
|
29
|
+
'User-Agent': 'FRAIM-Setup/1.0'
|
|
30
|
+
}
|
|
31
|
+
});
|
|
32
|
+
return response.ok;
|
|
33
|
+
}
|
|
34
|
+
catch (error) {
|
|
35
|
+
console.log(chalk_1.default.yellow('⚠️ Could not validate GitHub token (network issue), proceeding anyway'));
|
|
36
|
+
return true; // Assume valid if network issues
|
|
37
|
+
}
|
|
38
|
+
};
|
|
39
|
+
exports.validateGitHubToken = validateGitHubToken;
|
|
40
|
+
const isValidTokenFormat = (token, type) => {
|
|
41
|
+
if (type === 'fraim') {
|
|
42
|
+
return token.startsWith('fraim_') && token.length > 15; // More reasonable minimum length
|
|
43
|
+
}
|
|
44
|
+
if (type === 'github') {
|
|
45
|
+
return token.startsWith('ghp_') || token.startsWith('github_pat_');
|
|
46
|
+
}
|
|
47
|
+
return false;
|
|
48
|
+
};
|
|
49
|
+
exports.isValidTokenFormat = isValidTokenFormat;
|
|
@@ -45,6 +45,7 @@ const path_1 = require("path");
|
|
|
45
45
|
const git_utils_1 = require("./utils/git-utils");
|
|
46
46
|
const config_loader_1 = require("./fraim/config-loader");
|
|
47
47
|
const db_service_1 = require("./fraim/db-service");
|
|
48
|
+
const ai_manager_1 = require("./ai-manager/ai-manager");
|
|
48
49
|
const issues_1 = require("./fraim/issues");
|
|
49
50
|
const crypto_1 = require("crypto");
|
|
50
51
|
const dotenv = __importStar(require("dotenv"));
|
|
@@ -169,6 +170,7 @@ class FraimMCPServer {
|
|
|
169
170
|
// Initialize database service
|
|
170
171
|
this.dbService = new db_service_1.FraimDbService();
|
|
171
172
|
this.sessionManager = new SessionManager(this.dbService);
|
|
173
|
+
this.aiManager = new ai_manager_1.AIManager();
|
|
172
174
|
// Load FRAIM configuration
|
|
173
175
|
this.config = (0, config_loader_1.loadFraimConfig)();
|
|
174
176
|
// Find registry directory (check dist first for production, then source)
|
|
@@ -979,6 +981,98 @@ Supports dry-run mode to preview the operation.`,
|
|
|
979
981
|
},
|
|
980
982
|
required: ['title', 'body']
|
|
981
983
|
}
|
|
984
|
+
},
|
|
985
|
+
{
|
|
986
|
+
name: 'ai_manager_request_review',
|
|
987
|
+
description: `Request self-review instructions from AI Manager after completing workflow phase.
|
|
988
|
+
|
|
989
|
+
The AI Manager provides detailed review instructions including:
|
|
990
|
+
- Step-by-step validation criteria
|
|
991
|
+
- Pass/fail criteria for each step
|
|
992
|
+
- Commands to run for verification
|
|
993
|
+
- Grading guidelines and reporting format
|
|
994
|
+
|
|
995
|
+
Use this when you believe your work is complete and ready for review.
|
|
996
|
+
Currently supports: spec phase (more phases coming soon).`,
|
|
997
|
+
inputSchema: {
|
|
998
|
+
type: 'object',
|
|
999
|
+
properties: {
|
|
1000
|
+
workflowType: {
|
|
1001
|
+
type: 'string',
|
|
1002
|
+
description: 'Type of workflow phase completed',
|
|
1003
|
+
enum: ['spec', 'design', 'implement', 'test']
|
|
1004
|
+
},
|
|
1005
|
+
issueNumber: {
|
|
1006
|
+
type: 'string',
|
|
1007
|
+
description: 'Issue number being worked on'
|
|
1008
|
+
},
|
|
1009
|
+
phase: {
|
|
1010
|
+
type: 'string',
|
|
1011
|
+
description: 'Specific phase name (e.g., "specification", "implementation")'
|
|
1012
|
+
}
|
|
1013
|
+
},
|
|
1014
|
+
required: ['workflowType', 'issueNumber', 'phase']
|
|
1015
|
+
}
|
|
1016
|
+
},
|
|
1017
|
+
{
|
|
1018
|
+
name: 'ai_manager_report_grade',
|
|
1019
|
+
description: `Report your self-assessment to AI Manager after completing self-review.
|
|
1020
|
+
|
|
1021
|
+
Provide results from review in JSON format:
|
|
1022
|
+
- pass: true/false based on review assessment
|
|
1023
|
+
- reasons: array of failure reasons (only if pass=false)
|
|
1024
|
+
- iterationCount: REQUIRED - your current iteration number for this phase
|
|
1025
|
+
|
|
1026
|
+
AI Manager will evaluate your report and provide next steps:
|
|
1027
|
+
- PROCEED: Ready to submit PR for human review
|
|
1028
|
+
- ITERATE: Fix issues and retry (max 3 iterations)
|
|
1029
|
+
- ESCALATE: Max iterations reached, escalate to human review
|
|
1030
|
+
|
|
1031
|
+
IMPORTANT: Track your iteration count per workflow phase. Each phase (spec, implement, test) has its own counter.
|
|
1032
|
+
Maximum 3 iterations per phase before automatic escalation to human review.`,
|
|
1033
|
+
inputSchema: {
|
|
1034
|
+
type: 'object',
|
|
1035
|
+
properties: {
|
|
1036
|
+
workflowType: {
|
|
1037
|
+
type: 'string',
|
|
1038
|
+
description: 'Type of workflow phase',
|
|
1039
|
+
enum: ['spec', 'design', 'implement', 'test']
|
|
1040
|
+
},
|
|
1041
|
+
issueNumber: {
|
|
1042
|
+
type: 'string',
|
|
1043
|
+
description: 'Issue number being worked on'
|
|
1044
|
+
},
|
|
1045
|
+
phase: {
|
|
1046
|
+
type: 'string',
|
|
1047
|
+
description: 'Specific phase name'
|
|
1048
|
+
},
|
|
1049
|
+
report: {
|
|
1050
|
+
type: 'object',
|
|
1051
|
+
description: 'Your self-assessment report in JSON format',
|
|
1052
|
+
properties: {
|
|
1053
|
+
pass: {
|
|
1054
|
+
type: 'boolean',
|
|
1055
|
+
description: 'Whether your work passes all validation criteria'
|
|
1056
|
+
},
|
|
1057
|
+
reasons: {
|
|
1058
|
+
type: 'array',
|
|
1059
|
+
description: 'Array of specific reasons for failure (only required if pass=false)',
|
|
1060
|
+
items: {
|
|
1061
|
+
type: 'string'
|
|
1062
|
+
}
|
|
1063
|
+
},
|
|
1064
|
+
iterationCount: {
|
|
1065
|
+
type: 'number',
|
|
1066
|
+
description: 'REQUIRED: Your current iteration number for this workflow phase (1, 2, or 3)',
|
|
1067
|
+
minimum: 1,
|
|
1068
|
+
maximum: 3
|
|
1069
|
+
}
|
|
1070
|
+
},
|
|
1071
|
+
required: ['pass', 'iterationCount']
|
|
1072
|
+
}
|
|
1073
|
+
},
|
|
1074
|
+
required: ['workflowType', 'issueNumber', 'phase', 'report']
|
|
1075
|
+
}
|
|
982
1076
|
}
|
|
983
1077
|
]
|
|
984
1078
|
};
|
|
@@ -1016,6 +1110,10 @@ Supports dry-run mode to preview the operation.`,
|
|
|
1016
1110
|
};
|
|
1017
1111
|
case 'fraim_connect':
|
|
1018
1112
|
return await this.handleFraimConnect(toolArgs, context.apiKey, context.userId);
|
|
1113
|
+
case 'ai_manager_request_review':
|
|
1114
|
+
return await this.handleAIManagerRequestReview(toolArgs);
|
|
1115
|
+
case 'ai_manager_report_grade':
|
|
1116
|
+
return await this.handleAIManagerReportGrade(toolArgs);
|
|
1019
1117
|
default:
|
|
1020
1118
|
throw new Error(`Unknown tool: ${toolName} `);
|
|
1021
1119
|
}
|
|
@@ -1450,6 +1548,106 @@ If \`.fraim/config.json\` doesn't exist:
|
|
|
1450
1548
|
sessionId: sessionId
|
|
1451
1549
|
};
|
|
1452
1550
|
}
|
|
1551
|
+
async handleAIManagerRequestReview(args) {
|
|
1552
|
+
try {
|
|
1553
|
+
console.log(`🤖 AI Manager: Generating review instructions for ${args.workflowType} phase`);
|
|
1554
|
+
const instructions = this.aiManager.generateReviewInstructions({
|
|
1555
|
+
workflowType: args.workflowType,
|
|
1556
|
+
issueNumber: args.issueNumber,
|
|
1557
|
+
phase: args.phase
|
|
1558
|
+
});
|
|
1559
|
+
return {
|
|
1560
|
+
content: [{
|
|
1561
|
+
type: 'text',
|
|
1562
|
+
text: instructions
|
|
1563
|
+
}]
|
|
1564
|
+
};
|
|
1565
|
+
}
|
|
1566
|
+
catch (error) {
|
|
1567
|
+
console.error('❌ AI Manager request review failed:', error);
|
|
1568
|
+
return {
|
|
1569
|
+
content: [{
|
|
1570
|
+
type: 'text',
|
|
1571
|
+
text: `# ❌ AI Manager Request Failed\n\n**Error**: ${error instanceof Error ? error.message : 'Unknown error'}\n\nPlease check your request parameters and try again.`
|
|
1572
|
+
}],
|
|
1573
|
+
error: error instanceof Error ? error.message : 'Unknown error'
|
|
1574
|
+
};
|
|
1575
|
+
}
|
|
1576
|
+
}
|
|
1577
|
+
    /**
     * Handle the `ai_manager_report_grade` MCP tool call.
     * Passes the agent's self-assessment report to the AIManager, then renders
     * the resulting decision (PROCEED / ITERATE / ESCALATE) as a markdown
     * response with next steps and a summary of the submitted report.
     * Errors are caught and returned as an error-formatted response (never thrown).
     */
    async handleAIManagerReportGrade(args) {
        try {
            console.log(`🤖 AI Manager: Evaluating review report for ${args.workflowType} phase`);
            // Delegate the pass/fail + iteration-limit decision to the AIManager.
            const decision = this.aiManager.evaluateReport(args.report, {
                workflowType: args.workflowType,
                issueNumber: args.issueNumber,
                phase: args.phase
            });
            // Format response for agent
            let response = `# 🤖 AI Manager Evaluation Result\n\n`;
            const actionEmoji = decision.action === 'PROCEED' ? '✅' :
                decision.action === 'ESCALATE' ? '⚠️' : '🔄';
            response += `## Decision: ${actionEmoji} ${decision.action}\n\n`;
            response += `**Message**: ${decision.message}\n\n`;
            // Note: a falsy iterationCount (0/undefined) suppresses this line.
            if (decision.iterationCount) {
                response += `**Iteration Count**: ${decision.iterationCount}/3\n\n`;
            }
            response += `## Next Steps\n\n`;
            decision.nextSteps.forEach((step, index) => {
                response += `${index + 1}. ${step}\n`;
            });
            response += `\n`;
            // Decision-specific guidance for the agent.
            if (decision.action === 'PROCEED') {
                response += `## 🎉 Ready for Human Review!\n\n`;
                response += `Your work has passed AI Manager validation. You should now:\n\n`;
                response += `1. **Submit PR** with complete evidence document\n`;
                response += `2. **Update issue labels** to status:needs-review\n`;
                response += `3. **Include this AI Manager validation** in your evidence\n\n`;
            }
            else if (decision.action === 'ESCALATE') {
                response += `## ⚠️ Escalated to Human Review\n\n`;
                response += `Maximum iterations reached. Your work will be reviewed by a human despite validation failures:\n\n`;
                response += `1. **Submit PR** with detailed iteration history\n`;
                response += `2. **Add escalation label** (ai-manager:max-iterations)\n`;
                response += `3. **Document all attempts** and failure reasons in evidence\n`;
                response += `4. **Human reviewer** will focus on recurring validation issues\n\n`;
            }
            else {
                // ITERATE: instruct the agent to fix and re-request review with an
                // incremented iterationCount (defaults to 1 when absent).
                response += `## 🔧 Work Required\n\n`;
                response += `Your work needs improvement. You should:\n\n`;
                response += `1. **Address all failure reasons** listed below\n`;
                response += `2. **Re-run validation steps** to verify fixes\n`;
                response += `3. **Request new review** using ai_manager_request_review\n`;
                response += `4. **Include iterationCount: ${(decision.iterationCount || 1) + 1}** in next report\n`;
                response += `5. **Do NOT submit PR** until review passes or escalates\n\n`;
            }
            // Echo back the agent's own report for traceability.
            response += `## Your Report Summary\n\n`;
            response += `**Pass**: ${args.report.pass}\n`;
            response += `**Iteration**: ${args.report.iterationCount || 1}/3\n`;
            if (args.report.reasons && args.report.reasons.length > 0) {
                response += `**Failure Reasons**:\n`;
                args.report.reasons.forEach((reason) => {
                    response += `- ${reason}\n`;
                });
            }
            return {
                content: [{
                        type: 'text',
                        text: response
                    }],
                // Raw decision is included so programmatic callers need not parse markdown.
                decision: decision
            };
        }
        catch (error) {
            console.error('❌ AI Manager report evaluation failed:', error);
            return {
                content: [{
                        type: 'text',
                        text: `# ❌ AI Manager Evaluation Failed\n\n**Error**: ${error instanceof Error ? error.message : 'Unknown error'}\n\nPlease check your report format and try again.`
                    }],
                error: error instanceof Error ? error.message : 'Unknown error'
            };
        }
    }
|
|
1453
1651
|
async start(port = 3002) {
|
|
1454
1652
|
try {
|
|
1455
1653
|
// Connect to database before starting server
|
|
@@ -8,7 +8,7 @@ const axios_1 = __importDefault(require("axios"));
|
|
|
8
8
|
const db_service_js_1 = require("../src/fraim/db-service.js");
|
|
9
9
|
const test_utils_1 = require("./test-utils");
|
|
10
10
|
const tree_kill_1 = __importDefault(require("tree-kill"));
|
|
11
|
-
const
|
|
11
|
+
const test_server_utils_1 = require("./test-server-utils");
|
|
12
12
|
async function debugListTools() {
|
|
13
13
|
console.log(' 🔍 Debugging Available Tools...');
|
|
14
14
|
let fraimProcess;
|
|
@@ -24,7 +24,7 @@ async function debugListTools() {
|
|
|
24
24
|
await db.collection('fraim_api_keys').updateOne({ key: TEST_API_KEY }, { $set: { userId: 'debug@test.com', orgId: 'debug-org', isActive: true, createdAt: new Date() } }, { upsert: true });
|
|
25
25
|
// 2. Start server
|
|
26
26
|
const npxCommand = process.platform === 'win32' ? 'npx.cmd' : 'npx';
|
|
27
|
-
const serverScript =
|
|
27
|
+
const serverScript = (0, test_server_utils_1.getServerScriptPath)();
|
|
28
28
|
fraimProcess = (0, node_child_process_1.spawn)(npxCommand, ['node', `"${serverScript}"`], {
|
|
29
29
|
env: { ...process.env, FRAIM_MCP_PORT: PORT.toString(), FRAIM_SKIP_INDEX_ON_START: 'true' },
|
|
30
30
|
shell: true
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Shared server utilities for tests
|
|
4
|
+
* All tests should use these utilities instead of starting their own servers
|
|
5
|
+
*/
|
|
6
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
7
|
+
exports.getTestServerUrl = getTestServerUrl;
|
|
8
|
+
exports.getTestServerPort = getTestServerPort;
|
|
9
|
+
exports.getMcpEndpoint = getMcpEndpoint;
|
|
10
|
+
exports.getHealthEndpoint = getHealthEndpoint;
|
|
11
|
+
exports.isServerRunning = isServerRunning;
|
|
12
|
+
exports.waitForServer = waitForServer;
|
|
13
|
+
const SERVER_URL = process.env.FRAIM_TEST_SERVER_URL || 'http://localhost:12999';
|
|
14
|
+
const SERVER_PORT = parseInt(process.env.FRAIM_TEST_SERVER_PORT || '12999');
|
|
15
|
+
function getTestServerUrl() {
|
|
16
|
+
return SERVER_URL;
|
|
17
|
+
}
|
|
18
|
+
function getTestServerPort() {
|
|
19
|
+
return SERVER_PORT;
|
|
20
|
+
}
|
|
21
|
+
function getMcpEndpoint() {
|
|
22
|
+
return `${SERVER_URL}/mcp`;
|
|
23
|
+
}
|
|
24
|
+
function getHealthEndpoint() {
|
|
25
|
+
return `${SERVER_URL}/health`;
|
|
26
|
+
}
|
|
27
|
+
/**
|
|
28
|
+
* Check if the shared test server is running
|
|
29
|
+
*/
|
|
30
|
+
async function isServerRunning() {
|
|
31
|
+
try {
|
|
32
|
+
const axios = require('axios');
|
|
33
|
+
await axios.get(getHealthEndpoint(), { timeout: 1000 });
|
|
34
|
+
return true;
|
|
35
|
+
}
|
|
36
|
+
catch {
|
|
37
|
+
return false;
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
/**
|
|
41
|
+
* Wait for the shared server to be ready
|
|
42
|
+
* This is useful for tests that run early in the suite
|
|
43
|
+
*/
|
|
44
|
+
async function waitForServer(timeoutMs = 10000) {
|
|
45
|
+
const axios = require('axios');
|
|
46
|
+
const startTime = Date.now();
|
|
47
|
+
while (Date.now() - startTime < timeoutMs) {
|
|
48
|
+
try {
|
|
49
|
+
await axios.get(getHealthEndpoint(), { timeout: 1000 });
|
|
50
|
+
return;
|
|
51
|
+
}
|
|
52
|
+
catch {
|
|
53
|
+
await new Promise(resolve => setTimeout(resolve, 500));
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
throw new Error(`Shared test server not ready after ${timeoutMs}ms`);
|
|
57
|
+
}
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Tests for AI Manager - Simplified Review System
|
|
4
|
+
*/
|
|
5
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
6
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
7
|
+
};
|
|
8
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
9
|
+
const node_test_1 = require("node:test");
|
|
10
|
+
const node_assert_1 = __importDefault(require("node:assert"));
|
|
11
|
+
const ai_manager_1 = require("../src/ai-manager/ai-manager");
|
|
12
|
+
(0, node_test_1.describe)('AI Manager', () => {
|
|
13
|
+
let aiManager;
|
|
14
|
+
(0, node_test_1.beforeEach)(() => {
|
|
15
|
+
aiManager = new ai_manager_1.AIManager();
|
|
16
|
+
});
|
|
17
|
+
(0, node_test_1.describe)('generateReviewInstructions', () => {
|
|
18
|
+
(0, node_test_1.test)('should generate spec workflow instructions', () => {
|
|
19
|
+
const context = {
|
|
20
|
+
workflowType: 'spec',
|
|
21
|
+
issueNumber: '123',
|
|
22
|
+
phase: 'specification'
|
|
23
|
+
};
|
|
24
|
+
const instructions = aiManager.generateReviewInstructions(context);
|
|
25
|
+
(0, node_assert_1.default)(typeof instructions === 'string');
|
|
26
|
+
(0, node_assert_1.default)(instructions.includes('AI Manager Review Instructions'));
|
|
27
|
+
(0, node_assert_1.default)(instructions.includes('spec'));
|
|
28
|
+
(0, node_assert_1.default)(instructions.includes('123'));
|
|
29
|
+
(0, node_assert_1.default)(instructions.includes('iterationCount'));
|
|
30
|
+
(0, node_assert_1.default)(instructions.includes('Maximum 3 iterations'));
|
|
31
|
+
});
|
|
32
|
+
(0, node_test_1.test)('should throw error for unknown workflow type', () => {
|
|
33
|
+
const context = {
|
|
34
|
+
workflowType: 'unknown',
|
|
35
|
+
issueNumber: '123',
|
|
36
|
+
phase: 'test'
|
|
37
|
+
};
|
|
38
|
+
node_assert_1.default.throws(() => {
|
|
39
|
+
aiManager.generateReviewInstructions(context);
|
|
40
|
+
}, /No rules found for workflow type: unknown/);
|
|
41
|
+
});
|
|
42
|
+
});
|
|
43
|
+
(0, node_test_1.describe)('evaluateReport', () => {
|
|
44
|
+
(0, node_test_1.test)('should return PROCEED for passing report', () => {
|
|
45
|
+
const report = {
|
|
46
|
+
pass: true,
|
|
47
|
+
iterationCount: 1
|
|
48
|
+
};
|
|
49
|
+
const context = {
|
|
50
|
+
workflowType: 'spec',
|
|
51
|
+
issueNumber: '123',
|
|
52
|
+
phase: 'specification'
|
|
53
|
+
};
|
|
54
|
+
const decision = aiManager.evaluateReport(report, context);
|
|
55
|
+
node_assert_1.default.strictEqual(decision.action, 'PROCEED');
|
|
56
|
+
(0, node_assert_1.default)(decision.message.includes('Ready to submit PR'));
|
|
57
|
+
(0, node_assert_1.default)(decision.nextSteps.length > 0);
|
|
58
|
+
node_assert_1.default.strictEqual(decision.iterationCount, 1);
|
|
59
|
+
});
|
|
60
|
+
(0, node_test_1.test)('should return ITERATE for failing report within iteration limit', () => {
|
|
61
|
+
const report = {
|
|
62
|
+
pass: false,
|
|
63
|
+
reasons: ['Missing spec document', 'Template not followed'],
|
|
64
|
+
iterationCount: 2
|
|
65
|
+
};
|
|
66
|
+
const context = {
|
|
67
|
+
workflowType: 'spec',
|
|
68
|
+
issueNumber: '123',
|
|
69
|
+
phase: 'specification'
|
|
70
|
+
};
|
|
71
|
+
const decision = aiManager.evaluateReport(report, context);
|
|
72
|
+
node_assert_1.default.strictEqual(decision.action, 'ITERATE');
|
|
73
|
+
(0, node_assert_1.default)(decision.message.includes('Address the identified issues'));
|
|
74
|
+
(0, node_assert_1.default)(decision.message.includes('Iteration 2/3'));
|
|
75
|
+
(0, node_assert_1.default)(decision.nextSteps.length > 0);
|
|
76
|
+
node_assert_1.default.strictEqual(decision.iterationCount, 2);
|
|
77
|
+
});
|
|
78
|
+
(0, node_test_1.test)('should return ESCALATE when max iterations reached', () => {
|
|
79
|
+
const report = {
|
|
80
|
+
pass: false,
|
|
81
|
+
reasons: ['Still missing spec document', 'Template still not followed'],
|
|
82
|
+
iterationCount: 3
|
|
83
|
+
};
|
|
84
|
+
const context = {
|
|
85
|
+
workflowType: 'spec',
|
|
86
|
+
issueNumber: '123',
|
|
87
|
+
phase: 'specification'
|
|
88
|
+
};
|
|
89
|
+
const decision = aiManager.evaluateReport(report, context);
|
|
90
|
+
node_assert_1.default.strictEqual(decision.action, 'ESCALATE');
|
|
91
|
+
(0, node_assert_1.default)(decision.message.includes('Maximum iterations'));
|
|
92
|
+
(0, node_assert_1.default)(decision.message.includes('Escalating to human review'));
|
|
93
|
+
(0, node_assert_1.default)(decision.nextSteps.length > 0);
|
|
94
|
+
node_assert_1.default.strictEqual(decision.iterationCount, 3);
|
|
95
|
+
node_assert_1.default.strictEqual(decision.maxIterationsReached, true);
|
|
96
|
+
});
|
|
97
|
+
(0, node_test_1.test)('should default to iteration 1 if not provided', () => {
|
|
98
|
+
const report = {
|
|
99
|
+
pass: false,
|
|
100
|
+
reasons: ['Missing spec document']
|
|
101
|
+
// iterationCount not provided
|
|
102
|
+
};
|
|
103
|
+
const context = {
|
|
104
|
+
workflowType: 'spec',
|
|
105
|
+
issueNumber: '123',
|
|
106
|
+
phase: 'specification'
|
|
107
|
+
};
|
|
108
|
+
const decision = aiManager.evaluateReport(report, context);
|
|
109
|
+
node_assert_1.default.strictEqual(decision.action, 'ITERATE');
|
|
110
|
+
node_assert_1.default.strictEqual(decision.iterationCount, 1);
|
|
111
|
+
});
|
|
112
|
+
});
|
|
113
|
+
});
|
|
@@ -8,10 +8,28 @@ const child_process_1 = require("child_process");
|
|
|
8
8
|
const fs_1 = require("fs");
|
|
9
9
|
const path_1 = require("path");
|
|
10
10
|
const node_assert_1 = __importDefault(require("node:assert"));
|
|
11
|
+
const path_2 = __importDefault(require("path"));
|
|
12
|
+
/**
|
|
13
|
+
* Client-side script validation tests
|
|
14
|
+
* Ensures scripts can run without FRAIM internal imports
|
|
15
|
+
* Following the project's standard test structure from test-utils.ts
|
|
16
|
+
*/
|
|
17
|
+
// Find project root by looking for package.json
|
|
18
|
+
// Find project root by walking up from this file until a package.json is found.
// Fixed: the previous loop condition stopped BEFORE checking the filesystem
// root directory itself; this version checks every directory including root.
function findProjectRoot() {
    let currentDir = __dirname;
    for (;;) {
        if ((0, fs_1.existsSync)(path_2.default.join(currentDir, 'package.json'))) {
            return currentDir;
        }
        const parentDir = path_2.default.dirname(currentDir);
        if (parentDir === currentDir) {
            break; // reached the filesystem root without finding package.json
        }
        currentDir = parentDir;
    }
    throw new Error('Could not find project root (package.json not found)');
}
|
|
11
28
|
async function testNoFraimInternalImports() {
|
|
12
29
|
console.log(' 🧪 Testing registry scripts do not import FRAIM internals...');
|
|
13
30
|
try {
|
|
14
|
-
const
|
|
31
|
+
const projectRoot = findProjectRoot();
|
|
32
|
+
const scriptsDir = (0, path_1.join)(projectRoot, 'registry', 'scripts');
|
|
15
33
|
const scriptFiles = ['cleanup-branch.ts', 'generate-engagement-emails.ts', 'newsletter-helpers.ts'];
|
|
16
34
|
for (const scriptFile of scriptFiles) {
|
|
17
35
|
const scriptPath = (0, path_1.join)(scriptsDir, scriptFile);
|
|
@@ -35,7 +53,8 @@ async function testNoFraimInternalImports() {
|
|
|
35
53
|
async function testInlineUtilityFunctions() {
|
|
36
54
|
console.log(' 🧪 Testing scripts have inline utility functions...');
|
|
37
55
|
try {
|
|
38
|
-
const
|
|
56
|
+
const projectRoot = findProjectRoot();
|
|
57
|
+
const scriptsDir = (0, path_1.join)(projectRoot, 'registry', 'scripts');
|
|
39
58
|
const scriptPath = (0, path_1.join)(scriptsDir, 'cleanup-branch.ts');
|
|
40
59
|
if ((0, fs_1.existsSync)(scriptPath)) {
|
|
41
60
|
const content = (0, fs_1.readFileSync)(scriptPath, 'utf-8');
|
|
@@ -54,7 +73,8 @@ async function testInlineUtilityFunctions() {
|
|
|
54
73
|
async function testConfigLoadingFromJson() {
|
|
55
74
|
console.log(' 🧪 Testing scripts load config from .fraim/config.json...');
|
|
56
75
|
try {
|
|
57
|
-
const
|
|
76
|
+
const projectRoot = findProjectRoot();
|
|
77
|
+
const scriptsDir = (0, path_1.join)(projectRoot, 'registry', 'scripts');
|
|
58
78
|
const scriptPath = (0, path_1.join)(scriptsDir, 'generate-engagement-emails.ts');
|
|
59
79
|
if ((0, fs_1.existsSync)(scriptPath)) {
|
|
60
80
|
const content = (0, fs_1.readFileSync)(scriptPath, 'utf-8');
|
|
@@ -73,7 +93,8 @@ async function testConfigLoadingFromJson() {
|
|
|
73
93
|
async function testRegistryPathValidator() {
|
|
74
94
|
console.log(' 🧪 Testing registry path validator passes...');
|
|
75
95
|
try {
|
|
76
|
-
|
|
96
|
+
const projectRoot = findProjectRoot();
|
|
97
|
+
(0, child_process_1.execSync)('npm run validate:registry', { stdio: 'pipe', cwd: projectRoot });
|
|
77
98
|
return true;
|
|
78
99
|
}
|
|
79
100
|
catch (error) {
|
|
@@ -84,7 +105,8 @@ async function testRegistryPathValidator() {
|
|
|
84
105
|
async function testScriptExecutability() {
|
|
85
106
|
console.log(' 🧪 Testing scripts are executable without import errors...');
|
|
86
107
|
try {
|
|
87
|
-
const
|
|
108
|
+
const projectRoot = findProjectRoot();
|
|
109
|
+
const scriptsDir = (0, path_1.join)(projectRoot, 'registry', 'scripts');
|
|
88
110
|
const scriptPath = (0, path_1.join)(scriptsDir, 'cleanup-branch.ts');
|
|
89
111
|
if ((0, fs_1.existsSync)(scriptPath)) {
|
|
90
112
|
// Test that the script can be parsed without import errors
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
const node_test_1 = require("node:test");
|
|
7
|
+
const node_assert_1 = __importDefault(require("node:assert"));
|
|
8
|
+
const fs_1 = __importDefault(require("fs"));
|
|
9
|
+
const path_1 = __importDefault(require("path"));
|
|
10
|
+
const os_1 = __importDefault(require("os"));
|
|
11
|
+
const ide_detector_1 = require("../src/cli/setup/ide-detector");
|
|
12
|
+
const mcp_config_generator_1 = require("../src/cli/setup/mcp-config-generator");
|
|
13
|
+
const token_validator_1 = require("../src/cli/setup/token-validator");
|
|
14
|
+
(0, node_test_1.test)('Complete setup flow - end to end simulation', async () => {
|
|
15
|
+
const testFraimKey = 'fraim_test123456789012345';
|
|
16
|
+
const testGithubToken = 'ghp_test123456789012345';
|
|
17
|
+
// Step 1: Validate tokens (as setup command would)
|
|
18
|
+
(0, node_assert_1.default)((0, token_validator_1.isValidTokenFormat)(testFraimKey, 'fraim'), 'FRAIM key should be valid format');
|
|
19
|
+
(0, node_assert_1.default)((0, token_validator_1.isValidTokenFormat)(testGithubToken, 'github'), 'GitHub token should be valid format');
|
|
20
|
+
(0, node_assert_1.default)(await (0, token_validator_1.validateFraimKey)(testFraimKey), 'FRAIM key should validate');
|
|
21
|
+
// Step 2: Detect IDEs (as setup command would)
|
|
22
|
+
const detectedIDEs = (0, ide_detector_1.detectInstalledIDEs)();
|
|
23
|
+
console.log(`Detected ${detectedIDEs.length} IDEs in test environment`);
|
|
24
|
+
// Step 3: Generate configs for each detected IDE type
|
|
25
|
+
const configTypes = ['standard', 'kiro', 'codex', 'windsurf'];
|
|
26
|
+
for (const configType of configTypes) {
|
|
27
|
+
const config = (0, mcp_config_generator_1.generateMCPConfig)(configType, testFraimKey, testGithubToken);
|
|
28
|
+
if (configType === 'codex') {
|
|
29
|
+
// TOML config
|
|
30
|
+
(0, node_assert_1.default)(typeof config === 'string', `${configType} should return string`);
|
|
31
|
+
(0, node_assert_1.default)(config.includes(testFraimKey), `${configType} should contain FRAIM key`);
|
|
32
|
+
}
|
|
33
|
+
else {
|
|
34
|
+
// JSON configs
|
|
35
|
+
(0, node_assert_1.default)(typeof config === 'object', `${configType} should return object`);
|
|
36
|
+
(0, node_assert_1.default)(config.mcpServers, `${configType} should have mcpServers`);
|
|
37
|
+
(0, node_assert_1.default)(config.mcpServers.fraim, `${configType} should have fraim server`);
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
// Step 4: Simulate global config creation
|
|
41
|
+
const testGlobalDir = path_1.default.join(os_1.default.tmpdir(), 'fraim-global-test-' + Date.now());
|
|
42
|
+
fs_1.default.mkdirSync(testGlobalDir, { recursive: true });
|
|
43
|
+
const globalConfig = {
|
|
44
|
+
version: '2.0.37',
|
|
45
|
+
apiKey: testFraimKey,
|
|
46
|
+
configuredAt: new Date().toISOString(),
|
|
47
|
+
userPreferences: {
|
|
48
|
+
autoSync: true,
|
|
49
|
+
backupConfigs: true
|
|
50
|
+
}
|
|
51
|
+
};
|
|
52
|
+
const globalConfigPath = path_1.default.join(testGlobalDir, 'config.json');
|
|
53
|
+
fs_1.default.writeFileSync(globalConfigPath, JSON.stringify(globalConfig, null, 2));
|
|
54
|
+
(0, node_assert_1.default)(fs_1.default.existsSync(globalConfigPath), 'Global config should be created');
|
|
55
|
+
// Step 5: Simulate project config creation (as init-project would)
|
|
56
|
+
const testProjectDir = path_1.default.join(os_1.default.tmpdir(), 'fraim-project-test-' + Date.now());
|
|
57
|
+
fs_1.default.mkdirSync(testProjectDir, { recursive: true });
|
|
58
|
+
const fraimProjectDir = path_1.default.join(testProjectDir, '.fraim');
|
|
59
|
+
fs_1.default.mkdirSync(fraimProjectDir, { recursive: true });
|
|
60
|
+
const projectConfig = {
|
|
61
|
+
version: '2.0.37',
|
|
62
|
+
project: {
|
|
63
|
+
name: 'test-project'
|
|
64
|
+
},
|
|
65
|
+
git: {
|
|
66
|
+
defaultBranch: 'master',
|
|
67
|
+
repoOwner: 'test-owner',
|
|
68
|
+
repoName: 'test-project'
|
|
69
|
+
}
|
|
70
|
+
};
|
|
71
|
+
const projectConfigPath = path_1.default.join(fraimProjectDir, 'config.json');
|
|
72
|
+
fs_1.default.writeFileSync(projectConfigPath, JSON.stringify(projectConfig, null, 2));
|
|
73
|
+
(0, node_assert_1.default)(fs_1.default.existsSync(projectConfigPath), 'Project config should be created');
|
|
74
|
+
// Step 6: Verify configs can be read back correctly
|
|
75
|
+
const savedGlobalConfig = JSON.parse(fs_1.default.readFileSync(globalConfigPath, 'utf8'));
|
|
76
|
+
const savedProjectConfig = JSON.parse(fs_1.default.readFileSync(projectConfigPath, 'utf8'));
|
|
77
|
+
(0, node_assert_1.default)(savedGlobalConfig.apiKey === testFraimKey, 'Global config should contain API key');
|
|
78
|
+
(0, node_assert_1.default)(savedProjectConfig.project.name === 'test-project', 'Project config should contain project name');
|
|
79
|
+
// Cleanup
|
|
80
|
+
fs_1.default.rmSync(testGlobalDir, { recursive: true, force: true });
|
|
81
|
+
fs_1.default.rmSync(testProjectDir, { recursive: true, force: true });
|
|
82
|
+
console.log('✅ Complete setup flow simulation passed');
|
|
83
|
+
});
|
|
84
|
+
(0, node_test_1.test)('Error handling - invalid tokens should be rejected', async () => {
|
|
85
|
+
const invalidFraimKey = 'invalid_key';
|
|
86
|
+
const invalidGithubToken = 'invalid_token';
|
|
87
|
+
const shortFraimKey = 'fraim_short';
|
|
88
|
+
(0, node_assert_1.default)(!(0, token_validator_1.isValidTokenFormat)(invalidFraimKey, 'fraim'), 'Invalid FRAIM key should be rejected');
|
|
89
|
+
(0, node_assert_1.default)(!(0, token_validator_1.isValidTokenFormat)(invalidGithubToken, 'github'), 'Invalid GitHub token should be rejected');
|
|
90
|
+
(0, node_assert_1.default)(!(0, token_validator_1.isValidTokenFormat)(shortFraimKey, 'fraim'), 'Short FRAIM key should be rejected');
|
|
91
|
+
(0, node_assert_1.default)(!await (0, token_validator_1.validateFraimKey)(invalidFraimKey), 'Invalid FRAIM key should not validate');
|
|
92
|
+
(0, node_assert_1.default)(!await (0, token_validator_1.validateFraimKey)(shortFraimKey), 'Short FRAIM key should not validate');
|
|
93
|
+
console.log('✅ Error handling tests passed');
|
|
94
|
+
});
|
|
95
|
+
(0, node_test_1.test)('Path expansion works correctly across platforms', () => {
|
|
96
|
+
const testPaths = [
|
|
97
|
+
'~/.fraim/config.json',
|
|
98
|
+
'~/.claude.json',
|
|
99
|
+
'~/.kiro/settings/mcp.json'
|
|
100
|
+
];
|
|
101
|
+
testPaths.forEach(testPath => {
|
|
102
|
+
const expanded = (0, ide_detector_1.expandPath)(testPath);
|
|
103
|
+
(0, node_assert_1.default)(expanded.includes(os_1.default.homedir()), `${testPath} should expand to include home directory`);
|
|
104
|
+
(0, node_assert_1.default)(!expanded.includes('~'), `${testPath} should not contain tilde after expansion`);
|
|
105
|
+
});
|
|
106
|
+
// Test absolute paths remain unchanged
|
|
107
|
+
const absolutePath = '/usr/local/bin/fraim';
|
|
108
|
+
(0, node_assert_1.default)((0, ide_detector_1.expandPath)(absolutePath) === absolutePath, 'Absolute paths should remain unchanged');
|
|
109
|
+
console.log('✅ Path expansion tests passed');
|
|
110
|
+
});
|