jstar-reviewer 2.0.3 ā 2.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/jstar.js +34 -10
- package/dist/scripts/config.js +23 -0
- package/dist/scripts/dashboard.js +217 -0
- package/dist/scripts/detective.js +150 -0
- package/dist/scripts/gemini-embedding.js +95 -0
- package/dist/scripts/indexer.js +126 -0
- package/dist/scripts/local-embedding.js +49 -0
- package/dist/scripts/mock-llm.js +22 -0
- package/dist/scripts/reviewer.js +287 -0
- package/dist/scripts/types.js +14 -0
- package/package.json +3 -1
package/bin/jstar.js
CHANGED
|
@@ -70,27 +70,51 @@ function commandExists(cmd) {
|
|
|
70
70
|
}
|
|
71
71
|
|
|
72
72
|
function runScript(scriptName) {
|
|
73
|
+
// Look for transpiled files in dist/
|
|
74
|
+
// .ts becomes .js in dist
|
|
75
|
+
const jsName = scriptName.replace('.ts', '.js');
|
|
76
|
+
const distDir = path.join(__dirname, '..', 'dist', 'scripts');
|
|
77
|
+
const jsPath = path.join(distDir, jsName);
|
|
78
|
+
|
|
79
|
+
// Fallback for local development if dist doesn't exist
|
|
73
80
|
const scriptsDir = path.join(__dirname, '..', 'scripts');
|
|
74
|
-
const
|
|
81
|
+
const tsPath = path.join(scriptsDir, scriptName);
|
|
82
|
+
|
|
83
|
+
const isWin = process.platform === 'win32';
|
|
75
84
|
|
|
76
|
-
if (
|
|
85
|
+
if (fs.existsSync(jsPath)) {
|
|
86
|
+
log(`${COLORS.dim}Running ${jsName}...${COLORS.reset}`);
|
|
87
|
+
const child = spawn('node', [jsPath, ...process.argv.slice(3)], {
|
|
88
|
+
cwd: process.cwd(),
|
|
89
|
+
stdio: 'inherit',
|
|
90
|
+
shell: isWin,
|
|
91
|
+
env: {
|
|
92
|
+
...process.env,
|
|
93
|
+
JSTAR_CWD: process.cwd()
|
|
94
|
+
}
|
|
95
|
+
});
|
|
96
|
+
|
|
97
|
+
child.on('close', (code) => process.exit(code || 0));
|
|
98
|
+
child.on('error', (err) => {
|
|
99
|
+
log(`${COLORS.red}Error running script: ${err.message}${COLORS.reset}`);
|
|
100
|
+
process.exit(1);
|
|
101
|
+
});
|
|
102
|
+
return;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
if (!fs.existsSync(tsPath)) {
|
|
77
106
|
log(`${COLORS.red}Error: Script not found: ${scriptPath}${COLORS.reset}`);
|
|
78
107
|
process.exit(1);
|
|
79
108
|
}
|
|
80
109
|
|
|
81
|
-
//
|
|
110
|
+
// Fallback to ts-node if dist is not built (mostly for local development)
|
|
82
111
|
const hasPnpm = commandExists('pnpm');
|
|
83
112
|
const runner = hasPnpm ? 'pnpm' : 'npx';
|
|
84
113
|
const runnerArgs = hasPnpm ? ['dlx', 'ts-node'] : ['ts-node'];
|
|
85
114
|
|
|
86
|
-
log(`${COLORS.dim}Using ${runner} to run ${scriptName}...${COLORS.reset}`);
|
|
87
|
-
|
|
88
|
-
// On Windows, global commands like pnpm/npx are .cmd files.
|
|
89
|
-
// spawn without shell: true often fails with EINVAL or ENOENT on Windows.
|
|
90
|
-
// We use shell: true only on Windows for reliability.
|
|
91
|
-
const isWin = process.platform === 'win32';
|
|
115
|
+
log(`${COLORS.dim}Using ${runner} (fallback) to run ${scriptName}...${COLORS.reset}`);
|
|
92
116
|
|
|
93
|
-
const child = spawn(runner, [...runnerArgs,
|
|
117
|
+
const child = spawn(runner, [...runnerArgs, tsPath, ...process.argv.slice(3)], {
|
|
94
118
|
cwd: process.cwd(),
|
|
95
119
|
stdio: 'inherit',
|
|
96
120
|
shell: isWin,
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.Config = void 0;
|
|
7
|
+
const dotenv_1 = __importDefault(require("dotenv"));
|
|
8
|
+
// Load .env.local first, then .env
|
|
9
|
+
dotenv_1.default.config({ path: ".env.local" });
|
|
10
|
+
dotenv_1.default.config();
|
|
11
|
+
/**
|
|
12
|
+
* Default fallback values.
|
|
13
|
+
* These are intentional fallbacks when environment variables are not configured.
|
|
14
|
+
* Override via REVIEW_MODEL_NAME env var for production use.
|
|
15
|
+
*/
|
|
16
|
+
const DEFAULT_MODEL = "moonshotai/kimi-k2-instruct-0905";
|
|
17
|
+
exports.Config = {
|
|
18
|
+
MODEL_NAME: process.env.REVIEW_MODEL_NAME || DEFAULT_MODEL,
|
|
19
|
+
DEFAULT_SEVERITY: 'P2_MEDIUM',
|
|
20
|
+
THRESHOLDS: {
|
|
21
|
+
MEDIUM: 5
|
|
22
|
+
}
|
|
23
|
+
};
|
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* J-Star Dashboard Renderer
|
|
4
|
+
* Generates professional markdown dashboard from review findings
|
|
5
|
+
*/
|
|
6
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
7
|
+
exports.renderDashboard = renderDashboard;
|
|
8
|
+
exports.determineStatus = determineStatus;
|
|
9
|
+
exports.generateRecommendation = generateRecommendation;
|
|
10
|
+
const config_1 = require("./config");
|
|
11
|
+
const SEVERITY_EMOJI = {
|
|
12
|
+
'P0_CRITICAL': 'š',
|
|
13
|
+
'P1_HIGH': 'ā ļø',
|
|
14
|
+
'P2_MEDIUM': 'š',
|
|
15
|
+
'LGTM': 'ā
'
|
|
16
|
+
};
|
|
17
|
+
const SEVERITY_LABEL = {
|
|
18
|
+
'P0_CRITICAL': 'CRITICAL',
|
|
19
|
+
'P1_HIGH': 'HIGH',
|
|
20
|
+
'P2_MEDIUM': 'MEDIUM',
|
|
21
|
+
'LGTM': 'PASSED'
|
|
22
|
+
};
|
|
23
|
+
function getStatusEmoji(status) {
|
|
24
|
+
switch (status) {
|
|
25
|
+
case 'CRITICAL_FAILURE': return 'š“';
|
|
26
|
+
case 'NEEDS_REVIEW': return 'š”';
|
|
27
|
+
case 'APPROVED': return 'š¢';
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
function formatDate() {
|
|
31
|
+
return new Date().toISOString().split('T')[0];
|
|
32
|
+
}
|
|
33
|
+
/**
|
|
34
|
+
* Renders a single issue row for the markdown table.
|
|
35
|
+
* Includes fallback handling for unexpected severity values to ensure
|
|
36
|
+
* the dashboard renders gracefully even if parsing produced invalid data.
|
|
37
|
+
*/
|
|
38
|
+
function renderIssueRow(file, issue, severity) {
|
|
39
|
+
// Fallback to 'ā' and raw severity if the value is not in our known maps
|
|
40
|
+
const emoji = SEVERITY_EMOJI[severity] ?? 'ā';
|
|
41
|
+
const label = SEVERITY_LABEL[severity] ?? severity;
|
|
42
|
+
return `| \`${file}\` | ${emoji} **${label}** | ${issue.title} |`;
|
|
43
|
+
}
|
|
44
|
+
function renderFixPrompt(issue) {
|
|
45
|
+
if (!issue.fixPrompt)
|
|
46
|
+
return '';
|
|
47
|
+
return `
|
|
48
|
+
<details>
|
|
49
|
+
<summary>š¤ <strong>Fix Prompt:</strong> ${issue.title}</summary>
|
|
50
|
+
|
|
51
|
+
\`\`\`
|
|
52
|
+
${issue.fixPrompt}
|
|
53
|
+
\`\`\`
|
|
54
|
+
|
|
55
|
+
</details>
|
|
56
|
+
`;
|
|
57
|
+
}
|
|
58
|
+
/**
|
|
59
|
+
* Validates that the report object has the required structure.
|
|
60
|
+
* This guards against runtime errors from malformed LLM responses or corrupted data.
|
|
61
|
+
*/
|
|
62
|
+
function validateReport(report) {
|
|
63
|
+
if (!report || typeof report !== 'object')
|
|
64
|
+
return false;
|
|
65
|
+
const r = report;
|
|
66
|
+
// Check required fields exist
|
|
67
|
+
if (typeof r.status !== 'string')
|
|
68
|
+
return false;
|
|
69
|
+
if (typeof r.recommendedAction !== 'string')
|
|
70
|
+
return false;
|
|
71
|
+
if (!Array.isArray(r.findings))
|
|
72
|
+
return false;
|
|
73
|
+
if (!r.metrics || typeof r.metrics !== 'object')
|
|
74
|
+
return false;
|
|
75
|
+
// Validate metrics structure
|
|
76
|
+
const m = r.metrics;
|
|
77
|
+
const requiredMetrics = ['filesScanned', 'totalTokens', 'violations', 'critical', 'high', 'medium', 'lgtm'];
|
|
78
|
+
for (const key of requiredMetrics) {
|
|
79
|
+
if (typeof m[key] !== 'number')
|
|
80
|
+
return false;
|
|
81
|
+
}
|
|
82
|
+
return true;
|
|
83
|
+
}
|
|
84
|
+
function renderDashboard(report) {
|
|
85
|
+
// Runtime validation to catch malformed reports early
|
|
86
|
+
if (!validateReport(report)) {
|
|
87
|
+
throw new Error('Invalid DashboardReport: missing or malformed required fields');
|
|
88
|
+
}
|
|
89
|
+
const { metrics, findings, status, recommendedAction } = report;
|
|
90
|
+
// Group findings by severity
|
|
91
|
+
const critical = findings.filter(f => f.severity === 'P0_CRITICAL');
|
|
92
|
+
const high = findings.filter(f => f.severity === 'P1_HIGH');
|
|
93
|
+
const medium = findings.filter(f => f.severity === 'P2_MEDIUM');
|
|
94
|
+
const lgtm = findings.filter(f => f.severity === 'LGTM');
|
|
95
|
+
let md = `# š J-STAR CODE REVIEW DASHBOARD
|
|
96
|
+
|
|
97
|
+
**Date:** \`${formatDate()}\` | **Reviewer:** \`Detective Engine & Judge\` | **Status:** ${getStatusEmoji(status)} **${status.replace('_', ' ')}**
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## 1. š EXECUTIVE SUMMARY
|
|
102
|
+
|
|
103
|
+
| Metric | Value | Status |
|
|
104
|
+
| --- | --- | --- |
|
|
105
|
+
| **Files Scanned** | **${metrics.filesScanned}** | š Complete |
|
|
106
|
+
| **Total Tokens** | **~${metrics.totalTokens.toLocaleString()}** | āļø Processed |
|
|
107
|
+
| **Total Violations** | **${metrics.violations}** | ${metrics.violations > 0 ? 'šØ Action Required' : 'ā
Clean'} |
|
|
108
|
+
| **Critical (P0)** | **${metrics.critical}** | ${metrics.critical > 0 ? 'š **BLOCKER**' : 'ā
None'} |
|
|
109
|
+
| **High (P1)** | **${metrics.high}** | ${metrics.high > 0 ? 'ā ļø Needs Fix' : 'ā
None'} |
|
|
110
|
+
| **Medium (P2)** | **${metrics.medium}** | ${metrics.medium > 0 ? 'š Review' : 'ā
None'} |
|
|
111
|
+
| **Passed (LGTM)** | **${metrics.lgtm}** | ā
Clean |
|
|
112
|
+
|
|
113
|
+
---
|
|
114
|
+
|
|
115
|
+
`;
|
|
116
|
+
// Critical Section
|
|
117
|
+
if (critical.length > 0) {
|
|
118
|
+
md += `## 2. š CRITICAL SECURITY VULNERABILITIES (P0)
|
|
119
|
+
|
|
120
|
+
> **These files contain blockers that must be fixed before any merge.**
|
|
121
|
+
|
|
122
|
+
| File | Severity | Issue |
|
|
123
|
+
| --- | --- | --- |
|
|
124
|
+
`;
|
|
125
|
+
for (const finding of critical) {
|
|
126
|
+
for (const issue of finding.issues) {
|
|
127
|
+
md += renderIssueRow(finding.file, issue, 'P0_CRITICAL') + '\n';
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
md += '\n### š¤ Fix Prompts (P0)\n\n';
|
|
131
|
+
for (const finding of critical) {
|
|
132
|
+
for (const issue of finding.issues) {
|
|
133
|
+
md += renderFixPrompt(issue);
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
md += '\n---\n\n';
|
|
137
|
+
}
|
|
138
|
+
// High Section
|
|
139
|
+
if (high.length > 0) {
|
|
140
|
+
md += `## 3. ā ļø HIGH PRIORITY ISSUES (P1)
|
|
141
|
+
|
|
142
|
+
> **Architecture and logic issues requiring significant attention.**
|
|
143
|
+
|
|
144
|
+
| File | Severity | Issue |
|
|
145
|
+
| --- | --- | --- |
|
|
146
|
+
`;
|
|
147
|
+
for (const finding of high) {
|
|
148
|
+
for (const issue of finding.issues) {
|
|
149
|
+
md += renderIssueRow(finding.file, issue, 'P1_HIGH') + '\n';
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
md += '\n### š¤ Fix Prompts (P1)\n\n';
|
|
153
|
+
for (const finding of high) {
|
|
154
|
+
for (const issue of finding.issues) {
|
|
155
|
+
md += renderFixPrompt(issue);
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
md += '\n---\n\n';
|
|
159
|
+
}
|
|
160
|
+
// Medium Section
|
|
161
|
+
if (medium.length > 0) {
|
|
162
|
+
md += `## 4. š MEDIUM PRIORITY ISSUES (P2)
|
|
163
|
+
|
|
164
|
+
> **Code quality and maintenance items.**
|
|
165
|
+
|
|
166
|
+
| File | Severity | Issue |
|
|
167
|
+
| --- | --- | --- |
|
|
168
|
+
`;
|
|
169
|
+
for (const finding of medium) {
|
|
170
|
+
for (const issue of finding.issues) {
|
|
171
|
+
md += renderIssueRow(finding.file, issue, 'P2_MEDIUM') + '\n';
|
|
172
|
+
}
|
|
173
|
+
}
|
|
174
|
+
md += '\n---\n\n';
|
|
175
|
+
}
|
|
176
|
+
// LGTM Section
|
|
177
|
+
if (lgtm.length > 0) {
|
|
178
|
+
md += `## 5. ā
PASSED REVIEW (LGTM)
|
|
179
|
+
|
|
180
|
+
> **No issues found in these files.**
|
|
181
|
+
|
|
182
|
+
`;
|
|
183
|
+
for (const finding of lgtm) {
|
|
184
|
+
md += `- \`${finding.file}\`\n`;
|
|
185
|
+
}
|
|
186
|
+
md += '\n---\n\n';
|
|
187
|
+
}
|
|
188
|
+
// Recommended Action
|
|
189
|
+
md += `## šÆ RECOMMENDED ACTION
|
|
190
|
+
|
|
191
|
+
> ${recommendedAction}
|
|
192
|
+
|
|
193
|
+
---
|
|
194
|
+
|
|
195
|
+
*Generated by J-Star Code Reviewer v2*
|
|
196
|
+
`;
|
|
197
|
+
return md;
|
|
198
|
+
}
|
|
199
|
+
function determineStatus(metrics) {
|
|
200
|
+
if (metrics.critical > 0)
|
|
201
|
+
return 'CRITICAL_FAILURE';
|
|
202
|
+
if (metrics.high > 0 || metrics.medium > config_1.Config.THRESHOLDS.MEDIUM)
|
|
203
|
+
return 'NEEDS_REVIEW';
|
|
204
|
+
return 'APPROVED';
|
|
205
|
+
}
|
|
206
|
+
function generateRecommendation(metrics) {
|
|
207
|
+
if (metrics.critical > 0) {
|
|
208
|
+
return `**BLOCK MERGE:** Fix ${metrics.critical} critical issue(s) immediately. Review P0 fix prompts above.`;
|
|
209
|
+
}
|
|
210
|
+
if (metrics.high > 0) {
|
|
211
|
+
return `**Request Changes:** Address ${metrics.high} high-priority issue(s) before merging.`;
|
|
212
|
+
}
|
|
213
|
+
if (metrics.medium > 0) {
|
|
214
|
+
return `**Approve with Notes:** ${metrics.medium} medium issues found. Consider fixing in follow-up PR.`;
|
|
215
|
+
}
|
|
216
|
+
return `**Approve:** All files passed review. Ship it! š`;
|
|
217
|
+
}
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
36
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
|
+
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
+
exports.Detective = void 0;
|
|
40
|
+
const fs = __importStar(require("fs"));
|
|
41
|
+
const path = __importStar(require("path"));
|
|
42
|
+
const chalk_1 = __importDefault(require("chalk"));
|
|
43
|
+
const RULES = [
|
|
44
|
+
{
|
|
45
|
+
id: 'SEC-001',
|
|
46
|
+
severity: 'high',
|
|
47
|
+
message: 'Possible Hardcoded Secret detected',
|
|
48
|
+
pattern: /(api_key|secret|password|token)\s*[:=]\s*['"`][a-zA-Z0-9_\-\.]{10,}['"`]/i
|
|
49
|
+
},
|
|
50
|
+
{
|
|
51
|
+
id: 'ARCH-001',
|
|
52
|
+
severity: 'medium',
|
|
53
|
+
message: 'Avoid using console.log in production code',
|
|
54
|
+
pattern: /console\.log\(/
|
|
55
|
+
},
|
|
56
|
+
];
|
|
57
|
+
// File-level rules that check the whole content
|
|
58
|
+
const FILE_RULES = [
|
|
59
|
+
{
|
|
60
|
+
id: 'ARCH-002',
|
|
61
|
+
severity: 'high',
|
|
62
|
+
message: 'Next.js "use client" must be at the very top of the file',
|
|
63
|
+
pattern: /^(?!['"]use client['"]).*['"]use client['"]/s,
|
|
64
|
+
filePattern: /\.tsx?$/
|
|
65
|
+
}
|
|
66
|
+
];
|
|
67
|
+
class Detective {
|
|
68
|
+
constructor(directory) {
|
|
69
|
+
this.directory = directory;
|
|
70
|
+
this.violations = [];
|
|
71
|
+
}
|
|
72
|
+
async scan() {
|
|
73
|
+
this.walk(this.directory);
|
|
74
|
+
return this.violations;
|
|
75
|
+
}
|
|
76
|
+
walk(dir) {
|
|
77
|
+
if (!fs.existsSync(dir))
|
|
78
|
+
return;
|
|
79
|
+
const files = fs.readdirSync(dir);
|
|
80
|
+
for (const file of files) {
|
|
81
|
+
const filePath = path.join(dir, file);
|
|
82
|
+
const stat = fs.statSync(filePath);
|
|
83
|
+
if (stat.isDirectory()) {
|
|
84
|
+
if (file !== 'node_modules' && file !== '.git' && file !== '.jstar') {
|
|
85
|
+
this.walk(filePath);
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
else {
|
|
89
|
+
this.checkFile(filePath);
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
checkFile(filePath) {
|
|
94
|
+
if (!filePath.match(/\.(ts|tsx|js|jsx)$/))
|
|
95
|
+
return;
|
|
96
|
+
const content = fs.readFileSync(filePath, 'utf-8');
|
|
97
|
+
const lines = content.split('\n');
|
|
98
|
+
// Line-based rules
|
|
99
|
+
for (const rule of RULES) {
|
|
100
|
+
if (rule.filePattern && !filePath.match(rule.filePattern))
|
|
101
|
+
continue;
|
|
102
|
+
lines.forEach((line, index) => {
|
|
103
|
+
if (rule.pattern.test(line)) {
|
|
104
|
+
this.addViolation(filePath, index + 1, rule);
|
|
105
|
+
}
|
|
106
|
+
});
|
|
107
|
+
}
|
|
108
|
+
// File-based rules
|
|
109
|
+
for (const rule of FILE_RULES) {
|
|
110
|
+
if (rule.filePattern && !filePath.match(rule.filePattern))
|
|
111
|
+
continue;
|
|
112
|
+
if (rule.pattern.test(content)) {
|
|
113
|
+
this.addViolation(filePath, 1, rule);
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
}
|
|
117
|
+
addViolation(filePath, line, rule) {
|
|
118
|
+
this.violations.push({
|
|
119
|
+
file: path.relative(process.cwd(), filePath),
|
|
120
|
+
line,
|
|
121
|
+
message: rule.message,
|
|
122
|
+
severity: rule.severity,
|
|
123
|
+
code: rule.id
|
|
124
|
+
});
|
|
125
|
+
}
|
|
126
|
+
report() {
|
|
127
|
+
if (this.violations.length === 0) {
|
|
128
|
+
console.log(chalk_1.default.green("ā
Detective Engine: No violations found."));
|
|
129
|
+
return;
|
|
130
|
+
}
|
|
131
|
+
console.log(chalk_1.default.red(`šØ Detective Engine found ${this.violations.length} violations:`));
|
|
132
|
+
// Only show first 10 to avoid wall of text
|
|
133
|
+
const total = this.violations.length;
|
|
134
|
+
const toShow = this.violations.slice(0, 10);
|
|
135
|
+
toShow.forEach(v => {
|
|
136
|
+
const color = v.severity === 'high' ? chalk_1.default.red : chalk_1.default.yellow;
|
|
137
|
+
console.log(color(`[${v.code}] ${v.file}:${v.line} - ${v.message}`));
|
|
138
|
+
});
|
|
139
|
+
if (total > 10) {
|
|
140
|
+
console.log(chalk_1.default.dim(`... and ${total - 10} more.`));
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
}
|
|
144
|
+
exports.Detective = Detective;
|
|
145
|
+
// CLI Integration
|
|
146
|
+
if (require.main === module) {
|
|
147
|
+
const detective = new Detective(path.join(process.cwd(), 'src'));
|
|
148
|
+
detective.scan();
|
|
149
|
+
detective.report();
|
|
150
|
+
}
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.GeminiEmbedding = void 0;
|
|
4
|
+
const generative_ai_1 = require("@google/generative-ai");
|
|
5
|
+
class GeminiEmbedding {
|
|
6
|
+
constructor() {
|
|
7
|
+
// Stubs for BaseEmbedding compliance
|
|
8
|
+
this.embedBatchSize = 10;
|
|
9
|
+
const apiKey = process.env.GOOGLE_API_KEY;
|
|
10
|
+
if (!apiKey) {
|
|
11
|
+
throw new Error("GOOGLE_API_KEY is missing from environment variables.");
|
|
12
|
+
}
|
|
13
|
+
this.genAI = new generative_ai_1.GoogleGenerativeAI(apiKey);
|
|
14
|
+
// User requested 'text-embedding-004', which has better rate limits
|
|
15
|
+
this.model = this.genAI.getGenerativeModel({ model: "text-embedding-004" });
|
|
16
|
+
}
|
|
17
|
+
async getTextEmbedding(text) {
|
|
18
|
+
// Retry logic for transient network errors
|
|
19
|
+
let retries = 0;
|
|
20
|
+
const maxRetries = 3;
|
|
21
|
+
while (retries < maxRetries) {
|
|
22
|
+
try {
|
|
23
|
+
const result = await this.model.embedContent(text);
|
|
24
|
+
return result.embedding.values;
|
|
25
|
+
}
|
|
26
|
+
catch (e) {
|
|
27
|
+
if (e.message.includes("fetch failed") || e.message.includes("network")) {
|
|
28
|
+
retries++;
|
|
29
|
+
const waitTime = Math.pow(2, retries) * 1000;
|
|
30
|
+
console.warn(`ā ļø Network error. Retrying in ${waitTime / 1000}s... (${retries}/${maxRetries})`);
|
|
31
|
+
await new Promise(resolve => setTimeout(resolve, waitTime));
|
|
32
|
+
}
|
|
33
|
+
else {
|
|
34
|
+
throw e;
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
throw new Error("Max retries exceeded for embedding request.");
|
|
39
|
+
}
|
|
40
|
+
async getQueryEmbedding(query) {
|
|
41
|
+
return this.getTextEmbedding(query);
|
|
42
|
+
}
|
|
43
|
+
async getTextEmbeddings(texts) {
|
|
44
|
+
const embeddings = [];
|
|
45
|
+
console.log(`Creating embeddings for ${texts.length} chunks (Batching to avoid rate limits)...`);
|
|
46
|
+
// Process in smaller batches with delay
|
|
47
|
+
const BATCH_SIZE = 1; // Strict serial for safety on free tier
|
|
48
|
+
const DELAY_MS = 1000; // 1s delay between calls
|
|
49
|
+
for (let i = 0; i < texts.length; i += BATCH_SIZE) {
|
|
50
|
+
const batch = texts.slice(i, i + BATCH_SIZE);
|
|
51
|
+
for (const text of batch) {
|
|
52
|
+
let retries = 0;
|
|
53
|
+
let success = false;
|
|
54
|
+
while (!success && retries < 5) {
|
|
55
|
+
try {
|
|
56
|
+
const embedding = await this.getTextEmbedding(text);
|
|
57
|
+
embeddings.push(embedding);
|
|
58
|
+
success = true;
|
|
59
|
+
// Standard delay between calls
|
|
60
|
+
await new Promise(resolve => setTimeout(resolve, DELAY_MS));
|
|
61
|
+
process.stdout.write("."); // Progress indicator
|
|
62
|
+
}
|
|
63
|
+
catch (e) {
|
|
64
|
+
if (e.message.includes("429") || e.message.includes("quota")) {
|
|
65
|
+
retries++;
|
|
66
|
+
const waitTime = Math.pow(2, retries) * 2000; // 2s, 4s, 8s, 16s...
|
|
67
|
+
console.warn(`\nā ļø Rate limit hit. Retrying in ${waitTime / 1000}s...`);
|
|
68
|
+
await new Promise(resolve => setTimeout(resolve, waitTime));
|
|
69
|
+
}
|
|
70
|
+
else {
|
|
71
|
+
console.error("\nā Embedding failed irreversibly:", e.message);
|
|
72
|
+
throw e;
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
if (!success) {
|
|
77
|
+
throw new Error("Max retries exceeded for rate limits.");
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
console.log("\nā
Done embedding.");
|
|
82
|
+
return embeddings;
|
|
83
|
+
}
|
|
84
|
+
similarity(embedding1, embedding2) {
|
|
85
|
+
return embedding1.reduce((sum, val, i) => sum + val * embedding2[i], 0);
|
|
86
|
+
}
|
|
87
|
+
async transform(nodes, _options) {
|
|
88
|
+
for (const node of nodes) {
|
|
89
|
+
node.embedding = await this.getTextEmbedding(node.getContent("text"));
|
|
90
|
+
}
|
|
91
|
+
return nodes;
|
|
92
|
+
}
|
|
93
|
+
async getTextEmbeddingsBatch(texts) { return this.getTextEmbeddings(texts); }
|
|
94
|
+
}
|
|
95
|
+
exports.GeminiEmbedding = GeminiEmbedding;
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
36
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
|
+
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
+
const llamaindex_1 = require("llamaindex");
|
|
40
|
+
const gemini_embedding_1 = require("./gemini-embedding");
|
|
41
|
+
const mock_llm_1 = require("./mock-llm");
|
|
42
|
+
const path = __importStar(require("path"));
|
|
43
|
+
const fs = __importStar(require("fs"));
|
|
44
|
+
const chalk_1 = __importDefault(require("chalk"));
|
|
45
|
+
const dotenv_1 = __importDefault(require("dotenv"));
|
|
46
|
+
// Load .env.local first, then .env
|
|
47
|
+
dotenv_1.default.config({ path: ".env.local" });
|
|
48
|
+
dotenv_1.default.config();
|
|
49
|
+
// Configuration
|
|
50
|
+
const STORAGE_DIR = path.join(process.cwd(), ".jstar", "storage");
|
|
51
|
+
const SOURCE_DIR = path.join(process.cwd(), "scripts"); // Changed from src/ to scripts/
|
|
52
|
+
// Ensure OpenAI Key exists (LlamaIndex default) or fallback if we configured something else
|
|
53
|
+
// (We are using local now, so this check is less critical, but good to have if we revert)
|
|
54
|
+
// if (!process.env.OPENAI_API_KEY) {
|
|
55
|
+
// console.warn(chalk.yellow("ā ļø OPENAI_API_KEY not found. Embeddings may fail unless you have configured a local model."));
|
|
56
|
+
// }
|
|
57
|
+
async function main() {
|
|
58
|
+
const args = process.argv.slice(2);
|
|
59
|
+
const isWatch = args.includes("--watch");
|
|
60
|
+
console.log(chalk_1.default.blue("š§ J-Star Indexer: Scanning codebase..."));
|
|
61
|
+
// 1. Load documents (Your Code)
|
|
62
|
+
if (!fs.existsSync(SOURCE_DIR)) {
|
|
63
|
+
console.error(chalk_1.default.red(`ā Source directory not found: ${SOURCE_DIR}`));
|
|
64
|
+
process.exit(1);
|
|
65
|
+
}
|
|
66
|
+
const reader = new llamaindex_1.SimpleDirectoryReader();
|
|
67
|
+
const documents = await reader.loadData({ directoryPath: SOURCE_DIR });
|
|
68
|
+
console.log(chalk_1.default.yellow(`š Found ${documents.length} files to index.`));
|
|
69
|
+
const isInit = args.includes("--init");
|
|
70
|
+
try {
|
|
71
|
+
// 2. Setup Service Context with Google Gemini Embeddings
|
|
72
|
+
// using 'models/text-embedding-004' which is a strong, recent model
|
|
73
|
+
const embedModel = new gemini_embedding_1.GeminiEmbedding();
|
|
74
|
+
const llm = new mock_llm_1.MockLLM();
|
|
75
|
+
const serviceContext = (0, llamaindex_1.serviceContextFromDefaults)({
|
|
76
|
+
embedModel,
|
|
77
|
+
llm: llm
|
|
78
|
+
});
|
|
79
|
+
// 3. Create the Storage Context
|
|
80
|
+
let storageContext;
|
|
81
|
+
if (isInit) {
|
|
82
|
+
console.log(chalk_1.default.blue("⨠Initializing fresh Local Brain..."));
|
|
83
|
+
storageContext = await (0, llamaindex_1.storageContextFromDefaults)({});
|
|
84
|
+
}
|
|
85
|
+
else {
|
|
86
|
+
// Try to load
|
|
87
|
+
if (!fs.existsSync(STORAGE_DIR)) {
|
|
88
|
+
console.log(chalk_1.default.yellow("ā ļø Storage not found. Running fresh init..."));
|
|
89
|
+
storageContext = await (0, llamaindex_1.storageContextFromDefaults)({});
|
|
90
|
+
}
|
|
91
|
+
else {
|
|
92
|
+
storageContext = await (0, llamaindex_1.storageContextFromDefaults)({
|
|
93
|
+
persistDir: STORAGE_DIR,
|
|
94
|
+
});
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
// 4. Generate the Index
|
|
98
|
+
const index = await llamaindex_1.VectorStoreIndex.fromDocuments(documents, {
|
|
99
|
+
storageContext,
|
|
100
|
+
serviceContext,
|
|
101
|
+
});
|
|
102
|
+
// 4. Persist (Save the Brain)
|
|
103
|
+
// Manual persistence for LlamaIndex TS compatibility
|
|
104
|
+
const ctxToPersist = index.storageContext;
|
|
105
|
+
if (ctxToPersist.docStore)
|
|
106
|
+
await ctxToPersist.docStore.persist(path.join(STORAGE_DIR, "doc_store.json"));
|
|
107
|
+
if (ctxToPersist.vectorStore)
|
|
108
|
+
await ctxToPersist.vectorStore.persist(path.join(STORAGE_DIR, "vector_store.json"));
|
|
109
|
+
if (ctxToPersist.indexStore)
|
|
110
|
+
await ctxToPersist.indexStore.persist(path.join(STORAGE_DIR, "index_store.json"));
|
|
111
|
+
if (ctxToPersist.propStore)
|
|
112
|
+
await ctxToPersist.propStore.persist(path.join(STORAGE_DIR, "property_store.json"));
|
|
113
|
+
console.log(chalk_1.default.green("ā
Indexing Complete. Brain is updated."));
|
|
114
|
+
if (isWatch) {
|
|
115
|
+
console.log(chalk_1.default.blue("š Watch mode enabled."));
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
catch (e) {
|
|
119
|
+
console.error(chalk_1.default.red("ā Indexing Failed:"), e.message);
|
|
120
|
+
if (e.message.includes("OpenAI")) {
|
|
121
|
+
console.log(chalk_1.default.yellow("š Tip: Make sure you have OPENAI_API_KEY in your .env file (or configure a local embedding model)."));
|
|
122
|
+
}
|
|
123
|
+
process.exit(1);
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
main().catch(console.error);
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.LocalEmbedding = void 0;
|
|
4
|
+
const transformers_1 = require("@xenova/transformers");
|
|
5
|
+
// Skip local model checks if needed, or let it download
|
|
6
|
+
transformers_1.env.allowLocalModels = false;
|
|
7
|
+
transformers_1.env.useBrowserCache = false;
|
|
8
|
+
class LocalEmbedding {
|
|
9
|
+
constructor() {
|
|
10
|
+
// Stubs for BaseEmbedding interface compliance
|
|
11
|
+
this.embedBatchSize = 10;
|
|
12
|
+
this.modelName = "Xenova/bge-small-en-v1.5";
|
|
13
|
+
}
|
|
14
|
+
async init() {
|
|
15
|
+
if (!this.pipe) {
|
|
16
|
+
console.log("š„ Loading local embedding model (Xenova/bge-small-en-v1.5)...");
|
|
17
|
+
this.pipe = await (0, transformers_1.pipeline)("feature-extraction", this.modelName);
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
async getTextEmbedding(text) {
|
|
21
|
+
await this.init();
|
|
22
|
+
const result = await this.pipe(text, { pooling: "mean", normalize: true });
|
|
23
|
+
return Array.from(result.data);
|
|
24
|
+
}
|
|
25
|
+
async getQueryEmbedding(query) {
|
|
26
|
+
return this.getTextEmbedding(query);
|
|
27
|
+
}
|
|
28
|
+
// Batch method (Required by LlamaIndex)
|
|
29
|
+
async getTextEmbeddings(texts) {
|
|
30
|
+
await this.init();
|
|
31
|
+
const embeddings = [];
|
|
32
|
+
for (const text of texts) {
|
|
33
|
+
embeddings.push(await this.getTextEmbedding(text));
|
|
34
|
+
}
|
|
35
|
+
return embeddings;
|
|
36
|
+
}
|
|
37
|
+
similarity(embedding1, embedding2) {
|
|
38
|
+
// Simple dot product for normalized vectors
|
|
39
|
+
return embedding1.reduce((sum, val, i) => sum + val * embedding2[i], 0);
|
|
40
|
+
}
|
|
41
|
+
async transform(nodes, _options) {
|
|
42
|
+
for (const node of nodes) {
|
|
43
|
+
node.embedding = await this.getTextEmbedding(node.getContent("text"));
|
|
44
|
+
}
|
|
45
|
+
return nodes;
|
|
46
|
+
}
|
|
47
|
+
async getTextEmbeddingsBatch(texts) { return this.getTextEmbeddings(texts); }
|
|
48
|
+
}
|
|
49
|
+
exports.LocalEmbedding = LocalEmbedding;
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.MockLLM = void 0;
|
|
4
|
+
/**
 * No-op LLM stub that satisfies LlamaIndex's LLM interface.
 * Used when indexing/retrieval is wanted without any real generation:
 * every chat/complete call returns the fixed string "Mock response".
 */
class MockLLM {
    constructor() {
        this.hasStreaming = false;
        this.metadata = {
            model: "mock",
            temperature: 0,
            topP: 1,
            contextWindow: 1024,
            tokenizer: undefined,
        };
    }
    /** Chat entry point; input messages are ignored entirely. */
    async chat(_messages, _parentEvent) {
        const reply = { content: "Mock response" };
        return { message: reply };
    }
    /** Completion entry point; the prompt is ignored entirely. */
    async complete(_prompt, _parentEvent) {
        return { text: "Mock response" };
    }
}
|
|
22
|
+
exports.MockLLM = MockLLM;
|
|
@@ -0,0 +1,287 @@
|
|
|
1
|
+
"use strict";
// --- TypeScript-emitted CommonJS interop helpers (generated by tsc; do not edit by hand) ---
// __createBinding: re-export a property from module `m` onto `o`, preserving
// live-binding semantics via a getter when property descriptors are available.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    // Fallback for engines without Object.create: plain value copy.
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// __setModuleDefault: attach the CJS module object as the `default` export.
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// __importStar: emulate `import * as ns from "mod"` over a CommonJS module —
// copies every own key except "default", then sets the default binding.
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
// __importDefault: emulate `import x from "mod"` — wrap plain CJS exports
// in a { default: ... } shell unless the module is already an ES module.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
// External SDKs: Vercel AI SDK core plus Groq/Google provider factories.
const ai_1 = require("ai");
const groq_1 = require("@ai-sdk/groq");
const google_1 = require("@ai-sdk/google");
const path = __importStar(require("path"));
const fs = __importStar(require("fs"));
const chalk_1 = __importDefault(require("chalk"));
const simple_git_1 = __importDefault(require("simple-git"));
// Sibling modules of the reviewer pipeline.
const config_1 = require("./config");
const detective_1 = require("./detective");
const gemini_embedding_1 = require("./gemini-embedding");
const mock_llm_1 = require("./mock-llm");
const dashboard_1 = require("./dashboard");
const llamaindex_1 = require("llamaindex");
// Provider clients. API keys come from the environment; the factories do not
// fail fast here — a missing key surfaces later at request time.
// NOTE(review): `google` appears unused in this module — confirm before removing.
const google = (0, google_1.createGoogleGenerativeAI)({ apiKey: process.env.GOOGLE_API_KEY });
const groq = (0, groq_1.createGroq)({ apiKey: process.env.GROQ_API_KEY });
// Retrieval uses Gemini embeddings; generation goes through Groq directly,
// so LlamaIndex's LLM slot is filled with a no-op mock.
const embedModel = new gemini_embedding_1.GeminiEmbedding();
const llm = new mock_llm_1.MockLLM();
const serviceContext = (0, llamaindex_1.serviceContextFromDefaults)({ embedModel, llm: llm });
// All paths are resolved against the directory the CLI is invoked from.
const STORAGE_DIR = path.join(process.cwd(), ".jstar", "storage");
const SOURCE_DIR = path.join(process.cwd(), "scripts");
const OUTPUT_FILE = path.join(process.cwd(), ".jstar", "last-review.md");
const git = (0, simple_git_1.default)();
// --- Config ---
const MODEL_NAME = config_1.Config.MODEL_NAME;
// Per-request budget guard; chunks above this are reported, not reviewed.
const MAX_TOKENS_PER_REQUEST = 8000;
// Rough heuristic: ~4 characters per token for English/code text.
const CHARS_PER_TOKEN = 4;
// Pause between chunk requests to stay under provider rate limits.
const DELAY_BETWEEN_CHUNKS_MS = 2000;
// --- Helpers ---
|
|
67
|
+
/**
 * Rough token estimate for a string using the module-wide
 * chars-per-token heuristic. Always rounds up.
 */
function estimateTokens(text) {
    const approx = text.length / CHARS_PER_TOKEN;
    return Math.ceil(approx);
}
|
|
70
|
+
// File-name patterns that never get an LLM review: lockfiles, secrets,
// plain data/docs, dependencies, and the tool's own output directory.
const EXCLUDED_PATTERNS = [
    /pnpm-lock\.yaml/,
    /package-lock\.json/,
    /yarn\.lock/,
    /\.env/,
    /\.json$/,
    /\.txt$/,
    /\.md$/,
    /node_modules/,
    /\.jstar\//,
];
/**
 * True when the given file name/path matches any exclusion pattern.
 */
function shouldSkipFile(fileName) {
    for (const pattern of EXCLUDED_PATTERNS) {
        if (pattern.test(fileName)) {
            return true;
        }
    }
    return false;
}
|
|
84
|
+
/**
 * Split a unified git diff into one chunk per file.
 * Uses a zero-width lookahead so each chunk keeps its own
 * "diff --git" header line; empty fragments are dropped.
 */
function chunkDiffByFile(diff) {
    const parts = diff.split(/(?=^diff --git)/gm);
    return parts.filter((part) => part.length > 0);
}
|
|
87
|
+
/**
 * Promise-based delay; resolves (with undefined) after `ms` milliseconds.
 */
function sleep(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
|
|
90
|
+
/**
 * Parse the judge LLM's reply into { severity, issues }.
 *
 * Strategy, in order:
 *  1. Extract the first {...} span and JSON.parse it; accept only if it has
 *     a known severity and an issues array.
 *  2. If the text signals approval ("LGTM" / check mark), treat as clean.
 *  3. Otherwise wrap the raw text in a single default-severity issue.
 */
function parseReviewResponse(text) {
    const VALID_SEVERITIES = ['P0_CRITICAL', 'P1_HIGH', 'P2_MEDIUM', 'LGTM'];
    try {
        // Try to extract JSON from the response
        const jsonMatch = text.match(/\{[\s\S]*\}/);
        if (jsonMatch) {
            const parsed = JSON.parse(jsonMatch[0]);
            // Validate structure before trusting it
            const structureOk = parsed &&
                typeof parsed === 'object' &&
                Array.isArray(parsed.issues) &&
                VALID_SEVERITIES.includes(parsed.severity);
            if (structureOk) {
                return {
                    severity: parsed.severity,
                    issues: parsed.issues
                };
            }
        }
    }
    catch (e) {
        // Parse failed, try to extract from markdown
    }
    // Fallback: If "LGTM" in text, it's clean
    if (text.includes('LGTM') || text.includes('ā')) {
        return { severity: 'LGTM', issues: [] };
    }
    // Otherwise, assume there are issues (treat as medium)
    return {
        severity: config_1.Config.DEFAULT_SEVERITY,
        issues: [{
                title: 'Review Notes',
                description: text.slice(0, 500),
                fixPrompt: 'Review the file and address the issues mentioned above.'
            }]
    };
}
|
|
125
|
+
// --- Main ---
|
|
126
|
+
/**
 * Orchestrates one full review run:
 *  0. heuristic Detective scan of the scripts directory
 *  1. read the staged git diff (aborts if nothing is staged)
 *  2. load the persisted vector index ("the Brain")
 *  3. retrieve related context using import paths found in the diff
 *  4. split the diff per file and budget tokens
 *  5. send each eligible chunk to the judge model, collecting findings
 *  6-7. aggregate metrics, render the dashboard, write it to disk, and
 *       print a console summary.
 */
async function main() {
    console.log(chalk_1.default.blue("šµļø J-Star Reviewer: Analyzing your changes...\n"));
    // 0. Detective
    console.log(chalk_1.default.blue("š Running Detective Engine..."));
    const detective = new detective_1.Detective(SOURCE_DIR);
    await detective.scan();
    detective.report();
    // 1. Get the Diff
    const diff = await git.diff(["--staged"]);
    if (!diff) {
        console.log(chalk_1.default.green("\nā No staged changes to review. (Did you 'git add'?)"));
        return;
    }
    // 2. Load the Brain
    if (!fs.existsSync(STORAGE_DIR)) {
        console.error(chalk_1.default.red("ā Local Brain not found. Run 'pnpm run index:init' first."));
        return;
    }
    const storageContext = await (0, llamaindex_1.storageContextFromDefaults)({ persistDir: STORAGE_DIR });
    const index = await llamaindex_1.VectorStoreIndex.init({ storageContext, serviceContext });
    // 3. Retrieval — query the index with module paths imported in the diff.
    const retriever = index.asRetriever({ similarityTopK: 1 });
    const keywords = (diff.match(/import .* from ['"](.*)['"]/g) || [])
        .map(s => s.replace(/import .* from ['"](.*)['"]/, '$1'))
        .join(" ").slice(0, 300) || "general context";
    const contextNodes = await retriever.retrieve(keywords);
    const relatedContext = contextNodes.map(n => n.node.getContent(llamaindex_1.MetadataMode.NONE).slice(0, 1500)).join("\n");
    console.log(chalk_1.default.yellow(`\nš§ Found ${contextNodes.length} context chunk.`));
    // 4. Chunk the Diff
    const fileChunks = chunkDiffByFile(diff);
    const totalTokens = estimateTokens(diff);
    console.log(chalk_1.default.dim(`   Total diff: ~${totalTokens} tokens across ${fileChunks.length} files.`));
    // 5. Structured JSON Prompt
    const systemPrompt = `You are J-Star, a Senior Code Reviewer. Be direct and professional.

Analyze the Git Diff and return a JSON response with this EXACT structure:
{
  "severity": "P0_CRITICAL" | "P1_HIGH" | "P2_MEDIUM" | "LGTM",
  "issues": [
    {
      "title": "Short issue title",
      "description": "Detailed description of the problem",
      "line": 42,
      "fixPrompt": "A specific prompt an AI can use to fix this issue"
    }
  ]
}

SEVERITY GUIDE:
- P0_CRITICAL: Security vulnerabilities, data leaks, auth bypass, SQL injection
- P1_HIGH: Missing validation, race conditions, architectural violations
- P2_MEDIUM: Code quality, missing types, cleanup needed
- LGTM: No issues found (return empty issues array)

IMPORTANT:
- Return ONLY valid JSON, no markdown or explanation
- Each issue MUST have a fixPrompt that explains exactly how to fix it
- If the file is clean, return {"severity": "LGTM", "issues": []}

Context: ${relatedContext.slice(0, 800)}`;
    const findings = [];
    let chunkIndex = 0;
    let skippedCount = 0;
    console.log(chalk_1.default.blue("\nāļø Sending to Judge...\n"));
    for (const chunk of fileChunks) {
        chunkIndex++;
        const fileName = chunk.match(/diff --git a\/(.+?) /)?.[1] || `Chunk ${chunkIndex}`;
        // Skip excluded files
        if (shouldSkipFile(fileName)) {
            console.log(chalk_1.default.dim(`  āļø Skipping ${fileName} (excluded)`));
            skippedCount++;
            continue;
        }
        const chunkTokens = estimateTokens(chunk) + estimateTokens(systemPrompt);
        // Skip huge files — recorded as a finding so they still show up in the report.
        if (chunkTokens > MAX_TOKENS_PER_REQUEST) {
            console.log(chalk_1.default.yellow(`  ā ļø Skipping ${fileName} (too large: ~${chunkTokens} tokens)`));
            findings.push({
                file: fileName,
                severity: config_1.Config.DEFAULT_SEVERITY,
                issues: [{
                        title: 'File too large for review',
                        description: `This file has ~${chunkTokens} tokens which exceeds the limit.`,
                        fixPrompt: 'Consider splitting this file into smaller modules.'
                    }]
            });
            continue;
        }
        process.stdout.write(chalk_1.default.dim(`  š ${fileName}...`));
        try {
            const { text } = await (0, ai_1.generateText)({
                model: groq(MODEL_NAME),
                system: systemPrompt,
                prompt: `REVIEW THIS DIFF:\n\n${chunk}`,
                temperature: 0.1,
            });
            const response = parseReviewResponse(text);
            findings.push({
                file: fileName,
                severity: response.severity,
                issues: response.issues
            });
            const emoji = response.severity === 'LGTM' ? 'ā' :
                response.severity === 'P0_CRITICAL' ? 'š' :
                    response.severity === 'P1_HIGH' ? 'ā ļø' : 'š';
            console.log(` ${emoji}`);
        }
        catch (error) {
            // FIX: thrown values are not guaranteed to be Errors with a string
            // `.message` — calling `.slice` on undefined would crash the whole
            // run and mask the real failure. Coerce defensively first.
            const message = String(error?.message ?? error);
            console.log(chalk_1.default.red(` ā (${message.slice(0, 50)})`));
            findings.push({
                file: fileName,
                severity: config_1.Config.DEFAULT_SEVERITY,
                issues: [{
                        title: 'Review failed',
                        description: message,
                        fixPrompt: 'Retry the review or check manually.'
                    }]
            });
        }
        // Rate limit delay (skipped after the final chunk)
        if (chunkIndex < fileChunks.length) {
            await sleep(DELAY_BETWEEN_CHUNKS_MS);
        }
    }
    // 6. Build Dashboard Report — severity counts are per-file verdicts.
    const metrics = {
        filesScanned: fileChunks.length - skippedCount,
        totalTokens,
        violations: findings.reduce((sum, f) => sum + f.issues.length, 0),
        critical: findings.filter(f => f.severity === 'P0_CRITICAL').length,
        high: findings.filter(f => f.severity === 'P1_HIGH').length,
        medium: findings.filter(f => f.severity === 'P2_MEDIUM').length,
        lgtm: findings.filter(f => f.severity === 'LGTM').length,
    };
    const report = {
        date: new Date().toISOString().split('T')[0],
        reviewer: 'Detective Engine & Judge',
        status: (0, dashboard_1.determineStatus)(metrics),
        metrics,
        findings,
        recommendedAction: (0, dashboard_1.generateRecommendation)(metrics)
    };
    // 7. Render and Save Dashboard
    const dashboard = (0, dashboard_1.renderDashboard)(report);
    // Ensure .jstar directory exists
    fs.mkdirSync(path.dirname(OUTPUT_FILE), { recursive: true });
    fs.writeFileSync(OUTPUT_FILE, dashboard);
    console.log("\n" + chalk_1.default.bold.green("š DASHBOARD GENERATED"));
    console.log(chalk_1.default.dim(`   Saved to: ${OUTPUT_FILE}`));
    console.log("\n" + chalk_1.default.bold.white("ā".repeat(50)));
    // Print summary to console
    const statusEmoji = report.status === 'APPROVED' ? 'š¢' :
        report.status === 'NEEDS_REVIEW' ? 'š”' : 'š“';
    console.log(`\n${statusEmoji} Status: ${report.status.replace('_', ' ')}`);
    console.log(`   š Critical: ${metrics.critical}`);
    console.log(`   ā ļø High: ${metrics.high}`);
    console.log(`   š Medium: ${metrics.medium}`);
    console.log(`   ā LGTM: ${metrics.lgtm}`);
    console.log(`\nš” ${report.recommendedAction}`);
    console.log(chalk_1.default.dim(`\nš Full report: ${OUTPUT_FILE}`));
}
main().catch(console.error);
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"use strict";
/**
 * J-Star Reviewer Types
 * Structured types for dashboard output
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.EMPTY_REVIEW = void 0;
/**
 * Default empty response for parse failures
 *
 * Shape matches the reviewer's parsed-response contract:
 * `severity` is one of the reviewer severity strings and `issues`
 * is an (empty) array of issue objects. Treat as read-only —
 * callers must not push onto the shared `issues` array.
 */
exports.EMPTY_REVIEW = {
    severity: 'LGTM',
    issues: []
};
|
package/package.json
CHANGED
|
@@ -1,11 +1,12 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "jstar-reviewer",
|
|
3
|
-
"version": "2.0.
|
|
3
|
+
"version": "2.0.4",
|
|
4
4
|
"description": "Local-First, Context-Aware AI Code Reviewer - Works with any language",
|
|
5
5
|
"bin": {
|
|
6
6
|
"jstar": "bin/jstar.js"
|
|
7
7
|
},
|
|
8
8
|
"scripts": {
|
|
9
|
+
"build": "tsc",
|
|
9
10
|
"index:init": "ts-node scripts/indexer.ts --init",
|
|
10
11
|
"index:watch": "ts-node scripts/indexer.ts --watch",
|
|
11
12
|
"review": "ts-node scripts/reviewer.ts",
|
|
@@ -50,6 +51,7 @@
|
|
|
50
51
|
"files": [
|
|
51
52
|
"bin/",
|
|
52
53
|
"scripts/",
|
|
54
|
+
"dist/",
|
|
53
55
|
"README.md"
|
|
54
56
|
],
|
|
55
57
|
"main": "setup.js",
|