jstar-reviewer 2.1.4 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/jstar.js +12 -1
- package/dist/scripts/chat.js +150 -0
- package/dist/scripts/config.js +5 -1
- package/dist/scripts/core/critique.js +137 -0
- package/dist/scripts/core/debate.js +95 -0
- package/dist/scripts/detective.js +5 -4
- package/dist/scripts/reviewer.js +136 -41
- package/dist/scripts/session.js +273 -0
- package/dist/scripts/ui/interaction.js +43 -0
- package/dist/scripts/utils/logger.js +110 -0
- package/package.json +14 -10
- package/scripts/chat.ts +130 -0
- package/scripts/config.ts +5 -1
- package/scripts/core/critique.ts +162 -0
- package/scripts/core/debate.ts +111 -0
- package/scripts/detective.ts +5 -4
- package/scripts/reviewer.ts +151 -41
- package/scripts/session.ts +312 -0
- package/scripts/types.ts +9 -0
- package/scripts/ui/interaction.ts +38 -0
- package/scripts/utils/logger.ts +118 -0
- package/setup.js +1 -1
- package/scripts/local-embedding.ts +0 -55
package/bin/jstar.js
CHANGED
@@ -29,7 +29,7 @@ function log(msg) {
 
 function printHelp() {
   log(`
-${COLORS.bold}🌟 J-Star Reviewer v2${COLORS.reset}
+${COLORS.bold}🌟 J-Star Reviewer v2.2.0${COLORS.reset}
 
 ${COLORS.dim}AI-powered code review with local embeddings${COLORS.reset}
 
@@ -39,8 +39,13 @@ ${COLORS.bold}USAGE:${COLORS.reset}
 ${COLORS.bold}COMMANDS:${COLORS.reset}
 ${COLORS.green}init${COLORS.reset} Index the current codebase (build the brain)
 ${COLORS.green}review${COLORS.reset} Review staged git changes
+${COLORS.green}chat${COLORS.reset} Resume an interactive session from the last review
 ${COLORS.green}setup${COLORS.reset} Create .env.example and .jstar/ in current directory
 
+${COLORS.bold}OPTIONS:${COLORS.reset}
+${COLORS.yellow}--json${COLORS.reset} Output machine-readable JSON (for CI/CD)
+${COLORS.yellow}--headless${COLORS.reset} Enable stdin/stdout protocol (for AI agents)
+
 ${COLORS.bold}EXAMPLES:${COLORS.reset}
 ${COLORS.dim}# First time setup${COLORS.reset}
 jstar init
@@ -49,6 +54,9 @@ ${COLORS.bold}EXAMPLES:${COLORS.reset}
 git add .
 jstar review
 
+${COLORS.dim}# JSON output for CI${COLORS.reset}
+jstar review --json > report.json
+
 ${COLORS.bold}ENVIRONMENT:${COLORS.reset}
 GEMINI_API_KEY Required for Gemini embeddings (or GOOGLE_API_KEY)
 GROQ_API_KEY Required for Groq LLM reviews
@@ -214,6 +222,9 @@ switch (command) {
   case 'review':
     runScript('reviewer.ts');
    break;
+  case 'chat':
+    runScript('chat.ts');
+    break;
   case 'setup':
     createSetupFiles();
     break;
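The new --json and --headless flags make the CLI scriptable. Below is a minimal sketch of a CI gate that consumes `jstar review --json > report.json`; the report schema here is an assumption inferred from the report object assembled in dist/scripts/chat.js further down (date, status, metrics, recommendedAction), not from documented output, so verify the field names against a real report before relying on it.

```ts
// ci-gate.ts — hypothetical CI step; the JstarReport shape is assumed, not documented.
import * as fs from "fs";

interface ReportMetrics {
  violations: number;
  critical: number;
  high: number;
  medium: number;
  lgtm: number;
}

interface JstarReport {
  date: string;
  status: string;
  metrics: ReportMetrics;
  recommendedAction: string;
}

// Read the report produced by `jstar review --json > report.json`.
const report: JstarReport = JSON.parse(fs.readFileSync("report.json", "utf-8"));

if (report.metrics.critical > 0) {
  console.error(`Blocking merge: ${report.metrics.critical} P0 finding(s). ${report.recommendedAction}`);
  process.exit(1);
}
console.log(`Review passed: ${report.metrics.violations} issue(s), none critical.`);
```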
package/dist/scripts/chat.js
ADDED

@@ -0,0 +1,150 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+        desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+    var ownKeys = function(o) {
+        ownKeys = Object.getOwnPropertyNames || function (o) {
+            var ar = [];
+            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+            return ar;
+        };
+        return ownKeys(o);
+    };
+    return function (mod) {
+        if (mod && mod.__esModule) return mod;
+        var result = {};
+        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+        __setModuleDefault(result, mod);
+        return result;
+    };
+})();
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const session_1 = require("./session");
+const llamaindex_1 = require("llamaindex");
+const gemini_embedding_1 = require("./gemini-embedding");
+const mock_llm_1 = require("./mock-llm");
+const logger_1 = require("./utils/logger");
+const path = __importStar(require("path"));
+const fs = __importStar(require("fs"));
+const chalk_1 = __importDefault(require("chalk"));
+const dashboard_1 = require("./dashboard");
+const STORAGE_DIR = path.join(process.cwd(), ".jstar", "storage");
+const SESSION_FILE = path.join(process.cwd(), ".jstar", "session.json");
+const OUTPUT_FILE = path.join(process.cwd(), ".jstar", "last-review.md");
+const embedModel = new gemini_embedding_1.GeminiEmbedding();
+const llm = new mock_llm_1.MockLLM();
+const serviceContext = (0, llamaindex_1.serviceContextFromDefaults)({ embedModel, llm: llm });
+async function loadSession() {
+    try {
+        const content = fs.readFileSync(SESSION_FILE, 'utf-8');
+        return JSON.parse(content);
+    }
+    catch (e) {
+        if (e.code === 'ENOENT') {
+            return null; // File doesn't exist
+        }
+        logger_1.Logger.error(`Failed to load session: ${e.message}`);
+        return null;
+    }
+}
+async function main() {
+    // Initialize logger mode
+    logger_1.Logger.init();
+    logger_1.Logger.info(chalk_1.default.bold.magenta("\n💬 J-Star Chat: Resuming Session...\n"));
+    // 1. Load Session
+    const session = await loadSession();
+    if (!session) {
+        logger_1.Logger.error(chalk_1.default.red("❌ No active session found."));
+        logger_1.Logger.info(chalk_1.default.yellow("Run 'jstar review' first to analyze the codebase."));
+        return;
+    }
+    logger_1.Logger.info(chalk_1.default.dim(` 📅 Loaded session from: ${session.date}`));
+    logger_1.Logger.info(chalk_1.default.dim(` 🔍 Loaded ${session.findings.reduce((acc, f) => acc + f.issues.length, 0)} issues.`));
+    // 2. Load Brain (Fast)
+    if (!fs.existsSync(STORAGE_DIR)) {
+        logger_1.Logger.error(chalk_1.default.red("❌ Local Brain not found. Run 'pnpm index:init' first."));
+        return;
+    }
+    const storageContext = await (0, llamaindex_1.storageContextFromDefaults)({ persistDir: STORAGE_DIR });
+    const index = await llamaindex_1.VectorStoreIndex.init({ storageContext, serviceContext });
+    // 3. Start Chat (Headless or Interactive)
+    let updatedFindings;
+    let hasUpdates;
+    if (logger_1.Logger.isHeadless()) {
+        // Headless mode: stdin/stdout JSON protocol
+        const result = await (0, session_1.startHeadlessSession)(session.findings, index);
+        updatedFindings = result.updatedFindings;
+        hasUpdates = result.hasUpdates;
+    }
+    else {
+        // Normal TUI mode
+        const result = await (0, session_1.startInteractiveSession)(session.findings, index);
+        updatedFindings = result.updatedFindings;
+        hasUpdates = result.hasUpdates;
+    }
+    // 4. Update Session & Report if changed
+    if (hasUpdates) {
+        logger_1.Logger.info(chalk_1.default.blue("\n🔄 Updating Session & Dashboard..."));
+        // Recalculate metrics based on new findings
+        const newMetrics = {
+            ...session.metrics, // keep files/tokens same
+            violations: updatedFindings.reduce((sum, f) => sum + f.issues.length, 0),
+            critical: updatedFindings.filter(f => f.severity === 'P0_CRITICAL').length,
+            high: updatedFindings.filter(f => f.severity === 'P1_HIGH').length,
+            medium: updatedFindings.filter(f => f.severity === 'P2_MEDIUM').length,
+            lgtm: updatedFindings.filter(f => f.severity === 'LGTM').length,
+        };
+        // Save Session
+        const newSession = {
+            date: new Date().toISOString().split('T')[0],
+            findings: updatedFindings,
+            metrics: newMetrics
+        };
+        try {
+            fs.writeFileSync(SESSION_FILE, JSON.stringify(newSession, null, 2));
+        }
+        catch (err) {
+            logger_1.Logger.error(`Failed to save session: ${err.message}`);
+            return;
+        }
+        // Save Dashboard
+        const report = {
+            date: newSession.date,
+            reviewer: 'J-Star Chat',
+            status: (0, dashboard_1.determineStatus)(newMetrics),
+            metrics: newMetrics,
+            findings: updatedFindings,
+            recommendedAction: (0, dashboard_1.generateRecommendation)(newMetrics)
+        };
+        const dashboard = (0, dashboard_1.renderDashboard)(report);
+        try {
+            fs.writeFileSync(OUTPUT_FILE, dashboard);
+        }
+        catch (err) {
+            logger_1.Logger.error(`Failed to save dashboard: ${err.message}`);
+            return;
+        }
+        logger_1.Logger.info(chalk_1.default.bold.green("✅ Saved."));
+    }
+    else {
+        logger_1.Logger.info(chalk_1.default.dim(" No changes made."));
+    }
+}
+main().catch(console.error);
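`jstar chat` resumes from `.jstar/session.json`. The shape below is inferred from how chat.js reads and rewrites that file; it is a sketch, and the authoritative types live in package/scripts/types.ts, which is not included in this excerpt.

```ts
// Inferred shape of .jstar/session.json (an assumption based on the accesses in chat.js).
type Severity = "P0_CRITICAL" | "P1_HIGH" | "P2_MEDIUM" | "LGTM";

interface Issue {
  title: string;
  description: string;
}

interface Finding {
  file: string;
  severity: Severity;
  issues: Issue[];
}

interface Session {
  date: string; // ISO date only, e.g. "2025-01-31"
  findings: Finding[];
  metrics: {
    violations: number;
    critical: number;
    high: number;
    medium: number;
    lgtm: number;
    [extra: string]: number; // file/token counters carried over unchanged ("keep files/tokens same")
  };
}
```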
package/dist/scripts/config.js
CHANGED
@@ -86,8 +86,12 @@ dotenv_1.default.config({ path: path.join(cwd, ".env") });
 const DEFAULT_MODEL = "moonshotai/kimi-k2-instruct-0905";
 exports.Config = {
     MODEL_NAME: process.env.REVIEW_MODEL_NAME || DEFAULT_MODEL,
+    CRITIQUE_MODEL_NAME: process.env.CRITIQUE_MODEL_NAME || process.env.REVIEW_MODEL_NAME || DEFAULT_MODEL,
     DEFAULT_SEVERITY: 'P2_MEDIUM',
     THRESHOLDS: {
         MEDIUM: 5
-    }
+    },
+    // Smart Review Settings
+    CONFIDENCE_THRESHOLD: 3, // Minimum confidence (1-5) to include an issue
+    ENABLE_SELF_CRITIQUE: true, // Enable second-pass validation
 };
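CRITIQUE_MODEL_NAME falls back to REVIEW_MODEL_NAME and then to the default model, so the second pass can run on a different (for example cheaper) model than the main review. The reviewer changes that consume ENABLE_SELF_CRITIQUE and CONFIDENCE_THRESHOLD are not shown in this excerpt; the sketch below is one plausible wiring only, and the per-issue `confidence` field is an assumption based on the comment in config.js.

```ts
// Hypothetical post-processing step in reviewer.ts — a sketch, not the shipped code.
import { Config } from "./config";
import { critiqueFindings } from "./core/critique";

type Issue = { title: string; description: string; confidence?: number }; // confidence is assumed
type Finding = { file: string; severity: string; issues: Issue[] };

export async function applySmartReview(findings: Finding[], stagedDiff: string): Promise<Finding[]> {
  // Optional second pass: ask the critique model to strike false positives.
  const validated = Config.ENABLE_SELF_CRITIQUE
    ? await critiqueFindings(findings, stagedDiff)
    : findings;

  // Drop issues whose self-reported confidence (1-5) falls below the threshold.
  return validated.map((f) => ({
    ...f,
    issues: f.issues.filter((i) => (i.confidence ?? 5) >= Config.CONFIDENCE_THRESHOLD),
  }));
}
```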
package/dist/scripts/core/critique.js
ADDED

@@ -0,0 +1,137 @@
+"use strict";
+/**
+ * Self-Critique Module
+ * Second-pass validation to filter out false positives
+ */
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.critiqueFindings = critiqueFindings;
+const ai_1 = require("ai");
+const groq_1 = require("@ai-sdk/groq");
+const chalk_1 = __importDefault(require("chalk"));
+const logger_1 = require("../utils/logger");
+const config_1 = require("../config");
+const groq = (0, groq_1.createGroq)({ apiKey: process.env.GROQ_API_KEY });
+/**
+ * Runs a self-critique pass on the initial findings.
+ * Returns filtered findings with only validated issues.
+ */
+async function critiqueFindings(findings, diff) {
+    // Collect all issues across all files for batch critique
+    const allIssues = [];
+    findings.forEach((f, fIdx) => {
+        f.issues.forEach((issue, iIdx) => {
+            allIssues.push({ file: f.file, issue, index: fIdx * 1000 + iIdx });
+        });
+    });
+    if (allIssues.length === 0) {
+        return findings;
+    }
+    logger_1.Logger.info(chalk_1.default.blue("\n🔍 Self-Critique Pass: Validating findings...\n"));
+    // Build the critique prompt
+    const issueList = allIssues.map((item, idx) => `[${idx}] File: ${item.file}\n Title: ${item.issue.title}\n Description: ${item.issue.description}`).join("\n\n");
+    const systemPrompt = `You are a code review validator. Your job is to filter out FALSE POSITIVES.
+
+For each issue below, decide:
+- VALID: This is a real problem that should be reported
+- FALSE_POSITIVE: This is NOT a real issue (test mock, intentional pattern, already handled, etc.)
+- NEEDS_CONTEXT: Can't determine without more code context (treat as valid)
+
+Return JSON with this structure:
+{
+"results": [
+{ "issueTitle": "...", "verdict": "VALID" | "FALSE_POSITIVE" | "NEEDS_CONTEXT", "reason": "..." }
+]
+}
+
+IMPORTANT:
+- Be SKEPTICAL. If the code looks intentional, it's probably not a bug.
+- Test files, mocks, and stubs are NOT bugs.
+- "Missing error handling" in utility modules may be intentional.
+- Return ONLY valid JSON.`;
+    const userPrompt = `ORIGINAL DIFF:
+\`\`\`
+${diff.slice(0, 4000)}
+\`\`\`
+
+ISSUES TO VALIDATE:
+${issueList}`;
+    try {
+        const { text } = await (0, ai_1.generateText)({
+            model: groq(config_1.Config.CRITIQUE_MODEL_NAME),
+            system: systemPrompt,
+            prompt: userPrompt,
+            temperature: 0.1,
+        });
+        // Parse critique response
+        const jsonMatch = text.match(/\{[\s\S]*\}/);
+        if (!jsonMatch) {
+            logger_1.Logger.warn(chalk_1.default.yellow(" ⚠️ Could not parse critique response, keeping all issues"));
+            return findings;
+        }
+        let response;
+        try {
+            const parsed = JSON.parse(jsonMatch[0]);
+            // Validate structure
+            if (!parsed || !Array.isArray(parsed.results)) {
+                throw new Error("Invalid response structure");
+            }
+            response = parsed;
+        }
+        catch (parseError) {
+            logger_1.Logger.warn(chalk_1.default.yellow(" ⚠️ Invalid JSON from critique, keeping all issues"));
+            return findings;
+        }
+        // Build a map of verdicts by title
+        const verdictMap = new Map();
+        for (const result of response.results) {
+            verdictMap.set(result.issueTitle.toLowerCase(), result);
+        }
+        // Filter findings
+        const filteredFindings = [];
+        let removedCount = 0;
+        for (const finding of findings) {
+            const validIssues = [];
+            for (const issue of finding.issues) {
+                const verdict = verdictMap.get(issue.title.toLowerCase());
+                if (verdict?.verdict === 'FALSE_POSITIVE') {
+                    removedCount++;
+                    logger_1.Logger.info(chalk_1.default.dim(` ❌ Removed: "${issue.title}" (${verdict.reason})`));
+                }
+                else {
+                    validIssues.push(issue);
+                    if (verdict?.verdict === 'VALID') {
+                        logger_1.Logger.info(chalk_1.default.green(` ✓ Kept: "${issue.title}"`));
+                    }
+                }
+            }
+            if (validIssues.length > 0) {
+                filteredFindings.push({
+                    ...finding,
+                    issues: validIssues,
+                    // Upgrade to LGTM if no issues remain? No, keep severity for record
+                });
+            }
+            else if (finding.issues.length > 0) {
+                // All issues were false positives - mark as LGTM
+                filteredFindings.push({
+                    ...finding,
+                    severity: 'LGTM',
+                    issues: []
+                });
+            }
+            else {
+                filteredFindings.push(finding);
+            }
+        }
+        logger_1.Logger.info(chalk_1.default.blue(`\n 📊 Self-Critique: ${removedCount} false positive(s) removed\n`));
+        return filteredFindings;
+    }
+    catch (error) {
+        logger_1.Logger.warn(chalk_1.default.yellow(` ⚠️ Self-critique failed: ${error.message.slice(0, 100)}`));
+        logger_1.Logger.warn(chalk_1.default.yellow(" Keeping all issues as fallback."));
+        return findings;
+    }
+}
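A usage sketch for the critique pass (the TypeScript source is scripts/core/critique.ts); the sample finding and diff are illustrative values, not taken from the package.

```ts
import { critiqueFindings } from "./core/critique";

async function validateExample(stagedDiff: string) {
  // Illustrative finding — the real ones come from the first review pass.
  const findings = [
    {
      file: "src/payment.ts",
      severity: "P1_HIGH",
      issues: [{ title: "Missing error handling", description: "fetch() result is not checked" }],
    },
  ];

  // The prompt embeds at most the first 4000 characters of the diff.
  const validated = await critiqueFindings(findings, stagedDiff);
  // A finding whose issues are all judged FALSE_POSITIVE comes back as severity "LGTM" with an empty issues array.
  return validated;
}
```

Note that verdicts are keyed by lowercased issue title, so identically titled issues in different files share one verdict.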
package/dist/scripts/core/debate.js
ADDED

@@ -0,0 +1,95 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.debateIssue = debateIssue;
+const llamaindex_1 = require("llamaindex");
+const ai_1 = require("ai");
+const groq_1 = require("@ai-sdk/groq");
+const config_1 = require("../config");
+const chalk_1 = __importDefault(require("chalk"));
+const groq = (0, groq_1.createGroq)({ apiKey: process.env.GROQ_API_KEY });
+async function debateIssue(issueTitle, issueDescription, fileName, userArgument, index) {
+    // Validate API key before making any calls
+    if (!process.env.GROQ_API_KEY) {
+        throw new Error("GROQ_API_KEY is required for debate mode. Please set it in your .env.local file.");
+    }
+    console.log(chalk_1.default.dim(" 🧠 Thinking... (Consulting the Brain)"));
+    // 1. Extract keywords/context
+    const query = `${userArgument} ${issueTitle}`;
+    // 2. Retrieve new context
+    const retriever = index.asRetriever({ similarityTopK: 2 });
+    const contextNodes = await retriever.retrieve(query);
+    const newContext = contextNodes.map(n => n.node.getContent(llamaindex_1.MetadataMode.NONE)).join("\n\n").slice(0, 2000);
+    if (newContext.length >= 2000) {
+        console.log(chalk_1.default.yellow(" ⚠️ Context truncated to 2000 chars"));
+    }
+    const sources = contextNodes.map(n => n.node.metadata?.['file_name']).filter(Boolean).join(', ');
+    if (sources) {
+        console.log(chalk_1.default.dim(` 🔍 Found relevant context from: ${sources}`));
+    }
+    // 3. Ask the Judge
+    const systemPrompt = `You are a Senior Code Reviewer in a debate with a developer.
+
+ORIGINAL FINDING: "${issueTitle} - ${issueDescription}" in file ${fileName}.
+USER DEFENSE: "${userArgument}"
+
+NEW CONTEXT FOUND IN REPO:
+${newContext}
+
+TASK:
+Analyze the USER INPUT.
+
+1. **IS IT A QUESTION?** (e.g., "What does this mean?", "Why is this wrong?")
+- If yes, **EXPLAIN** the technical reasoning behind the finding.
+- Reference the specific code/context.
+- Do NOT withdraw the issue (Severity: UNCHANGED).
+- Tone: Educational and helpful.
+
+2. **IS IT A DEFENSE/ARGUMENT?** (e.g., "This is handled in utils.ts", "It's a false positive because...")
+- Evaluate if the user is correct based on the NEW CONTEXT.
+- If user is RIGHT: Apologize and withdraw (Severity: LGTM).
+- If user is WRONG: Explain why, citing the context. (Severity: UNCHANGED).
+
+RETURN JSON:
+{
+"response": "Conversational response (explanation or verdict).",
+"severity": "P0_CRITICAL" | "P1_HIGH" | "P2_MEDIUM" | "LGTM" | "UNCHANGED"
+}
+`;
+    try {
+        const { text } = await (0, ai_1.generateText)({
+            model: groq(config_1.Config.MODEL_NAME),
+            system: systemPrompt,
+            prompt: "What is your verdict?",
+            temperature: 0.2,
+        });
+        const jsonMatch = text.match(/\{[\s\S]*\}/);
+        if (jsonMatch) {
+            try {
+                const parsed = JSON.parse(jsonMatch[0]);
+                // Validate expected structure
+                if (parsed && typeof parsed.response === 'string' && parsed.severity) {
+                    return {
+                        text: parsed.response,
+                        severity: parsed.severity
+                    };
+                }
+            }
+            catch (parseError) {
+                // JSON parse failed, fall through to default response
+            }
+        }
+        return {
+            text: text,
+            severity: 'UNCHANGED'
+        };
+    }
+    catch (error) {
+        return {
+            text: `Failed to debate: ${error.message}`,
+            severity: 'UNCHANGED'
+        };
+    }
+}
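Usage sketch for the debate flow. `index` is assumed to be the same VectorStoreIndex that chat.js loads from .jstar/storage (loading is omitted here), and the finding text is illustrative.

```ts
import type { VectorStoreIndex } from "llamaindex";
import { debateIssue } from "./core/debate";

export async function challengeFinding(index: VectorStoreIndex) {
  const verdict = await debateIssue(
    "Missing error handling",                                   // issueTitle
    "fetch() result is not checked before use",                 // issueDescription
    "src/payment.ts",                                           // fileName
    "Errors are caught by the global handler in middleware.ts", // userArgument
    index
  );
  console.log(verdict.text);
  // verdict.severity is "LGTM" when the reviewer withdraws the finding,
  // "UNCHANGED" (or a new P-level) otherwise.
}
```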
package/dist/scripts/detective.js
CHANGED

@@ -40,6 +40,7 @@ exports.Detective = void 0;
 const fs = __importStar(require("fs"));
 const path = __importStar(require("path"));
 const chalk_1 = __importDefault(require("chalk"));
+const logger_1 = require("./utils/logger");
 const RULES = [
     {
         id: 'SEC-001',
@@ -125,19 +126,19 @@ class Detective {
     }
     report() {
         if (this.violations.length === 0) {
-
+            logger_1.Logger.info(chalk_1.default.green("✅ Detective Engine: No violations found."));
             return;
         }
-
+        logger_1.Logger.info(chalk_1.default.red(`🚨 Detective Engine found ${this.violations.length} violations:`));
         // Only show first 10 to avoid wall of text
         const total = this.violations.length;
         const toShow = this.violations.slice(0, 10);
         toShow.forEach(v => {
             const color = v.severity === 'high' ? chalk_1.default.red : chalk_1.default.yellow;
-
+            logger_1.Logger.info(color(`[${v.code}] ${v.file}:${v.line} - ${v.message}`));
         });
         if (total > 10) {
-
+            logger_1.Logger.dim(`... and ${total - 10} more.`);
         }
     }
 }
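The Detective engine now reports through the shared Logger instead of writing to the console directly, presumably so --json and --headless runs keep stdout clean. The real implementation is scripts/utils/logger.ts (+118 lines, not shown in this excerpt); the sketch below only illustrates the behaviour the call sites above rely on, and its flag detection is an assumption.

```ts
// Rough sketch of a headless-aware logger. Method names (init, isHeadless, info,
// warn, error, dim) come from the call sites in this diff; everything else is assumed.
import chalk from "chalk";

export class Logger {
  private static headless = false;

  static init(): void {
    // Assumption: headless/JSON modes are detected from CLI flags.
    this.headless = process.argv.includes("--headless") || process.argv.includes("--json");
  }

  static isHeadless(): boolean {
    return this.headless;
  }

  static info(msg: string): void {
    if (!this.headless) console.log(msg);
  }

  static warn(msg: string): void {
    if (!this.headless) console.warn(msg);
  }

  static error(msg: string): void {
    console.error(msg); // errors always surface, even in headless mode (assumption)
  }

  static dim(msg: string): void {
    this.info(chalk.dim(msg));
  }
}
```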