@rigour-labs/core 3.0.6 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/deep/fact-extractor.d.ts +80 -0
- package/dist/deep/fact-extractor.js +626 -0
- package/dist/deep/fact-extractor.test.d.ts +1 -0
- package/dist/deep/fact-extractor.test.js +547 -0
- package/dist/deep/index.d.ts +14 -0
- package/dist/deep/index.js +12 -0
- package/dist/deep/prompts.d.ts +22 -0
- package/dist/deep/prompts.js +374 -0
- package/dist/deep/prompts.test.d.ts +1 -0
- package/dist/deep/prompts.test.js +220 -0
- package/dist/deep/verifier.d.ts +16 -0
- package/dist/deep/verifier.js +388 -0
- package/dist/deep/verifier.test.d.ts +1 -0
- package/dist/deep/verifier.test.js +514 -0
- package/dist/gates/deep-analysis.d.ts +28 -0
- package/dist/gates/deep-analysis.js +302 -0
- package/dist/gates/runner.d.ts +4 -2
- package/dist/gates/runner.js +46 -1
- package/dist/index.d.ts +10 -0
- package/dist/index.js +12 -2
- package/dist/inference/cloud-provider.d.ts +34 -0
- package/dist/inference/cloud-provider.js +126 -0
- package/dist/inference/index.d.ts +17 -0
- package/dist/inference/index.js +23 -0
- package/dist/inference/model-manager.d.ts +26 -0
- package/dist/inference/model-manager.js +106 -0
- package/dist/inference/sidecar-provider.d.ts +15 -0
- package/dist/inference/sidecar-provider.js +153 -0
- package/dist/inference/types.d.ts +77 -0
- package/dist/inference/types.js +19 -0
- package/dist/settings.d.ts +104 -0
- package/dist/settings.js +186 -0
- package/dist/storage/db.d.ts +16 -0
- package/dist/storage/db.js +132 -0
- package/dist/storage/findings.d.ts +14 -0
- package/dist/storage/findings.js +38 -0
- package/dist/storage/index.d.ts +9 -0
- package/dist/storage/index.js +8 -0
- package/dist/storage/patterns.d.ts +35 -0
- package/dist/storage/patterns.js +62 -0
- package/dist/storage/scans.d.ts +42 -0
- package/dist/storage/scans.js +55 -0
- package/dist/templates/universal-config.js +19 -0
- package/dist/types/index.d.ts +438 -15
- package/dist/types/index.js +41 -1
- package/package.json +6 -2
|
@@ -0,0 +1,302 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Deep Analysis Gate — LLM-powered code quality analysis.
|
|
3
|
+
*
|
|
4
|
+
* Three-step pipeline:
|
|
5
|
+
* 1. AST extracts facts → "UserService has 8 public methods touching 4 domains"
|
|
6
|
+
* 2. LLM interprets facts → "UserService violates Single Responsibility"
|
|
7
|
+
* 3. AST verifies LLM → Does UserService actually have those methods? ✓
|
|
8
|
+
*
|
|
9
|
+
* AST grounds LLM. LLM interprets AST. Neither works alone.
|
|
10
|
+
*/
|
|
11
|
+
import { Gate } from './base.js';
|
|
12
|
+
import { createProvider } from '../inference/index.js';
|
|
13
|
+
import { extractFacts, factsToPromptString, chunkFacts, buildAnalysisPrompt, buildCrossFilePrompt, verifyFindings } from '../deep/index.js';
|
|
14
|
+
import { Logger } from '../utils/logger.js';
|
|
15
|
+
/** Max files to analyze before truncating (prevents OOM on huge repos) */
const MAX_ANALYZABLE_FILES = 500;
/** Setup timeout: 120s for model download, 30s for API connection */
const SETUP_TIMEOUT_MS = 120_000;
export class DeepAnalysisGate extends Gate {
    config;
    provider = null;
    constructor(config) {
        super('deep-analysis', 'Deep Code Quality Analysis');
        this.config = config;
    }
    get provenance() {
        return 'deep-analysis';
    }
    /**
     * Build the per-request inference options shared by all analyze() calls.
     *
     * Fix: temperature uses `??` (not `||`) so an explicit temperature of 0
     * (fully greedy decoding) is honored instead of being clobbered to 0.1.
     * `||` is kept for maxTokens/timeout where 0 is not a meaningful value.
     *
     * @param fallbackMaxTokens default token budget when config.maxTokens is unset
     * @param fallbackTimeoutMs default request timeout when config.timeoutMs is unset
     */
    buildInferenceOptions(fallbackMaxTokens, fallbackTimeoutMs) {
        return {
            maxTokens: this.config.maxTokens || fallbackMaxTokens,
            temperature: this.config.temperature ?? 0.1,
            timeout: this.config.timeoutMs || fallbackTimeoutMs,
            jsonMode: true,
        };
    }
    /**
     * Run the three-step pipeline: AST extracts facts → LLM interprets facts
     * → AST verifies the LLM's findings. Errors are advisory: a failed deep
     * analysis never fails the overall check; it returns whatever verified
     * failures were collected (possibly none).
     */
    async run(context) {
        const { onProgress } = this.config;
        const failures = [];
        const startTime = Date.now();
        try {
            // Step 0: Initialize inference provider (with timeout)
            onProgress?.('\n Setting up Rigour Brain...\n');
            this.provider = createProvider(this.config.options);
            // Fix: clear the watchdog timer once the race settles — otherwise
            // a pending setTimeout keeps the event loop alive for up to 120s
            // after a fast, successful setup.
            let setupTimer;
            try {
                await Promise.race([
                    this.provider.setup(onProgress),
                    new Promise((_, reject) => {
                        setupTimer = setTimeout(() => reject(new Error('Setup timed out. Check network or model availability.')), SETUP_TIMEOUT_MS);
                    }),
                ]);
            }
            finally {
                clearTimeout(setupTimer);
            }
            const isLocal = !this.config.options.apiKey || this.config.options.provider === 'local';
            if (isLocal) {
                onProgress?.('\n 🔒 100% local analysis. Your code never leaves this machine.\n');
            }
            else {
                onProgress?.(`\n ☁️ Using ${this.config.options.provider} API. Code is sent to cloud.\n`);
            }
            // Step 1: AST extracts facts
            onProgress?.(' Extracting code facts...');
            let allFacts = await extractFacts(context.cwd, context.ignore);
            if (allFacts.length === 0) {
                onProgress?.(' No analyzable files found. Check ignore patterns and file extensions.');
                return [];
            }
            // Cap file count to prevent OOM on huge repos
            if (allFacts.length > MAX_ANALYZABLE_FILES) {
                onProgress?.(` ⚠ Found ${allFacts.length} files, capping at ${MAX_ANALYZABLE_FILES} (largest files prioritized).`);
                // Sort by line count descending — analyze the biggest files first
                allFacts.sort((a, b) => b.lineCount - a.lineCount);
                allFacts = allFacts.slice(0, MAX_ANALYZABLE_FILES);
            }
            const agentCount = this.config.options.agents || 1;
            const isCloud = !!this.config.options.apiKey;
            onProgress?.(` Found ${allFacts.length} files to analyze${agentCount > 1 ? ` with ${agentCount} parallel agents` : ''}.`);
            // Step 2: LLM interprets facts (in chunks)
            const chunks = chunkFacts(allFacts);
            const allFindings = [];
            let failedChunks = 0;
            if (agentCount > 1 && isCloud) {
                const result = await this.runMultiAgent(chunks, agentCount, onProgress);
                allFindings.push(...result.findings);
                failedChunks += result.failed;
            }
            else {
                const result = await this.runSingleAgent(chunks, isCloud, onProgress);
                allFindings.push(...result.findings);
                failedChunks += result.failed;
            }
            // Cross-file analysis (if we have enough files and at least some chunks succeeded)
            if (allFacts.length >= 3 && failedChunks < chunks.length) {
                onProgress?.(' Running cross-file analysis...');
                try {
                    const crossPrompt = buildCrossFilePrompt(allFacts);
                    const crossResponse = await this.provider.analyze(crossPrompt, this.buildInferenceOptions(isCloud ? 4096 : 512, isCloud ? 120000 : 60000));
                    const crossFindings = parseFindings(crossResponse);
                    allFindings.push(...crossFindings);
                }
                catch (error) {
                    Logger.warn(`Cross-file analysis failed: ${error.message}`);
                }
            }
            // Step 3: AST verifies LLM
            onProgress?.(' Verifying findings...');
            const verified = verifyFindings(allFindings, allFacts);
            const durationMs = Date.now() - startTime;
            onProgress?.(` ✓ ${verified.length} verified findings (${allFindings.length - verified.length} dropped) in ${(durationMs / 1000).toFixed(1)}s`);
            if (failedChunks > 0) {
                onProgress?.(` ⚠ ${failedChunks}/${chunks.length} batches failed — results may be incomplete.`);
            }
            // Convert to Failure format
            for (const finding of verified) {
                const failure = this.createFailure(finding.description, [finding.file], finding.suggestion, `[${finding.category}] ${finding.description.substring(0, 80)}`, finding.line, undefined, finding.severity);
                // Tag with deep analysis metadata
                failure.confidence = finding.confidence;
                failure.source = 'llm';
                failure.category = finding.category;
                failure.verified = finding.verified;
                failures.push(failure);
            }
        }
        catch (error) {
            Logger.error(`Deep analysis failed: ${error.message}`);
            onProgress?.(` ⚠ Deep analysis error: ${error.message}`);
            // Don't fail the whole check — deep is advisory
        }
        finally {
            this.provider?.dispose();
        }
        return failures;
    }
    /**
     * Multi-agent mode: partition chunks across N agents, analyze in parallel.
     * Each agent gets its own provider instance for true parallelism.
     * Local mode stays sequential (single sidecar process) — callers only
     * invoke this when running against a cloud API.
     *
     * Round-robin assignment fills buckets 0..chunks-1 first, so only trailing
     * buckets can be empty; provider index therefore lines up with bucket index.
     *
     * @returns merged findings plus the count of failed chunks
     */
    async runMultiAgent(chunks, agentCount, onProgress) {
        onProgress?.(` Spawning ${agentCount} parallel agents...`);
        const agentBuckets = Array.from({ length: agentCount }, () => []);
        chunks.forEach((chunk, i) => agentBuckets[i % agentCount].push(chunk));
        // Create N independent provider instances
        const agentProviders = [];
        for (let a = 0; a < agentCount; a++) {
            if (agentBuckets[a].length === 0)
                continue;
            const p = createProvider(this.config.options);
            await p.setup(); // Already connected — cloud setup is instant after first
            agentProviders.push(p);
        }
        const allFindings = [];
        let failedChunks = 0;
        // Run all agents in parallel
        const agentResults = await Promise.allSettled(agentProviders.map(async (provider, agentIdx) => {
            const bucket = agentBuckets[agentIdx];
            const findings = [];
            let failed = 0;
            for (let ci = 0; ci < bucket.length; ci++) {
                const globalIdx = agentIdx + ci * agentCount + 1;
                onProgress?.(` Agent ${agentIdx + 1}: batch ${ci + 1}/${bucket.length} (global ${globalIdx}/${chunks.length})`);
                const factsStr = factsToPromptString(bucket[ci]);
                const prompt = buildAnalysisPrompt(factsStr, this.config.checks);
                try {
                    const response = await provider.analyze(prompt, this.buildInferenceOptions(8192, 120000));
                    findings.push(...parseFindings(response));
                }
                catch (error) {
                    failed++;
                    Logger.warn(`Agent ${agentIdx + 1} chunk ${ci + 1} failed: ${error.message}`);
                }
            }
            return { findings, failed };
        }));
        // Merge results and dispose extra providers
        for (let i = 0; i < agentResults.length; i++) {
            const result = agentResults[i];
            if (result.status === 'fulfilled') {
                allFindings.push(...result.value.findings);
                failedChunks += result.value.failed;
            }
            else {
                failedChunks += agentBuckets[i].length;
                Logger.warn(`Agent ${i + 1} failed entirely: ${result.reason?.message || 'unknown'}`);
            }
            agentProviders[i]?.dispose();
        }
        onProgress?.(` All ${agentCount} agents completed.`);
        return { findings: allFindings, failed: failedChunks };
    }
    /**
     * Single-agent mode: sequential chunk processing on the main provider.
     * Per-chunk failures are logged and counted, not rethrown, so one bad
     * batch does not abort the remaining chunks.
     *
     * @returns collected findings plus the count of failed chunks
     */
    async runSingleAgent(chunks, isCloud, onProgress) {
        const allFindings = [];
        let failedChunks = 0;
        let chunkIndex = 0;
        for (const chunk of chunks) {
            chunkIndex++;
            onProgress?.(` Analyzing batch ${chunkIndex}/${chunks.length}...`);
            const factsStr = factsToPromptString(chunk);
            const prompt = buildAnalysisPrompt(factsStr, this.config.checks);
            try {
                const response = await this.provider.analyze(prompt, this.buildInferenceOptions(isCloud ? 4096 : 512, isCloud ? 120000 : 60000));
                const findings = parseFindings(response);
                allFindings.push(...findings);
            }
            catch (error) {
                failedChunks++;
                Logger.warn(`Chunk ${chunkIndex} inference failed: ${error.message}`);
                onProgress?.(` ⚠ Batch ${chunkIndex} failed: ${error.message}`);
            }
        }
        return { findings: allFindings, failed: failedChunks };
    }
}
|
|
199
|
+
/**
 * Parse LLM response into structured findings.
 * Handles various response formats (raw JSON, markdown-wrapped JSON, etc.)
 */
function parseFindings(response) {
    if (!response || response.trim().length === 0) {
        Logger.warn('Empty LLM response received');
        return [];
    }
    // Extract a findings array from a parsed JSON value, or null when the
    // value has no usable shape. Deliberately dereferences `value.findings`
    // without a guard so that null/undefined throw into the caller's catch,
    // routing malformed top-level values to the fallback parsers.
    const pick = (value) => {
        if (Array.isArray(value.findings))
            return validateFindings(value.findings);
        if (Array.isArray(value))
            return validateFindings(value);
        return null;
    };
    try {
        // Attempt 1: the whole response is valid JSON.
        return pick(JSON.parse(response)) ?? [];
    }
    catch {
        // Attempt 2: JSON wrapped in a markdown code fence.
        const fenced = response.match(/```(?:json)?\s*([\s\S]*?)```/);
        if (fenced) {
            try {
                const result = pick(JSON.parse(fenced[1]));
                if (result !== null)
                    return result;
            }
            catch {
                // Fence contents weren't JSON either — keep going.
            }
        }
        // Attempt 3: a bare JSON object containing "findings" somewhere in the text.
        const embedded = response.match(/\{[\s\S]*"findings"[\s\S]*\}/);
        if (embedded) {
            try {
                const candidate = JSON.parse(embedded[0]);
                if (Array.isArray(candidate.findings))
                    return validateFindings(candidate.findings);
            }
            catch {
                // Still not parseable — keep going.
            }
        }
        // Attempt 4: recover complete objects from a truncated JSON array.
        // LLMs sometimes exceed token limits, truncating the response mid-JSON.
        const recovered = recoverTruncatedFindings(response);
        if (recovered.length > 0) {
            Logger.info(`Recovered ${recovered.length} findings from truncated response`);
            return recovered;
        }
        Logger.warn(`Could not parse LLM response as findings JSON. First 200 chars: ${response.substring(0, 200)}`);
        return [];
    }
}
|
|
255
|
+
/**
 * Attempt to recover individual finding objects from a truncated JSON response.
 * Extracts complete JSON objects from partial arrays.
 */
function recoverTruncatedFindings(response) {
    // Each complete finding object starts with a "category" key and contains
    // a "description"; anything cut off mid-object won't match or won't parse.
    const objectPattern = /\{\s*"category"\s*:\s*"[^"]+"\s*,[\s\S]*?"description"\s*:\s*"[^"]*"[^}]*\}/g;
    const salvaged = [];
    for (const [candidateText] of response.matchAll(objectPattern)) {
        try {
            const candidate = JSON.parse(candidateText);
            if (candidate.category && candidate.file && candidate.description) {
                salvaged.push(candidate);
            }
        }
        catch {
            // This object was itself truncated — skip it.
        }
    }
    return validateFindings(salvaged);
}
|
|
277
|
+
/**
 * Validate and sanitize findings from LLM response.
 * Drops malformed entries that lack required fields; accepted entries are
 * normalized in place (confidence clamped to a sane default, severity coerced
 * to a known level) and returned as the same object references.
 */
function validateFindings(raw) {
    const VALID_SEVERITIES = ['critical', 'high', 'medium', 'low', 'info'];
    const accepted = [];
    for (const finding of raw) {
        // Required shape: a non-null object with string category/file/description.
        if (!finding || typeof finding !== 'object')
            continue;
        if (typeof finding.category !== 'string' || !finding.category)
            continue;
        if (typeof finding.file !== 'string' || !finding.file)
            continue;
        if (typeof finding.description !== 'string' || !finding.description)
            continue;
        // Normalize confidence: anything non-numeric or outside [0, 1] → 0.5.
        if (typeof finding.confidence !== 'number' || finding.confidence < 0 || finding.confidence > 1) {
            finding.confidence = 0.5;
        }
        // Normalize severity: unknown levels → 'medium'.
        if (!VALID_SEVERITIES.includes(finding.severity)) {
            finding.severity = 'medium';
        }
        accepted.push(finding);
    }
    return accepted;
}
|
package/dist/gates/runner.d.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { Gate } from './base.js';
|
|
2
|
-
import { Config, Report } from '../types/index.js';
|
|
2
|
+
import { Config, Report, DeepOptions } from '../types/index.js';
|
|
3
3
|
export declare class GateRunner {
|
|
4
4
|
private config;
|
|
5
5
|
private gates;
|
|
@@ -9,5 +9,7 @@ export declare class GateRunner {
|
|
|
9
9
|
* Allows adding custom gates dynamically (SOLID - Open/Closed Principle)
|
|
10
10
|
*/
|
|
11
11
|
addGate(gate: Gate): void;
|
|
12
|
-
run(cwd: string, patterns?: string[]
|
|
12
|
+
run(cwd: string, patterns?: string[], deepOptions?: DeepOptions & {
|
|
13
|
+
onProgress?: (msg: string) => void;
|
|
14
|
+
}): Promise<Report>;
|
|
13
15
|
}
|
package/dist/gates/runner.js
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { SEVERITY_WEIGHTS } from '../types/index.js';
|
|
2
|
+
import { DeepAnalysisGate } from './deep-analysis.js';
|
|
2
3
|
import { FileGate } from './file.js';
|
|
3
4
|
import { ContentGate } from './content.js';
|
|
4
5
|
import { StructureGate } from './structure.js';
|
|
@@ -102,7 +103,7 @@ export class GateRunner {
|
|
|
102
103
|
addGate(gate) {
|
|
103
104
|
this.gates.push(gate);
|
|
104
105
|
}
|
|
105
|
-
async run(cwd, patterns) {
|
|
106
|
+
async run(cwd, patterns, deepOptions) {
|
|
106
107
|
const start = Date.now();
|
|
107
108
|
const failures = [];
|
|
108
109
|
const summary = {};
|
|
@@ -164,6 +165,43 @@ export class GateRunner {
|
|
|
164
165
|
}
|
|
165
166
|
}
|
|
166
167
|
}
|
|
168
|
+
// 3. Run Deep Analysis (if enabled)
|
|
169
|
+
let deepStats = undefined;
|
|
170
|
+
if (deepOptions?.enabled) {
|
|
171
|
+
const deepSetupStart = Date.now();
|
|
172
|
+
const deepGate = new DeepAnalysisGate({
|
|
173
|
+
options: deepOptions,
|
|
174
|
+
checks: this.config.gates.deep?.checks,
|
|
175
|
+
threads: this.config.gates.deep?.threads,
|
|
176
|
+
maxTokens: this.config.gates.deep?.max_tokens,
|
|
177
|
+
temperature: this.config.gates.deep?.temperature,
|
|
178
|
+
timeoutMs: this.config.gates.deep?.timeout_ms,
|
|
179
|
+
onProgress: deepOptions.onProgress,
|
|
180
|
+
});
|
|
181
|
+
try {
|
|
182
|
+
const deepFailures = await deepGate.run({ cwd, ignore, patterns });
|
|
183
|
+
if (deepFailures.length > 0) {
|
|
184
|
+
failures.push(...deepFailures);
|
|
185
|
+
summary['deep-analysis'] = 'FAIL';
|
|
186
|
+
}
|
|
187
|
+
else {
|
|
188
|
+
summary['deep-analysis'] = 'PASS';
|
|
189
|
+
}
|
|
190
|
+
deepStats = {
|
|
191
|
+
enabled: true,
|
|
192
|
+
tier: deepOptions.apiKey ? 'cloud' : (deepOptions.pro ? 'pro' : 'deep'),
|
|
193
|
+
model: deepOptions.apiKey ? (deepOptions.provider || 'cloud') : (deepOptions.pro ? 'Qwen2.5-Coder-1.5B' : 'Qwen2.5-Coder-0.5B'),
|
|
194
|
+
total_ms: Date.now() - deepSetupStart,
|
|
195
|
+
findings_count: deepFailures.length,
|
|
196
|
+
findings_verified: deepFailures.filter((f) => f.verified).length,
|
|
197
|
+
};
|
|
198
|
+
}
|
|
199
|
+
catch (error) {
|
|
200
|
+
Logger.error(`Deep analysis failed: ${error.message}`);
|
|
201
|
+
summary['deep-analysis'] = 'ERROR';
|
|
202
|
+
deepStats = { enabled: true };
|
|
203
|
+
}
|
|
204
|
+
}
|
|
167
205
|
const status = failures.length > 0 ? 'FAIL' : 'PASS';
|
|
168
206
|
// Severity-weighted scoring: each failure deducts based on its severity
|
|
169
207
|
const severityBreakdown = {};
|
|
@@ -180,11 +218,13 @@ export class GateRunner {
|
|
|
180
218
|
// preventing security criticals from incorrectly zeroing structural_score.
|
|
181
219
|
let aiDeduction = 0;
|
|
182
220
|
let structuralDeduction = 0;
|
|
221
|
+
let deepDeduction = 0;
|
|
183
222
|
const provenanceCounts = {
|
|
184
223
|
'ai-drift': 0,
|
|
185
224
|
'traditional': 0,
|
|
186
225
|
'security': 0,
|
|
187
226
|
'governance': 0,
|
|
227
|
+
'deep-analysis': 0,
|
|
188
228
|
};
|
|
189
229
|
for (const f of failures) {
|
|
190
230
|
const sev = (f.severity || 'medium');
|
|
@@ -198,6 +238,9 @@ export class GateRunner {
|
|
|
198
238
|
case 'traditional':
|
|
199
239
|
structuralDeduction += weight;
|
|
200
240
|
break;
|
|
241
|
+
case 'deep-analysis':
|
|
242
|
+
deepDeduction += weight;
|
|
243
|
+
break;
|
|
201
244
|
// security and governance contribute to overall score (totalDeduction)
|
|
202
245
|
// but do NOT pollute the sub-scores
|
|
203
246
|
case 'security':
|
|
@@ -214,8 +257,10 @@ export class GateRunner {
|
|
|
214
257
|
score,
|
|
215
258
|
ai_health_score: Math.max(0, 100 - aiDeduction),
|
|
216
259
|
structural_score: Math.max(0, 100 - structuralDeduction),
|
|
260
|
+
...(deepOptions?.enabled ? { code_quality_score: Math.max(0, 100 - deepDeduction) } : {}),
|
|
217
261
|
severity_breakdown: severityBreakdown,
|
|
218
262
|
provenance_breakdown: provenanceCounts,
|
|
263
|
+
...(deepStats ? { deep: deepStats } : {}),
|
|
219
264
|
},
|
|
220
265
|
};
|
|
221
266
|
}
|
package/dist/index.d.ts
CHANGED
|
@@ -9,3 +9,13 @@ export { RetryLoopBreakerGate } from './gates/retry-loop-breaker.js';
|
|
|
9
9
|
export * from './utils/logger.js';
|
|
10
10
|
export * from './services/score-history.js';
|
|
11
11
|
export * from './hooks/index.js';
|
|
12
|
+
export { loadSettings, saveSettings, getSettingsPath, resolveDeepOptions, getProviderKey, getAgentConfig, getCliPreferences, updateProviderKey, removeProviderKey } from './settings.js';
|
|
13
|
+
export type { RigourSettings, ResolvedDeepOptions, CLIDeepOptions } from './settings.js';
|
|
14
|
+
export { DeepAnalysisGate } from './gates/deep-analysis.js';
|
|
15
|
+
export { createProvider } from './inference/index.js';
|
|
16
|
+
export type { InferenceProvider, DeepFinding, DeepAnalysisResult, ModelTier } from './inference/types.js';
|
|
17
|
+
export { MODELS } from './inference/types.js';
|
|
18
|
+
export { isModelCached, getModelsDir, getModelInfo } from './inference/model-manager.js';
|
|
19
|
+
export { extractFacts, factsToPromptString } from './deep/fact-extractor.js';
|
|
20
|
+
export { openDatabase, isSQLiteAvailable, insertScan, insertFindings, getRecentScans, getScoreTrendFromDB, getTopIssues, reinforcePattern, getStrongPatterns } from './storage/index.js';
|
|
21
|
+
export type { RigourDB } from './storage/index.js';
|
package/dist/index.js
CHANGED
|
@@ -9,7 +9,17 @@ export { RetryLoopBreakerGate } from './gates/retry-loop-breaker.js';
|
|
|
9
9
|
export * from './utils/logger.js';
|
|
10
10
|
export * from './services/score-history.js';
|
|
11
11
|
export * from './hooks/index.js';
|
|
12
|
+
// Settings Module (Global user config at ~/.rigour/settings.json)
|
|
13
|
+
export { loadSettings, saveSettings, getSettingsPath, resolveDeepOptions, getProviderKey, getAgentConfig, getCliPreferences, updateProviderKey, removeProviderKey } from './settings.js';
|
|
14
|
+
// Deep Analysis Pipeline (v4.0+)
|
|
15
|
+
export { DeepAnalysisGate } from './gates/deep-analysis.js';
|
|
16
|
+
export { createProvider } from './inference/index.js';
|
|
17
|
+
export { MODELS } from './inference/types.js';
|
|
18
|
+
export { isModelCached, getModelsDir, getModelInfo } from './inference/model-manager.js';
|
|
19
|
+
export { extractFacts, factsToPromptString } from './deep/fact-extractor.js';
|
|
20
|
+
// Storage (SQLite Brain)
|
|
21
|
+
export { openDatabase, isSQLiteAvailable, insertScan, insertFindings, getRecentScans, getScoreTrendFromDB, getTopIssues, reinforcePattern, getStrongPatterns } from './storage/index.js';
|
|
12
22
|
// Pattern Index is intentionally NOT exported here to prevent
|
|
13
|
-
// native dependency issues (sharp/transformers) from leaking into
|
|
14
|
-
// non-AI parts of the system.
|
|
23
|
+
// native dependency issues (sharp/transformers) from leaking into
|
|
24
|
+
// non-AI parts of the system.
|
|
15
25
|
// Import from @rigour-labs/core/pattern-index instead.
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * Cloud API Provider — runs inference via ANY cloud LLM API.
 *
 * The moat is local-first. But if a user brings their own key,
 * we don't block them. No limitations. Support EVERY provider:
 *
 * - 'claude'/'anthropic' → Anthropic SDK (native)
 * - Everything else → OpenAI-compatible SDK (works with OpenAI, Gemini, Groq,
 *   Mistral, Together, Fireworks, Perplexity, DeepSeek, self-hosted vLLM,
 *   Ollama, LM Studio, any OpenAI-compatible endpoint)
 *
 * User provides: api_key + provider name + optional base_url + optional model_name
 * We figure out the rest. Their key, their choice.
 */
import type { InferenceProvider, InferenceOptions } from './types.js';
export declare class CloudProvider implements InferenceProvider {
    /** Provider identifier of the form `cloud-<provider>`, e.g. `cloud-openai`. */
    readonly name: string;
    /** SDK client instance; null until setup() succeeds. */
    private client;
    /** Lowercased provider name used for default model/base-URL lookup. */
    private providerName;
    /** Trimmed API key; constructor rejects empty/whitespace-only keys. */
    private apiKey;
    /** Optional base URL override for OpenAI-compatible endpoints. */
    private baseUrl?;
    /** Model to request; falls back to a per-provider default. */
    private modelName;
    /** True when providerName is 'claude' or 'anthropic' (Anthropic SDK path). */
    private isClaude;
    /**
     * @param providerName Provider identifier (case-insensitive), e.g. 'openai', 'claude', 'groq'.
     * @param apiKey       The user's API key. Throws if empty or whitespace-only.
     * @param options      Optional base URL and model name overrides.
     */
    constructor(providerName: string, apiKey: string, options?: {
        baseUrl?: string;
        modelName?: string;
    });
    /** Resolves true when an API key is present; no network call is made. */
    isAvailable(): Promise<boolean>;
    /**
     * Dynamically imports the appropriate SDK and constructs the client.
     * Throws if the SDK package is not installed. Must be called before analyze().
     */
    setup(onProgress?: (message: string) => void): Promise<void>;
    /**
     * Sends the prompt and returns the model's text response.
     * Throws if setup() has not been called or the response is empty.
     */
    analyze(prompt: string, options?: InferenceOptions): Promise<string>;
    private analyzeClaude;
    private analyzeOpenAICompat;
    /** Releases the client reference; safe to call multiple times. */
    dispose(): void;
}
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
/** Default models per provider (user can override via model_name) */
const DEFAULT_MODELS = {
    claude: 'claude-opus-4-6',
    anthropic: 'claude-sonnet-4-6',
    openai: 'gpt-4o-mini',
    gemini: 'gemini-3-flash',
    groq: 'llama-3.1-70b-versatile',
    mistral: 'mistral-large-latest',
    together: 'meta-llama/Llama-3.1-70B-Instruct-Turbo',
    fireworks: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
    deepseek: 'deepseek-coder',
    perplexity: 'llama-3.1-sonar-large-128k-online',
    ollama: 'qwen2.5-coder:7b',
    lmstudio: 'qwen2.5-coder-7b-instruct',
};
/** Default base URLs per provider */
const DEFAULT_BASE_URLS = {
    openai: 'https://api.openai.com/v1',
    gemini: 'https://generativelanguage.googleapis.com/v1beta/openai',
    groq: 'https://api.groq.com/openai/v1',
    mistral: 'https://api.mistral.ai/v1',
    together: 'https://api.together.xyz/v1',
    fireworks: 'https://api.fireworks.ai/inference/v1',
    deepseek: 'https://api.deepseek.com/v1',
    perplexity: 'https://api.perplexity.ai',
    ollama: 'http://localhost:11434/v1',
    lmstudio: 'http://localhost:1234/v1',
};
/**
 * Cloud API provider. Routes 'claude'/'anthropic' through the Anthropic SDK
 * and everything else through the OpenAI-compatible SDK. SDKs are imported
 * lazily in setup() so neither is a hard dependency.
 */
export class CloudProvider {
    name;
    client = null;
    providerName;
    apiKey;
    baseUrl;
    modelName;
    isClaude;
    constructor(providerName, apiKey, options) {
        if (!apiKey || apiKey.trim().length === 0) {
            throw new Error(`API key cannot be empty for provider "${providerName}"`);
        }
        this.providerName = providerName.toLowerCase();
        this.apiKey = apiKey.trim();
        this.baseUrl = options?.baseUrl;
        // Unknown providers default to an OpenAI model name; a custom endpoint
        // can still override via modelName.
        this.modelName = options?.modelName || DEFAULT_MODELS[this.providerName] || 'gpt-4o-mini';
        this.isClaude = this.providerName === 'claude' || this.providerName === 'anthropic';
        this.name = `cloud-${this.providerName}`;
    }
    /** True when an API key is present; performs no network check. */
    async isAvailable() {
        return !!this.apiKey;
    }
    /**
     * Lazily import the right SDK and construct the client.
     * @throws Error when the required SDK package is not installed.
     */
    async setup(onProgress) {
        if (this.isClaude) {
            try {
                const { default: Anthropic } = await import('@anthropic-ai/sdk');
                this.client = new Anthropic({ apiKey: this.apiKey });
                onProgress?.(`✓ ${this.providerName} API connected (model: ${this.modelName})`);
            }
            catch {
                throw new Error('Claude API SDK not installed. Run: npm install @anthropic-ai/sdk');
            }
        }
        else {
            // OpenAI-compatible SDK — works with literally everything.
            // OpenAI, Groq, Mistral, Together, Fireworks, DeepSeek, Perplexity,
            // Gemini, Ollama, LM Studio, vLLM, any OpenAI-compatible endpoint.
            // No limitations. User's key, user's choice.
            try {
                const { default: OpenAI } = await import('openai');
                const baseURL = this.baseUrl || DEFAULT_BASE_URLS[this.providerName] || undefined;
                this.client = new OpenAI({
                    apiKey: this.apiKey,
                    ...(baseURL ? { baseURL } : {}),
                });
                onProgress?.(`✓ ${this.providerName} API connected (model: ${this.modelName})`);
            }
            catch {
                throw new Error('OpenAI SDK not installed (used for all OpenAI-compatible APIs). Run: npm install openai');
            }
        }
    }
    /**
     * Send a prompt and return the model's text response.
     * NOTE(review): options.timeout is accepted by callers but not enforced
     * here — requests rely on SDK defaults. Confirm whether per-request
     * timeouts should be wired through to the SDKs.
     * @throws Error when setup() has not run or the response has no text.
     */
    async analyze(prompt, options) {
        if (!this.client) {
            throw new Error('Provider not set up. Call setup() first.');
        }
        if (this.isClaude) {
            return this.analyzeClaude(prompt, options);
        }
        else {
            return this.analyzeOpenAICompat(prompt, options);
        }
    }
    async analyzeClaude(prompt, options) {
        const response = await this.client.messages.create({
            model: this.modelName,
            max_tokens: options?.maxTokens || 2048,
            // Fix: `??` so an explicit temperature of 0 (greedy) is honored,
            // not silently replaced with 0.1.
            temperature: options?.temperature ?? 0.1,
            messages: [
                { role: 'user', content: prompt }
            ],
        });
        const textBlock = response.content.find((b) => b.type === 'text');
        if (!textBlock?.text) {
            throw new Error(`Empty response from ${this.providerName} API (model: ${this.modelName}). Response had ${response.content.length} blocks but no text.`);
        }
        return textBlock.text;
    }
    async analyzeOpenAICompat(prompt, options) {
        const response = await this.client.chat.completions.create({
            model: this.modelName,
            max_tokens: options?.maxTokens || 2048,
            // Fix: `??` so an explicit temperature of 0 (greedy) is honored.
            temperature: options?.temperature ?? 0.1,
            messages: [
                { role: 'user', content: prompt }
            ],
            ...(options?.jsonMode ? { response_format: { type: 'json_object' } } : {}),
        });
        const content = response.choices[0]?.message?.content;
        if (!content) {
            throw new Error(`Empty response from ${this.providerName} API (model: ${this.modelName}). No content in choices.`);
        }
        return content;
    }
    /** Release the client reference; safe to call repeatedly. */
    dispose() {
        this.client = null;
    }
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Inference provider factory and exports.
|
|
3
|
+
*/
|
|
4
|
+
export type { InferenceProvider, InferenceOptions, DeepFinding, DeepAnalysisResult, ModelTier, ModelInfo } from './types.js';
|
|
5
|
+
export { MODELS } from './types.js';
|
|
6
|
+
export { SidecarProvider } from './sidecar-provider.js';
|
|
7
|
+
export { CloudProvider } from './cloud-provider.js';
|
|
8
|
+
export { ensureModel, isModelCached, getModelPath, getModelInfo, downloadModel, getModelsDir } from './model-manager.js';
|
|
9
|
+
import type { InferenceProvider } from './types.js';
|
|
10
|
+
import type { DeepOptions } from '../types/index.js';
|
|
11
|
+
/**
|
|
12
|
+
* Create the appropriate inference provider based on options.
|
|
13
|
+
*
|
|
14
|
+
* - No API key → SidecarProvider (local llama.cpp binary)
|
|
15
|
+
* - API key + any provider → CloudProvider (no restrictions, user's key, user's choice)
|
|
16
|
+
*/
|
|
17
|
+
export declare function createProvider(options: DeepOptions): InferenceProvider;
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
export { MODELS } from './types.js';
|
|
2
|
+
export { SidecarProvider } from './sidecar-provider.js';
|
|
3
|
+
export { CloudProvider } from './cloud-provider.js';
|
|
4
|
+
export { ensureModel, isModelCached, getModelPath, getModelInfo, downloadModel, getModelsDir } from './model-manager.js';
|
|
5
|
+
import { SidecarProvider } from './sidecar-provider.js';
|
|
6
|
+
import { CloudProvider } from './cloud-provider.js';
|
|
7
|
+
/**
 * Create the appropriate inference provider based on options.
 *
 * - No API key → SidecarProvider (local llama.cpp binary)
 * - API key + any provider → CloudProvider (no restrictions, user's key, user's choice)
 */
export function createProvider(options) {
    const { apiKey, provider, apiBaseUrl, modelName, pro } = options;
    const wantsCloud = Boolean(apiKey) && Boolean(provider) && provider !== 'local';
    if (!wantsCloud) {
        // Default: local sidecar — 'pro' selects the larger local model tier.
        return new SidecarProvider(pro ? 'pro' : 'deep');
    }
    return new CloudProvider(provider, apiKey, { baseUrl: apiBaseUrl, modelName });
}
|