@rigour-labs/core 3.0.6 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/deep/fact-extractor.d.ts +80 -0
- package/dist/deep/fact-extractor.js +626 -0
- package/dist/deep/fact-extractor.test.d.ts +1 -0
- package/dist/deep/fact-extractor.test.js +547 -0
- package/dist/deep/index.d.ts +14 -0
- package/dist/deep/index.js +12 -0
- package/dist/deep/prompts.d.ts +22 -0
- package/dist/deep/prompts.js +374 -0
- package/dist/deep/prompts.test.d.ts +1 -0
- package/dist/deep/prompts.test.js +220 -0
- package/dist/deep/verifier.d.ts +16 -0
- package/dist/deep/verifier.js +388 -0
- package/dist/deep/verifier.test.d.ts +1 -0
- package/dist/deep/verifier.test.js +514 -0
- package/dist/gates/deep-analysis.d.ts +28 -0
- package/dist/gates/deep-analysis.js +302 -0
- package/dist/gates/runner.d.ts +4 -2
- package/dist/gates/runner.js +46 -1
- package/dist/index.d.ts +10 -0
- package/dist/index.js +12 -2
- package/dist/inference/cloud-provider.d.ts +34 -0
- package/dist/inference/cloud-provider.js +126 -0
- package/dist/inference/index.d.ts +17 -0
- package/dist/inference/index.js +23 -0
- package/dist/inference/model-manager.d.ts +26 -0
- package/dist/inference/model-manager.js +106 -0
- package/dist/inference/sidecar-provider.d.ts +15 -0
- package/dist/inference/sidecar-provider.js +153 -0
- package/dist/inference/types.d.ts +77 -0
- package/dist/inference/types.js +19 -0
- package/dist/settings.d.ts +104 -0
- package/dist/settings.js +186 -0
- package/dist/storage/db.d.ts +16 -0
- package/dist/storage/db.js +132 -0
- package/dist/storage/findings.d.ts +14 -0
- package/dist/storage/findings.js +38 -0
- package/dist/storage/index.d.ts +9 -0
- package/dist/storage/index.js +8 -0
- package/dist/storage/patterns.d.ts +35 -0
- package/dist/storage/patterns.js +62 -0
- package/dist/storage/scans.d.ts +42 -0
- package/dist/storage/scans.js +55 -0
- package/dist/templates/universal-config.js +19 -0
- package/dist/types/index.d.ts +438 -15
- package/dist/types/index.js +41 -1
- package/package.json +6 -2
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import { type ModelTier, type ModelInfo } from './types.js';
/**
 * Check if a model is already downloaded and valid.
 * NOTE(review): "valid" appears to be a size heuristic, not a checksum — confirm against implementation.
 */
export declare function isModelCached(tier: ModelTier): boolean;
/**
 * Get the path to a cached model.
 * Returns the expected on-disk location; does not verify that the file exists.
 */
export declare function getModelPath(tier: ModelTier): string;
/**
 * Get model info for a tier.
 */
export declare function getModelInfo(tier: ModelTier): ModelInfo;
/**
 * Download a model from HuggingFace CDN.
 * Calls onProgress with status updates.
 * @returns the absolute path of the downloaded model file
 */
export declare function downloadModel(tier: ModelTier, onProgress?: (message: string, percent?: number) => void): Promise<string>;
/**
 * Ensure a model is available, downloading if needed.
 * @returns the absolute path of the cached or freshly downloaded model file
 */
export declare function ensureModel(tier: ModelTier, onProgress?: (message: string, percent?: number) => void): Promise<string>;
/**
 * Get the models directory path.
 */
export declare function getModelsDir(): string;
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model Manager — handles downloading, caching, and verifying GGUF models.
|
|
3
|
+
* Models cached at ~/.rigour/models/
|
|
4
|
+
*/
|
|
5
|
+
import path from 'path';
|
|
6
|
+
import fs from 'fs-extra';
|
|
7
|
+
import { RIGOUR_DIR } from '../storage/db.js';
|
|
8
|
+
import { MODELS } from './types.js';
|
|
9
|
+
// On-disk cache location for GGUF model files (under ~/.rigour).
const MODELS_DIR = path.join(RIGOUR_DIR, 'models');
/**
 * Check if a model is already downloaded and valid.
 * "Valid" is a heuristic: the file exists and its size is within 10%
 * of the published size (no checksum verification is performed).
 */
export function isModelCached(tier) {
    const { filename, sizeBytes } = MODELS[tier];
    const cachedFile = path.join(MODELS_DIR, filename);
    if (!fs.existsSync(cachedFile)) {
        return false;
    }
    // Accept files no more than 10% smaller than the expected size.
    const shortfallAllowed = sizeBytes * 0.1;
    return fs.statSync(cachedFile).size > sizeBytes - shortfallAllowed;
}
|
|
23
|
+
/**
 * Get the path to a cached model.
 * Pure path computation — the file may or may not exist yet.
 */
export function getModelPath(tier) {
    const { filename } = MODELS[tier];
    return path.join(MODELS_DIR, filename);
}
|
|
29
|
+
/**
 * Get model info for a tier.
 * Simple lookup into the MODELS registry.
 */
export function getModelInfo(tier) {
    const info = MODELS[tier];
    return info;
}
|
|
35
|
+
/**
 * Download a model from HuggingFace CDN into the models directory.
 * Streams into a `.download` temp file and renames into place on success,
 * so a partial download is never mistaken for a complete model.
 * Calls onProgress with status updates (roughly every 5%).
 *
 * @param {ModelTier} tier which model to fetch
 * @param {(message: string, percent?: number) => void} [onProgress] status callback
 * @returns {Promise<string>} absolute path of the ready model file
 */
export async function downloadModel(tier, onProgress) {
    const model = MODELS[tier];
    const destPath = path.join(MODELS_DIR, model.filename);
    const tempPath = destPath + '.download';
    fs.ensureDirSync(MODELS_DIR);
    // Already cached
    if (isModelCached(tier)) {
        onProgress?.(`Model ${model.name} already cached`, 100);
        return destPath;
    }
    onProgress?.(`Downloading ${model.name} (${model.sizeHuman})...`, 0);
    let writeStream = null;
    try {
        const response = await fetch(model.url);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status}: ${response.statusText}`);
        }
        const contentLength = parseInt(response.headers.get('content-length') || '0', 10);
        const reader = response.body?.getReader();
        if (!reader)
            throw new Error('No response body');
        writeStream = fs.createWriteStream(tempPath);
        const stream = writeStream;
        let downloaded = 0;
        let lastProgressPercent = 0;
        while (true) {
            const { done, value } = await reader.read();
            if (done)
                break;
            // Respect backpressure: when write() returns false the internal buffer is
            // full, so wait for 'drain' instead of letting hundreds of MB queue in
            // memory. A stream 'error' while waiting rejects immediately; both
            // listeners are removed whichever fires first.
            if (!stream.write(Buffer.from(value))) {
                await new Promise((resolve, reject) => {
                    const onDrain = () => {
                        stream.off('error', onError);
                        resolve();
                    };
                    const onError = (err) => {
                        stream.off('drain', onDrain);
                        reject(err);
                    };
                    stream.once('drain', onDrain);
                    stream.once('error', onError);
                });
            }
            downloaded += value.length;
            if (contentLength > 0) {
                const percent = Math.round((downloaded / contentLength) * 100);
                if (percent >= lastProgressPercent + 5) { // Report every 5%
                    lastProgressPercent = percent;
                    onProgress?.(`Downloading ${model.name}: ${percent}%`, percent);
                }
            }
        }
        stream.end();
        await new Promise((resolve, reject) => {
            stream.on('finish', resolve);
            stream.on('error', reject);
        });
        // Atomic rename so readers never observe a half-written model file
        fs.renameSync(tempPath, destPath);
        onProgress?.(`Model ${model.name} ready`, 100);
        return destPath;
    }
    catch (error) {
        // Tear down the stream (if it was opened) before removing the temp file
        writeStream?.destroy();
        fs.removeSync(tempPath);
        throw error;
    }
}
|
|
92
|
+
/**
 * Ensure a model is available, downloading it if needed.
 * Resolves to the model's absolute path either way.
 */
export async function ensureModel(tier, onProgress) {
    return isModelCached(tier)
        ? getModelPath(tier)
        : downloadModel(tier, onProgress);
}
|
|
101
|
+
/**
 * Get the models directory path (where GGUF files are cached).
 */
export function getModelsDir() {
    return MODELS_DIR;
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import type { InferenceProvider, InferenceOptions, ModelTier } from './types.js';
/**
 * Inference provider backed by a pre-compiled llama.cpp sidecar binary,
 * spawned per request (no persistent process).
 */
export declare class SidecarProvider implements InferenceProvider {
    readonly name = "sidecar";
    private binaryPath;
    private modelPath;
    private tier;
    private threads;
    constructor(tier?: ModelTier, threads?: number);
    /** True when a usable sidecar binary can be located on this machine. */
    isAvailable(): Promise<boolean>;
    /** Locates the binary and downloads the model if missing; throws if no binary is found. */
    setup(onProgress?: (message: string) => void): Promise<void>;
    /** Runs one prompt through the binary and returns its raw stdout text. */
    analyze(prompt: string, options?: InferenceOptions): Promise<string>;
    /** Clears cached paths; nothing persistent to shut down. */
    dispose(): void;
    private getPlatformKey;
    private resolveBinaryPath;
}
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Sidecar Binary Provider — runs inference via pre-compiled llama.cpp binary.
|
|
3
|
+
* Binary ships as @rigour/brain-{platform} optional npm dependency.
|
|
4
|
+
* Falls back to PATH lookup for development/manual installs.
|
|
5
|
+
*/
|
|
6
|
+
import { execFile } from 'child_process';
import { promisify } from 'util';
import path from 'path';
import os from 'os';
import { fileURLToPath } from 'url';
import fs from 'fs-extra';
import { ensureModel, isModelCached, getModelInfo } from './model-manager.js';
|
|
12
|
+
// Promise-returning wrapper around child_process.execFile.
const execFileAsync = promisify(execFile);
|
|
13
|
+
/** Platform → npm package mapping */
// NOTE(review): the win32 package name drops the "32" ('brain-win-x64'), unlike
// the 'win32-x64' platform key produced elsewhere — confirm the published name.
const PLATFORM_PACKAGES = {
    'darwin-arm64': '@rigour/brain-darwin-arm64',
    'darwin-x64': '@rigour/brain-darwin-x64',
    'linux-x64': '@rigour/brain-linux-x64',
    'linux-arm64': '@rigour/brain-linux-arm64',
    'win32-x64': '@rigour/brain-win-x64',
};
|
|
21
|
+
/**
 * Sidecar provider: spawns a pre-compiled llama.cpp binary per request.
 * No persistent child process is kept between calls.
 */
export class SidecarProvider {
    name = 'sidecar';
    binaryPath = null;
    modelPath = null;
    tier;
    threads;
    /**
     * @param {ModelTier} [tier='deep'] model tier to run
     * @param {number} [threads=4] CPU threads passed to the binary
     */
    constructor(tier = 'deep', threads = 4) {
        this.tier = tier;
        this.threads = threads;
    }
    /** True when a usable inference binary can be located on this machine. */
    async isAvailable() {
        return (await this.resolveBinaryPath()) !== null;
    }
    /**
     * One-time setup: locate the binary, then ensure the model is downloaded.
     * @throws when no sidecar binary can be found by any resolution strategy
     */
    async setup(onProgress) {
        // 1. Check/resolve binary
        this.binaryPath = await this.resolveBinaryPath();
        if (this.binaryPath) {
            onProgress?.('✓ Inference engine ready');
        }
        else {
            onProgress?.('⚠ Inference engine not found. Install @rigour/brain-* or add llama-cli to PATH');
            // Suggest the real package name: on Windows the package is
            // @rigour/brain-win-x64, not brain-<platformKey> (= brain-win32-x64).
            const pkg = PLATFORM_PACKAGES[this.getPlatformKey()] ?? '@rigour/brain-' + this.getPlatformKey();
            throw new Error('Sidecar binary not found. Run: npm install ' + pkg);
        }
        // 2. Ensure model is downloaded
        if (!isModelCached(this.tier)) {
            const modelInfo = getModelInfo(this.tier);
            onProgress?.(`⬇ Downloading analysis model (${modelInfo.sizeHuman})...`);
        }
        this.modelPath = await ensureModel(this.tier, (msg, percent) => {
            if (percent !== undefined && percent < 100) {
                onProgress?.(`  ${msg}`);
            }
        });
        onProgress?.('✓ Model ready');
    }
    /**
     * Run one prompt through the binary and return the raw completion text.
     * @throws if setup() has not been called, on timeout, or on process failure
     */
    async analyze(prompt, options) {
        if (!this.binaryPath || !this.modelPath) {
            throw new Error('Provider not set up. Call setup() first.');
        }
        const timeout = options?.timeout ?? 60000;
        const args = [
            '--model', this.modelPath,
            '--prompt', prompt,
            '--n-predict', String(options?.maxTokens ?? 512),
            '--threads', String(this.threads),
            // `??`, not `||`: a caller-supplied temperature of 0 is a valid setting
            // and must not be silently replaced by the 0.1 default.
            '--temp', String(options?.temperature ?? 0.1),
            '--no-display-prompt', // Don't echo the prompt
            '--log-disable', // Suppress llama.cpp logging
        ];
        // JSON grammar constraint if available
        if (options?.jsonMode) {
            args.push('--json');
        }
        try {
            const { stdout } = await execFileAsync(this.binaryPath, args, {
                timeout,
                maxBuffer: 10 * 1024 * 1024, // 10MB
                env: { ...process.env, LLAMA_LOG_DISABLE: '1' },
            });
            // llama.cpp sometimes outputs to stderr for diagnostics — ignore
            return stdout.trim();
        }
        catch (error) {
            if (error.killed) {
                throw new Error(`Inference timed out after ${timeout / 1000}s`);
            }
            throw new Error(`Inference failed: ${error.message}`);
        }
    }
    /** Release cached paths; there is no persistent child process to kill. */
    dispose() {
        this.binaryPath = null;
        this.modelPath = null;
    }
    /** Platform key like 'darwin-arm64', used to select the npm binary package. */
    getPlatformKey() {
        return `${os.platform()}-${os.arch()}`;
    }
    /**
     * Locate the inference binary, trying in order:
     *  1. the @rigour/brain-{platform} optional npm dependency,
     *  2. ~/.rigour/bin/rigour-brain,
     *  3. llama.cpp binaries on PATH (llama-cli / llama / main).
     * Returns null when nothing usable is found.
     */
    async resolveBinaryPath() {
        const platformKey = this.getPlatformKey();
        const exeSuffix = os.platform() === 'win32' ? '.exe' : '';
        // Strategy 1: Check @rigour/brain-{platform} optional dependency
        const packageName = PLATFORM_PACKAGES[platformKey];
        if (packageName) {
            // __dirname does not exist in an ES module; derive the directory of
            // this file from import.meta.url instead.
            const moduleDir = path.dirname(fileURLToPath(import.meta.url));
            const possiblePaths = [
                // From rigour-core node_modules
                path.join(moduleDir, '..', '..', '..', 'node_modules', ...packageName.split('/'), 'bin', 'rigour-brain'),
                // From global node_modules
                path.join(os.homedir(), '.npm-global', 'lib', 'node_modules', ...packageName.split('/'), 'bin', 'rigour-brain'),
            ];
            for (const p of possiblePaths) {
                const binPath = p + exeSuffix;
                if (await fs.pathExists(binPath)) {
                    return binPath;
                }
            }
        }
        // Strategy 2: Check ~/.rigour/bin/
        const localBinPath = path.join(os.homedir(), '.rigour', 'bin', 'rigour-brain') + exeSuffix;
        if (await fs.pathExists(localBinPath)) {
            return localBinPath;
        }
        // Strategy 3: Check PATH for llama.cpp binaries. `which` does not exist
        // on Windows — `where` is the equivalent there.
        const lookup = os.platform() === 'win32' ? 'where' : 'which';
        for (const name of ['llama-cli', 'llama', 'main']) {
            try {
                const { stdout } = await execFileAsync(lookup, [name]);
                // `where` may print several matches, one per line — take the first.
                const found = stdout.trim().split(/\r?\n/)[0];
                if (found && await fs.pathExists(found)) {
                    return found;
                }
            }
            catch {
                // Not on PATH — continue
            }
        }
        return null;
    }
}
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
/**
 * Inference provider interface for Rigour deep analysis.
 * Supports sidecar binary (local llama.cpp), cloud APIs (Claude/OpenAI).
 */
import type { Severity } from '../types/index.js';
/**
 * Abstract inference provider — all backends implement this.
 */
export interface InferenceProvider {
    /** Provider name for logging/reporting */
    readonly name: string;
    /** Check if this provider is available (binary exists, API key valid, etc.) */
    isAvailable(): Promise<boolean>;
    /**
     * One-time setup: download model, verify binary, etc.
     * Should show progress to user via callback.
     */
    setup(onProgress?: (message: string) => void): Promise<void>;
    /**
     * Run inference on a prompt. Returns raw text response.
     * Provider handles tokenization, temperature, etc.
     */
    analyze(prompt: string, options?: InferenceOptions): Promise<string>;
    /** Clean up resources (kill process, close connection) */
    dispose(): void;
}
/** Per-request tuning knobs passed to InferenceProvider.analyze(). */
export interface InferenceOptions {
    /** Maximum tokens to generate. */
    maxTokens?: number;
    /** Sampling temperature. */
    temperature?: number;
    /** Request timeout in milliseconds. */
    timeout?: number;
    /** Request JSON-constrained output where the backend supports it. */
    jsonMode?: boolean;
}
/**
 * A single finding from deep LLM analysis.
 */
export interface DeepFinding {
    /** Category like 'srp_violation', 'god_function', 'dry_violation' */
    category: string;
    /** Severity level */
    severity: Severity;
    /** Relative file path */
    file: string;
    /** Line number (if available) */
    line?: number;
    /** Human-readable description of the issue */
    description: string;
    /** Actionable suggestion for how to fix */
    suggestion: string;
    /** LLM confidence score 0.0-1.0 */
    confidence: number;
}
/**
 * Result of a deep analysis batch.
 */
export interface DeepAnalysisResult {
    findings: DeepFinding[];
    /** Name of the model that produced the findings. */
    model: string;
    /** Token count, when the backend reports one. */
    tokensUsed?: number;
    /** Wall-clock duration of the batch in milliseconds. */
    durationMs: number;
}
/**
 * Available model tiers.
 */
export type ModelTier = 'deep' | 'pro';
/**
 * Model info for download/caching.
 */
export interface ModelInfo {
    tier: ModelTier;
    /** Human-readable model name. */
    name: string;
    /** On-disk GGUF filename. */
    filename: string;
    /** Download URL. */
    url: string;
    /** Expected download size in bytes. */
    sizeBytes: number;
    /** Human-readable size, e.g. '350MB'. */
    sizeHuman: string;
}
/** All supported model definitions */
export declare const MODELS: Record<ModelTier, ModelInfo>;
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/** All supported model definitions */
export const MODELS = {
    // 'deep' tier: smaller model (Q4_K_M-quantized GGUF, per the filename).
    deep: {
        tier: 'deep',
        name: 'Qwen2.5-Coder-0.5B-Instruct',
        filename: 'qwen2.5-coder-0.5b-instruct-q4_k_m.gguf',
        url: 'https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF/resolve/main/qwen2.5-coder-0.5b-instruct-q4_k_m.gguf',
        // NOTE(review): sizes look approximate (round numbers) — cache
        // validation elsewhere allows a 10% tolerance; confirm if updated.
        sizeBytes: 350_000_000,
        sizeHuman: '350MB',
    },
    // 'pro' tier: larger model of the same family.
    pro: {
        tier: 'pro',
        name: 'Qwen2.5-Coder-1.5B-Instruct',
        filename: 'qwen2.5-coder-1.5b-instruct-q4_k_m.gguf',
        url: 'https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF/resolve/main/qwen2.5-coder-1.5b-instruct-q4_k_m.gguf',
        sizeBytes: 900_000_000,
        sizeHuman: '900MB',
    },
};
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
/**
 * RigourSettings Interface
 * Defines the schema for ~/.rigour/settings.json global user configuration
 */
export interface RigourSettings {
    /** API keys keyed by canonical provider name (anthropic, openai, …). */
    providers?: {
        anthropic?: string;
        openai?: string;
        groq?: string;
        deepseek?: string;
        mistral?: string;
        together?: string;
        gemini?: string;
        ollama?: string;
        [key: string]: string | undefined;
    };
    /** Defaults for deep (LLM-backed) analysis. */
    deep?: {
        defaultProvider?: string;
        defaultModel?: string;
        apiBaseUrl?: string;
        maxTokens?: number;
        temperature?: number;
    };
    /** Per-agent model/provider overrides, keyed by agent name. */
    agents?: {
        [agentName: string]: {
            model?: string;
            provider?: string;
            fallback?: string;
        };
    };
    /** CLI presentation/behavior preferences. */
    cli?: {
        defaultPreset?: string;
        colorOutput?: boolean;
        verboseOutput?: boolean;
    };
}
/**
 * Resolved deep options from CLI flags merged with settings.json
 */
export interface ResolvedDeepOptions {
    apiKey?: string;
    provider?: string;
    apiBaseUrl?: string;
    modelName?: string;
    maxTokens?: number;
    temperature?: number;
}
/**
 * CLI options that may override settings
 */
export interface CLIDeepOptions {
    apiKey?: string;
    provider?: string;
    apiBaseUrl?: string;
    modelName?: string;
    maxTokens?: number;
    temperature?: number;
}
/**
 * Get the settings file path: ~/.rigour/settings.json
 */
export declare function getSettingsPath(): string;
/**
 * Load settings from ~/.rigour/settings.json
 * Returns empty object if file not found or is malformed
 */
export declare function loadSettings(): RigourSettings;
/**
 * Save settings to ~/.rigour/settings.json
 */
export declare function saveSettings(settings: RigourSettings): void;
/**
 * Resolve deep analysis options by merging CLI flags with settings.json
 * CLI flags always take precedence over settings.json values
 *
 * @param cliOptions CLI flags provided by user
 * @returns Merged options with CLI taking precedence
 */
export declare function resolveDeepOptions(cliOptions: CLIDeepOptions): ResolvedDeepOptions;
/**
 * Get a specific provider's API key from settings
 * Supports both normalized names (claude -> anthropic) and exact keys
 */
export declare function getProviderKey(providerName: string): string | undefined;
/**
 * Get agent configuration from settings
 */
export declare function getAgentConfig(agentName: string): {
    model?: string;
    provider?: string;
    fallback?: string;
} | undefined;
/**
 * Get CLI preferences from settings
 */
export declare function getCliPreferences(): RigourSettings['cli'];
/**
 * Update a specific provider key in settings
 */
export declare function updateProviderKey(provider: string, apiKey: string): void;
/**
 * Remove a provider key from settings
 */
export declare function removeProviderKey(provider: string): void;
|
package/dist/settings.js
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
import os from 'os';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import fs from 'fs-extra';
|
|
4
|
+
import { Logger } from './utils/logger.js';
|
|
5
|
+
/**
 * Absolute path of the global settings file: ~/.rigour/settings.json
 */
export function getSettingsPath() {
    return path.join(os.homedir(), '.rigour', 'settings.json');
}
|
|
12
|
+
/**
 * Load settings from ~/.rigour/settings.json
 * Never throws: a missing or malformed file yields an empty object,
 * with a debug/warn log explaining why.
 */
export function loadSettings() {
    const settingsPath = getSettingsPath();
    try {
        if (!fs.existsSync(settingsPath)) {
            Logger.debug(`Settings file not found at ${settingsPath}`);
            return {};
        }
        const parsed = JSON.parse(fs.readFileSync(settingsPath, 'utf-8'));
        Logger.debug(`Settings loaded from ${settingsPath}`);
        return parsed;
    }
    catch (error) {
        // Distinguish bad JSON from I/O failures; both degrade to defaults.
        const detail = error instanceof Error ? `: ${error.message}` : '';
        if (error instanceof SyntaxError) {
            Logger.warn(`Malformed JSON in ${settingsPath}${detail}`);
        }
        else {
            Logger.warn(`Failed to read settings from ${settingsPath}${detail}`);
        }
        return {};
    }
}
|
|
41
|
+
/**
 * Save settings to ~/.rigour/settings.json
 * Creates the directory if needed; rethrows on write failure after logging.
 */
export function saveSettings(settings) {
    const settingsPath = getSettingsPath();
    try {
        fs.ensureDirSync(path.dirname(settingsPath));
        // Pretty-print with a 2-space indent for hand-editability.
        const json = JSON.stringify(settings, null, 2);
        fs.writeFileSync(settingsPath, json, 'utf-8');
        Logger.debug(`Settings saved to ${settingsPath}`);
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        Logger.error(`Failed to save settings to ${settingsPath}: ${message}`);
        throw error;
    }
}
|
|
60
|
+
/**
 * Map common provider names to their settings.json key, so that e.g.
 * --provider claude resolves to the anthropic API key. Unknown names
 * pass through unchanged (original casing preserved).
 */
function normalizeProviderName(provider) {
    switch (provider.toLowerCase()) {
        case 'claude':
        case 'anthropic':
            return 'anthropic';
        case 'gpt':
        case 'openai':
            return 'openai';
        case 'google':
        case 'gemini':
            return 'gemini';
        case 'groq':
        case 'deepseek':
        case 'mistral':
        case 'together':
        case 'ollama':
            // Identity mappings — lowercased canonical form.
            return provider.toLowerCase();
        default:
            return provider;
    }
}
|
|
80
|
+
/**
 * Resolve deep analysis options by merging CLI flags with settings.json
 * CLI flags always take precedence over settings.json values
 *
 * @param cliOptions CLI flags provided by user
 * @returns Merged options with CLI taking precedence
 */
export function resolveDeepOptions(cliOptions) {
    const settings = loadSettings();
    const result = {};
    // 1. Start with settings.json defaults.
    // Numeric options are compared against undefined rather than by truthiness,
    // so a legitimate 0 (e.g. temperature 0) is not silently dropped.
    if (settings.deep?.apiBaseUrl) {
        result.apiBaseUrl = settings.deep.apiBaseUrl;
    }
    if (settings.deep?.maxTokens !== undefined) {
        result.maxTokens = settings.deep.maxTokens;
    }
    if (settings.deep?.temperature !== undefined) {
        result.temperature = settings.deep.temperature;
    }
    // 2. Provider selection: CLI flag > settings default > 'anthropic'
    const selectedProvider = cliOptions.provider || settings.deep?.defaultProvider || 'anthropic';
    result.provider = selectedProvider;
    // 3. Model selection: CLI flag > settings default
    if (settings.deep?.defaultModel) {
        result.modelName = settings.deep.defaultModel;
    }
    if (cliOptions.modelName) {
        result.modelName = cliOptions.modelName;
    }
    // 4. Resolve API key: explicit CLI key wins; otherwise look up the
    // (normalized) selected provider in settings.providers.
    if (cliOptions.apiKey) {
        result.apiKey = cliOptions.apiKey;
    }
    else if (settings.providers) {
        const apiKey = settings.providers[normalizeProviderName(selectedProvider)];
        if (apiKey) {
            result.apiKey = apiKey;
        }
    }
    // 5. Override with remaining CLI flags (highest priority).
    if (cliOptions.apiBaseUrl) {
        result.apiBaseUrl = cliOptions.apiBaseUrl;
    }
    if (cliOptions.maxTokens !== undefined) {
        result.maxTokens = cliOptions.maxTokens;
    }
    if (cliOptions.temperature !== undefined) {
        result.temperature = cliOptions.temperature;
    }
    return result;
}
|
|
138
|
+
/**
 * Get a specific provider's API key from settings.
 * Accepts aliases (claude -> anthropic) as well as exact keys;
 * returns undefined when no providers section or no matching key exists.
 */
export function getProviderKey(providerName) {
    const { providers } = loadSettings();
    return providers?.[normalizeProviderName(providerName)];
}
|
|
150
|
+
/**
 * Get agent configuration from settings, or undefined when the
 * agent has no entry.
 */
export function getAgentConfig(agentName) {
    return loadSettings().agents?.[agentName];
}
|
|
157
|
+
/**
 * Get CLI preferences from settings (empty object when none are set).
 */
export function getCliPreferences() {
    return loadSettings().cli ?? {};
}
|
|
164
|
+
/**
 * Update a specific provider key in settings and persist the result.
 * The provider name is normalized (claude -> anthropic) before storage.
 */
export function updateProviderKey(provider, apiKey) {
    const settings = loadSettings();
    const canonicalName = normalizeProviderName(provider);
    settings.providers = { ...settings.providers, [canonicalName]: apiKey };
    saveSettings(settings);
}
|
|
176
|
+
/**
 * Remove a provider key from settings and persist the result.
 * No-op (and no write) when the key is not present.
 */
export function removeProviderKey(provider) {
    const settings = loadSettings();
    const normalized = normalizeProviderName(provider);
    // Use `in` rather than a truthiness test so a present-but-falsy value
    // (e.g. an empty-string key) is still deleted.
    if (settings.providers && normalized in settings.providers) {
        delete settings.providers[normalized];
        saveSettings(settings);
    }
}
|