ship-safe 6.1.1 → 6.2.0
This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/README.md +735 -641
- package/cli/agents/api-fuzzer.js +345 -345
- package/cli/agents/auth-bypass-agent.js +348 -348
- package/cli/agents/base-agent.js +272 -272
- package/cli/agents/cicd-scanner.js +236 -201
- package/cli/agents/config-auditor.js +521 -521
- package/cli/agents/deep-analyzer.js +6 -2
- package/cli/agents/git-history-scanner.js +170 -170
- package/cli/agents/html-reporter.js +568 -568
- package/cli/agents/index.js +84 -84
- package/cli/agents/injection-tester.js +500 -500
- package/cli/agents/llm-redteam.js +251 -251
- package/cli/agents/mobile-scanner.js +231 -231
- package/cli/agents/orchestrator.js +322 -322
- package/cli/agents/pii-compliance-agent.js +301 -301
- package/cli/agents/scoring-engine.js +248 -248
- package/cli/agents/supabase-rls-agent.js +154 -154
- package/cli/agents/supply-chain-agent.js +650 -507
- package/cli/bin/ship-safe.js +452 -426
- package/cli/commands/agent.js +608 -608
- package/cli/commands/audit.js +986 -980
- package/cli/commands/baseline.js +193 -193
- package/cli/commands/ci.js +342 -342
- package/cli/commands/deps.js +516 -516
- package/cli/commands/doctor.js +159 -159
- package/cli/commands/fix.js +218 -218
- package/cli/commands/hooks.js +268 -0
- package/cli/commands/init.js +407 -407
- package/cli/commands/mcp.js +304 -304
- package/cli/commands/red-team.js +7 -1
- package/cli/commands/remediate.js +798 -798
- package/cli/commands/rotate.js +571 -571
- package/cli/commands/scan.js +569 -569
- package/cli/commands/score.js +449 -449
- package/cli/commands/watch.js +281 -281
- package/cli/hooks/patterns.js +313 -0
- package/cli/hooks/post-tool-use.js +140 -0
- package/cli/hooks/pre-tool-use.js +186 -0
- package/cli/index.js +73 -69
- package/cli/providers/llm-provider.js +397 -287
- package/cli/utils/autofix-rules.js +74 -74
- package/cli/utils/cache-manager.js +311 -311
- package/cli/utils/output.js +230 -230
- package/cli/utils/patterns.js +1121 -1121
- package/cli/utils/pdf-generator.js +94 -94
- package/package.json +69 -69
- package/configs/supabase/rls-templates.sql +0 -242
package/cli/providers/llm-provider.js
@@ -1,287 +1,397 @@
-/**
- * Multi-LLM Provider
- * ===================
- *
- * Abstraction layer for LLM providers.
- * Supports: Anthropic (Claude), OpenAI, Google (Gemini), Ollama (local)
- *
- *
- *
- *
  … 277 more removed lines not rendered in the source diff …
+/**
+ * Multi-LLM Provider
+ * ===================
+ *
+ * Abstraction layer for LLM providers.
+ * Supports: Anthropic (Claude), OpenAI, Google (Gemini), Ollama (local),
+ * and any OpenAI-compatible endpoint (Groq, Together AI, Mistral API,
+ * LM Studio, Azure OpenAI, AWS Bedrock via proxy, etc.).
+ *
+ * USAGE:
+ *   const provider = createProvider('anthropic', apiKey);
+ *   const provider = createProvider('groq', apiKey);
+ *   const provider = createProvider('openai', apiKey, { baseUrl: 'https://custom/v1/chat/completions' });
+ *   const result = await provider.classify(findings, context);
+ */
+
+import fs from 'fs';
+import path from 'path';
+
+// =============================================================================
+// PROVIDER INTERFACE
+// =============================================================================
+
+class BaseLLMProvider {
+  constructor(name, apiKey, options = {}) {
+    this.name = name;
+    this.apiKey = apiKey;
+    this.model = options.model || null;
+    this.baseUrl = options.baseUrl || null;
+  }
+
+  /**
+   * Send a prompt to the LLM and get a text response.
+   */
+  async complete(systemPrompt, userPrompt, options = {}) {
+    throw new Error(`${this.name}.complete() not implemented`);
+  }
+
+  /**
+   * Classify security findings using the LLM.
+   */
+  async classify(findings, context) {
+    const prompt = this.buildClassificationPrompt(findings, context);
+    const response = await this.complete(
+      'You are a security expert. Respond with JSON only, no markdown.',
+      prompt,
+      { maxTokens: 4096 }
+    );
+    return this.parseJSON(response);
+  }
+
+  buildClassificationPrompt(findings, context) {
+    const items = findings.map(f => ({
+      id: `${f.file}:${f.line}`,
+      rule: f.rule,
+      severity: f.severity,
+      title: f.title,
+      matched: f.matched?.slice(0, 100),
+      description: f.description,
+    }));
+
+    return `Classify each finding as REAL or FALSE_POSITIVE. For REAL findings, provide a specific fix.
+
+Respond with JSON array ONLY:
+[{"id":"<id>","classification":"REAL"|"FALSE_POSITIVE","reason":"<brief reason>","fix":"<specific fix or null>"}]
+
+Findings:
+${JSON.stringify(items, null, 2)}`;
+  }
+
+  parseJSON(text) {
+    const cleaned = text
+      .replace(/^```(?:json)?\s*/i, '')
+      .replace(/\s*```\s*$/i, '')
+      .trim();
+    try {
+      return JSON.parse(cleaned);
+    } catch {
+      return [];
+    }
+  }
+}
+
+// =============================================================================
+// ANTHROPIC PROVIDER (Claude)
+// =============================================================================
+
+class AnthropicProvider extends BaseLLMProvider {
+  constructor(apiKey, options = {}) {
+    super('Anthropic', apiKey, options);
+    this.model = options.model || 'claude-haiku-4-5-20251001';
+    this.baseUrl = options.baseUrl || 'https://api.anthropic.com/v1/messages';
+  }
+
+  async complete(systemPrompt, userPrompt, options = {}) {
+    const response = await fetch(this.baseUrl, {
+      method: 'POST',
+      headers: {
+        'x-api-key': this.apiKey,
+        'anthropic-version': '2023-06-01',
+        'content-type': 'application/json',
+      },
+      body: JSON.stringify({
+        model: this.model,
+        max_tokens: options.maxTokens || 2048,
+        system: systemPrompt,
+        messages: [{ role: 'user', content: userPrompt }],
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`Anthropic API error: HTTP ${response.status}`);
+    }
+
+    const data = await response.json();
+    return data.content?.[0]?.text || '';
+  }
+}
+
+// =============================================================================
+// OPENAI PROVIDER (GPT-4o, etc.)
+// =============================================================================
+
+class OpenAIProvider extends BaseLLMProvider {
+  constructor(apiKey, options = {}) {
+    super('OpenAI', apiKey, options);
+    this.model = options.model || 'gpt-4o-mini';
+    this.baseUrl = options.baseUrl || 'https://api.openai.com/v1/chat/completions';
+  }
+
+  async complete(systemPrompt, userPrompt, options = {}) {
+    const response = await fetch(this.baseUrl, {
+      method: 'POST',
+      headers: {
+        'Authorization': `Bearer ${this.apiKey}`,
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({
+        model: this.model,
+        max_tokens: options.maxTokens || 2048,
+        messages: [
+          { role: 'system', content: systemPrompt },
+          { role: 'user', content: userPrompt },
+        ],
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`OpenAI API error: HTTP ${response.status}`);
+    }
+
+    const data = await response.json();
+    return data.choices?.[0]?.message?.content || '';
+  }
+}
+
+// =============================================================================
+// GOOGLE PROVIDER (Gemini)
+// =============================================================================
+
+class GoogleProvider extends BaseLLMProvider {
+  constructor(apiKey, options = {}) {
+    super('Google', apiKey, options);
+    this.model = options.model || 'gemini-2.0-flash';
+  }
+
+  async complete(systemPrompt, userPrompt, options = {}) {
+    const url = `https://generativelanguage.googleapis.com/v1beta/models/${this.model}:generateContent`;
+
+    const response = await fetch(url, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        'x-goog-api-key': this.apiKey,
+      },
+      body: JSON.stringify({
+        systemInstruction: { parts: [{ text: systemPrompt }] },
+        contents: [{ parts: [{ text: userPrompt }] }],
+        generationConfig: { maxOutputTokens: options.maxTokens || 2048 },
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`Google API error: HTTP ${response.status}`);
+    }
+
+    const data = await response.json();
+    return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
+  }
+}
+
+// =============================================================================
+// OLLAMA PROVIDER (Local models)
+// =============================================================================
+
+class OllamaProvider extends BaseLLMProvider {
+  constructor(apiKey, options = {}) {
+    super('Ollama', null, options);
+    this.model = options.model || 'llama3.2';
+    this.baseUrl = options.baseUrl || 'http://localhost:11434/api/chat';
+  }
+
+  async complete(systemPrompt, userPrompt, options = {}) {
+    const response = await fetch(this.baseUrl, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        model: this.model,
+        messages: [
+          { role: 'system', content: systemPrompt },
+          { role: 'user', content: userPrompt },
+        ],
+        stream: false,
+      }),
+    });
+
+    if (!response.ok) {
+      throw new Error(`Ollama error: HTTP ${response.status}`);
+    }
+
+    const data = await response.json();
+    return data.message?.content || '';
+  }
+}
+
+// =============================================================================
+// OPENAI-COMPATIBLE PROVIDER
+// Handles Groq, Together AI, Mistral API, LM Studio, Azure OpenAI, Bedrock
+// proxy, and any other endpoint that speaks /v1/chat/completions.
+// =============================================================================
+
+// Well-known OpenAI-compatible base URLs and their default models.
+const OPENAI_COMPATIBLE_PRESETS = {
+  groq: { baseUrl: 'https://api.groq.com/openai/v1/chat/completions', model: 'llama-3.3-70b-versatile', envKey: 'GROQ_API_KEY' },
+  together: { baseUrl: 'https://api.together.xyz/v1/chat/completions', model: 'meta-llama/Llama-3-70b-chat-hf', envKey: 'TOGETHER_API_KEY' },
+  mistral: { baseUrl: 'https://api.mistral.ai/v1/chat/completions', model: 'mistral-large-latest', envKey: 'MISTRAL_API_KEY' },
+  cohere: { baseUrl: 'https://api.cohere.com/compatibility/v1/chat/completions', model: 'command-r-plus', envKey: 'COHERE_API_KEY' },
+  deepseek: { baseUrl: 'https://api.deepseek.com/v1/chat/completions', model: 'deepseek-chat', envKey: 'DEEPSEEK_API_KEY' },
+  perplexity: { baseUrl: 'https://api.perplexity.ai/chat/completions', model: 'llama-3.1-sonar-large-128k-online', envKey: 'PERPLEXITY_API_KEY' },
+  lmstudio: { baseUrl: 'http://localhost:1234/v1/chat/completions', model: null, envKey: null },
+  xai: { baseUrl: 'https://api.x.ai/v1/chat/completions', model: 'grok-3-mini', envKey: 'XAI_API_KEY' },
+};
+
+class OpenAICompatibleProvider extends OpenAIProvider {
+  constructor(name, apiKey, options = {}) {
+    super(apiKey, options);
+    this.name = name;
+  }
+}
+
+// =============================================================================
+// FACTORY
+// =============================================================================
+
+/**
+ * Create an LLM provider instance.
+ *
+ * @param {string} provider — 'anthropic' | 'openai' | 'google' | 'ollama'
+ *                            or any preset: 'groq' | 'together' | 'mistral' |
+ *                            'cohere' | 'deepseek' | 'perplexity' | 'lmstudio' | 'xai'
+ * @param {string} apiKey   — API key (null for Ollama/LM Studio)
+ * @param {object} options  — { model, baseUrl }
+ *                            baseUrl overrides the default for any provider.
+ */
+export function createProvider(provider, apiKey, options = {}) {
+  const name = provider.toLowerCase();
+
+  // First-class providers
+  switch (name) {
+    case 'anthropic':
+    case 'claude':
+      return new AnthropicProvider(apiKey, options);
+    case 'openai':
+    case 'gpt':
+      return new OpenAIProvider(apiKey, options);
+    case 'google':
+    case 'gemini':
+      return new GoogleProvider(apiKey, options);
+    case 'ollama':
+    case 'local':
+      return new OllamaProvider(apiKey, options);
+  }
+
+  // OpenAI-compatible presets
+  if (OPENAI_COMPATIBLE_PRESETS[name]) {
+    const preset = OPENAI_COMPATIBLE_PRESETS[name];
+    return new OpenAICompatibleProvider(
+      // Capitalise for display: "groq" → "Groq"
+      name.charAt(0).toUpperCase() + name.slice(1),
+      apiKey,
+      {
+        baseUrl: options.baseUrl || preset.baseUrl,
+        model: options.model || preset.model || 'default',
+      }
+    );
+  }
+
+  // Unknown name but caller supplied a baseUrl — treat as generic OpenAI-compatible
+  if (options.baseUrl) {
+    return new OpenAICompatibleProvider(provider, apiKey, options);
+  }
+
+  throw new Error(
+    `Unknown LLM provider: "${provider}".\n` +
+    `Built-in: anthropic, openai, google, ollama\n` +
+    `Presets: groq, together, mistral, cohere, deepseek, perplexity, lmstudio, xai\n` +
+    `Custom: pass any name with --base-url <url>`
+  );
+}
+
+/**
+ * Auto-detect the best available LLM provider from environment variables.
+ *
+ * @param {string} rootPath — Project root (for .env file scan)
+ * @param {object} options  — { provider, baseUrl, model } explicit overrides
+ */
+export function autoDetectProvider(rootPath, options = {}) {
+  // Explicit provider name requested
+  if (options.provider) {
+    const apiKey = resolveApiKey(options.provider, rootPath);
+    return createProvider(options.provider, apiKey, {
+      model: options.model,
+      baseUrl: options.baseUrl,
+    });
+  }
+
+  // baseUrl supplied without a provider name → openai-compatible with auto key
+  if (options.baseUrl) {
+    const apiKey = process.env.OPENAI_API_KEY || resolveApiKey('openai', rootPath) || '';
+    return new OpenAICompatibleProvider('custom', apiKey, {
+      baseUrl: options.baseUrl,
+      model: options.model || 'default',
+    });
+  }
+
+  // Standard env-var auto-detection (first match wins)
+  const envKeys = {
+    ANTHROPIC_API_KEY: 'anthropic',
+    OPENAI_API_KEY: 'openai',
+    GOOGLE_API_KEY: 'google',
+    GEMINI_API_KEY: 'google',
+    GROQ_API_KEY: 'groq',
+    TOGETHER_API_KEY: 'together',
+    MISTRAL_API_KEY: 'mistral',
+    DEEPSEEK_API_KEY: 'deepseek',
+    XAI_API_KEY: 'xai',
+  };
+
+  for (const [envVar, providerName] of Object.entries(envKeys)) {
+    if (process.env[envVar]) {
+      return createProvider(providerName, process.env[envVar], { model: options.model });
+    }
+  }
+
+  // Check .env file
+  if (rootPath) {
+    const envPath = path.join(rootPath, '.env');
+    if (fs.existsSync(envPath)) {
+      try {
+        const content = fs.readFileSync(envPath, 'utf-8');
+        for (const [envVar, providerName] of Object.entries(envKeys)) {
+          const match = content.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
+          if (match) return createProvider(providerName, match[1], { model: options.model });
+        }
+      } catch { /* ignore */ }
+    }
+  }
+
+  return null;
+}
+
+/**
+ * Resolve an API key for a given provider name from env or .env file.
+ */
+function resolveApiKey(providerName, rootPath) {
+  const name = providerName.toLowerCase();
+  const preset = OPENAI_COMPATIBLE_PRESETS[name];
+  const envVar = preset?.envKey || `${name.toUpperCase()}_API_KEY`;
+
+  if (process.env[envVar]) return process.env[envVar];
+
+  if (rootPath) {
+    const envPath = path.join(rootPath, '.env');
+    if (fs.existsSync(envPath)) {
+      try {
+        const content = fs.readFileSync(envPath, 'utf-8');
+        const match = content.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
+        if (match) return match[1];
+      } catch { /* ignore */ }
+    }
+  }
+
+  return null;
+}
+
+export { OPENAI_COMPATIBLE_PRESETS };
+export default { createProvider, autoDetectProvider };