ship-safe 6.1.1 → 6.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47):
  1. package/README.md +735 -641
  2. package/cli/agents/api-fuzzer.js +345 -345
  3. package/cli/agents/auth-bypass-agent.js +348 -348
  4. package/cli/agents/base-agent.js +272 -272
  5. package/cli/agents/cicd-scanner.js +236 -201
  6. package/cli/agents/config-auditor.js +521 -521
  7. package/cli/agents/deep-analyzer.js +6 -2
  8. package/cli/agents/git-history-scanner.js +170 -170
  9. package/cli/agents/html-reporter.js +568 -568
  10. package/cli/agents/index.js +84 -84
  11. package/cli/agents/injection-tester.js +500 -500
  12. package/cli/agents/llm-redteam.js +251 -251
  13. package/cli/agents/mobile-scanner.js +231 -231
  14. package/cli/agents/orchestrator.js +322 -322
  15. package/cli/agents/pii-compliance-agent.js +301 -301
  16. package/cli/agents/scoring-engine.js +248 -248
  17. package/cli/agents/supabase-rls-agent.js +154 -154
  18. package/cli/agents/supply-chain-agent.js +650 -507
  19. package/cli/bin/ship-safe.js +452 -426
  20. package/cli/commands/agent.js +608 -608
  21. package/cli/commands/audit.js +986 -980
  22. package/cli/commands/baseline.js +193 -193
  23. package/cli/commands/ci.js +342 -342
  24. package/cli/commands/deps.js +516 -516
  25. package/cli/commands/doctor.js +159 -159
  26. package/cli/commands/fix.js +218 -218
  27. package/cli/commands/hooks.js +268 -0
  28. package/cli/commands/init.js +407 -407
  29. package/cli/commands/mcp.js +304 -304
  30. package/cli/commands/red-team.js +7 -1
  31. package/cli/commands/remediate.js +798 -798
  32. package/cli/commands/rotate.js +571 -571
  33. package/cli/commands/scan.js +569 -569
  34. package/cli/commands/score.js +449 -449
  35. package/cli/commands/watch.js +281 -281
  36. package/cli/hooks/patterns.js +313 -0
  37. package/cli/hooks/post-tool-use.js +140 -0
  38. package/cli/hooks/pre-tool-use.js +186 -0
  39. package/cli/index.js +73 -69
  40. package/cli/providers/llm-provider.js +397 -287
  41. package/cli/utils/autofix-rules.js +74 -74
  42. package/cli/utils/cache-manager.js +311 -311
  43. package/cli/utils/output.js +230 -230
  44. package/cli/utils/patterns.js +1121 -1121
  45. package/cli/utils/pdf-generator.js +94 -94
  46. package/package.json +69 -69
  47. package/configs/supabase/rls-templates.sql +0 -242
@@ -1,287 +1,397 @@
1
- /**
2
- * Multi-LLM Provider
3
- * ===================
4
- *
5
- * Abstraction layer for LLM providers.
6
- * Supports: Anthropic (Claude), OpenAI, Google (Gemini), Ollama (local).
7
- *
8
- * USAGE:
9
- * const provider = createProvider('anthropic', apiKey);
10
- * const result = await provider.classify(findings, context);
11
- */
12
-
13
- import fs from 'fs';
14
- import path from 'path';
15
-
16
- // =============================================================================
17
- // PROVIDER INTERFACE
18
- // =============================================================================
19
-
20
- class BaseLLMProvider {
21
- constructor(name, apiKey, options = {}) {
22
- this.name = name;
23
- this.apiKey = apiKey;
24
- this.model = options.model || null;
25
- this.baseUrl = options.baseUrl || null;
26
- }
27
-
28
- /**
29
- * Send a prompt to the LLM and get a text response.
30
- */
31
- async complete(systemPrompt, userPrompt, options = {}) {
32
- throw new Error(`${this.name}.complete() not implemented`);
33
- }
34
-
35
- /**
36
- * Classify security findings using the LLM.
37
- */
38
- async classify(findings, context) {
39
- const prompt = this.buildClassificationPrompt(findings, context);
40
- const response = await this.complete(
41
- 'You are a security expert. Respond with JSON only, no markdown.',
42
- prompt,
43
- { maxTokens: 4096 }
44
- );
45
- return this.parseJSON(response);
46
- }
47
-
48
- buildClassificationPrompt(findings, context) {
49
- const items = findings.map(f => ({
50
- id: `${f.file}:${f.line}`,
51
- rule: f.rule,
52
- severity: f.severity,
53
- title: f.title,
54
- matched: f.matched?.slice(0, 100),
55
- description: f.description,
56
- }));
57
-
58
- return `Classify each finding as REAL or FALSE_POSITIVE. For REAL findings, provide a specific fix.
59
-
60
- Respond with JSON array ONLY:
61
- [{"id":"<id>","classification":"REAL"|"FALSE_POSITIVE","reason":"<brief reason>","fix":"<specific fix or null>"}]
62
-
63
- Findings:
64
- ${JSON.stringify(items, null, 2)}`;
65
- }
66
-
67
- parseJSON(text) {
68
- const cleaned = text
69
- .replace(/^```(?:json)?\s*/i, '')
70
- .replace(/\s*```\s*$/i, '')
71
- .trim();
72
- try {
73
- return JSON.parse(cleaned);
74
- } catch {
75
- return [];
76
- }
77
- }
78
- }
79
-
80
- // =============================================================================
81
- // ANTHROPIC PROVIDER (Claude)
82
- // =============================================================================
83
-
84
- class AnthropicProvider extends BaseLLMProvider {
85
- constructor(apiKey, options = {}) {
86
- super('Anthropic', apiKey, options);
87
- this.model = options.model || 'claude-haiku-4-5-20251001';
88
- this.baseUrl = options.baseUrl || 'https://api.anthropic.com/v1/messages';
89
- }
90
-
91
- async complete(systemPrompt, userPrompt, options = {}) {
92
- const response = await fetch(this.baseUrl, {
93
- method: 'POST',
94
- headers: {
95
- 'x-api-key': this.apiKey,
96
- 'anthropic-version': '2023-06-01',
97
- 'content-type': 'application/json',
98
- },
99
- body: JSON.stringify({
100
- model: this.model,
101
- max_tokens: options.maxTokens || 2048,
102
- system: systemPrompt,
103
- messages: [{ role: 'user', content: userPrompt }],
104
- }),
105
- });
106
-
107
- if (!response.ok) {
108
- throw new Error(`Anthropic API error: HTTP ${response.status}`);
109
- }
110
-
111
- const data = await response.json();
112
- return data.content?.[0]?.text || '';
113
- }
114
- }
115
-
116
- // =============================================================================
117
- // OPENAI PROVIDER (GPT-4o, etc.)
118
- // =============================================================================
119
-
120
- class OpenAIProvider extends BaseLLMProvider {
121
- constructor(apiKey, options = {}) {
122
- super('OpenAI', apiKey, options);
123
- this.model = options.model || 'gpt-4o-mini';
124
- this.baseUrl = options.baseUrl || 'https://api.openai.com/v1/chat/completions';
125
- }
126
-
127
- async complete(systemPrompt, userPrompt, options = {}) {
128
- const response = await fetch(this.baseUrl, {
129
- method: 'POST',
130
- headers: {
131
- 'Authorization': `Bearer ${this.apiKey}`,
132
- 'Content-Type': 'application/json',
133
- },
134
- body: JSON.stringify({
135
- model: this.model,
136
- max_tokens: options.maxTokens || 2048,
137
- messages: [
138
- { role: 'system', content: systemPrompt },
139
- { role: 'user', content: userPrompt },
140
- ],
141
- }),
142
- });
143
-
144
- if (!response.ok) {
145
- throw new Error(`OpenAI API error: HTTP ${response.status}`);
146
- }
147
-
148
- const data = await response.json();
149
- return data.choices?.[0]?.message?.content || '';
150
- }
151
- }
152
-
153
- // =============================================================================
154
- // GOOGLE PROVIDER (Gemini)
155
- // =============================================================================
156
-
157
- class GoogleProvider extends BaseLLMProvider {
158
- constructor(apiKey, options = {}) {
159
- super('Google', apiKey, options);
160
- this.model = options.model || 'gemini-2.0-flash';
161
- }
162
-
163
- async complete(systemPrompt, userPrompt, options = {}) {
164
- const url = `https://generativelanguage.googleapis.com/v1beta/models/${this.model}:generateContent`;
165
-
166
- const response = await fetch(url, {
167
- method: 'POST',
168
- headers: {
169
- 'Content-Type': 'application/json',
170
- 'x-goog-api-key': this.apiKey,
171
- },
172
- body: JSON.stringify({
173
- systemInstruction: { parts: [{ text: systemPrompt }] },
174
- contents: [{ parts: [{ text: userPrompt }] }],
175
- generationConfig: { maxOutputTokens: options.maxTokens || 2048 },
176
- }),
177
- });
178
-
179
- if (!response.ok) {
180
- throw new Error(`Google API error: HTTP ${response.status}`);
181
- }
182
-
183
- const data = await response.json();
184
- return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
185
- }
186
- }
187
-
188
- // =============================================================================
189
- // OLLAMA PROVIDER (Local models)
190
- // =============================================================================
191
-
192
- class OllamaProvider extends BaseLLMProvider {
193
- constructor(apiKey, options = {}) {
194
- super('Ollama', null, options);
195
- this.model = options.model || 'llama3.2';
196
- this.baseUrl = options.baseUrl || 'http://localhost:11434/api/chat';
197
- }
198
-
199
- async complete(systemPrompt, userPrompt, options = {}) {
200
- const response = await fetch(this.baseUrl, {
201
- method: 'POST',
202
- headers: { 'Content-Type': 'application/json' },
203
- body: JSON.stringify({
204
- model: this.model,
205
- messages: [
206
- { role: 'system', content: systemPrompt },
207
- { role: 'user', content: userPrompt },
208
- ],
209
- stream: false,
210
- }),
211
- });
212
-
213
- if (!response.ok) {
214
- throw new Error(`Ollama error: HTTP ${response.status}`);
215
- }
216
-
217
- const data = await response.json();
218
- return data.message?.content || '';
219
- }
220
- }
221
-
222
- // =============================================================================
223
- // FACTORY
224
- // =============================================================================
225
-
226
- /**
227
- * Create an LLM provider instance.
228
- *
229
- * @param {string} provider 'anthropic' | 'openai' | 'google' | 'ollama'
230
- * @param {string} apiKey — API key (null for Ollama)
231
- * @param {object} options — { model, baseUrl }
232
- */
233
- export function createProvider(provider, apiKey, options = {}) {
234
- switch (provider.toLowerCase()) {
235
- case 'anthropic':
236
- case 'claude':
237
- return new AnthropicProvider(apiKey, options);
238
- case 'openai':
239
- case 'gpt':
240
- return new OpenAIProvider(apiKey, options);
241
- case 'google':
242
- case 'gemini':
243
- return new GoogleProvider(apiKey, options);
244
- case 'ollama':
245
- case 'local':
246
- return new OllamaProvider(apiKey, options);
247
- default:
248
- throw new Error(`Unknown LLM provider: ${provider}. Use: anthropic, openai, google, ollama`);
249
- }
250
- }
251
-
252
- /**
253
- * Auto-detect the best available LLM provider from environment variables.
254
- */
255
- export function autoDetectProvider(rootPath) {
256
- // Check env vars
257
- const envKeys = {
258
- ANTHROPIC_API_KEY: 'anthropic',
259
- OPENAI_API_KEY: 'openai',
260
- GOOGLE_API_KEY: 'google',
261
- GEMINI_API_KEY: 'google',
262
- };
263
-
264
- for (const [envVar, provider] of Object.entries(envKeys)) {
265
- if (process.env[envVar]) {
266
- return createProvider(provider, process.env[envVar]);
267
- }
268
- }
269
-
270
- // Check .env file
271
- if (rootPath) {
272
- const envPath = path.join(rootPath, '.env');
273
- if (fs.existsSync(envPath)) {
274
- try {
275
- const content = fs.readFileSync(envPath, 'utf-8');
276
- for (const [envVar, provider] of Object.entries(envKeys)) {
277
- const match = content.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
278
- if (match) return createProvider(provider, match[1]);
279
- }
280
- } catch { /* ignore */ }
281
- }
282
- }
283
-
284
- return null;
285
- }
286
-
287
- export default { createProvider, autoDetectProvider };
1
+ /**
2
+ * Multi-LLM Provider
3
+ * ===================
4
+ *
5
+ * Abstraction layer for LLM providers.
6
+ * Supports: Anthropic (Claude), OpenAI, Google (Gemini), Ollama (local),
7
+ * and any OpenAI-compatible endpoint (Groq, Together AI, Mistral API,
8
+ * LM Studio, Azure OpenAI, AWS Bedrock via proxy, etc.).
9
+ *
10
+ * USAGE:
11
+ * const provider = createProvider('anthropic', apiKey);
12
+ * const provider = createProvider('groq', apiKey);
13
+ * const provider = createProvider('openai', apiKey, { baseUrl: 'https://custom/v1/chat/completions' });
14
+ * const result = await provider.classify(findings, context);
15
+ */
16
+
17
+ import fs from 'fs';
18
+ import path from 'path';
19
+
20
+ // =============================================================================
21
+ // PROVIDER INTERFACE
22
+ // =============================================================================
23
+
24
class BaseLLMProvider {
  /**
   * @param {string} name - Display name of the provider (e.g. 'Anthropic').
   * @param {string|null} apiKey - API key; null for key-less local providers.
   * @param {object} [options] - { model, baseUrl } overrides.
   */
  constructor(name, apiKey, options = {}) {
    this.name = name;
    this.apiKey = apiKey;
    this.model = options.model || null;
    this.baseUrl = options.baseUrl || null;
  }

  /**
   * Send a prompt to the LLM and get a text response.
   * Subclasses must override; the base implementation always throws.
   *
   * @param {string} systemPrompt - System/instruction prompt.
   * @param {string} userPrompt - User message.
   * @param {object} [options] - { maxTokens } request options.
   * @returns {Promise<string>} Raw text completion.
   * @throws {Error} Always, unless overridden by a subclass.
   */
  async complete(systemPrompt, userPrompt, options = {}) {
    throw new Error(`${this.name}.complete() not implemented`);
  }

  /**
   * Classify security findings using the LLM.
   *
   * @param {Array<object>} findings - Scanner findings ({ file, line, rule, severity, title, matched, description }).
   * @param {object} context - Extra context; currently not used by the prompt builder.
   * @returns {Promise<Array<object>>} Parsed classification array; [] when the response is unparseable.
   */
  async classify(findings, context) {
    const prompt = this.buildClassificationPrompt(findings, context);
    const response = await this.complete(
      'You are a security expert. Respond with JSON only, no markdown.',
      prompt,
      { maxTokens: 4096 }
    );
    return this.parseJSON(response);
  }

  /**
   * Build the classification prompt. Finding IDs are "<file>:<line>" so the
   * LLM's answers can be joined back to the original findings.
   * Matched text is truncated to 100 chars to bound prompt size.
   */
  buildClassificationPrompt(findings, context) {
    const items = findings.map(f => ({
      id: `${f.file}:${f.line}`,
      rule: f.rule,
      severity: f.severity,
      title: f.title,
      matched: f.matched?.slice(0, 100),
      description: f.description,
    }));

    return `Classify each finding as REAL or FALSE_POSITIVE. For REAL findings, provide a specific fix.

Respond with JSON array ONLY:
[{"id":"<id>","classification":"REAL"|"FALSE_POSITIVE","reason":"<brief reason>","fix":"<specific fix or null>"}]

Findings:
${JSON.stringify(items, null, 2)}`;
  }

  /**
   * Parse a JSON array out of an LLM text response.
   *
   * Strips a surrounding markdown code fence, then attempts JSON.parse.
   * If that fails (LLMs often wrap JSON in prose despite instructions),
   * falls back to extracting the outermost [...] span. Returns [] when
   * no JSON can be recovered — callers treat that as "no classifications".
   */
  parseJSON(text) {
    const cleaned = text
      .replace(/^```(?:json)?\s*/i, '')
      .replace(/\s*```\s*$/i, '')
      .trim();
    try {
      return JSON.parse(cleaned);
    } catch {
      // Salvage attempt: isolate the outermost JSON array from any
      // surrounding prose ("Here is the result: [...] Hope that helps").
      const start = cleaned.indexOf('[');
      const end = cleaned.lastIndexOf(']');
      if (start !== -1 && end > start) {
        try {
          return JSON.parse(cleaned.slice(start, end + 1));
        } catch { /* fall through to [] */ }
      }
      return [];
    }
  }
}
83
+
84
+ // =============================================================================
85
+ // ANTHROPIC PROVIDER (Claude)
86
+ // =============================================================================
87
+
88
class AnthropicProvider extends BaseLLMProvider {
  constructor(apiKey, options = {}) {
    super('Anthropic', apiKey, options);
    this.model = options.model || 'claude-haiku-4-5-20251001';
    this.baseUrl = options.baseUrl || 'https://api.anthropic.com/v1/messages';
  }

  /**
   * POST to the Anthropic Messages API and return the first content
   * block's text ('' when the response carries none).
   * @throws {Error} On any non-2xx HTTP status.
   */
  async complete(systemPrompt, userPrompt, options = {}) {
    const payload = {
      model: this.model,
      max_tokens: options.maxTokens || 2048,
      system: systemPrompt,
      messages: [{ role: 'user', content: userPrompt }],
    };

    const res = await fetch(this.baseUrl, {
      method: 'POST',
      headers: {
        'x-api-key': this.apiKey,
        'anthropic-version': '2023-06-01',
        'content-type': 'application/json',
      },
      body: JSON.stringify(payload),
    });

    if (!res.ok) {
      throw new Error(`Anthropic API error: HTTP ${res.status}`);
    }

    const parsed = await res.json();
    return parsed.content?.[0]?.text || '';
  }
}
119
+
120
+ // =============================================================================
121
+ // OPENAI PROVIDER (GPT-4o, etc.)
122
+ // =============================================================================
123
+
124
class OpenAIProvider extends BaseLLMProvider {
  constructor(apiKey, options = {}) {
    super('OpenAI', apiKey, options);
    this.model = options.model || 'gpt-4o-mini';
    this.baseUrl = options.baseUrl || 'https://api.openai.com/v1/chat/completions';
  }

  /**
   * POST to an OpenAI-style /chat/completions endpoint and return the
   * first choice's message content ('' when the response has none).
   * @throws {Error} On any non-2xx HTTP status.
   */
  async complete(systemPrompt, userPrompt, options = {}) {
    const requestBody = {
      model: this.model,
      max_tokens: options.maxTokens || 2048,
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: userPrompt },
      ],
    };

    const res = await fetch(this.baseUrl, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(requestBody),
    });

    if (!res.ok) {
      throw new Error(`OpenAI API error: HTTP ${res.status}`);
    }

    const parsed = await res.json();
    return parsed.choices?.[0]?.message?.content || '';
  }
}
156
+
157
+ // =============================================================================
158
+ // GOOGLE PROVIDER (Gemini)
159
+ // =============================================================================
160
+
161
class GoogleProvider extends BaseLLMProvider {
  constructor(apiKey, options = {}) {
    super('Google', apiKey, options);
    this.model = options.model || 'gemini-2.0-flash';
  }

  /**
   * POST to the Gemini generateContent endpoint and return the first
   * candidate's first text part ('' when the response has none).
   * Note: the endpoint URL embeds the model name, so no baseUrl default.
   * @throws {Error} On any non-2xx HTTP status.
   */
  async complete(systemPrompt, userPrompt, options = {}) {
    const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${this.model}:generateContent`;

    const payload = {
      systemInstruction: { parts: [{ text: systemPrompt }] },
      contents: [{ parts: [{ text: userPrompt }] }],
      generationConfig: { maxOutputTokens: options.maxTokens || 2048 },
    };

    const res = await fetch(endpoint, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-goog-api-key': this.apiKey,
      },
      body: JSON.stringify(payload),
    });

    if (!res.ok) {
      throw new Error(`Google API error: HTTP ${res.status}`);
    }

    const parsed = await res.json();
    return parsed.candidates?.[0]?.content?.parts?.[0]?.text || '';
  }
}
191
+
192
+ // =============================================================================
193
+ // OLLAMA PROVIDER (Local models)
194
+ // =============================================================================
195
+
196
class OllamaProvider extends BaseLLMProvider {
  // apiKey is accepted for factory-signature symmetry but deliberately
  // discarded (super receives null) — local Ollama needs no authentication.
  constructor(apiKey, options = {}) {
    super('Ollama', null, options);
    this.model = options.model || 'llama3.2';
    this.baseUrl = options.baseUrl || 'http://localhost:11434/api/chat';
  }

  /**
   * POST to the local Ollama /api/chat endpoint (stream: false, so the
   * whole completion arrives in one JSON body) and return the assistant
   * message content ('' when absent).
   *
   * NOTE(review): options.maxTokens is not forwarded to Ollama here —
   * the model's own defaults apply. Confirm whether that is intended.
   */
  async complete(systemPrompt, userPrompt, options = {}) {
    const response = await fetch(this.baseUrl, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.model,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userPrompt },
        ],
        stream: false,
      }),
    });

    if (!response.ok) {
      throw new Error(`Ollama error: HTTP ${response.status}`);
    }

    const data = await response.json();
    return data.message?.content || '';
  }
}
225
+
226
+ // =============================================================================
227
+ // OPENAI-COMPATIBLE PROVIDER
228
+ // Handles Groq, Together AI, Mistral API, LM Studio, Azure OpenAI, Bedrock
229
+ // proxy, and any other endpoint that speaks /v1/chat/completions.
230
+ // =============================================================================
231
+
232
// Well-known OpenAI-compatible base URLs and their default models.
// Frozen (outer object and each preset) so shared config cannot be
// mutated accidentally by a caller; nothing in this module writes to it.
// envKey: the environment variable resolveApiKey() looks up (null = no key needed).
const OPENAI_COMPATIBLE_PRESETS = Object.freeze({
  groq: Object.freeze({ baseUrl: 'https://api.groq.com/openai/v1/chat/completions', model: 'llama-3.3-70b-versatile', envKey: 'GROQ_API_KEY' }),
  together: Object.freeze({ baseUrl: 'https://api.together.xyz/v1/chat/completions', model: 'meta-llama/Llama-3-70b-chat-hf', envKey: 'TOGETHER_API_KEY' }),
  mistral: Object.freeze({ baseUrl: 'https://api.mistral.ai/v1/chat/completions', model: 'mistral-large-latest', envKey: 'MISTRAL_API_KEY' }),
  cohere: Object.freeze({ baseUrl: 'https://api.cohere.com/compatibility/v1/chat/completions', model: 'command-r-plus', envKey: 'COHERE_API_KEY' }),
  deepseek: Object.freeze({ baseUrl: 'https://api.deepseek.com/v1/chat/completions', model: 'deepseek-chat', envKey: 'DEEPSEEK_API_KEY' }),
  perplexity: Object.freeze({ baseUrl: 'https://api.perplexity.ai/chat/completions', model: 'llama-3.1-sonar-large-128k-online', envKey: 'PERPLEXITY_API_KEY' }),
  // LM Studio is local: no default model (caller picks), no API key.
  lmstudio: Object.freeze({ baseUrl: 'http://localhost:1234/v1/chat/completions', model: null, envKey: null }),
  xai: Object.freeze({ baseUrl: 'https://api.x.ai/v1/chat/completions', model: 'grok-3-mini', envKey: 'XAI_API_KEY' }),
});
243
+
244
class OpenAICompatibleProvider extends OpenAIProvider {
  /**
   * Thin wrapper over OpenAIProvider for any endpoint that speaks the
   * /v1/chat/completions wire format (Groq, Together, Mistral, LM Studio, …).
   * Request/response handling is inherited unchanged; only the display
   * name differs.
   *
   * @param {string} name - Display name; overwrites the 'OpenAI' that
   *   the OpenAIProvider constructor sets.
   * @param {string|null} apiKey - API key; may be null for local endpoints.
   * @param {object} [options] - { model, baseUrl }. If omitted, the
   *   OpenAIProvider defaults (gpt-4o-mini / api.openai.com) apply, so
   *   callers should pass both explicitly for non-OpenAI endpoints.
   */
  constructor(name, apiKey, options = {}) {
    super(apiKey, options);
    this.name = name;
  }
}
250
+
251
+ // =============================================================================
252
+ // FACTORY
253
+ // =============================================================================
254
+
255
/**
 * Create an LLM provider instance.
 *
 * @param {string} provider — 'anthropic' | 'openai' | 'google' | 'ollama'
 *                            or any preset: 'groq' | 'together' | 'mistral' |
 *                            'cohere' | 'deepseek' | 'perplexity' | 'lmstudio' | 'xai'
 * @param {string} apiKey — API key (null for Ollama/LM Studio)
 * @param {object} options — { model, baseUrl }
 *                           baseUrl overrides the default for any provider.
 * @returns {BaseLLMProvider} A concrete provider instance.
 * @throws {Error} When provider is not a non-empty string, or is unknown
 *                 and no options.baseUrl was given to treat it as a
 *                 generic OpenAI-compatible endpoint.
 */
export function createProvider(provider, apiKey, options = {}) {
  // Guard: a null/undefined provider would otherwise die with an opaque
  // "Cannot read properties of null (reading 'toLowerCase')".
  if (typeof provider !== 'string' || provider.trim() === '') {
    throw new Error('createProvider: provider name must be a non-empty string');
  }
  const name = provider.trim().toLowerCase();

  // First-class providers
  switch (name) {
    case 'anthropic':
    case 'claude':
      return new AnthropicProvider(apiKey, options);
    case 'openai':
    case 'gpt':
      return new OpenAIProvider(apiKey, options);
    case 'google':
    case 'gemini':
      return new GoogleProvider(apiKey, options);
    case 'ollama':
    case 'local':
      return new OllamaProvider(apiKey, options);
  }

  // OpenAI-compatible presets. Object.hasOwn avoids false hits on
  // prototype keys (e.g. provider name "constructor").
  if (Object.hasOwn(OPENAI_COMPATIBLE_PRESETS, name)) {
    const preset = OPENAI_COMPATIBLE_PRESETS[name];
    return new OpenAICompatibleProvider(
      // Capitalise for display: "groq" → "Groq"
      name.charAt(0).toUpperCase() + name.slice(1),
      apiKey,
      {
        baseUrl: options.baseUrl || preset.baseUrl,
        model: options.model || preset.model || 'default',
      }
    );
  }

  // Unknown name but caller supplied a baseUrl — treat as generic OpenAI-compatible
  if (options.baseUrl) {
    return new OpenAICompatibleProvider(provider, apiKey, options);
  }

  throw new Error(
    `Unknown LLM provider: "${provider}".\n` +
    `Built-in: anthropic, openai, google, ollama\n` +
    `Presets: groq, together, mistral, cohere, deepseek, perplexity, lmstudio, xai\n` +
    `Custom: pass any name with --base-url <url>`
  );
}
310
+
311
/**
 * Auto-detect the best available LLM provider from environment variables.
 *
 * Resolution precedence (first hit wins):
 *   1. options.provider — explicit provider name; key resolved via env/.env
 *   2. options.baseUrl  — generic OpenAI-compatible endpoint
 *   3. process.env      — envKeys table below, in insertion order
 *   4. <rootPath>/.env  — same table, scanned as KEY=value lines
 *
 * @param {string} rootPath — Project root (for .env file scan)
 * @param {object} options — { provider, baseUrl, model } explicit overrides
 * @returns {BaseLLMProvider|null} A provider, or null when nothing is configured.
 */
export function autoDetectProvider(rootPath, options = {}) {
  // Explicit provider name requested
  if (options.provider) {
    // NOTE(review): resolveApiKey may return null here; the provider is
    // still created and would fail at request time — confirm intended.
    const apiKey = resolveApiKey(options.provider, rootPath);
    return createProvider(options.provider, apiKey, {
      model: options.model,
      baseUrl: options.baseUrl,
    });
  }

  // baseUrl supplied without a provider name → openai-compatible with auto key
  if (options.baseUrl) {
    // resolveApiKey('openai', …) already checks process.env.OPENAI_API_KEY,
    // so the first operand is redundant but harmless; '' means "no key".
    const apiKey = process.env.OPENAI_API_KEY || resolveApiKey('openai', rootPath) || '';
    return new OpenAICompatibleProvider('custom', apiKey, {
      baseUrl: options.baseUrl,
      model: options.model || 'default',
    });
  }

  // Standard env-var auto-detection (first match wins)
  // Insertion order defines precedence: anthropic > openai > google > presets.
  const envKeys = {
    ANTHROPIC_API_KEY: 'anthropic',
    OPENAI_API_KEY: 'openai',
    GOOGLE_API_KEY: 'google',
    GEMINI_API_KEY: 'google',
    GROQ_API_KEY: 'groq',
    TOGETHER_API_KEY: 'together',
    MISTRAL_API_KEY: 'mistral',
    DEEPSEEK_API_KEY: 'deepseek',
    XAI_API_KEY: 'xai',
  };

  for (const [envVar, providerName] of Object.entries(envKeys)) {
    if (process.env[envVar]) {
      return createProvider(providerName, process.env[envVar], { model: options.model });
    }
  }

  // Check .env file — only reached when no real env var matched
  if (rootPath) {
    const envPath = path.join(rootPath, '.env');
    if (fs.existsSync(envPath)) {
      try {
        const content = fs.readFileSync(envPath, 'utf-8');
        for (const [envVar, providerName] of Object.entries(envKeys)) {
          // Matches KEY=value, KEY="value", KEY='value'; the captured value
          // stops at whitespace or a closing quote.
          const match = content.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
          if (match) return createProvider(providerName, match[1], { model: options.model });
        }
      } catch { /* unreadable .env — fall through to null */ }
    }
  }

  return null;
}
371
+
372
/**
 * Resolve an API key for a given provider name.
 * Looks up the provider's env var (preset envKey, or <NAME>_API_KEY) in
 * process.env first, then as a KEY=value line in <rootPath>/.env.
 * @returns {string|null} The key, or null when none is found.
 */
function resolveApiKey(providerName, rootPath) {
  const key = providerName.toLowerCase();
  const preset = OPENAI_COMPATIBLE_PRESETS[key];
  const envVar = preset?.envKey || `${key.toUpperCase()}_API_KEY`;

  const fromEnv = process.env[envVar];
  if (fromEnv) return fromEnv;

  if (!rootPath) return null;

  const envFile = path.join(rootPath, '.env');
  if (!fs.existsSync(envFile)) return null;

  try {
    const text = fs.readFileSync(envFile, 'utf-8');
    const hit = text.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
    if (hit) return hit[1];
  } catch { /* unreadable .env — treat as absent */ }

  return null;
}
395
+
396
+ export { OPENAI_COMPATIBLE_PRESETS };
397
+ export default { createProvider, autoDetectProvider };