ship-safe 4.0.0 → 4.2.0

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -1,288 +1,287 @@
1
- /**
2
- * Multi-LLM Provider
3
- * ===================
4
- *
5
- * Abstraction layer for LLM providers.
6
- * Supports: Anthropic (Claude), OpenAI, Google (Gemini), Ollama (local).
7
- *
8
- * USAGE:
9
- * const provider = createProvider('anthropic', apiKey);
10
- * const result = await provider.classify(findings, context);
11
- */
12
-
13
- import fs from 'fs';
14
- import path from 'path';
15
-
16
- // =============================================================================
17
- // PROVIDER INTERFACE
18
- // =============================================================================
19
-
20
- class BaseLLMProvider {
21
- constructor(name, apiKey, options = {}) {
22
- this.name = name;
23
- this.apiKey = apiKey;
24
- this.model = options.model || null;
25
- this.baseUrl = options.baseUrl || null;
26
- }
27
-
28
- /**
29
- * Send a prompt to the LLM and get a text response.
30
- */
31
- async complete(systemPrompt, userPrompt, options = {}) {
32
- throw new Error(`${this.name}.complete() not implemented`);
33
- }
34
-
35
- /**
36
- * Classify security findings using the LLM.
37
- */
38
- async classify(findings, context) {
39
- const prompt = this.buildClassificationPrompt(findings, context);
40
- const response = await this.complete(
41
- 'You are a security expert. Respond with JSON only, no markdown.',
42
- prompt,
43
- { maxTokens: 4096 }
44
- );
45
- return this.parseJSON(response);
46
- }
47
-
48
- buildClassificationPrompt(findings, context) {
49
- const items = findings.map(f => ({
50
- id: `${f.file}:${f.line}`,
51
- rule: f.rule,
52
- severity: f.severity,
53
- title: f.title,
54
- matched: f.matched?.slice(0, 100),
55
- description: f.description,
56
- }));
57
-
58
- return `Classify each finding as REAL or FALSE_POSITIVE. For REAL findings, provide a specific fix.
59
-
60
- Respond with JSON array ONLY:
61
- [{"id":"<id>","classification":"REAL"|"FALSE_POSITIVE","reason":"<brief reason>","fix":"<specific fix or null>"}]
62
-
63
- Findings:
64
- ${JSON.stringify(items, null, 2)}`;
65
- }
66
-
67
- parseJSON(text) {
68
- const cleaned = text
69
- .replace(/^```(?:json)?\s*/i, '')
70
- .replace(/\s*```\s*$/i, '')
71
- .trim();
72
- try {
73
- return JSON.parse(cleaned);
74
- } catch {
75
- return [];
76
- }
77
- }
78
- }
79
-
80
- // =============================================================================
81
- // ANTHROPIC PROVIDER (Claude)
82
- // =============================================================================
83
-
84
- class AnthropicProvider extends BaseLLMProvider {
85
- constructor(apiKey, options = {}) {
86
- super('Anthropic', apiKey, options);
87
- this.model = options.model || 'claude-haiku-4-5-20251001';
88
- this.baseUrl = options.baseUrl || 'https://api.anthropic.com/v1/messages';
89
- }
90
-
91
- async complete(systemPrompt, userPrompt, options = {}) {
92
- const response = await fetch(this.baseUrl, {
93
- method: 'POST',
94
- headers: {
95
- 'x-api-key': this.apiKey,
96
- 'anthropic-version': '2023-06-01',
97
- 'content-type': 'application/json',
98
- },
99
- body: JSON.stringify({
100
- model: this.model,
101
- max_tokens: options.maxTokens || 2048,
102
- system: systemPrompt,
103
- messages: [{ role: 'user', content: userPrompt }],
104
- }),
105
- });
106
-
107
- if (!response.ok) {
108
- const body = await response.text();
109
- throw new Error(`Anthropic API error ${response.status}: ${body.slice(0, 200)}`);
110
- }
111
-
112
- const data = await response.json();
113
- return data.content?.[0]?.text || '';
114
- }
115
- }
116
-
117
- // =============================================================================
118
- // OPENAI PROVIDER (GPT-4o, etc.)
119
- // =============================================================================
120
-
121
- class OpenAIProvider extends BaseLLMProvider {
122
- constructor(apiKey, options = {}) {
123
- super('OpenAI', apiKey, options);
124
- this.model = options.model || 'gpt-4o-mini';
125
- this.baseUrl = options.baseUrl || 'https://api.openai.com/v1/chat/completions';
126
- }
127
-
128
- async complete(systemPrompt, userPrompt, options = {}) {
129
- const response = await fetch(this.baseUrl, {
130
- method: 'POST',
131
- headers: {
132
- 'Authorization': `Bearer ${this.apiKey}`,
133
- 'Content-Type': 'application/json',
134
- },
135
- body: JSON.stringify({
136
- model: this.model,
137
- max_tokens: options.maxTokens || 2048,
138
- messages: [
139
- { role: 'system', content: systemPrompt },
140
- { role: 'user', content: userPrompt },
141
- ],
142
- }),
143
- });
144
-
145
- if (!response.ok) {
146
- const body = await response.text();
147
- throw new Error(`OpenAI API error ${response.status}: ${body.slice(0, 200)}`);
148
- }
149
-
150
- const data = await response.json();
151
- return data.choices?.[0]?.message?.content || '';
152
- }
153
- }
154
-
155
- // =============================================================================
156
- // GOOGLE PROVIDER (Gemini)
157
- // =============================================================================
158
-
159
- class GoogleProvider extends BaseLLMProvider {
160
- constructor(apiKey, options = {}) {
161
- super('Google', apiKey, options);
162
- this.model = options.model || 'gemini-2.0-flash';
163
- }
164
-
165
- async complete(systemPrompt, userPrompt, options = {}) {
166
- const url = `https://generativelanguage.googleapis.com/v1beta/models/${this.model}:generateContent?key=${this.apiKey}`;
167
-
168
- const response = await fetch(url, {
169
- method: 'POST',
170
- headers: { 'Content-Type': 'application/json' },
171
- body: JSON.stringify({
172
- systemInstruction: { parts: [{ text: systemPrompt }] },
173
- contents: [{ parts: [{ text: userPrompt }] }],
174
- generationConfig: { maxOutputTokens: options.maxTokens || 2048 },
175
- }),
176
- });
177
-
178
- if (!response.ok) {
179
- const body = await response.text();
180
- throw new Error(`Google API error ${response.status}: ${body.slice(0, 200)}`);
181
- }
182
-
183
- const data = await response.json();
184
- return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
185
- }
186
- }
187
-
188
- // =============================================================================
189
- // OLLAMA PROVIDER (Local models)
190
- // =============================================================================
191
-
192
- class OllamaProvider extends BaseLLMProvider {
193
- constructor(apiKey, options = {}) {
194
- super('Ollama', null, options);
195
- this.model = options.model || 'llama3.2';
196
- this.baseUrl = options.baseUrl || 'http://localhost:11434/api/chat';
197
- }
198
-
199
- async complete(systemPrompt, userPrompt, options = {}) {
200
- const response = await fetch(this.baseUrl, {
201
- method: 'POST',
202
- headers: { 'Content-Type': 'application/json' },
203
- body: JSON.stringify({
204
- model: this.model,
205
- messages: [
206
- { role: 'system', content: systemPrompt },
207
- { role: 'user', content: userPrompt },
208
- ],
209
- stream: false,
210
- }),
211
- });
212
-
213
- if (!response.ok) {
214
- const body = await response.text();
215
- throw new Error(`Ollama error ${response.status}: ${body.slice(0, 200)}`);
216
- }
217
-
218
- const data = await response.json();
219
- return data.message?.content || '';
220
- }
221
- }
222
-
223
- // =============================================================================
224
- // FACTORY
225
- // =============================================================================
226
-
227
- /**
228
- * Create an LLM provider instance.
229
- *
230
- * @param {string} provider 'anthropic' | 'openai' | 'google' | 'ollama'
231
- * @param {string} apiKey API key (null for Ollama)
232
- * @param {object} options — { model, baseUrl }
233
- */
234
- export function createProvider(provider, apiKey, options = {}) {
235
- switch (provider.toLowerCase()) {
236
- case 'anthropic':
237
- case 'claude':
238
- return new AnthropicProvider(apiKey, options);
239
- case 'openai':
240
- case 'gpt':
241
- return new OpenAIProvider(apiKey, options);
242
- case 'google':
243
- case 'gemini':
244
- return new GoogleProvider(apiKey, options);
245
- case 'ollama':
246
- case 'local':
247
- return new OllamaProvider(apiKey, options);
248
- default:
249
- throw new Error(`Unknown LLM provider: ${provider}. Use: anthropic, openai, google, ollama`);
250
- }
251
- }
252
-
253
- /**
254
- * Auto-detect the best available LLM provider from environment variables.
255
- */
256
- export function autoDetectProvider(rootPath) {
257
- // Check env vars
258
- const envKeys = {
259
- ANTHROPIC_API_KEY: 'anthropic',
260
- OPENAI_API_KEY: 'openai',
261
- GOOGLE_API_KEY: 'google',
262
- GEMINI_API_KEY: 'google',
263
- };
264
-
265
- for (const [envVar, provider] of Object.entries(envKeys)) {
266
- if (process.env[envVar]) {
267
- return createProvider(provider, process.env[envVar]);
268
- }
269
- }
270
-
271
- // Check .env file
272
- if (rootPath) {
273
- const envPath = path.join(rootPath, '.env');
274
- if (fs.existsSync(envPath)) {
275
- try {
276
- const content = fs.readFileSync(envPath, 'utf-8');
277
- for (const [envVar, provider] of Object.entries(envKeys)) {
278
- const match = content.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
279
- if (match) return createProvider(provider, match[1]);
280
- }
281
- } catch { /* ignore */ }
282
- }
283
- }
284
-
285
- return null;
286
- }
287
-
288
- export default { createProvider, autoDetectProvider };
1
+ /**
2
+ * Multi-LLM Provider
3
+ * ===================
4
+ *
5
+ * Abstraction layer for LLM providers.
6
+ * Supports: Anthropic (Claude), OpenAI, Google (Gemini), Ollama (local).
7
+ *
8
+ * USAGE:
9
+ * const provider = createProvider('anthropic', apiKey);
10
+ * const result = await provider.classify(findings, context);
11
+ */
12
+
13
+ import fs from 'fs';
14
+ import path from 'path';
15
+
16
+ // =============================================================================
17
+ // PROVIDER INTERFACE
18
+ // =============================================================================
19
+
20
+ class BaseLLMProvider {
21
+ constructor(name, apiKey, options = {}) {
22
+ this.name = name;
23
+ this.apiKey = apiKey;
24
+ this.model = options.model || null;
25
+ this.baseUrl = options.baseUrl || null;
26
+ }
27
+
28
+ /**
29
+ * Send a prompt to the LLM and get a text response.
30
+ */
31
+ async complete(systemPrompt, userPrompt, options = {}) {
32
+ throw new Error(`${this.name}.complete() not implemented`);
33
+ }
34
+
35
+ /**
36
+ * Classify security findings using the LLM.
37
+ */
38
+ async classify(findings, context) {
39
+ const prompt = this.buildClassificationPrompt(findings, context);
40
+ const response = await this.complete(
41
+ 'You are a security expert. Respond with JSON only, no markdown.',
42
+ prompt,
43
+ { maxTokens: 4096 }
44
+ );
45
+ return this.parseJSON(response);
46
+ }
47
+
48
+ buildClassificationPrompt(findings, context) {
49
+ const items = findings.map(f => ({
50
+ id: `${f.file}:${f.line}`,
51
+ rule: f.rule,
52
+ severity: f.severity,
53
+ title: f.title,
54
+ matched: f.matched?.slice(0, 100),
55
+ description: f.description,
56
+ }));
57
+
58
+ return `Classify each finding as REAL or FALSE_POSITIVE. For REAL findings, provide a specific fix.
59
+
60
+ Respond with JSON array ONLY:
61
+ [{"id":"<id>","classification":"REAL"|"FALSE_POSITIVE","reason":"<brief reason>","fix":"<specific fix or null>"}]
62
+
63
+ Findings:
64
+ ${JSON.stringify(items, null, 2)}`;
65
+ }
66
+
67
+ parseJSON(text) {
68
+ const cleaned = text
69
+ .replace(/^```(?:json)?\s*/i, '')
70
+ .replace(/\s*```\s*$/i, '')
71
+ .trim();
72
+ try {
73
+ return JSON.parse(cleaned);
74
+ } catch {
75
+ return [];
76
+ }
77
+ }
78
+ }
79
+
80
+ // =============================================================================
81
+ // ANTHROPIC PROVIDER (Claude)
82
+ // =============================================================================
83
+
84
+ class AnthropicProvider extends BaseLLMProvider {
85
+ constructor(apiKey, options = {}) {
86
+ super('Anthropic', apiKey, options);
87
+ this.model = options.model || 'claude-haiku-4-5-20251001';
88
+ this.baseUrl = options.baseUrl || 'https://api.anthropic.com/v1/messages';
89
+ }
90
+
91
+ async complete(systemPrompt, userPrompt, options = {}) {
92
+ const response = await fetch(this.baseUrl, {
93
+ method: 'POST',
94
+ headers: {
95
+ 'x-api-key': this.apiKey,
96
+ 'anthropic-version': '2023-06-01',
97
+ 'content-type': 'application/json',
98
+ },
99
+ body: JSON.stringify({
100
+ model: this.model,
101
+ max_tokens: options.maxTokens || 2048,
102
+ system: systemPrompt,
103
+ messages: [{ role: 'user', content: userPrompt }],
104
+ }),
105
+ });
106
+
107
+ if (!response.ok) {
108
+ throw new Error(`Anthropic API error: HTTP ${response.status}`);
109
+ }
110
+
111
+ const data = await response.json();
112
+ return data.content?.[0]?.text || '';
113
+ }
114
+ }
115
+
116
+ // =============================================================================
117
+ // OPENAI PROVIDER (GPT-4o, etc.)
118
+ // =============================================================================
119
+
120
+ class OpenAIProvider extends BaseLLMProvider {
121
+ constructor(apiKey, options = {}) {
122
+ super('OpenAI', apiKey, options);
123
+ this.model = options.model || 'gpt-4o-mini';
124
+ this.baseUrl = options.baseUrl || 'https://api.openai.com/v1/chat/completions';
125
+ }
126
+
127
+ async complete(systemPrompt, userPrompt, options = {}) {
128
+ const response = await fetch(this.baseUrl, {
129
+ method: 'POST',
130
+ headers: {
131
+ 'Authorization': `Bearer ${this.apiKey}`,
132
+ 'Content-Type': 'application/json',
133
+ },
134
+ body: JSON.stringify({
135
+ model: this.model,
136
+ max_tokens: options.maxTokens || 2048,
137
+ messages: [
138
+ { role: 'system', content: systemPrompt },
139
+ { role: 'user', content: userPrompt },
140
+ ],
141
+ }),
142
+ });
143
+
144
+ if (!response.ok) {
145
+ throw new Error(`OpenAI API error: HTTP ${response.status}`);
146
+ }
147
+
148
+ const data = await response.json();
149
+ return data.choices?.[0]?.message?.content || '';
150
+ }
151
+ }
152
+
153
+ // =============================================================================
154
+ // GOOGLE PROVIDER (Gemini)
155
+ // =============================================================================
156
+
157
+ class GoogleProvider extends BaseLLMProvider {
158
+ constructor(apiKey, options = {}) {
159
+ super('Google', apiKey, options);
160
+ this.model = options.model || 'gemini-2.0-flash';
161
+ }
162
+
163
+ async complete(systemPrompt, userPrompt, options = {}) {
164
+ const url = `https://generativelanguage.googleapis.com/v1beta/models/${this.model}:generateContent`;
165
+
166
+ const response = await fetch(url, {
167
+ method: 'POST',
168
+ headers: {
169
+ 'Content-Type': 'application/json',
170
+ 'x-goog-api-key': this.apiKey,
171
+ },
172
+ body: JSON.stringify({
173
+ systemInstruction: { parts: [{ text: systemPrompt }] },
174
+ contents: [{ parts: [{ text: userPrompt }] }],
175
+ generationConfig: { maxOutputTokens: options.maxTokens || 2048 },
176
+ }),
177
+ });
178
+
179
+ if (!response.ok) {
180
+ throw new Error(`Google API error: HTTP ${response.status}`);
181
+ }
182
+
183
+ const data = await response.json();
184
+ return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
185
+ }
186
+ }
187
+
188
+ // =============================================================================
189
+ // OLLAMA PROVIDER (Local models)
190
+ // =============================================================================
191
+
192
+ class OllamaProvider extends BaseLLMProvider {
193
+ constructor(apiKey, options = {}) {
194
+ super('Ollama', null, options);
195
+ this.model = options.model || 'llama3.2';
196
+ this.baseUrl = options.baseUrl || 'http://localhost:11434/api/chat';
197
+ }
198
+
199
+ async complete(systemPrompt, userPrompt, options = {}) {
200
+ const response = await fetch(this.baseUrl, {
201
+ method: 'POST',
202
+ headers: { 'Content-Type': 'application/json' },
203
+ body: JSON.stringify({
204
+ model: this.model,
205
+ messages: [
206
+ { role: 'system', content: systemPrompt },
207
+ { role: 'user', content: userPrompt },
208
+ ],
209
+ stream: false,
210
+ }),
211
+ });
212
+
213
+ if (!response.ok) {
214
+ throw new Error(`Ollama error: HTTP ${response.status}`);
215
+ }
216
+
217
+ const data = await response.json();
218
+ return data.message?.content || '';
219
+ }
220
+ }
221
+
222
+ // =============================================================================
223
+ // FACTORY
224
+ // =============================================================================
225
+
226
+ /**
227
+ * Create an LLM provider instance.
228
+ *
229
+ * @param {string} provider — 'anthropic' | 'openai' | 'google' | 'ollama'
230
+ * @param {string} apiKey API key (null for Ollama)
231
+ * @param {object} options { model, baseUrl }
232
+ */
233
+ export function createProvider(provider, apiKey, options = {}) {
234
+ switch (provider.toLowerCase()) {
235
+ case 'anthropic':
236
+ case 'claude':
237
+ return new AnthropicProvider(apiKey, options);
238
+ case 'openai':
239
+ case 'gpt':
240
+ return new OpenAIProvider(apiKey, options);
241
+ case 'google':
242
+ case 'gemini':
243
+ return new GoogleProvider(apiKey, options);
244
+ case 'ollama':
245
+ case 'local':
246
+ return new OllamaProvider(apiKey, options);
247
+ default:
248
+ throw new Error(`Unknown LLM provider: ${provider}. Use: anthropic, openai, google, ollama`);
249
+ }
250
+ }
251
+
252
+ /**
253
+ * Auto-detect the best available LLM provider from environment variables.
254
+ */
255
+ export function autoDetectProvider(rootPath) {
256
+ // Check env vars
257
+ const envKeys = {
258
+ ANTHROPIC_API_KEY: 'anthropic',
259
+ OPENAI_API_KEY: 'openai',
260
+ GOOGLE_API_KEY: 'google',
261
+ GEMINI_API_KEY: 'google',
262
+ };
263
+
264
+ for (const [envVar, provider] of Object.entries(envKeys)) {
265
+ if (process.env[envVar]) {
266
+ return createProvider(provider, process.env[envVar]);
267
+ }
268
+ }
269
+
270
+ // Check .env file
271
+ if (rootPath) {
272
+ const envPath = path.join(rootPath, '.env');
273
+ if (fs.existsSync(envPath)) {
274
+ try {
275
+ const content = fs.readFileSync(envPath, 'utf-8');
276
+ for (const [envVar, provider] of Object.entries(envKeys)) {
277
+ const match = content.match(new RegExp(`^${envVar}\\s*=\\s*["']?([^"'\\s]+)`, 'm'));
278
+ if (match) return createProvider(provider, match[1]);
279
+ }
280
+ } catch { /* ignore */ }
281
+ }
282
+ }
283
+
284
+ return null;
285
+ }
286
+
287
+ export default { createProvider, autoDetectProvider };