ripp-cli 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,354 @@
1
+ /**
2
+ * RIPP AI Provider Interface
3
+ * Pluggable architecture for AI-assisted intent inference
4
+ */
5
+
6
/**
 * Abstract base class for AI providers.
 *
 * Stores the provider configuration and declares the interface every
 * concrete provider must implement. Both interface methods throw until
 * overridden by a subclass.
 */
class AIProvider {
  constructor(config) {
    this.config = config;
  }

  /**
   * Infer candidate intent from an evidence pack.
   * @param {Object} evidencePack - Evidence pack index
   * @param {Object} options - Additional options
   * @returns {Promise<Object>} Candidate intent with confidence scores
   * @throws {Error} Always, unless a subclass overrides this method.
   */
  async inferIntent(evidencePack, options) {
    throw new Error('inferIntent() must be implemented by subclass');
  }

  /**
   * Report whether the provider has everything it needs to run.
   * @returns {boolean} True if configured correctly
   * @throws {Error} Always, unless a subclass overrides this method.
   */
  isConfigured() {
    throw new Error('isConfigured() must be implemented by subclass');
  }
}
33
+
34
/**
 * OpenAI Provider
 *
 * Calls the OpenAI Chat Completions API to infer candidate RIPP intent
 * from an evidence pack. Failed attempts (API error or validation
 * failure) feed their error message back into the next prompt, up to
 * config.maxRetries attempts.
 */
class OpenAIProvider extends AIProvider {
  constructor(config) {
    super(config);
    // The key is read from the environment, not from config, so it is
    // never persisted alongside other settings.
    this.apiKey = process.env.OPENAI_API_KEY;
  }

  /** @returns {boolean} True when OPENAI_API_KEY is set. */
  isConfigured() {
    return !!this.apiKey;
  }

  /**
   * Infer intent from an evidence pack with retry-on-failure.
   * @param {Object} evidencePack - Evidence pack index
   * @param {Object} [options] - { targetLevel, ... }
   * @returns {Promise<Object>} Validated candidates envelope
   * @throws {Error} If the API key is missing or every attempt fails
   */
  async inferIntent(evidencePack, options = {}) {
    if (!this.isConfigured()) {
      throw new Error('OPENAI_API_KEY environment variable is not set');
    }

    const prompt = this.buildPrompt(evidencePack, options);
    const maxRetries = this.config.maxRetries || 3;
    let lastError = null;

    for (let attempt = 1; attempt <= maxRetries; attempt++) {
      try {
        const response = await this.makeRequest(prompt, options);
        const candidates = this.parseResponse(response, evidencePack);

        // Validate structure before returning
        this.validateCandidates(candidates);

        return candidates;
      } catch (error) {
        lastError = error;

        if (attempt < maxRetries) {
          // Add feedback to prompt for next attempt
          prompt.feedback = error.message;
        }
      }
    }

    throw new Error(`Failed to infer intent after ${maxRetries} attempts: ${lastError.message}`);
  }

  /**
   * Build the system/user prompt pair for the given evidence pack.
   * Evidence counts use optional chaining so a partially-populated
   * evidence pack produces "0" instead of a TypeError.
   * @param {Object} evidencePack - Evidence pack index
   * @param {Object} options - { targetLevel } (defaults to level 1)
   * @returns {{system: string, user: string, feedback: ?string}}
   */
  buildPrompt(evidencePack, options) {
    const targetLevel = options.targetLevel || 1;
    const evidence = evidencePack.evidence ?? {};

    return {
      system: `You are a RIPP (Regenerative Intent Prompting Protocol) intent inference assistant.

Your task is to analyze code evidence and infer candidate RIPP sections with confidence scores.

CRITICAL RULES:
1. Every candidate MUST have:
   - source: "inferred"
   - confidence: 0.0-1.0 (be conservative)
   - evidence: array of {file, line, snippet} references
   - requires_human_confirmation: true

2. NEVER infer:
   - Permissions (mark as "unknown" - security critical)
   - Tenancy (mark as "unknown" - security critical)
   - Audit requirements (mark as "unknown" - compliance critical)

3. Be conservative with confidence:
   - 0.9-1.0: Direct evidence in code
   - 0.7-0.9: Strong patterns
   - 0.5-0.7: Reasonable inference
   - 0.0-0.5: Weak or uncertain

4. Output MUST be valid JSON matching the intent-candidates schema.`,

      user: `Analyze this evidence pack and generate Level ${targetLevel} RIPP candidate intent:

Evidence Summary:
- Dependencies: ${evidence.dependencies?.length ?? 0}
- Routes: ${evidence.routes?.length ?? 0}
- Schemas: ${evidence.schemas?.length ?? 0}
- Auth Signals: ${evidence.auth?.length ?? 0}
- Workflows: ${evidence.workflows?.length ?? 0}

Evidence Details:
${JSON.stringify(evidence, null, 2)}

Generate candidates for these sections:
- purpose (problem, solution, value)
- ux_flow (user interaction steps)
- data_contracts (inputs, outputs)
${targetLevel >= 2 ? '- api_contracts (endpoints, methods)' : ''}
${targetLevel >= 2 ? '- failure_modes (error scenarios)' : ''}

Return ONLY valid JSON matching the schema. No markdown, no explanations.`,

      feedback: null
    };
  }

  /**
   * POST the prompt to the Chat Completions endpoint.
   * @param {Object} prompt - {system, user, feedback} from buildPrompt()
   * @param {Object} options - Currently unused; reserved for transport options
   * @returns {Promise<string>} Raw JSON string returned by the model
   * @throws {Error} On non-2xx responses, timeout, or a malformed response body
   */
  async makeRequest(prompt, options) {
    const model = this.config.model || 'gpt-4o-mini';
    const timeout = this.config.timeout || 30000;

    const messages = [
      { role: 'system', content: prompt.system },
      { role: 'user', content: prompt.user }
    ];

    if (prompt.feedback) {
      messages.push({
        role: 'user',
        content: `Previous attempt failed validation: ${prompt.feedback}\nPlease fix and try again.`
      });
    }

    const requestBody = {
      model,
      messages,
      temperature: 0.3, // Lower temperature for more deterministic output
      response_format: { type: 'json_object' }
    };

    // Use native fetch (Node 18+) or require node-fetch for older versions
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), timeout);

    try {
      const response = await fetch('https://api.openai.com/v1/chat/completions', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this.apiKey}`
        },
        body: JSON.stringify(requestBody),
        signal: controller.signal
      });

      clearTimeout(timeoutId);

      if (!response.ok) {
        const error = await response.text();
        throw new Error(`OpenAI API error: ${response.status} - ${error}`);
      }

      const data = await response.json();
      // Guard against malformed/empty responses so callers see a clear
      // error instead of a TypeError on a missing property chain.
      const content = data.choices?.[0]?.message?.content;
      if (typeof content !== 'string') {
        throw new Error('OpenAI API response contained no message content');
      }
      return content;
    } catch (error) {
      clearTimeout(timeoutId);
      if (error.name === 'AbortError') {
        throw new Error('Request timeout');
      }
      throw error;
    }
  }

  /**
   * Parse the model's JSON output and normalize it into the standard
   * candidates envelope. Only JSON.parse failures are reported as parse
   * errors; envelope/hash errors propagate unchanged.
   * @param {string} response - Raw model output
   * @param {Object} evidencePack - Used to stamp the evidence pack hash
   * @returns {Object} Envelope with version, created, generatedBy, candidates
   * @throws {Error} When the response is not valid JSON
   */
  parseResponse(response, evidencePack) {
    let parsed;
    try {
      parsed = JSON.parse(response);
    } catch (error) {
      throw new Error(`Failed to parse AI response as JSON: ${error.message}`);
    }

    // Shared envelope, built once instead of duplicated per branch.
    const envelope = {
      version: '1.0',
      created: new Date().toISOString(),
      generatedBy: {
        provider: 'openai',
        model: this.config.model || 'gpt-4o-mini',
        evidencePackHash: this.hashEvidencePack(evidencePack)
      }
    };

    if (Array.isArray(parsed)) {
      // Bare array of candidates.
      return { ...envelope, candidates: parsed };
    }
    if (parsed.candidates) {
      // Already an envelope-like object; its fields win over defaults.
      return { ...envelope, ...parsed };
    }
    // Single candidate object.
    return { ...envelope, candidates: [parsed] };
  }

  /**
   * SHA-256 hash of the serialized evidence pack, used to tie generated
   * candidates to the exact evidence they were derived from.
   * @param {Object} evidencePack
   * @returns {string} Hex digest
   */
  hashEvidencePack(evidencePack) {
    const crypto = require('crypto');
    const content = JSON.stringify(evidencePack);
    return crypto.createHash('sha256').update(content).digest('hex');
  }

  /**
   * Enforce the candidate contract: inferred source, bounded confidence,
   * evidence references, and mandatory human confirmation.
   * @param {Object} candidatesData - Envelope returned by parseResponse()
   * @throws {Error} Describing the first violated rule
   */
  validateCandidates(candidatesData) {
    if (!candidatesData.candidates || !Array.isArray(candidatesData.candidates)) {
      throw new Error('Response must include "candidates" array');
    }

    for (const candidate of candidatesData.candidates) {
      if (candidate.source !== 'inferred') {
        throw new Error('All candidates must have source: "inferred"');
      }

      if (
        typeof candidate.confidence !== 'number' ||
        candidate.confidence < 0 ||
        candidate.confidence > 1
      ) {
        throw new Error('All candidates must have confidence between 0.0 and 1.0');
      }

      if (!Array.isArray(candidate.evidence) || candidate.evidence.length === 0) {
        throw new Error('All candidates must have at least one evidence reference');
      }

      if (candidate.requires_human_confirmation !== true) {
        throw new Error('All candidates must have requires_human_confirmation: true');
      }

      // Validate evidence references
      for (const ev of candidate.evidence) {
        if (!ev.file || typeof ev.line !== 'number') {
          throw new Error('Evidence must have file and line number');
        }
      }
    }
  }
}
272
+
273
/**
 * Azure OpenAI Provider
 *
 * Reuses OpenAIProvider's prompting and parsing but reads Azure-specific
 * credentials from the environment. The transport itself is still a stub.
 */
class AzureOpenAIProvider extends OpenAIProvider {
  constructor(config) {
    super(config);
    this.apiKey = process.env.AZURE_OPENAI_API_KEY;
    this.endpoint = process.env.AZURE_OPENAI_ENDPOINT;
  }

  /** Azure requires both an API key and a resource endpoint. */
  isConfigured() {
    return Boolean(this.apiKey) && Boolean(this.endpoint);
  }

  /** Would mirror OpenAI's request against the Azure-specific endpoint. */
  async makeRequest(prompt, options) {
    throw new Error('Azure OpenAI provider not yet implemented');
  }
}
292
+
293
/**
 * Ollama Provider (local)
 *
 * Targets a locally running Ollama daemon; inference is still a stub.
 */
class OllamaProvider extends AIProvider {
  constructor(config) {
    super(config);
    // Fall back to the default local Ollama daemon address.
    this.endpoint = config.customEndpoint || 'http://localhost:11434';
  }

  /** Configured whenever an endpoint is known (a default always exists). */
  isConfigured() {
    return Boolean(this.endpoint);
  }

  async inferIntent(evidencePack, options = {}) {
    throw new Error('Ollama provider not yet implemented');
  }
}
310
+
311
/**
 * Custom Provider
 *
 * Points at a user-supplied endpoint; inference is still a stub.
 */
class CustomProvider extends AIProvider {
  constructor(config) {
    super(config);
    // Unlike Ollama, there is no sensible default endpoint here.
    this.endpoint = config.customEndpoint;
  }

  /** Configured only when the user supplied a custom endpoint. */
  isConfigured() {
    return Boolean(this.endpoint);
  }

  async inferIntent(evidencePack, options = {}) {
    throw new Error('Custom provider not yet implemented');
  }
}
328
+
329
/**
 * Provider Factory
 *
 * Instantiate the provider named by config.provider.
 * @param {Object} config - Provider configuration; config.provider selects the class
 * @returns {AIProvider} A concrete provider instance
 * @throws {Error} When config.provider names no known provider
 */
function createProvider(config) {
  // Map (not a plain object) so lookups cannot hit prototype properties.
  const registry = new Map([
    ['openai', OpenAIProvider],
    ['azure-openai', AzureOpenAIProvider],
    ['ollama', OllamaProvider],
    ['custom', CustomProvider]
  ]);

  const Provider = registry.get(config.provider);
  if (Provider === undefined) {
    throw new Error(`Unknown AI provider: ${config.provider}`);
  }
  return new Provider(config);
}
346
+
347
// Public API: the abstract base class, the concrete providers, and the
// factory that selects among them by config.provider.
module.exports = {
  AIProvider,
  OpenAIProvider,
  AzureOpenAIProvider,
  OllamaProvider,
  CustomProvider,
  createProvider
};