s9n-devops-agent 2.0.10 → 2.0.11-dev.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,497 @@
+ #!/usr/bin/env node
+
+ /**
+  * ============================================================================
+  * LLM-POWERED CONTRACT ANALYSIS SCRIPT
+  * ============================================================================
+  *
+  * This script uses the Groq LLM API to perform intelligent analysis of code
+  * and generate human-readable contract documentation.
+  *
+  * Usage:
+  *   node scripts/contract-automation/analyze-with-llm.js --scan-results=path/to/results.json
+  *   node scripts/contract-automation/analyze-with-llm.js --analyze-file=src/features/auth/index.js
+  *   node scripts/contract-automation/analyze-with-llm.js --validate-contracts
+  *
+  * Options:
+  *   --scan-results=<path>   Use scan results from generate-contracts.js
+  *   --analyze-file=<path>   Analyze a specific file
+  *   --validate-contracts    Validate existing contracts for completeness
+  *   --model=<name>          Groq model to use (default: llama-3.1-70b-versatile)
+  *   --verbose               Detailed logging
+  *
+  * Environment Variables:
+  *   GROQ_API_KEY            Required - Groq API key (OPENAI_API_KEY also accepted)
+  *
+  * ============================================================================
+  */
+
+ import fs from 'fs';
+ import path from 'path';
+ import Groq from 'groq-sdk';
+ import { fileURLToPath } from 'url';
+ import { dirname } from 'path';
+ import { credentialsManager } from '../../src/credentials-manager.js';
+
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = dirname(__filename);
+
+ // Attempt to load credentials manager for local key injection
+ credentialsManager.injectEnv();
+
+ // Configuration
+ const CONFIG = {
+   rootDir: process.cwd(),
+   contractsDir: path.join(process.cwd(), 'House_Rules_Contracts'),
+   verbose: process.argv.includes('--verbose'),
+   scanResults: getArgValue('--scan-results'),
+   analyzeFile: getArgValue('--analyze-file'),
+   validateContracts: process.argv.includes('--validate-contracts'),
+   model: getArgValue('--model') || 'llama-3.1-70b-versatile'
+ };
+
+ // Initialize Groq client
+ const groq = new Groq({
+   apiKey: process.env.GROQ_API_KEY || process.env.OPENAI_API_KEY
+ });
+
+ // Helper: Get command line argument value
+ function getArgValue(argName) {
+   const arg = process.argv.find(a => a.startsWith(argName + '='));
+   return arg ? arg.split('=')[1] : null;
+ }
+
+ // Helper: Log with optional verbose mode
+ function log(message, level = 'info') {
+   const prefix = {
+     info: '[INFO]',
+     warn: '[WARN]',
+     error: '[ERROR]',
+     success: '[SUCCESS]',
+     debug: '[DEBUG]'
+   }[level];
+
+   if (level === 'debug' && !CONFIG.verbose) return;
+
+   console.log(`${prefix} ${message}`);
+ }
+
+ // Helper: Read file safely
+ function readFileSafe(filePath) {
+   try {
+     return fs.readFileSync(filePath, 'utf8');
+   } catch (error) {
+     log(`Failed to read ${filePath}: ${error.message}`, 'warn');
+     return '';
+   }
+ }
+
+ // Helper: Write JSON file
+ function writeJSON(filePath, data) {
+   fs.writeFileSync(filePath, JSON.stringify(data, null, 2), 'utf8');
+ }
+
+ // ============================================================================
+ // LLM INTERACTION
+ // ============================================================================
+
+ async function callLLM(prompt, systemPrompt = 'You are a helpful assistant.') {
+   try {
+     log(`Calling LLM (${CONFIG.model})...`, 'debug');
+
+     const response = await groq.chat.completions.create({
+       model: CONFIG.model,
+       messages: [
+         { role: 'system', content: systemPrompt },
+         { role: 'user', content: prompt }
+       ],
+       temperature: 0.3, // Lower temperature for more deterministic output
+       max_tokens: 4000
+     });
+
+     return response.choices[0].message.content;
+   } catch (error) {
+     log(`LLM call failed: ${error.message}`, 'error');
+     throw error;
+   }
+ }
+
+ // ============================================================================
+ // FEATURE ANALYSIS
+ // ============================================================================
+
+ async function analyzeFeatures(features) {
+   log('Analyzing features with LLM...');
+
+   const systemPrompt = `You are a software architect analyzing a codebase.
+ Your task is to analyze feature information and provide detailed documentation including:
+ - Clear feature descriptions
+ - User stories
+ - Acceptance criteria
+ - Dependencies
+ - Priority assessment
+
+ Output should be in JSON format.`;
+
+   const prompt = `Analyze these features and provide detailed documentation for each:
+
+ ${JSON.stringify(features, null, 2)}
+
+ For each feature, provide:
+ 1. A clear description (2-3 sentences)
+ 2. A user story in format: "As a [user type], I want to [action] so that [benefit]"
+ 3. 3-5 acceptance criteria
+ 4. Likely dependencies on other features
+ 5. Priority assessment (critical/high/medium/low) with reasoning
+
+ Return as JSON array with same structure plus new fields: description, userStory, acceptanceCriteria, dependencies, priorityReasoning.`;
+
+   const result = await callLLM(prompt, systemPrompt);
+
+   try {
+     return JSON.parse(result);
+   } catch (error) {
+     log('Failed to parse LLM response as JSON, returning raw text', 'warn');
+     return { raw: result };
+   }
+ }
+
+ // ============================================================================
+ // API ENDPOINT ANALYSIS
+ // ============================================================================
+
+ async function analyzeAPIEndpoints(endpoints) {
+   log('Analyzing API endpoints with LLM...');
+
+   const systemPrompt = `You are an API documentation expert.
+ Analyze API endpoints and provide comprehensive documentation including:
+ - Endpoint purpose and description
+ - Expected request/response formats
+ - Authentication requirements
+ - Error scenarios
+ - Usage examples
+
+ Output should be in JSON format.`;
+
+   const prompt = `Analyze these API endpoints and provide detailed documentation:
+
+ ${JSON.stringify(endpoints.slice(0, 20), null, 2)}
+
+ For each endpoint, infer and provide:
+ 1. Purpose and description
+ 2. Likely authentication requirement (yes/no)
+ 3. Expected request parameters (path, query, body)
+ 4. Expected response format
+ 5. Common error scenarios
+ 6. Usage example (curl command)
+
+ Return as JSON array with enhanced documentation.`;
+
+   const result = await callLLM(prompt, systemPrompt);
+
+   try {
+     return JSON.parse(result);
+   } catch (error) {
+     log('Failed to parse LLM response as JSON, returning raw text', 'warn');
+     return { raw: result };
+   }
+ }
+
+ // ============================================================================
+ // SQL QUERY ANALYSIS
+ // ============================================================================
+
+ async function analyzeSQLQueries(queries) {
+   log('Analyzing SQL queries with LLM...');
+
+   const systemPrompt = `You are a database expert analyzing SQL queries.
+ Provide insights on:
+ - Query purpose and description
+ - Performance considerations
+ - Security concerns
+ - Suggested optimizations
+ - Parameter types and validation
+
+ Output should be in JSON format.`;
+
+   const queryList = Object.values(queries).slice(0, 10);
+
+   const prompt = `Analyze these SQL queries and provide documentation:
+
+ ${JSON.stringify(queryList, null, 2)}
+
+ For each query, provide:
+ 1. Clear description of what it does
+ 2. Performance notes (indexes needed, complexity)
+ 3. Security notes (SQL injection risks, data sensitivity)
+ 4. Parameter types and validation rules
+ 5. Suggested optimizations if any
+
+ Return as JSON array with enhanced documentation.`;
+
+   const result = await callLLM(prompt, systemPrompt);
+
+   try {
+     return JSON.parse(result);
+   } catch (error) {
+     log('Failed to parse LLM response as JSON, returning raw text', 'warn');
+     return { raw: result };
+   }
+ }
+
+ // ============================================================================
+ // THIRD-PARTY INTEGRATION ANALYSIS
+ // ============================================================================
+
+ async function analyzeIntegrations(integrations) {
+   log('Analyzing third-party integrations with LLM...');
+
+   const systemPrompt = `You are a software integration expert.
+ Analyze third-party service integrations and provide:
+ - Service purpose and use cases
+ - Best practices for integration
+ - Error handling strategies
+ - Security considerations
+ - Cost optimization tips
+
+ Output should be in JSON format.`;
+
+   const prompt = `Analyze these third-party integrations and provide documentation:
+
+ ${JSON.stringify(integrations, null, 2)}
+
+ For each integration, provide:
+ 1. Detailed purpose and use cases
+ 2. Recommended error handling strategy
+ 3. Security best practices
+ 4. Rate limiting considerations
+ 5. Cost optimization suggestions
+ 6. Alternative services (if applicable)
+
+ Return as JSON array with enhanced documentation.`;
+
+   const result = await callLLM(prompt, systemPrompt);
+
+   try {
+     return JSON.parse(result);
+   } catch (error) {
+     log('Failed to parse LLM response as JSON, returning raw text', 'warn');
+     return { raw: result };
+   }
+ }
+
+ // ============================================================================
+ // FILE ANALYSIS
+ // ============================================================================
+
+ async function analyzeFile(filePath) {
+   log(`Analyzing file: ${filePath}`);
+
+   const content = readFileSafe(filePath);
+   if (!content) {
+     throw new Error(`Could not read file: ${filePath}`);
+   }
+
+   const systemPrompt = `You are a code analysis expert.
+ Analyze the provided code file and extract:
+ - Purpose and functionality
+ - API endpoints (if any)
+ - Database queries (if any)
+ - Third-party integrations (if any)
+ - Environment variables used
+ - Dependencies on other modules
+ - Security considerations
+ - Suggested improvements
+
+ Provide structured analysis in JSON format.`;
+
+   const prompt = `Analyze this code file and provide comprehensive documentation:
+
+ File: ${filePath}
+
+ \`\`\`
+ ${content.slice(0, 10000)} ${content.length > 10000 ? '... (truncated)' : ''}
+ \`\`\`
+
+ Provide:
+ 1. Module purpose and description
+ 2. Exported functions/classes with descriptions
+ 3. API endpoints defined (method, path, purpose)
+ 4. Database queries used (with purpose)
+ 5. Third-party services integrated
+ 6. Environment variables required
+ 7. Dependencies on other modules
+ 8. Security considerations
+ 9. Suggested improvements or concerns
+
+ Return as structured JSON.`;
+
+   const result = await callLLM(prompt, systemPrompt);
+
+   try {
+     return JSON.parse(result);
+   } catch (error) {
+     log('Failed to parse LLM response as JSON, returning raw text', 'warn');
+     return { raw: result };
+   }
+ }
+
+ // ============================================================================
+ // CONTRACT VALIDATION
+ // ============================================================================
+
+ async function validateContracts() {
+   log('Validating existing contracts with LLM...');
+
+   const contracts = {
+     features: path.join(CONFIG.contractsDir, 'FEATURES_CONTRACT.md'),
+     api: path.join(CONFIG.contractsDir, 'API_CONTRACT.md'),
+     database: path.join(CONFIG.contractsDir, 'DATABASE_SCHEMA_CONTRACT.md'),
+     sql: path.join(CONFIG.contractsDir, 'SQL_CONTRACT.json'),
+     integrations: path.join(CONFIG.contractsDir, 'THIRD_PARTY_INTEGRATIONS.md'),
+     infra: path.join(CONFIG.contractsDir, 'INFRA_CONTRACT.md')
+   };
+
+   const validation = {};
+
+   for (const [name, filePath] of Object.entries(contracts)) {
+     if (!fs.existsSync(filePath)) {
+       validation[name] = {
+         exists: false,
+         complete: false,
+         issues: ['Contract file does not exist']
+       };
+       continue;
+     }
+
+     const content = readFileSafe(filePath);
+
+     const systemPrompt = `You are a documentation quality expert.
+ Analyze contract documentation and identify:
+ - Completeness (are all sections filled?)
+ - Missing information
+ - Inconsistencies
+ - Suggestions for improvement
+
+ Output should be in JSON format.`;
+
+     const prompt = `Analyze this contract file for completeness and quality:
+
+ File: ${name}
+
+ \`\`\`
+ ${content.slice(0, 5000)} ${content.length > 5000 ? '... (truncated)' : ''}
+ \`\`\`
+
+ Provide:
+ 1. Is the contract complete? (true/false)
+ 2. Completion percentage (0-100)
+ 3. List of missing sections or information
+ 4. List of inconsistencies or errors
+ 5. Suggestions for improvement
+
+ Return as JSON with fields: complete, completionPercentage, missingSections, inconsistencies, suggestions.`;
+
+     const result = await callLLM(prompt, systemPrompt);
+
+     try {
+       validation[name] = JSON.parse(result);
+       validation[name].exists = true;
+     } catch (error) {
+       validation[name] = {
+         exists: true,
+         complete: false,
+         error: 'Failed to parse validation result',
+         raw: result
+       };
+     }
+   }
+
+   return validation;
+ }
+
+ // ============================================================================
+ // MAIN EXECUTION
+ // ============================================================================
+
+ async function main() {
+   log('='.repeat(80));
+   log('LLM-POWERED CONTRACT ANALYSIS');
+   log('='.repeat(80));
+
+   // Check API key
+   if (!process.env.GROQ_API_KEY && !process.env.OPENAI_API_KEY) {
+     throw new Error('GROQ_API_KEY environment variable is required. Please set it or run "s9n-devops-agent setup" to configure your Groq API key.');
+   }
+
+   let results = {};
+
+   // Analyze scan results
+   if (CONFIG.scanResults) {
+     log(`Loading scan results from: ${CONFIG.scanResults}`);
+     const scanData = JSON.parse(readFileSafe(CONFIG.scanResults));
+
+     if (scanData.results.features) {
+       results.features = await analyzeFeatures(scanData.results.features);
+     }
+
+     if (scanData.results.api) {
+       results.api = await analyzeAPIEndpoints(scanData.results.api);
+     }
+
+     if (scanData.results.sql) {
+       results.sql = await analyzeSQLQueries(scanData.results.sql);
+     }
+
+     if (scanData.results.integrations) {
+       results.integrations = await analyzeIntegrations(scanData.results.integrations);
+     }
+   }
+
+   // Analyze specific file
+   if (CONFIG.analyzeFile) {
+     results.fileAnalysis = await analyzeFile(CONFIG.analyzeFile);
+   }
+
+   // Validate contracts
+   if (CONFIG.validateContracts) {
+     results.validation = await validateContracts();
+   }
+
+   // Save results
+   const outputPath = path.join(CONFIG.contractsDir, 'llm-analysis-results.json');
+   writeJSON(outputPath, {
+     generated: new Date().toISOString(),
+     model: CONFIG.model,
+     results
+   });
+
+   log(`Results saved to: ${outputPath}`, 'success');
+
+   // Summary
+   log('='.repeat(80));
+   log('ANALYSIS COMPLETE', 'success');
+   log('='.repeat(80));
+
+   if (results.validation) {
+     log('Contract Validation Results:');
+     for (const [name, result] of Object.entries(results.validation)) {
+       if (result.exists) {
+         const status = result.complete ? '✅' : '⚠️';
+         const pct = result.completionPercentage || 0;
+         log(` ${status} ${name}: ${pct}% complete`);
+       } else {
+         log(` ❌ ${name}: Does not exist`);
+       }
+     }
+   }
+
+   log('='.repeat(80));
+ }
+
+ // Run
+ main().catch(error => {
+   log(`Fatal error: ${error.message}`, 'error');
+   console.error(error);
+   process.exit(1);
+ });
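
The script added in this version writes its output to `House_Rules_Contracts/llm-analysis-results.json` with top-level `generated`, `model`, and `results` fields, and the `results.validation` entries carry `exists`, `complete`, and `completionPercentage`. A minimal sketch of how a downstream script might read that file is shown below; the consumer itself is hypothetical and not part of the published package.

```js
// Hypothetical consumer sketch: reads the llm-analysis-results.json file
// that analyze-with-llm.js writes. Not part of the s9n-devops-agent package.
import fs from 'fs';
import path from 'path';

const resultsPath = path.join(process.cwd(), 'House_Rules_Contracts', 'llm-analysis-results.json');
const { generated, model, results } = JSON.parse(fs.readFileSync(resultsPath, 'utf8'));

console.log(`Analysis generated ${generated} using ${model}`);

// Contract validation entries mirror the summary printed by main():
// exists / complete / completionPercentage per contract file.
for (const [name, report] of Object.entries(results.validation ?? {})) {
  const pct = report.completionPercentage ?? 0;
  console.log(`${name}: ${report.exists ? `${pct}% complete` : 'missing'}`);
}
```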