cadr-cli 0.0.1 → 1.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. package/dist/adr.d.ts +50 -0
  2. package/dist/adr.d.ts.map +1 -0
  3. package/dist/adr.js +156 -0
  4. package/dist/adr.js.map +1 -0
  5. package/dist/adr.test.d.ts +8 -0
  6. package/dist/adr.test.d.ts.map +1 -0
  7. package/dist/adr.test.js +256 -0
  8. package/dist/adr.test.js.map +1 -0
  9. package/dist/analysis.d.ts +24 -0
  10. package/dist/analysis.d.ts.map +1 -0
  11. package/dist/analysis.js +281 -0
  12. package/dist/analysis.js.map +1 -0
  13. package/dist/analysis.test.d.ts +8 -0
  14. package/dist/analysis.test.d.ts.map +1 -0
  15. package/dist/analysis.test.js +351 -0
  16. package/dist/analysis.test.js.map +1 -0
  17. package/dist/commands/analyze.d.ts +14 -0
  18. package/dist/commands/analyze.d.ts.map +1 -0
  19. package/dist/commands/analyze.js +56 -0
  20. package/dist/commands/analyze.js.map +1 -0
  21. package/dist/commands/init.d.ts +12 -0
  22. package/dist/commands/init.d.ts.map +1 -0
  23. package/dist/commands/init.js +93 -0
  24. package/dist/commands/init.js.map +1 -0
  25. package/dist/commands/init.test.d.ts +2 -0
  26. package/dist/commands/init.test.d.ts.map +1 -0
  27. package/dist/commands/init.test.js +56 -0
  28. package/dist/commands/init.test.js.map +1 -0
  29. package/dist/config.d.ts +40 -0
  30. package/dist/config.d.ts.map +1 -0
  31. package/dist/config.js +208 -0
  32. package/dist/config.js.map +1 -0
  33. package/dist/config.test.d.ts +2 -0
  34. package/dist/config.test.d.ts.map +1 -0
  35. package/dist/config.test.js +97 -0
  36. package/dist/config.test.js.map +1 -0
  37. package/dist/git.d.ts +42 -0
  38. package/dist/git.d.ts.map +1 -1
  39. package/dist/git.js +157 -0
  40. package/dist/git.js.map +1 -1
  41. package/dist/index.d.ts +2 -3
  42. package/dist/index.d.ts.map +1 -1
  43. package/dist/index.js +78 -62
  44. package/dist/index.js.map +1 -1
  45. package/dist/index.test.d.ts +2 -0
  46. package/dist/index.test.d.ts.map +1 -0
  47. package/dist/index.test.js +51 -0
  48. package/dist/index.test.js.map +1 -0
  49. package/dist/llm.d.ts +73 -0
  50. package/dist/llm.d.ts.map +1 -0
  51. package/dist/llm.js +264 -0
  52. package/dist/llm.js.map +1 -0
  53. package/dist/llm.test.d.ts +2 -0
  54. package/dist/llm.test.d.ts.map +1 -0
  55. package/dist/llm.test.js +592 -0
  56. package/dist/llm.test.js.map +1 -0
  57. package/dist/logger.d.ts.map +1 -1
  58. package/dist/logger.js +5 -3
  59. package/dist/logger.js.map +1 -1
  60. package/dist/logger.test.d.ts +2 -0
  61. package/dist/logger.test.d.ts.map +1 -0
  62. package/dist/logger.test.js +78 -0
  63. package/dist/logger.test.js.map +1 -0
  64. package/dist/prompts.d.ts +49 -0
  65. package/dist/prompts.d.ts.map +1 -0
  66. package/dist/prompts.js +195 -0
  67. package/dist/prompts.js.map +1 -0
  68. package/dist/prompts.test.d.ts +2 -0
  69. package/dist/prompts.test.d.ts.map +1 -0
  70. package/dist/prompts.test.js +427 -0
  71. package/dist/prompts.test.js.map +1 -0
  72. package/dist/providers/gemini.d.ts +3 -0
  73. package/dist/providers/gemini.d.ts.map +1 -0
  74. package/dist/providers/gemini.js +38 -0
  75. package/dist/providers/gemini.js.map +1 -0
  76. package/dist/providers/index.d.ts +2 -0
  77. package/dist/providers/index.d.ts.map +1 -0
  78. package/dist/providers/index.js +6 -0
  79. package/dist/providers/index.js.map +1 -0
  80. package/dist/providers/openai.d.ts +3 -0
  81. package/dist/providers/openai.d.ts.map +1 -0
  82. package/dist/providers/openai.js +24 -0
  83. package/dist/providers/openai.js.map +1 -0
  84. package/dist/providers/registry.d.ts +4 -0
  85. package/dist/providers/registry.d.ts.map +1 -0
  86. package/dist/providers/registry.js +16 -0
  87. package/dist/providers/registry.js.map +1 -0
  88. package/dist/providers/types.d.ts +11 -0
  89. package/dist/providers/types.d.ts.map +1 -0
  90. package/dist/providers/types.js +3 -0
  91. package/dist/providers/types.js.map +1 -0
  92. package/dist/version.test.d.ts +3 -0
  93. package/dist/version.test.d.ts.map +1 -0
  94. package/dist/version.test.js +25 -0
  95. package/dist/version.test.js.map +1 -0
  96. package/package.json +14 -5
  97. package/src/adr.test.ts +278 -0
  98. package/src/adr.ts +136 -0
  99. package/src/analysis.test.ts +396 -0
  100. package/src/analysis.ts +262 -0
  101. package/src/commands/analyze.ts +56 -0
  102. package/src/commands/init.test.ts +27 -0
  103. package/src/commands/init.ts +99 -0
  104. package/src/config.test.ts +79 -0
  105. package/src/config.ts +214 -0
  106. package/src/git.ts +240 -0
  107. package/src/index.test.ts +59 -0
  108. package/src/index.ts +80 -60
  109. package/src/llm.test.ts +701 -0
  110. package/src/llm.ts +345 -0
  111. package/src/logger.test.ts +90 -0
  112. package/src/logger.ts +6 -3
  113. package/src/prompts.test.ts +515 -0
  114. package/src/prompts.ts +174 -0
  115. package/src/providers/gemini.ts +41 -0
  116. package/src/providers/index.ts +1 -0
  117. package/src/providers/openai.ts +22 -0
  118. package/src/providers/registry.ts +16 -0
  119. package/src/providers/types.ts +12 -0
  120. package/src/version.test.ts +29 -0
  121. package/bin/cadr.js +0 -16
package/src/llm.ts ADDED
@@ -0,0 +1,345 @@
1
+ /**
2
+ * LLM Client Module
3
+ *
4
+ * Provider-based wrapper for analyzing code changes.
5
+ * Implements fail-open error handling per constitution requirements.
6
+ */
7
+
8
+ import { getProvider } from './providers';
9
+ import { AnalysisConfig } from './config';
10
+ import { loggerInstance as logger } from './logger';
11
+
12
/**
 * Analysis request data structure.
 *
 * Bundles everything needed to ask the LLM whether a staged change is
 * significant.
 */
export interface AnalysisRequest {
  // Paths of the staged files under analysis (analyzeChanges uses this
  // only for the logged file count).
  file_paths: string[];
  // Raw diff of the staged changes. NOTE(review): not read directly by
  // analyzeChanges — presumably already baked into analysis_prompt by the
  // caller; confirm against the prompt builder.
  diff_content: string;
  // Additional repository context. NOTE(review): likewise unused by
  // analyzeChanges itself; likely consumed during prompt construction.
  repository_context: string;
  // The fully rendered prompt that is actually sent to the provider.
  analysis_prompt: string;
}
21
+
22
+ /**
23
+ * Rough token estimation (1 token ≈ 4 characters for English text)
24
+ * This is a conservative estimate
25
+ */
26
+ function estimateTokens(text: string): number {
27
+ return Math.ceil(text.length / 4);
28
+ }
29
+
30
/**
 * Analysis result from LLM.
 */
export interface AnalysisResult {
  // True when the LLM judged the change architecturally significant.
  is_significant: boolean;
  // LLM's explanation. Validation in analyzeChanges requires it to be
  // non-empty when is_significant is true; it may be empty otherwise.
  reason: string;
  // Optional model self-reported confidence. analyzeChanges only keeps
  // values that are numbers in [0, 1]; otherwise the field is omitted.
  confidence?: number;
  // ISO-8601 timestamp of when the result was assembled locally
  // (new Date().toISOString()), not when the LLM responded.
  timestamp: string;
}
39
+
40
/**
 * Analysis response including potential errors.
 *
 * Fail-open contract: on any failure, `result` is null and `error` carries
 * a human-readable explanation; on success, `result` is set and `error`
 * is undefined.
 */
export interface AnalysisResponse {
  result: AnalysisResult | null;
  error?: string;
}
47
+
48
+ /**
49
+ * Analyze staged changes using OpenAI LLM
50
+ *
51
+ * @param config - Analysis configuration with API settings
52
+ * @param request - Analysis request with code changes
53
+ * @returns Promise resolving to analysis response with result or error
54
+ */
55
+ export async function analyzeChanges(
56
+ config: AnalysisConfig,
57
+ request: AnalysisRequest
58
+ ): Promise<AnalysisResponse> {
59
+ try {
60
+ // Check if API key is available
61
+ const apiKey = process.env[config.api_key_env];
62
+ if (!apiKey) {
63
+ logger.warn('API key not found in environment', {
64
+ api_key_env: config.api_key_env,
65
+ });
66
+ return {
67
+ result: null,
68
+ error: `API key not found: ${config.api_key_env} environment variable is not set`
69
+ };
70
+ }
71
+
72
+ // Estimate tokens for logging and validation
73
+ const estimatedTokens = estimateTokens(request.analysis_prompt);
74
+
75
+ logger.info('Sending analysis request to LLM', {
76
+ provider: config.provider,
77
+ model: config.analysis_model,
78
+ file_count: request.file_paths.length,
79
+ estimated_tokens: estimatedTokens,
80
+ });
81
+
82
+ // Warn if token estimate is high (most models have 8k-32k limits)
83
+ if (estimatedTokens > 7000) {
84
+ logger.warn('High token count detected', {
85
+ estimated_tokens: estimatedTokens,
86
+ provider: config.provider,
87
+ model: config.analysis_model,
88
+ });
89
+ }
90
+
91
+ const provider = getProvider(config.provider);
92
+ const responseContent = await provider.analyze(request.analysis_prompt, {
93
+ apiKey,
94
+ model: config.analysis_model,
95
+ timeoutMs: config.timeout_seconds * 1000,
96
+ });
97
+
98
+ if (!responseContent) {
99
+ logger.warn('No response content from LLM', { provider: config.provider });
100
+ return {
101
+ result: null,
102
+ error: 'No response content from LLM'
103
+ };
104
+ }
105
+
106
+ // Parse JSON response - handle markdown-wrapped JSON
107
+ let parsedResponse: { is_significant: boolean; reason: string; confidence?: number };
108
+ try {
109
+ // Try to extract JSON from markdown code blocks if present
110
+ let jsonContent = responseContent.trim();
111
+
112
+ // Remove markdown code block if present: ```json ... ``` or ``` ... ```
113
+ const codeBlockMatch = jsonContent.match(/```(?:json)?\s*\n?([\s\S]*?)\n?```/);
114
+ if (codeBlockMatch) {
115
+ jsonContent = codeBlockMatch[1].trim();
116
+ }
117
+
118
+ // Try to find JSON object if there's surrounding text
119
+ const jsonMatch = jsonContent.match(/\{[\s\S]*\}/);
120
+ if (jsonMatch) {
121
+ jsonContent = jsonMatch[0];
122
+ }
123
+
124
+ parsedResponse = JSON.parse(jsonContent);
125
+ } catch (parseError) {
126
+ logger.warn('Failed to parse LLM response as JSON', {
127
+ error: parseError,
128
+ response: responseContent,
129
+ });
130
+ return {
131
+ result: null,
132
+ error: `Failed to parse LLM response as JSON. Response was:\n${responseContent.substring(0, 200)}...`
133
+ };
134
+ }
135
+
136
+ // Validate response format
137
+ if (
138
+ typeof parsedResponse.is_significant !== 'boolean' ||
139
+ typeof parsedResponse.reason !== 'string'
140
+ ) {
141
+ logger.warn('Invalid response format from LLM', {
142
+ response: parsedResponse,
143
+ });
144
+ return {
145
+ result: null,
146
+ error: `Invalid response format from LLM. Expected {is_significant: boolean, reason: string}, got: ${JSON.stringify(parsedResponse).substring(0, 150)}...`
147
+ };
148
+ }
149
+
150
+ // Reason is required when is_significant is true, but can be empty when false
151
+ if (parsedResponse.is_significant && !parsedResponse.reason) {
152
+ logger.warn('Missing reason for significant change', {
153
+ response: parsedResponse,
154
+ });
155
+ return {
156
+ result: null,
157
+ error: 'LLM indicated significant change but provided no reason'
158
+ };
159
+ }
160
+
161
+ // Build result with timestamp
162
+ const result: AnalysisResult = {
163
+ is_significant: parsedResponse.is_significant,
164
+ reason: parsedResponse.reason,
165
+ timestamp: new Date().toISOString(),
166
+ };
167
+
168
+ // Include confidence if provided
169
+ if (
170
+ typeof parsedResponse.confidence === 'number' &&
171
+ parsedResponse.confidence >= 0 &&
172
+ parsedResponse.confidence <= 1
173
+ ) {
174
+ result.confidence = parsedResponse.confidence;
175
+ }
176
+
177
+ logger.info('Analysis completed successfully', {
178
+ is_significant: result.is_significant,
179
+ has_confidence: result.confidence !== undefined,
180
+ });
181
+
182
+ return { result, error: undefined };
183
+ } catch (error) {
184
+ // Fail-open: log error and return descriptive error message
185
+ const errorObj = error as { status?: number; code?: string; message?: string };
186
+ let errorMessage: string;
187
+
188
+ // Check for specific error types and provide helpful messages
189
+ if (errorObj.status === 401) {
190
+ errorMessage = 'Invalid API key - please check your API key configuration';
191
+ logger.warn('LLM API authentication failed', { error: errorObj });
192
+ } else if (errorObj.status === 400 && errorObj.message?.includes('maximum context length')) {
193
+ // Extract token counts from error message if available
194
+ const tokenMatch = errorObj.message.match(/(\d+)\s+tokens/g);
195
+ errorMessage = 'Diff too large for model context window. Try:\n' +
196
+ ' • Stage fewer files at once\n' +
197
+ ' • Use gpt-4-turbo (128k context) in cadr.yaml:\n' +
198
+ ' analysis_model: gpt-4-turbo-preview\n' +
199
+ ' • Add ignore patterns to filter large files';
200
+ logger.warn('LLM context length exceeded', {
201
+ error: errorObj,
202
+ tokens: tokenMatch,
203
+ });
204
+ } else if (errorObj.status === 429) {
205
+ errorMessage = 'Rate limit exceeded - please try again later or check your API quota';
206
+ logger.warn('LLM API rate limit exceeded', { error: errorObj });
207
+ } else if (errorObj.code === 'ETIMEDOUT' || errorObj.message?.includes('timeout')) {
208
+ errorMessage = `Request timeout (${config.timeout_seconds}s) - the LLM took too long to respond`;
209
+ logger.warn('LLM API request timeout', { error: errorObj });
210
+ } else if (errorObj.code === 'ENOTFOUND' || errorObj.message?.includes('ENOTFOUND')) {
211
+ errorMessage = 'Network error - unable to reach LLM API (check internet connection)';
212
+ logger.warn('LLM API network error', { error: errorObj });
213
+ } else {
214
+ errorMessage = `API error: ${errorObj.message || 'Unknown error occurred'}`;
215
+ logger.warn('LLM API request failed', { error: errorObj });
216
+ }
217
+
218
+ return { result: null, error: errorMessage };
219
+ }
220
+ }
221
+
222
/**
 * Generation request data structure.
 */
export interface GenerationRequest {
  // Paths of the staged files the ADR covers (generateADRContent uses this
  // only for the logged file count).
  file_paths: string[];
  // Raw diff of the staged changes. NOTE(review): not read directly by
  // generateADRContent — presumably embedded in generation_prompt upstream.
  diff_content: string;
  // Why the change was deemed significant (from the analysis step).
  // NOTE(review): also not read directly by generateADRContent.
  reason: string;
  // The fully rendered prompt that is actually sent to the provider.
  generation_prompt: string;
}
231
+
232
/**
 * Generation result from LLM.
 */
export interface GenerationResult {
  // Cleaned ADR body: the provider response with any surrounding markdown
  // code fences stripped.
  content: string;
  // Title taken from the first `# Heading` line of the content, or
  // 'Untitled Decision' when none is found.
  title: string;
  // ISO-8601 timestamp of when the result was assembled locally.
  timestamp: string;
}
240
+
241
/**
 * Generation response including potential errors.
 *
 * Same fail-open contract as AnalysisResponse: `result` is null whenever
 * `error` is set; on success `error` is undefined.
 */
export interface GenerationResponse {
  result: GenerationResult | null;
  error?: string;
}
248
+
249
+ /**
250
+ * Generate ADR content using LLM
251
+ *
252
+ * @param config - Analysis configuration with API settings
253
+ * @param request - Generation request with code changes
254
+ * @returns Promise resolving to generation response with result or error
255
+ */
256
+ export async function generateADRContent(
257
+ config: AnalysisConfig,
258
+ request: GenerationRequest
259
+ ): Promise<GenerationResponse> {
260
+ try {
261
+ // Check if API key is available
262
+ const apiKey = process.env[config.api_key_env];
263
+ if (!apiKey) {
264
+ logger.warn('API key not found in environment for generation', {
265
+ api_key_env: config.api_key_env,
266
+ });
267
+ return {
268
+ result: null,
269
+ error: `API key not found: ${config.api_key_env} environment variable is not set`
270
+ };
271
+ }
272
+
273
+ logger.info('Sending generation request to LLM', {
274
+ provider: config.provider,
275
+ model: config.analysis_model,
276
+ file_count: request.file_paths.length,
277
+ });
278
+
279
+ const provider = getProvider(config.provider);
280
+ const responseContent = await provider.analyze(request.generation_prompt, {
281
+ apiKey,
282
+ model: config.analysis_model, // Using same model per user request
283
+ timeoutMs: config.timeout_seconds * 1000,
284
+ });
285
+
286
+ if (!responseContent) {
287
+ logger.warn('No response content from LLM for generation', {
288
+ provider: config.provider
289
+ });
290
+ return {
291
+ result: null,
292
+ error: 'No response content from LLM'
293
+ };
294
+ }
295
+
296
+ // Clean up the response - remove markdown code fences if LLM added them
297
+ let cleanedContent = responseContent.trim();
298
+
299
+ // Remove markdown code block if present
300
+ const codeBlockMatch = cleanedContent.match(/```(?:markdown|md)?\s*\n?([\s\S]*?)\n?```/);
301
+ if (codeBlockMatch) {
302
+ cleanedContent = codeBlockMatch[1].trim();
303
+ }
304
+
305
+ // Extract title from first line (should be # Title)
306
+ const titleMatch = cleanedContent.match(/^#\s+(.+)$/m);
307
+ const title = titleMatch ? titleMatch[1].trim() : 'Untitled Decision';
308
+
309
+ const result: GenerationResult = {
310
+ content: cleanedContent,
311
+ title,
312
+ timestamp: new Date().toISOString(),
313
+ };
314
+
315
+ logger.info('ADR generation completed successfully', {
316
+ title,
317
+ content_length: cleanedContent.length,
318
+ });
319
+
320
+ return { result, error: undefined };
321
+ } catch (error) {
322
+ // Fail-open: log error and return descriptive error message
323
+ const errorObj = error as { status?: number; code?: string; message?: string };
324
+ let errorMessage: string;
325
+
326
+ if (errorObj.status === 401) {
327
+ errorMessage = 'Invalid API key - please check your API key configuration';
328
+ logger.warn('LLM API authentication failed during generation', { error: errorObj });
329
+ } else if (errorObj.status === 400 && errorObj.message?.includes('maximum context length')) {
330
+ errorMessage = 'Diff too large for model context window';
331
+ logger.warn('LLM context length exceeded during generation', { error: errorObj });
332
+ } else if (errorObj.status === 429) {
333
+ errorMessage = 'Rate limit exceeded - please try again later';
334
+ logger.warn('LLM API rate limit exceeded during generation', { error: errorObj });
335
+ } else if (errorObj.code === 'ETIMEDOUT' || errorObj.message?.includes('timeout')) {
336
+ errorMessage = `Request timeout (${config.timeout_seconds}s)`;
337
+ logger.warn('LLM API request timeout during generation', { error: errorObj });
338
+ } else {
339
+ errorMessage = `API error: ${errorObj.message || 'Unknown error occurred'}`;
340
+ logger.warn('LLM API request failed during generation', { error: errorObj });
341
+ }
342
+
343
+ return { result: null, error: errorMessage };
344
+ }
345
+ }
@@ -0,0 +1,90 @@
1
+ import { Logger, loggerInstance } from './logger';
2
+
3
// Smoke tests for the Logger wrapper and its shared singleton.
// These assert only "does not throw": logger.ts configures Pino to be
// silent when NODE_ENV === 'test', so there is no output to capture here.
describe('LoggerModule', () => {
  // Suppress Pino logs during tests
  beforeEach(() => {
    jest.spyOn(console, 'log').mockImplementation();
    jest.spyOn(console, 'warn').mockImplementation();
    jest.spyOn(console, 'error').mockImplementation();
  });

  afterEach(() => {
    jest.restoreAllMocks();
  });

  describe('Logger class', () => {
    let logger: Logger;

    beforeEach(() => {
      // Fresh instance per test so cases stay independent.
      logger = new Logger();
    });

    it('should create logger instance', () => {
      expect(logger).toBeDefined();
      expect(typeof logger.info).toBe('function');
      expect(typeof logger.warn).toBe('function');
      expect(typeof logger.error).toBe('function');
    });

    it('should log info message without context', () => {
      // Just verify the method doesn't throw
      expect(() => logger.info('Test info message')).not.toThrow();
    });

    it('should log info message with context', () => {
      // Just verify the method doesn't throw
      expect(() => logger.info('Test info message', { userId: 123, action: 'test' })).not.toThrow();
    });

    it('should log warn message without context', () => {
      // Just verify the method doesn't throw
      expect(() => logger.warn('Test warning message')).not.toThrow();
    });

    it('should log warn message with context', () => {
      // Just verify the method doesn't throw
      expect(() => logger.warn('Test warning message', { warning: 'deprecated' })).not.toThrow();
    });

    it('should log error message without context', () => {
      // Just verify the method doesn't throw
      expect(() => logger.error('Test error message')).not.toThrow();
    });

    it('should log error message with context', () => {
      // Just verify the method doesn't throw
      expect(() => logger.error('Test error message', { error: 'validation failed' })).not.toThrow();
    });
  });

  describe('loggerInstance singleton', () => {
    it('should be an instance of Logger', () => {
      expect(loggerInstance).toBeDefined();
      expect(typeof loggerInstance.info).toBe('function');
      expect(typeof loggerInstance.warn).toBe('function');
      expect(typeof loggerInstance.error).toBe('function');
    });

    it('should log messages without throwing', () => {
      expect(() => loggerInstance.info('Singleton test message')).not.toThrow();
      expect(() => loggerInstance.warn('Warning message')).not.toThrow();
      expect(() => loggerInstance.error('Error message')).not.toThrow();
    });
  });

  describe('Pino logger functionality', () => {
    it('should use Pino for structured logging', () => {
      expect(() => loggerInstance.info('Pino test message', { test: true })).not.toThrow();
      expect(() => loggerInstance.warn('Pino warning', { level: 'warning' })).not.toThrow();
      expect(() => loggerInstance.error('Pino error', { error: 'test error' })).not.toThrow();
    });

    it('should handle various data types in context', () => {
      // Context objects are passed straight through to Pino, so any
      // JSON-serializable shape should be accepted.
      expect(() => loggerInstance.info('String context', { message: 'test' })).not.toThrow();
      expect(() => loggerInstance.info('Number context', { count: 42 })).not.toThrow();
      expect(() => loggerInstance.info('Boolean context', { enabled: true })).not.toThrow();
      expect(() => loggerInstance.info('Object context', { data: { nested: 'value' } })).not.toThrow();
      expect(() => loggerInstance.info('Array context', { items: [1, 2, 3] })).not.toThrow();
    });
  });
});
package/src/logger.ts CHANGED
@@ -1,9 +1,12 @@
1
1
  import pino from 'pino';
2
2
 
3
- // Configure Pino to output JSON to stderr
3
+ // Configure Pino to be silent by default (or verbose if --verbose flag is present)
4
+ const isVerbose = process.argv.includes('--verbose') || process.argv.includes('-v');
5
+ const isTest = process.env.NODE_ENV === 'test';
6
+
4
7
  const logger = pino({
5
- level: 'info',
6
- transport: {
8
+ level: isTest || !isVerbose ? 'silent' : 'info',
9
+ transport: isTest || !isVerbose ? undefined : {
7
10
  target: 'pino/file',
8
11
  options: { destination: 2 } // stderr
9
12
  }