claude-autopm 1.30.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/autopm/.claude/mcp/test-server.md +10 -0
  2. package/autopm/.claude/scripts/github/dependency-tracker.js +554 -0
  3. package/autopm/.claude/scripts/github/dependency-validator.js +545 -0
  4. package/autopm/.claude/scripts/github/dependency-visualizer.js +477 -0
  5. package/autopm/.claude/scripts/pm/lib/epic-discovery.js +119 -0
  6. package/autopm/.claude/scripts/pm/next.js +56 -58
  7. package/bin/autopm-poc.js +348 -0
  8. package/bin/autopm.js +6 -0
  9. package/lib/ai-providers/AbstractAIProvider.js +524 -0
  10. package/lib/ai-providers/ClaudeProvider.js +423 -0
  11. package/lib/ai-providers/TemplateProvider.js +432 -0
  12. package/lib/cli/commands/agent.js +206 -0
  13. package/lib/cli/commands/config.js +488 -0
  14. package/lib/cli/commands/prd.js +345 -0
  15. package/lib/cli/commands/task.js +206 -0
  16. package/lib/config/ConfigManager.js +531 -0
  17. package/lib/errors/AIProviderError.js +164 -0
  18. package/lib/services/AgentService.js +557 -0
  19. package/lib/services/EpicService.js +609 -0
  20. package/lib/services/PRDService.js +1003 -0
  21. package/lib/services/TaskService.js +760 -0
  22. package/lib/services/interfaces.js +753 -0
  23. package/lib/utils/CircuitBreaker.js +165 -0
  24. package/lib/utils/Encryption.js +201 -0
  25. package/lib/utils/RateLimiter.js +241 -0
  26. package/lib/utils/ServiceFactory.js +165 -0
  27. package/package.json +9 -5
  28. package/scripts/config/get.js +108 -0
  29. package/scripts/config/init.js +100 -0
  30. package/scripts/config/list-providers.js +93 -0
  31. package/scripts/config/set-api-key.js +107 -0
  32. package/scripts/config/set-provider.js +201 -0
  33. package/scripts/config/set.js +139 -0
  34. package/scripts/config/show.js +181 -0
  35. package/autopm/.claude/.env +0 -158
  36. package/autopm/.claude/settings.local.json +0 -9
package/lib/ai-providers/AbstractAIProvider.js
@@ -0,0 +1,524 @@
+ /**
+  * AbstractAIProvider - Base Class for AI Provider Implementations
+  *
+  * Following Node.js best practices for abstract classes:
+  * - Template Method pattern for extensibility
+  * - Capability detection framework
+  * - Proper error handling hierarchy
+  * - Backward compatibility with existing ClaudeProvider
+  *
+  * @abstract
+  *
+  * @example
+  * // Implementing a concrete provider
+  * class MyProvider extends AbstractAIProvider {
+  *   async complete(prompt, options = {}) {
+  *     // Implementation
+  *   }
+  *
+  *   async *stream(prompt, options = {}) {
+  *     // Implementation
+  *   }
+  *
+  *   getDefaultModel() {
+  *     return 'my-model-v1';
+  *   }
+  *
+  *   getApiKeyEnvVar() {
+  *     return 'MY_PROVIDER_API_KEY';
+  *   }
+  * }
+  */
+
+ const AIProviderError = require('../errors/AIProviderError');
+
+ /**
+  * Abstract base class for AI providers
+  *
+  * @class AbstractAIProvider
+  */
+ class AbstractAIProvider {
+   /**
+    * Creates an instance of AbstractAIProvider
+    *
+    * Supports two constructor signatures for backward compatibility:
+    * 1. new Provider({ apiKey: 'key', ...options })  <- Recommended
+    * 2. new Provider('api-key')                      <- Legacy (ClaudeProvider)
+    *
+    * @param {Object|string} [config={}] - Configuration object or API key string
+    * @param {string} [config.apiKey] - API key (overrides environment variable)
+    * @param {string} [config.model] - Model to use (overrides default)
+    * @param {number} [config.maxTokens] - Maximum tokens (default: 4096)
+    * @param {number} [config.temperature] - Temperature (default: 0.7)
+    * @param {Object} [config.rateLimit] - Rate limit configuration
+    * @param {number} [config.rateLimit.tokensPerInterval] - Tokens per interval (default: 60)
+    * @param {string|number} [config.rateLimit.interval] - Interval ('second', 'minute', 'hour', 'day' or ms)
+    * @param {number} [config.rateLimit.bucketSize] - Bucket size for burst (default: tokensPerInterval)
+    * @param {boolean} [config.rateLimit.fireImmediately] - Don't wait, return negative on exceeded (default: false)
+    * @param {Object} [config.circuitBreaker] - Circuit breaker configuration
+    * @param {number} [config.circuitBreaker.failureThreshold=5] - Failures before opening
+    * @param {number} [config.circuitBreaker.successThreshold=2] - Successes to close from half-open
+    * @param {number} [config.circuitBreaker.timeout=60000] - Time before retrying (ms)
+    *
+    * @throws {Error} If attempting to instantiate abstract class directly
+    */
+   constructor(config = {}) {
+     // Prevent direct instantiation of abstract class
+     if (this.constructor === AbstractAIProvider) {
+       throw new Error('Cannot instantiate abstract class AbstractAIProvider');
+     }
+
+     // Backward compatibility: Support string API key as first parameter
+     if (typeof config === 'string') {
+       config = { apiKey: config };
+     }
+
+     // Handle null/undefined config
+     if (!config || typeof config !== 'object') {
+       config = {};
+     }
+
+     // Store full config for reference
+     this.config = config;
+
+     // Initialize core properties with fallbacks
+     this.apiKey = config.apiKey || process.env[this.getApiKeyEnvVar()];
+     this.model = config.model || this.getDefaultModel();
+     this.maxTokens = config.maxTokens || this.getMaxTokens();
+     this.temperature = config.temperature !== undefined
+       ? config.temperature
+       : this.getDefaultTemperature();
+
+     // Initialize rate limiter if configured
+     this.rateLimiter = null;
+     if (config.rateLimit) {
+       const RateLimiter = require('../utils/RateLimiter');
+       this.rateLimiter = new RateLimiter(config.rateLimit);
+     }
+
+     // Initialize circuit breaker if configured
+     this.circuitBreaker = null;
+     if (config.circuitBreaker) {
+       const { CircuitBreaker } = require('../utils/CircuitBreaker');
+       this.circuitBreaker = new CircuitBreaker(config.circuitBreaker);
+     }
+   }
+
+   // ============================================================
+   // ABSTRACT METHODS (must be implemented by subclasses)
+   // ============================================================
+
+   /**
+    * Generate completion for a prompt
+    *
+    * @abstract
+    * @param {string} prompt - The prompt to complete
+    * @param {Object} [options={}] - Provider-specific options
+    * @returns {Promise<string>} The completion response
+    * @throws {AIProviderError}
+    */
+   async complete(prompt, options = {}) {
+     throw new Error(`${this.constructor.name} must implement complete()`);
+   }
+
+   /**
+    * Stream completion chunks for a prompt
+    *
+    * @abstract
+    * @param {string} prompt - The prompt to complete
+    * @param {Object} [options={}] - Provider-specific options
+    * @yields {string} Completion chunks
+    * @throws {AIProviderError}
+    */
+   async *stream(prompt, options = {}) {
+     throw new Error(`${this.constructor.name} must implement stream()`);
+   }
+
+   /**
+    * Get the default model identifier
+    *
+    * @abstract
+    * @returns {string} Default model name
+    */
+   getDefaultModel() {
+     throw new Error(`${this.constructor.name} must implement getDefaultModel()`);
+   }
+
+   /**
+    * Get the environment variable name for API key
+    *
+    * @abstract
+    * @returns {string} Environment variable name
+    */
+   getApiKeyEnvVar() {
+     throw new Error(`${this.constructor.name} must implement getApiKeyEnvVar()`);
+   }
+
+   // ============================================================
+   // TEMPLATE METHODS (can be overridden for customization)
+   // ============================================================
+
+   /**
+    * Get maximum tokens limit
+    *
+    * @returns {number} Maximum tokens (default: 4096)
+    */
+   getMaxTokens() {
+     return 4096;
+   }
+
+   /**
+    * Get default temperature
+    *
+    * @returns {number} Default temperature (default: 0.7)
+    */
+   getDefaultTemperature() {
+     return 0.7;
+   }
+
+   /**
+    * Format error into AIProviderError
+    *
+    * @param {Error} error - The error to format
+    * @returns {AIProviderError} Formatted error
+    */
+   formatError(error) {
+     // Already an AIProviderError, return as-is
+     if (error instanceof AIProviderError) {
+       return error;
+     }
+
+     // Wrap in AIProviderError
+     return new AIProviderError(
+       'UNKNOWN_ERROR',
+       error.message || 'An unknown error occurred',
+       true
+     );
+   }
+
+   // ============================================================
+   // CAPABILITY DETECTION (override as needed)
+   // ============================================================
+
+   /**
+    * Check if provider supports streaming
+    *
+    * @returns {boolean} True if streaming is supported
+    */
+   supportsStreaming() {
+     return false;
+   }
+
+   /**
+    * Check if provider supports function calling
+    *
+    * @returns {boolean} True if function calling is supported
+    */
+   supportsFunctionCalling() {
+     return false;
+   }
+
+   /**
+    * Check if provider supports chat format
+    *
+    * @returns {boolean} True if chat format is supported
+    */
+   supportsChat() {
+     return false;
+   }
+
+   /**
+    * Check if provider supports vision/image inputs
+    *
+    * @returns {boolean} True if vision is supported
+    */
+   supportsVision() {
+     return false;
+   }
+
+   // ============================================================
+   // DEFAULT IMPLEMENTATIONS (common functionality)
+   // ============================================================
+
+   /**
+    * Get provider name
+    *
+    * @returns {string} Provider name (extracted from class name)
+    */
+   getName() {
+     return this.constructor.name;
+   }
+
+   /**
+    * Get provider information and capabilities
+    *
+    * @returns {Object} Provider metadata
+    */
+   getInfo() {
+     return {
+       name: this.getName(),
+       model: this.model,
+       maxTokens: this.maxTokens,
+       temperature: this.temperature,
+       capabilities: {
+         streaming: this.supportsStreaming(),
+         functionCalling: this.supportsFunctionCalling(),
+         chat: this.supportsChat(),
+         vision: this.supportsVision()
+       }
+     };
+   }
+
+   /**
+    * Validate provider connection
+    *
+    * @returns {Promise<boolean>} True if connection is valid
+    */
+   async validate() {
+     try {
+       await this.complete('test', { maxTokens: 5 });
+       return true;
+     } catch (error) {
+       return false;
+     }
+   }
+
+   /**
+    * Test connection (alias for validate)
+    *
+    * @returns {Promise<boolean>} True if connection is valid
+    */
+   async testConnection() {
+     return this.validate();
+   }
+
+   /**
+    * Chat completion with message history
+    * Fallback implementation that converts messages to prompt
+    *
+    * @param {Array<{role: string, content: string}>} messages - Chat messages
+    * @param {Object} [options={}] - Provider-specific options
+    * @returns {Promise<string>} Completion response
+    * @throws {AIProviderError}
+    */
+   async chat(messages, options = {}) {
+     // Convert chat messages to single prompt
+     const prompt = messages
+       .map(msg => {
+         const role = msg.role.charAt(0).toUpperCase() + msg.role.slice(1);
+         return `${role}: ${msg.content}`;
+       })
+       .join('\n\n');
+
+     return this.complete(prompt, options);
+   }
+
+   /**
+    * Generate with automatic retry on failure with exponential backoff
+    *
+    * Supports both legacy number parameter and enhanced config object:
+    * - Legacy: generateWithRetry(prompt, options, 3)
+    * - Enhanced: generateWithRetry(prompt, options, { maxAttempts: 3, ... })
+    *
+    * @param {string} prompt - The prompt to complete
+    * @param {Object} [options={}] - Provider-specific options
+    * @param {number|Object} [retries=3] - Max retries (number) or config object
+    * @param {number} [retries.maxAttempts=3] - Maximum retry attempts
+    * @param {number} [retries.startingDelay=100] - Initial delay in ms
+    * @param {number} [retries.timeMultiple=2] - Exponential multiplier
+    * @param {number} [retries.maxDelay=30000] - Maximum delay cap in ms
+    * @param {string} [retries.jitter='full'] - Jitter strategy: 'full', 'equal', or 'none'
+    * @param {boolean} [retries.delayFirstAttempt=false] - Whether to delay before first attempt
+    * @param {Function} [retries.shouldRetry] - Custom retry predicate (error, attempt) => boolean
+    * @returns {Promise<string>} Completion response
+    * @throws {AIProviderError} After max retries exceeded or on non-retryable error
+    */
+   async generateWithRetry(prompt, options = {}, retries = 3) {
+     // Backward compatibility: support both number and config object
+     const config = typeof retries === 'number'
+       ? { maxAttempts: retries }
+       : { maxAttempts: 3, ...retries };
+
+     // Merge with defaults
+     const {
+       maxAttempts = 3,
+       startingDelay = 100,
+       timeMultiple = 2,
+       maxDelay = 30000,
+       jitter = 'full',
+       delayFirstAttempt = false,
+       shouldRetry = null
+     } = config;
+
+     let lastError;
+
+     for (let attempt = 0; attempt < maxAttempts; attempt++) {
+       try {
+         // Apply delay before attempt (except first if configured)
+         if (attempt > 0 || delayFirstAttempt) {
+           // For retries, use attempt-1 so first retry gets startingDelay
+           const retryNumber = delayFirstAttempt ? attempt : attempt - 1;
+           const delay = this._calculateBackoff(retryNumber, {
+             startingDelay,
+             timeMultiple,
+             maxDelay,
+             jitter
+           });
+           await this._delay(delay);
+         }
+
+         return await this.complete(prompt, options);
+
+       } catch (error) {
+         lastError = error;
+
+         // Custom retry predicate: pass the next attempt number (1-indexed)
+         // attempt=0 (1st try) fails → pass 2 (would be 2nd try)
+         // attempt=1 (2nd try) fails → pass 3 (would be 3rd try)
+         if (shouldRetry && !shouldRetry(error, attempt + 2)) {
+           throw this.formatError(lastError);
+         }
+
+         // Don't retry on last attempt
+         if (attempt >= maxAttempts - 1) {
+           throw this.formatError(lastError);
+         }
+
+         // Check if error is retryable
+         if (!this._isRetryableError(error)) {
+           throw this.formatError(lastError);
+         }
+       }
+     }
+
+     // All retries exhausted
+     throw this.formatError(lastError);
+   }
+
+   /**
+    * Calculate exponential backoff delay with jitter
+    * @private
+    * @param {number} attempt - Current attempt number (0-indexed)
+    * @param {Object} config - Backoff configuration
+    * @returns {number} Delay in milliseconds
+    */
+   _calculateBackoff(attempt, { startingDelay, timeMultiple, maxDelay, jitter }) {
+     // Calculate exponential delay: startingDelay * (timeMultiple ^ attempt)
+     let delay = startingDelay * Math.pow(timeMultiple, attempt);
+
+     // Cap at maxDelay
+     delay = Math.min(delay, maxDelay);
+
+     // Apply jitter
+     if (jitter === 'full') {
+       // Full jitter: random value between 0 and calculated delay
+       delay = Math.random() * delay;
+     } else if (jitter === 'equal') {
+       // Equal jitter: calculated delay ± 50%
+       const jitterAmount = delay * 0.5;
+       delay = delay + (Math.random() * jitterAmount * 2 - jitterAmount);
+     }
+     // else: no jitter
+
+     return Math.floor(delay);
+   }
+
+   /**
+    * Delay execution for specified milliseconds
+    * @private
+    * @param {number} ms - Milliseconds to delay
+    * @returns {Promise<void>}
+    */
+   _delay(ms) {
+     return new Promise(resolve => setTimeout(resolve, ms));
+   }
+
+   /**
+    * Check if error should be retried
+    * @private
+    * @param {Error} error - Error to check
+    * @returns {boolean} True if error is retryable
+    */
+   _isRetryableError(error) {
+     // If not an AIProviderError, retry (could be network error)
+     if (!(error instanceof AIProviderError)) {
+       return true;
+     }
+
+     // Don't retry on invalid API key
+     if (error.code === 'INVALID_API_KEY') {
+       return false;
+     }
+
+     // Don't retry on invalid request
+     if (error.code === 'INVALID_REQUEST') {
+       return false;
+     }
+
+     // Don't retry on content policy violations
+     if (error.code === 'CONTENT_POLICY_VIOLATION') {
+       return false;
+     }
+
+     // Don't retry on non-operational errors
+     if (!error.isOperational) {
+       return false;
+     }
+
+     // Retry on rate limits, network errors, service unavailable, etc.
+     return true;
+   }
+
+   /**
+    * Stream with progress tracking
+    *
+    * @param {string} prompt - The prompt to complete
+    * @param {Function} [onProgress] - Progress callback (receives each chunk)
+    * @param {Object} [options={}] - Provider-specific options
+    * @yields {string} Completion chunks
+    * @throws {AIProviderError}
+    */
+   async *streamWithProgress(prompt, onProgress, options = {}) {
+     for await (const chunk of this.stream(prompt, options)) {
+       if (onProgress && typeof onProgress === 'function') {
+         onProgress(chunk);
+       }
+       yield chunk;
+     }
+   }
+
+   /**
+    * Merge instance config with method-level options
+    * Method options take precedence over instance config
+    *
+    * @private
+    * @param {Object} methodOptions - Options passed to method
+    * @returns {Object} Merged options
+    */
+   _mergeOptions(methodOptions = {}) {
+     return {
+       temperature: this.temperature,
+       maxTokens: this.maxTokens,
+       model: this.model,
+       ...methodOptions
+     };
+   }
+
+   /**
+    * Wrap async function with rate limiting
+    * Automatically applies rate limit if configured
+    *
+    * @protected
+    * @param {Function} fn - Async function to wrap
+    * @param {number} [tokenCost=1] - Number of tokens to consume (default: 1)
+    * @returns {Promise<*>} Result of wrapped function
+    */
+   async _withRateLimit(fn, tokenCost = 1) {
+     if (this.rateLimiter) {
+       await this.rateLimiter.removeTokens(tokenCost);
+     }
+     return await fn();
+   }
+ }
+
+ module.exports = AbstractAIProvider;
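
Taken together, the constructor options and the generateWithRetry signature documented above imply usage roughly like the sketch below. This is a minimal illustration only: the EchoProvider class, its echo behavior, and the require path are assumptions for the example and are not part of this release.

// Hypothetical consumer of the new base class (paths and names are illustrative)
const AbstractAIProvider = require('claude-autopm/lib/ai-providers/AbstractAIProvider');

class EchoProvider extends AbstractAIProvider {
  async complete(prompt, options = {}) {
    // A real provider would call its API here; wrapping in _withRateLimit()
    // lets the optional rate limiter from the constructor config apply.
    return this._withRateLimit(async () => `echo(${this.model}): ${prompt}`);
  }

  async *stream(prompt, options = {}) {
    // Trivial one-chunk stream for illustration
    yield await this.complete(prompt, options);
  }

  getDefaultModel() {
    return 'echo-1';
  }

  getApiKeyEnvVar() {
    return 'ECHO_API_KEY';
  }
}

// Rate-limited instance plus the enhanced retry config object
const provider = new EchoProvider({
  apiKey: 'not-a-real-key',
  rateLimit: { tokensPerInterval: 60, interval: 'minute' }
});

provider
  .generateWithRetry('hello', {}, { maxAttempts: 3, startingDelay: 100, jitter: 'full' })
  .then(result => console.log(result))
  .catch(err => console.error(err));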