musubi-sdd 3.5.1 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
1
+ /**
2
+ * @fileoverview LLM Provider Factory and Exports for MUSUBI Replanning Engine
3
+ * @module llm-providers
4
+ * @version 1.0.0
5
+ */
6
+
7
+ 'use strict';
8
+
9
+ const { LLMProvider } = require('./base-provider');
10
+ const { CopilotLMProvider } = require('./copilot-provider');
11
+ const { AnthropicLMProvider } = require('./anthropic-provider');
12
+ const { OpenAILMProvider } = require('./openai-provider');
13
+
14
+ /**
15
+ * Provider priority order for auto-selection
16
+ * GitHub Copilot is preferred when available (in VS Code)
17
+ */
18
+ const PROVIDER_PRIORITY = ['github-copilot', 'anthropic', 'openai'];
19
+
20
/**
 * Build an LLM provider instance.
 * @param {string} [provider='auto'] - Provider name, or 'auto' for automatic selection
 * @param {Object} [config={}] - Provider configuration
 * @returns {LLMProvider} Configured provider instance
 * @throws {Error} If no provider is available
 */
function createLLMProvider(provider = 'auto', config = {}) {
  return provider === 'auto'
    ? createAutoProvider(config)
    : createNamedProvider(provider, config);
}
34
+
35
/**
 * Automatically select the best available provider.
 * Walks PROVIDER_PRIORITY in order and returns the first provider that is
 * both constructible and usable in the current environment.
 * @param {Object} config - Provider configuration
 * @returns {LLMProvider} Best available provider
 * @throws {Error} When no provider can be used
 * @private
 */
function createAutoProvider(config) {
  for (const providerName of PROVIDER_PRIORITY) {
    let candidate;
    try {
      candidate = createNamedProvider(providerName, config);
    } catch (e) {
      // Construction failed (e.g. missing dependency) — try the next provider.
      continue;
    }

    if (providerName === 'github-copilot') {
      // Copilot is usable inside VS Code, or anywhere with an explicit token.
      try {
        require('vscode');
        return candidate;
      } catch (e) {
        if (process.env.GITHUB_COPILOT_TOKEN) {
          return candidate;
        }
      }
    } else if (providerName === 'anthropic' && process.env.ANTHROPIC_API_KEY) {
      return candidate;
    } else if (providerName === 'openai' && process.env.OPENAI_API_KEY) {
      return candidate;
    }
    // Provider constructed but not usable here — fall through to the next one.
  }

  throw new Error(
    'No LLM provider available. Please configure one of:\n' +
    '  - Run inside VS Code with GitHub Copilot extension\n' +
    '  - Set GITHUB_COPILOT_TOKEN environment variable\n' +
    '  - Set ANTHROPIC_API_KEY environment variable\n' +
    '  - Set OPENAI_API_KEY environment variable'
  );
}
76
+
77
/**
 * Create a specific named provider.
 * @param {string} name - Provider name (case-insensitive; aliases accepted)
 * @param {Object} config - Provider configuration
 * @returns {LLMProvider} Provider instance
 * @throws {Error} If the name is not a known provider (or not a string)
 * @private
 */
function createNamedProvider(name, config) {
  // Guard non-string input so callers get the module's standard "Unknown LLM
  // provider" error instead of a TypeError from name.toLowerCase().
  if (typeof name !== 'string') {
    throw new Error(`Unknown LLM provider: ${name}`);
  }

  switch (name.toLowerCase()) {
    case 'github-copilot':
    case 'copilot':
      return new CopilotLMProvider(config);

    case 'anthropic':
    case 'claude':
      return new AnthropicLMProvider(config);

    case 'openai':
    case 'gpt':
      return new OpenAILMProvider(config);

    default:
      throw new Error(`Unknown LLM provider: ${name}`);
  }
}
102
+
103
/**
 * Get list of available providers based on current environment.
 * Probes all known providers concurrently; a provider that throws during
 * construction or its availability check is reported as unavailable.
 * @returns {Promise<Array<{name: string, available: boolean, info: ?Object, error: ?string}>>}
 */
async function getAvailableProviders() {
  const providers = [
    { name: 'github-copilot', class: CopilotLMProvider },
    { name: 'anthropic', class: AnthropicLMProvider },
    { name: 'openai', class: OpenAILMProvider }
  ];

  // Availability checks are independent — run them in parallel.
  // Promise.all preserves input order, so results stay in priority order.
  return Promise.all(
    providers.map(async ({ name, class: ProviderClass }) => {
      try {
        const provider = new ProviderClass();
        const available = await provider.isAvailable();
        return { name, available, info: provider.getInfo() };
      } catch (e) {
        // Keep the documented result shape consistent: info is null on failure.
        return { name, available: false, info: null, error: e.message };
      }
    })
  );
}
136
+
137
/**
 * Mock LLM Provider for testing.
 * Cycles through a queue of predefined responses, falling back to a canned
 * completion when the queue is empty.
 */
class MockLLMProvider extends LLMProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'mock';
    this.responses = config.responses || [];
    this.responseIndex = 0;
  }

  async initialize() {
    this.isInitialized = true;
  }

  /**
   * Build the canned fallback completion used when no queued response exists.
   * @returns {Object} Default mock response
   * @private
   */
  _defaultResponse() {
    return {
      content: JSON.stringify({
        analysis: 'Mock analysis',
        goal: 'Mock goal',
        alternatives: [
          {
            id: 'mock-alt-1',
            description: 'Mock alternative',
            task: { name: 'mock-task', skill: 'mock-skill', parameters: {} },
            confidence: 0.85,
            reasoning: 'Mock reasoning',
            risks: []
          }
        ]
      })
    };
  }

  async complete(prompt, options = {}) {
    const response = this.responses[this.responseIndex] || this._defaultResponse();

    // Cycle through queued responses; Math.max guards against modulo-by-zero
    // when the queue is empty.
    const cycle = Math.max(1, this.responses.length);
    this.responseIndex = (this.responseIndex + 1) % cycle;

    const content =
      typeof response.content === 'string'
        ? response.content
        : JSON.stringify(response.content);

    return {
      content,
      model: 'mock-model',
      usage: { promptTokens: 100, completionTokens: 50, totalTokens: 150 },
      finishReason: 'stop'
    };
  }

  async embed(text) {
    // Fixed-dimension (1536) embedding of random values — not deterministic.
    return Array.from({ length: 1536 }, () => Math.random());
  }

  async isAvailable() {
    return true;
  }

  /**
   * Set mock responses and reset the cycle position.
   * @param {Array<Object>} responses - Array of mock responses
   */
  setResponses(responses) {
    this.responses = responses;
    this.responseIndex = 0;
  }
}
199
+
200
// Public surface of the llm-providers module: factory helpers, the concrete
// provider classes (plus the testing mock), and the auto-selection order.
module.exports = {
  // Factory function
  createLLMProvider,
  getAvailableProviders,

  // Provider classes
  LLMProvider,
  CopilotLMProvider,
  AnthropicLMProvider,
  OpenAILMProvider,
  MockLLMProvider,

  // Constants
  PROVIDER_PRIORITY
};
@@ -0,0 +1,205 @@
1
+ /**
2
+ * @fileoverview OpenAI GPT API Provider for MUSUBI Replanning Engine
3
+ * @module llm-providers/openai-provider
4
+ * @version 1.0.0
5
+ */
6
+
7
+ 'use strict';
8
+
9
+ const { LLMProvider } = require('./base-provider');
10
+
11
/**
 * OpenAI GPT API Provider
 * Fallback provider with embedding support.
 * Uses the global fetch / AbortSignal APIs (Node 18+).
 */
class OpenAILMProvider extends LLMProvider {
  /**
   * Create an OpenAI provider
   * @param {Object} config - Provider configuration
   * @param {string} [config.apiKey] - OpenAI API key (falls back to OPENAI_API_KEY)
   * @param {string} [config.model='gpt-4o'] - Model to use for completion
   * @param {string} [config.embeddingModel='text-embedding-3-small'] - Model for embeddings
   * @param {string} [config.endpoint='https://api.openai.com/v1'] - API base URL
   */
  constructor(config = {}) {
    super(config);
    this.name = 'openai';
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY;
    this.config.model = config.model || 'gpt-4o';
    this.config.embeddingModel = config.embeddingModel || 'text-embedding-3-small';
    this.endpoint = config.endpoint || 'https://api.openai.com/v1';
    this.rateLimiter = this.createRateLimiter(60); // 60 RPM default
  }

  /**
   * Initialize the OpenAI provider
   * @returns {Promise<void>}
   * @throws {Error} If no API key is configured
   */
  async initialize() {
    if (!this.apiKey) {
      throw new Error('OpenAI API key not found. Set OPENAI_API_KEY environment variable.');
    }
    this.isInitialized = true;
  }

  /**
   * Complete a prompt using OpenAI GPT API
   * @param {string} prompt - The prompt to complete
   * @param {Object} [options={}] - Completion options
   * @param {string} [options.systemPrompt] - Override system prompt
   * @param {number} [options.maxTokens] - Max completion tokens
   * @param {number} [options.temperature] - Sampling temperature (0 is valid)
   * @param {boolean} [options.jsonMode] - Request JSON-object responses
   * @returns {Promise<LLMCompletionResult>} Completion result
   * @throws {Error} On non-2xx API responses
   */
  async complete(prompt, options = {}) {
    if (!this.isInitialized) {
      await this.initialize();
    }

    const systemPrompt = options.systemPrompt || this.getDefaultSystemPrompt();
    const messages = this.formatMessages(systemPrompt, prompt);

    return this.rateLimiter(async () => {
      return this.retryWithBackoff(async () => {
        const response = await fetch(`${this.endpoint}/chat/completions`, {
          method: 'POST',
          headers: {
            'Authorization': `Bearer ${this.apiKey}`,
            'Content-Type': 'application/json'
          },
          body: JSON.stringify({
            model: this.config.model,
            messages,
            // Use ?? so explicit 0 values (notably temperature: 0 for
            // deterministic output) are honored instead of being replaced
            // by the configured defaults.
            max_tokens: options.maxTokens ?? this.config.maxTokens,
            temperature: options.temperature ?? this.config.temperature,
            // undefined keys are dropped by JSON.stringify
            response_format: options.jsonMode ? { type: 'json_object' } : undefined
          }),
          signal: AbortSignal.timeout(this.config.timeout)
        });

        if (!response.ok) {
          const error = await response.text();
          throw new Error(`OpenAI API error: ${response.status} - ${error}`);
        }

        const data = await response.json();

        return {
          content: data.choices[0].message.content,
          model: data.model,
          usage: {
            promptTokens: data.usage?.prompt_tokens || 0,
            completionTokens: data.usage?.completion_tokens || 0,
            totalTokens: data.usage?.total_tokens || 0
          },
          finishReason: data.choices[0].finish_reason
        };
      });
    });
  }

  /**
   * Generate embeddings using OpenAI Embedding API
   * @param {string} text - Text to embed
   * @returns {Promise<number[]>} Embedding vector
   * @throws {Error} On non-2xx API responses
   */
  async embed(text) {
    if (!this.isInitialized) {
      await this.initialize();
    }

    return this.rateLimiter(async () => {
      return this.retryWithBackoff(async () => {
        const response = await fetch(`${this.endpoint}/embeddings`, {
          method: 'POST',
          headers: {
            'Authorization': `Bearer ${this.apiKey}`,
            'Content-Type': 'application/json'
          },
          body: JSON.stringify({
            model: this.config.embeddingModel,
            input: text
          }),
          signal: AbortSignal.timeout(this.config.timeout)
        });

        if (!response.ok) {
          const error = await response.text();
          throw new Error(`OpenAI Embedding API error: ${response.status} - ${error}`);
        }

        const data = await response.json();
        return data.data[0].embedding;
      });
    });
  }

  /**
   * Check if the provider is available.
   * Only validates the API key's format locally; does not call the API.
   * @returns {Promise<boolean>}
   */
  async isAvailable() {
    if (!this.apiKey) {
      return false;
    }

    try {
      // Simple validation - check if API key format is valid
      // OpenAI API keys start with 'sk-'
      return this.apiKey.startsWith('sk-') && this.apiKey.length > 20;
    } catch (e) {
      // e.g. a non-string apiKey was injected via config
      return false;
    }
  }

  /**
   * Get provider information
   * @returns {ProviderInfo}
   */
  getInfo() {
    return {
      name: this.name,
      model: this.config.model,
      embeddingModel: this.config.embeddingModel,
      isInitialized: this.isInitialized,
      capabilities: {
        completion: true,
        embedding: true,
        streaming: true,
        functionCalling: true,
        jsonMode: true
      }
    };
  }

  /**
   * Get default system prompt for replanning
   * @returns {string}
   * @private
   */
  getDefaultSystemPrompt() {
    return `You are an AI assistant helping with task replanning in a software development workflow.
Your role is to analyze failed tasks, understand the goal, and generate alternative approaches.

Guidelines:
1. Be concise and specific in your recommendations
2. Prioritize practical, actionable alternatives
3. Consider resource constraints and dependencies
4. Provide confidence scores for each alternative (0.0 to 1.0)
5. Explain the reasoning behind each suggestion

When generating alternatives, output valid JSON with this structure:
{
  "analysis": "Brief analysis of the failure",
  "goal": "Extracted goal from the task",
  "alternatives": [
    {
      "id": "alt-1",
      "description": "Alternative approach",
      "task": { "name": "task-name", "skill": "skill-name", "parameters": {} },
      "confidence": 0.8,
      "reasoning": "Why this might work",
      "risks": ["potential risk"]
    }
  ]
}`;
  }
}
204
+
205
+ module.exports = { OpenAILMProvider };
@@ -71,6 +71,19 @@ const {
71
71
  createWorkflowOrchestrator
72
72
  } = require('./workflow-orchestrator');
73
73
 
74
+ const {
75
+ ReplanningEngine,
76
+ PlanMonitor,
77
+ PlanEvaluator,
78
+ AlternativeGenerator,
79
+ ReplanHistory,
80
+ ReplanTrigger,
81
+ ReplanDecision,
82
+ defaultReplanningConfig,
83
+ mergeConfig: mergeReplanningConfig,
84
+ validateConfig: validateReplanningConfig
85
+ } = require('./replanning');
86
+
74
87
  /**
75
88
  * Create a fully configured orchestration engine
76
89
  * with default patterns registered
@@ -161,6 +174,18 @@ module.exports = {
161
174
  SDDWorkflowTemplates,
162
175
  createWorkflowOrchestrator,
163
176
 
177
+ // Replanning Engine
178
+ ReplanningEngine,
179
+ PlanMonitor,
180
+ PlanEvaluator,
181
+ AlternativeGenerator,
182
+ ReplanHistory,
183
+ ReplanTrigger,
184
+ ReplanDecision,
185
+ defaultReplanningConfig,
186
+ mergeReplanningConfig,
187
+ validateReplanningConfig,
188
+
164
189
  // Constants
165
190
  PatternType,
166
191
  ExecutionStatus,
@@ -3,6 +3,8 @@
3
3
  *
4
4
  * Enables concurrent execution of multiple skills with
5
5
  * P-label task decomposition and dependency tracking.
6
+ *
7
+ * v1.1.0: Added replanning support for dynamic task recovery
6
8
  */
7
9
 
8
10
  const { BasePattern } = require('../pattern-registry');
@@ -37,16 +39,18 @@ class SwarmPattern extends BasePattern {
37
39
  name: PatternType.SWARM,
38
40
  type: PatternType.SWARM,
39
41
  description: 'Execute multiple skills concurrently with dependency tracking',
40
- version: '1.0.0',
41
- tags: ['parallel', 'concurrent', 'swarm', 'distributed'],
42
+ version: '1.1.0',
43
+ tags: ['parallel', 'concurrent', 'swarm', 'distributed', 'replanning'],
42
44
  useCases: [
43
45
  'Parallel task execution',
44
46
  'Independent subtask processing',
45
47
  'Load distribution',
46
- 'Multi-perspective analysis'
48
+ 'Multi-perspective analysis',
49
+ 'Dynamic task recovery with replanning'
47
50
  ],
48
51
  complexity: 'high',
49
52
  supportsParallel: true,
53
+ supportsReplanning: true,
50
54
  requiresHuman: false
51
55
  });
52
56
 
@@ -58,6 +62,10 @@ class SwarmPattern extends BasePattern {
58
62
  retryAttempts: options.retryAttempts || 3,
59
63
  quorumThreshold: options.quorumThreshold || 0.5,
60
64
  priorityOrder: options.priorityOrder || [PLabel.P0, PLabel.P1, PLabel.P2, PLabel.P3],
65
+ // Replanning options
66
+ enableReplanning: options.enableReplanning || false,
67
+ replanningEngine: options.replanningEngine || null,
68
+ fallbackSkill: options.fallbackSkill || null,
61
69
  ...options
62
70
  };
63
71
  }
@@ -196,11 +204,48 @@ class SwarmPattern extends BasePattern {
196
204
  error: result.reason
197
205
  });
198
206
 
199
- // Retry logic
207
+ // Try replanning if enabled
208
+ if (this.options.enableReplanning && this.options.replanningEngine) {
209
+ const alternative = await this._tryReplanning(
210
+ task,
211
+ result.reason,
212
+ engine,
213
+ context,
214
+ sharedContext,
215
+ results
216
+ );
217
+
218
+ if (alternative) {
219
+ // Add alternative task to pending
220
+ pending.add(alternative.id || alternative.skill);
221
+ sortedTasks.push(alternative);
222
+ failed.delete(taskId);
223
+
224
+ engine.emit('swarmTaskReplanned', {
225
+ context,
226
+ originalTaskId: taskId,
227
+ alternativeTask: alternative
228
+ });
229
+ continue;
230
+ }
231
+ }
232
+
233
+ // Retry logic (fallback if replanning not available or failed)
200
234
  if (this.options.retryFailed && task.retryCount < this.options.retryAttempts) {
201
235
  task.retryCount = (task.retryCount || 0) + 1;
202
236
  pending.add(taskId);
203
237
  failed.delete(taskId);
238
+ } else if (this.options.fallbackSkill) {
239
+ // Use fallback skill
240
+ const fallbackTask = {
241
+ ...task,
242
+ skill: this.options.fallbackSkill,
243
+ id: `${taskId}-fallback`,
244
+ originalTaskId: taskId
245
+ };
246
+ pending.add(fallbackTask.id);
247
+ sortedTasks.push(fallbackTask);
248
+ failed.delete(taskId);
204
249
  }
205
250
  }
206
251
  }
@@ -318,6 +363,68 @@ class SwarmPattern extends BasePattern {
318
363
  });
319
364
  }
320
365
 
366
+ /**
367
+ * Try to replan a failed task using ReplanningEngine
368
+ * @param {Object} task - Failed task
369
+ * @param {Error} error - Error that caused failure
370
+ * @param {OrchestrationEngine} engine - Orchestration engine
371
+ * @param {ExecutionContext} context - Parent context
372
+ * @param {Object} sharedContext - Shared context
373
+ * @param {Map} previousResults - Previous results
374
+ * @returns {Promise<Object|null>} Alternative task or null
375
+ * @private
376
+ */
377
+ async _tryReplanning(task, error, engine, context, sharedContext, previousResults) {
378
+ const replanningEngine = this.options.replanningEngine;
379
+
380
+ try {
381
+ // Create context for replanning
382
+ const replanContext = {
383
+ completed: [...previousResults.entries()].filter(([, v]) => !v.error).map(([id, result]) => ({
384
+ id,
385
+ result
386
+ })),
387
+ pending: [],
388
+ failed: [{
389
+ id: task.id || task.skill,
390
+ ...task,
391
+ error
392
+ }],
393
+ sharedContext
394
+ };
395
+
396
+ // Generate alternatives
397
+ const alternatives = await replanningEngine.generator.generateAlternatives(
398
+ { ...task, error },
399
+ replanContext
400
+ );
401
+
402
+ if (alternatives.length > 0) {
403
+ const best = alternatives[0];
404
+
405
+ // Only use alternatives with sufficient confidence
406
+ if (best.confidence >= (replanningEngine.config.alternatives?.minConfidence || 0.5)) {
407
+ return {
408
+ ...best.task,
409
+ id: best.task.id || `${task.id || task.skill}-replan`,
410
+ priority: task.priority,
411
+ originalTaskId: task.id || task.skill,
412
+ replanSource: 'llm',
413
+ replanConfidence: best.confidence
414
+ };
415
+ }
416
+ }
417
+ } catch (replanError) {
418
+ engine.emit('swarmReplanFailed', {
419
+ context,
420
+ taskId: task.id || task.skill,
421
+ error: replanError
422
+ });
423
+ }
424
+
425
+ return null;
426
+ }
427
+
321
428
  /**
322
429
  * Check if swarm should exit early based on strategy
323
430
  * @private