musubi-sdd 3.0.1 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +17 -3
  2. package/bin/musubi-change.js +623 -10
  3. package/bin/musubi-orchestrate.js +456 -0
  4. package/bin/musubi-trace.js +393 -0
  5. package/package.json +3 -2
  6. package/src/analyzers/impact-analyzer.js +682 -0
  7. package/src/integrations/cicd.js +782 -0
  8. package/src/integrations/documentation.js +740 -0
  9. package/src/integrations/examples.js +789 -0
  10. package/src/integrations/index.js +23 -0
  11. package/src/integrations/platforms.js +929 -0
  12. package/src/llm-providers/anthropic-provider.js +175 -0
  13. package/src/llm-providers/base-provider.js +221 -0
  14. package/src/llm-providers/copilot-provider.js +262 -0
  15. package/src/llm-providers/index.js +214 -0
  16. package/src/llm-providers/openai-provider.js +205 -0
  17. package/src/managers/delta-spec.js +484 -0
  18. package/src/monitoring/incident-manager.js +890 -0
  19. package/src/monitoring/index.js +633 -0
  20. package/src/monitoring/observability.js +938 -0
  21. package/src/monitoring/release-manager.js +622 -0
  22. package/src/orchestration/index.js +193 -0
  23. package/src/orchestration/orchestration-engine.js +409 -0
  24. package/src/orchestration/pattern-registry.js +319 -0
  25. package/src/orchestration/patterns/auto.js +386 -0
  26. package/src/orchestration/patterns/group-chat.js +395 -0
  27. package/src/orchestration/patterns/human-in-loop.js +506 -0
  28. package/src/orchestration/patterns/nested.js +322 -0
  29. package/src/orchestration/patterns/sequential.js +278 -0
  30. package/src/orchestration/patterns/swarm.js +502 -0
  31. package/src/orchestration/replanning/alternative-generator.js +508 -0
  32. package/src/orchestration/replanning/config.js +378 -0
  33. package/src/orchestration/replanning/index.js +40 -0
  34. package/src/orchestration/replanning/plan-evaluator.js +455 -0
  35. package/src/orchestration/replanning/plan-monitor.js +379 -0
  36. package/src/orchestration/replanning/replan-history.js +402 -0
  37. package/src/orchestration/replanning/replanning-engine.js +706 -0
  38. package/src/orchestration/workflow-orchestrator.js +738 -0
  39. package/src/reporters/coverage-report.js +452 -0
  40. package/src/reporters/traceability-matrix-report.js +684 -0
  41. package/src/steering/advanced-validation.js +812 -0
  42. package/src/steering/auto-updater.js +670 -0
  43. package/src/steering/index.js +119 -0
  44. package/src/steering/quality-metrics.js +650 -0
  45. package/src/steering/template-constraints.js +789 -0
  46. package/src/templates/agents/claude-code/skills/agent-assistant/SKILL.md +22 -0
  47. package/src/templates/agents/claude-code/skills/issue-resolver/SKILL.md +21 -0
  48. package/src/templates/agents/claude-code/skills/orchestrator/SKILL.md +90 -28
  49. package/src/templates/agents/claude-code/skills/project-manager/SKILL.md +32 -0
  50. package/src/templates/agents/claude-code/skills/site-reliability-engineer/SKILL.md +27 -0
  51. package/src/templates/agents/claude-code/skills/steering/SKILL.md +30 -0
  52. package/src/templates/agents/claude-code/skills/test-engineer/SKILL.md +21 -0
  53. package/src/templates/agents/claude-code/skills/ui-ux-designer/SKILL.md +27 -0
  54. package/src/templates/agents/codex/AGENTS.md +36 -1
  55. package/src/templates/agents/cursor/AGENTS.md +36 -1
  56. package/src/templates/agents/gemini-cli/GEMINI.md +36 -1
  57. package/src/templates/agents/github-copilot/AGENTS.md +65 -1
  58. package/src/templates/agents/qwen-code/QWEN.md +36 -1
  59. package/src/templates/agents/windsurf/AGENTS.md +36 -1
  60. package/src/templates/shared/delta-spec-template.md +246 -0
  61. package/src/validators/delta-format.js +474 -0
  62. package/src/validators/traceability-validator.js +561 -0
@@ -0,0 +1,214 @@
1
+ /**
2
+ * @fileoverview LLM Provider Factory and Exports for MUSUBI Replanning Engine
3
+ * @module llm-providers
4
+ * @version 1.0.0
5
+ */
6
+
7
+ 'use strict';
8
+
9
+ const { LLMProvider } = require('./base-provider');
10
+ const { CopilotLMProvider } = require('./copilot-provider');
11
+ const { AnthropicLMProvider } = require('./anthropic-provider');
12
+ const { OpenAILMProvider } = require('./openai-provider');
13
+
14
/**
 * Provider priority order for auto-selection.
 * GitHub Copilot is preferred when available (in VS Code);
 * Anthropic and OpenAI are tried next, in that order.
 * @type {string[]}
 */
const PROVIDER_PRIORITY = ['github-copilot', 'anthropic', 'openai'];
19
+
20
/**
 * Create an LLM provider instance.
 * @param {string} [provider='auto'] - Provider name or 'auto' for automatic selection
 * @param {Object} [config={}] - Provider configuration
 * @returns {LLMProvider} Configured provider instance
 * @throws {Error} If no provider is available
 */
function createLLMProvider(provider = 'auto', config = {}) {
  // 'auto' delegates to environment-based selection; anything else is
  // treated as an explicit provider name.
  return provider === 'auto'
    ? createAutoProvider(config)
    : createNamedProvider(provider, config);
}
34
+
35
/**
 * Automatically select the best available provider.
 * Walks PROVIDER_PRIORITY and returns the first provider whose environment
 * prerequisites are satisfied (VS Code / token / API key).
 * @param {Object} config - Provider configuration
 * @returns {LLMProvider} Best available provider
 * @throws {Error} If no provider can be configured from the environment
 * @private
 */
function createAutoProvider(config) {
  // Try providers in priority order.
  for (const providerName of PROVIDER_PRIORITY) {
    try {
      // Check the environment BEFORE constructing: the original code built
      // the instance first and then discarded it, which wasted work and
      // conflated "constructor failed" with "environment not configured".
      if (!isProviderUsable(providerName)) {
        continue;
      }
      return createNamedProvider(providerName, config);
    } catch (e) {
      // Construction failed for a usable-looking provider; fall through to
      // the next candidate (best-effort selection, by design).
    }
  }

  throw new Error(
    'No LLM provider available. Please configure one of:\n' +
    '  - Run inside VS Code with GitHub Copilot extension\n' +
    '  - Set GITHUB_COPILOT_TOKEN environment variable\n' +
    '  - Set ANTHROPIC_API_KEY environment variable\n' +
    '  - Set OPENAI_API_KEY environment variable'
  );
}

/**
 * Check whether the current environment can support a provider.
 * @param {string} providerName - Canonical provider name from PROVIDER_PRIORITY
 * @returns {boolean} True if the provider's prerequisites are present
 * @private
 */
function isProviderUsable(providerName) {
  if (providerName === 'github-copilot') {
    // Copilot is available inside a VS Code extension host, or anywhere
    // when an explicit token is supplied.
    try {
      require('vscode');
      return true;
    } catch (e) {
      return Boolean(process.env.GITHUB_COPILOT_TOKEN);
    }
  }
  if (providerName === 'anthropic') {
    return Boolean(process.env.ANTHROPIC_API_KEY);
  }
  if (providerName === 'openai') {
    return Boolean(process.env.OPENAI_API_KEY);
  }
  return false;
}
76
+
77
/**
 * Create a specific named provider.
 * Accepts either canonical names ('github-copilot', 'anthropic', 'openai')
 * or their aliases ('copilot', 'claude', 'gpt'), case-insensitively.
 * @param {string} name - Provider name
 * @param {Object} config - Provider configuration
 * @returns {LLMProvider} Provider instance
 * @throws {Error} If the name matches no known provider
 * @private
 */
function createNamedProvider(name, config) {
  const key = name.toLowerCase();

  if (key === 'github-copilot' || key === 'copilot') {
    return new CopilotLMProvider(config);
  }
  if (key === 'anthropic' || key === 'claude') {
    return new AnthropicLMProvider(config);
  }
  if (key === 'openai' || key === 'gpt') {
    return new OpenAILMProvider(config);
  }

  throw new Error(`Unknown LLM provider: ${name}`);
}
102
+
103
/**
 * Get list of available providers based on current environment.
 * Each entry reports either availability + provider info, or the error
 * raised while probing that provider.
 * @returns {Promise<Array<{name: string, available: boolean, info: Object}>>}
 */
async function getAvailableProviders() {
  const candidates = [
    { name: 'github-copilot', class: CopilotLMProvider },
    { name: 'anthropic', class: AnthropicLMProvider },
    { name: 'openai', class: OpenAILMProvider }
  ];

  const report = [];

  // Probe sequentially; isAvailable() may be rate-sensitive for some providers.
  for (const { name, class: ProviderClass } of candidates) {
    try {
      const instance = new ProviderClass();
      report.push({
        name,
        available: await instance.isAvailable(),
        info: instance.getInfo()
      });
    } catch (err) {
      // A provider that cannot even be constructed is reported, not skipped.
      report.push({ name, available: false, error: err.message });
    }
  }

  return report;
}
136
+
137
/**
 * Mock LLM Provider for testing.
 * Cycles through a configurable list of predefined responses; when the list
 * is empty (or exhausted at the current index) a canned replanning payload
 * is returned instead.
 */
class MockLLMProvider extends LLMProvider {
  /**
   * @param {Object} [config={}] - Provider configuration
   * @param {Array<Object>} [config.responses] - Predefined responses to cycle through
   */
  constructor(config = {}) {
    super(config);
    this.name = 'mock';
    this.responses = config.responses || [];
    this.responseIndex = 0;
  }

  /** Mark the provider ready; no external setup is needed for the mock. */
  async initialize() {
    this.isInitialized = true;
  }

  /**
   * Return the next predefined response (or the canned default).
   * @param {string} prompt - Ignored by the mock
   * @param {Object} [options={}] - Ignored by the mock
   * @returns {Promise<Object>} Completion result with fixed usage numbers
   */
  async complete(prompt, options = {}) {
    // Canned payload used when no predefined response is configured.
    const fallback = {
      content: JSON.stringify({
        analysis: 'Mock analysis',
        goal: 'Mock goal',
        alternatives: [
          {
            id: 'mock-alt-1',
            description: 'Mock alternative',
            task: { name: 'mock-task', skill: 'mock-skill', parameters: {} },
            confidence: 0.85,
            reasoning: 'Mock reasoning',
            risks: []
          }
        ]
      })
    };

    const next = this.responses[this.responseIndex] || fallback;

    // Advance cyclically; Math.max keeps the modulus valid for an empty list.
    this.responseIndex = (this.responseIndex + 1) % Math.max(1, this.responses.length);

    const body = next.content;
    return {
      content: typeof body === 'string' ? body : JSON.stringify(body),
      model: 'mock-model',
      usage: { promptTokens: 100, completionTokens: 50, totalTokens: 150 },
      finishReason: 'stop'
    };
  }

  /**
   * Produce a fixed-dimension (1536) embedding of random values.
   * @param {string} text - Ignored by the mock
   * @returns {Promise<number[]>}
   */
  async embed(text) {
    return Array.from({ length: 1536 }, () => Math.random());
  }

  /** The mock is always available. */
  async isAvailable() {
    return true;
  }

  /**
   * Set mock responses and restart cycling from the beginning.
   * @param {Array<Object>} responses - Array of mock responses
   */
  setResponses(responses) {
    this.responses = responses;
    this.responseIndex = 0;
  }
}
199
+
200
// Public API of the llm-providers module: factory helpers, the provider
// class hierarchy (including the test-only mock), and the auto-selection
// priority list.
module.exports = {
  // Factory function
  createLLMProvider,
  getAvailableProviders,

  // Provider classes
  LLMProvider,
  CopilotLMProvider,
  AnthropicLMProvider,
  OpenAILMProvider,
  MockLLMProvider,

  // Constants
  PROVIDER_PRIORITY
};
@@ -0,0 +1,205 @@
1
+ /**
2
+ * @fileoverview OpenAI GPT API Provider for MUSUBI Replanning Engine
3
+ * @module llm-providers/openai-provider
4
+ * @version 1.0.0
5
+ */
6
+
7
+ 'use strict';
8
+
9
+ const { LLMProvider } = require('./base-provider');
10
+
11
+ /**
12
+ * OpenAI GPT API Provider
13
+ * Fallback provider with embedding support
14
+ */
15
+ class OpenAILMProvider extends LLMProvider {
16
+ /**
17
+ * Create an OpenAI provider
18
+ * @param {Object} config - Provider configuration
19
+ * @param {string} [config.apiKey] - OpenAI API key
20
+ * @param {string} [config.model='gpt-4o'] - Model to use for completion
21
+ * @param {string} [config.embeddingModel='text-embedding-3-small'] - Model for embeddings
22
+ */
23
+ constructor(config = {}) {
24
+ super(config);
25
+ this.name = 'openai';
26
+ this.apiKey = config.apiKey || process.env.OPENAI_API_KEY;
27
+ this.config.model = config.model || 'gpt-4o';
28
+ this.config.embeddingModel = config.embeddingModel || 'text-embedding-3-small';
29
+ this.endpoint = config.endpoint || 'https://api.openai.com/v1';
30
+ this.rateLimiter = this.createRateLimiter(60); // 60 RPM default
31
+ }
32
+
33
+ /**
34
+ * Initialize the OpenAI provider
35
+ * @returns {Promise<void>}
36
+ */
37
+ async initialize() {
38
+ if (!this.apiKey) {
39
+ throw new Error('OpenAI API key not found. Set OPENAI_API_KEY environment variable.');
40
+ }
41
+ this.isInitialized = true;
42
+ }
43
+
44
+ /**
45
+ * Complete a prompt using OpenAI GPT API
46
+ * @param {string} prompt - The prompt to complete
47
+ * @param {Object} [options={}] - Completion options
48
+ * @returns {Promise<LLMCompletionResult>} Completion result
49
+ */
50
+ async complete(prompt, options = {}) {
51
+ if (!this.isInitialized) {
52
+ await this.initialize();
53
+ }
54
+
55
+ const systemPrompt = options.systemPrompt || this.getDefaultSystemPrompt();
56
+ const messages = this.formatMessages(systemPrompt, prompt);
57
+
58
+ return this.rateLimiter(async () => {
59
+ return this.retryWithBackoff(async () => {
60
+ const response = await fetch(`${this.endpoint}/chat/completions`, {
61
+ method: 'POST',
62
+ headers: {
63
+ 'Authorization': `Bearer ${this.apiKey}`,
64
+ 'Content-Type': 'application/json'
65
+ },
66
+ body: JSON.stringify({
67
+ model: this.config.model,
68
+ messages,
69
+ max_tokens: options.maxTokens || this.config.maxTokens,
70
+ temperature: options.temperature || this.config.temperature,
71
+ response_format: options.jsonMode ? { type: 'json_object' } : undefined
72
+ }),
73
+ signal: AbortSignal.timeout(this.config.timeout)
74
+ });
75
+
76
+ if (!response.ok) {
77
+ const error = await response.text();
78
+ throw new Error(`OpenAI API error: ${response.status} - ${error}`);
79
+ }
80
+
81
+ const data = await response.json();
82
+
83
+ return {
84
+ content: data.choices[0].message.content,
85
+ model: data.model,
86
+ usage: {
87
+ promptTokens: data.usage?.prompt_tokens || 0,
88
+ completionTokens: data.usage?.completion_tokens || 0,
89
+ totalTokens: data.usage?.total_tokens || 0
90
+ },
91
+ finishReason: data.choices[0].finish_reason
92
+ };
93
+ });
94
+ });
95
+ }
96
+
97
+ /**
98
+ * Generate embeddings using OpenAI Embedding API
99
+ * @param {string} text - Text to embed
100
+ * @returns {Promise<number[]>} Embedding vector
101
+ */
102
+ async embed(text) {
103
+ if (!this.isInitialized) {
104
+ await this.initialize();
105
+ }
106
+
107
+ return this.rateLimiter(async () => {
108
+ return this.retryWithBackoff(async () => {
109
+ const response = await fetch(`${this.endpoint}/embeddings`, {
110
+ method: 'POST',
111
+ headers: {
112
+ 'Authorization': `Bearer ${this.apiKey}`,
113
+ 'Content-Type': 'application/json'
114
+ },
115
+ body: JSON.stringify({
116
+ model: this.config.embeddingModel,
117
+ input: text
118
+ }),
119
+ signal: AbortSignal.timeout(this.config.timeout)
120
+ });
121
+
122
+ if (!response.ok) {
123
+ const error = await response.text();
124
+ throw new Error(`OpenAI Embedding API error: ${response.status} - ${error}`);
125
+ }
126
+
127
+ const data = await response.json();
128
+ return data.data[0].embedding;
129
+ });
130
+ });
131
+ }
132
+
133
+ /**
134
+ * Check if the provider is available
135
+ * @returns {Promise<boolean>}
136
+ */
137
+ async isAvailable() {
138
+ if (!this.apiKey) {
139
+ return false;
140
+ }
141
+
142
+ try {
143
+ // Simple validation - check if API key format is valid
144
+ // OpenAI API keys start with 'sk-'
145
+ return this.apiKey.startsWith('sk-') && this.apiKey.length > 20;
146
+ } catch (e) {
147
+ return false;
148
+ }
149
+ }
150
+
151
+ /**
152
+ * Get provider information
153
+ * @returns {ProviderInfo}
154
+ */
155
+ getInfo() {
156
+ return {
157
+ name: this.name,
158
+ model: this.config.model,
159
+ embeddingModel: this.config.embeddingModel,
160
+ isInitialized: this.isInitialized,
161
+ capabilities: {
162
+ completion: true,
163
+ embedding: true,
164
+ streaming: true,
165
+ functionCalling: true,
166
+ jsonMode: true
167
+ }
168
+ };
169
+ }
170
+
171
+ /**
172
+ * Get default system prompt for replanning
173
+ * @returns {string}
174
+ * @private
175
+ */
176
+ getDefaultSystemPrompt() {
177
+ return `You are an AI assistant helping with task replanning in a software development workflow.
178
+ Your role is to analyze failed tasks, understand the goal, and generate alternative approaches.
179
+
180
+ Guidelines:
181
+ 1. Be concise and specific in your recommendations
182
+ 2. Prioritize practical, actionable alternatives
183
+ 3. Consider resource constraints and dependencies
184
+ 4. Provide confidence scores for each alternative (0.0 to 1.0)
185
+ 5. Explain the reasoning behind each suggestion
186
+
187
+ When generating alternatives, output valid JSON with this structure:
188
+ {
189
+ "analysis": "Brief analysis of the failure",
190
+ "goal": "Extracted goal from the task",
191
+ "alternatives": [
192
+ {
193
+ "id": "alt-1",
194
+ "description": "Alternative approach",
195
+ "task": { "name": "task-name", "skill": "skill-name", "parameters": {} },
196
+ "confidence": 0.8,
197
+ "reasoning": "Why this might work",
198
+ "risks": ["potential risk"]
199
+ }
200
+ ]
201
+ }`;
202
+ }
203
+ }
204
+
205
// Public API: the OpenAI-backed LLM provider implementation.
module.exports = { OpenAILMProvider };