musubi-sdd 3.5.1 → 3.6.0

This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
package/README.md CHANGED
@@ -71,7 +71,15 @@ musubi init --windsurf # Windsurf IDE
 
 ---
 
-## 📊 What's New in v2.1.0
+## 📊 What's New in v3.6.0
+
+- 🧠 **Dynamic Replanning Engine** - AI agents can now dynamically adjust plans when tasks fail
+- 🔌 **LLM Provider Abstraction** - Multi-provider support (Copilot, Anthropic, OpenAI)
+- 📡 **Real-time Plan Monitoring** - Detect failures, timeouts, and quality degradation
+- 🔄 **Alternative Path Generation** - LLM-powered alternative strategies with confidence scoring
+- 📝 **Replan History & Audit** - Full audit trail with JSONL persistence and export
+
+### Previous (v3.5.0)
 
 - 🔄 **Workflow Engine** - New `musubi-workflow` CLI for stage management and metrics
 - 📊 **Metrics Collection** - Track time per stage, iteration counts, feedback loops
@@ -80,7 +88,12 @@ musubi init --windsurf # Windsurf IDE
 - 🔄 **Retrospective Stage** - Stage 9 for continuous improvement
 - ✅ **Stage Validation Guide** - Checklists for stage transition validation
 
-### Previous (v2.0.0)
+### Previous (v3.5.1)
+
+- 🔧 **CLI Integration** - Added CLI command references to all 8 Claude Code skills
+- 📚 **Platform Documentation** - CLI Commands section added to all 6 non-Claude platforms
+
+### v2.1.0
 
 - 🔌 **CodeGraphMCPServer Integration** - 14 MCP tools for enhanced code analysis
 - 🧠 **GraphRAG-Powered Search** - Semantic code understanding with Louvain community detection
@@ -89,7 +102,8 @@ musubi init --windsurf # Windsurf IDE
 ## Features
 
 - 🤖 **Multi-Agent Support** - Works with 7 AI coding agents (Claude Code, GitHub Copilot, Cursor, Gemini CLI, Codex CLI, Qwen Code, Windsurf)
-- 🔌 **MCP Server Integration** - CodeGraphMCPServer for advanced code analysis (NEW in v2.0.0)
+- 🧠 **Dynamic Replanning** - AI agents dynamically adjust plans on failure with LLM-powered alternatives (NEW in v3.6.0)
+- 🔌 **MCP Server Integration** - CodeGraphMCPServer for advanced code analysis (v2.0.0)
 - 📄 **Flexible Command Formats** - Supports Markdown, TOML, and AGENTS.md formats
 - 🎯 **25 Specialized Agents (All Platforms)** - Orchestrator, Steering, Requirements, Architecture, Development, Quality, Security, Infrastructure
   - Claude Code: Skills API (25 skills)
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "musubi-sdd",
-  "version": "3.5.1",
+  "version": "3.6.0",
   "description": "Ultimate Specification Driven Development Tool with 27 Agents for 7 AI Coding Platforms + MCP Integration (Claude Code, GitHub Copilot, Cursor, Gemini CLI, Windsurf, Codex, Qwen Code)",
   "main": "src/index.js",
   "bin": {
package/src/llm-providers/anthropic-provider.js ADDED
@@ -0,0 +1,176 @@
+/**
+ * @fileoverview Anthropic Claude API Provider for MUSUBI Replanning Engine
+ * @module llm-providers/anthropic-provider
+ * @version 1.0.0
+ */
+
+'use strict';
+
+const { LLMProvider } = require('./base-provider');
+
+/**
+ * Anthropic Claude API Provider
+ * Used when running outside VS Code or when Claude is preferred
+ */
+class AnthropicLMProvider extends LLMProvider {
+  /**
+   * Create an Anthropic provider
+   * @param {Object} config - Provider configuration
+   * @param {string} [config.apiKey] - Anthropic API key
+   * @param {string} [config.model='claude-sonnet-4-20250514'] - Model to use
+   */
+  constructor(config = {}) {
+    super(config);
+    this.name = 'anthropic';
+    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY;
+    this.config.model = config.model || 'claude-sonnet-4-20250514';
+    this.endpoint = config.endpoint || 'https://api.anthropic.com/v1/messages';
+    this.rateLimiter = this.createRateLimiter(60); // 60 RPM
+  }
+
+  /**
+   * Initialize the Anthropic provider
+   * @returns {Promise<void>}
+   */
+  async initialize() {
+    if (!this.apiKey) {
+      throw new Error('Anthropic API key not found. Set ANTHROPIC_API_KEY environment variable.');
+    }
+    this.isInitialized = true;
+  }
+
+  /**
+   * Complete a prompt using Anthropic Claude API
+   * @param {string} prompt - The prompt to complete
+   * @param {Object} [options={}] - Completion options
+   * @returns {Promise<LLMCompletionResult>} Completion result
+   */
+  async complete(prompt, options = {}) {
+    if (!this.isInitialized) {
+      await this.initialize();
+    }
+
+    const systemPrompt = options.systemPrompt || this.getDefaultSystemPrompt();
+
+    return this.rateLimiter(async () => {
+      return this.retryWithBackoff(async () => {
+        const response = await fetch(this.endpoint, {
+          method: 'POST',
+          headers: {
+            'x-api-key': this.apiKey,
+            'anthropic-version': '2023-06-01',
+            'content-type': 'application/json'
+          },
+          body: JSON.stringify({
+            model: this.config.model,
+            max_tokens: options.maxTokens || this.config.maxTokens,
+            temperature: options.temperature ?? this.config.temperature, // pass through so completeJSON's lowered temperature takes effect
+            system: systemPrompt,
+            messages: [
+              { role: 'user', content: prompt }
+            ]
+          }),
+          signal: AbortSignal.timeout(this.config.timeout)
+        });
+
+        if (!response.ok) {
+          const error = await response.text();
+          throw new Error(`Anthropic API error: ${response.status} - ${error}`);
+        }
+
+        const data = await response.json();
+
+        return {
+          content: data.content[0].text,
+          model: data.model,
+          usage: {
+            promptTokens: data.usage?.input_tokens || 0,
+            completionTokens: data.usage?.output_tokens || 0,
+            totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0)
+          },
+          finishReason: data.stop_reason
+        };
+      });
+    });
+  }
+
+  /**
+   * Generate embeddings (not natively supported by Anthropic)
+   * @param {string} text - Text to embed
+   * @returns {Promise<number[]>}
+   */
+  async embed(text) {
+    throw new Error('Embedding not supported by Anthropic Claude API. Use OpenAI or a dedicated embedding service.');
+  }
+
+  /**
+   * Check if the provider is available
+   * @returns {Promise<boolean>}
+   */
+  async isAvailable() {
+    if (!this.apiKey) {
+      return false;
+    }
+
+    try {
+      // Simple validation - check if API key format is valid
+      // Anthropic API keys start with 'sk-ant-'
+      return this.apiKey.startsWith('sk-ant-') || this.apiKey.length > 20;
+    } catch (e) {
+      return false;
+    }
+  }
+
+  /**
+   * Get provider information
+   * @returns {ProviderInfo}
+   */
+  getInfo() {
+    return {
+      name: this.name,
+      model: this.config.model,
+      isInitialized: this.isInitialized,
+      capabilities: {
+        completion: true,
+        embedding: false,
+        streaming: true,
+        functionCalling: true
+      }
+    };
+  }
+
+  /**
+   * Get default system prompt for replanning
+   * @returns {string}
+   * @private
+   */
+  getDefaultSystemPrompt() {
+    return `You are an AI assistant helping with task replanning in a software development workflow.
+Your role is to analyze failed tasks, understand the goal, and generate alternative approaches.
+
+Guidelines:
+1. Be concise and specific in your recommendations
+2. Prioritize practical, actionable alternatives
+3. Consider resource constraints and dependencies
+4. Provide confidence scores for each alternative (0.0 to 1.0)
+5. Explain the reasoning behind each suggestion
+
+When generating alternatives, output valid JSON with this structure:
+{
+  "analysis": "Brief analysis of the failure",
+  "goal": "Extracted goal from the task",
+  "alternatives": [
+    {
+      "id": "alt-1",
+      "description": "Alternative approach",
+      "task": { "name": "task-name", "skill": "skill-name", "parameters": {} },
+      "confidence": 0.8,
+      "reasoning": "Why this might work",
+      "risks": ["potential risk"]
+    }
+  ]
+}`;
+  }
+}
+
+module.exports = { AnthropicLMProvider };
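
A usage sketch, assuming Node 18+ (for the global fetch used above), ANTHROPIC_API_KEY set in the environment, and a require path relative to this file's directory:

const { AnthropicLMProvider } = require('./anthropic-provider');

async function main() {
  const provider = new AnthropicLMProvider();
  if (!(await provider.isAvailable())) {
    throw new Error('Anthropic provider is not configured');
  }
  // complete() applies the 60 RPM rate limiter and exponential-backoff retries internally.
  const result = await provider.complete(
    'The task "run integration tests" failed with a timeout. Suggest alternatives.',
    { maxTokens: 512 }
  );
  console.log(result.content);
  console.log(`Tokens used: ${result.usage.totalTokens}`);
}

main().catch(console.error);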
package/src/llm-providers/base-provider.js ADDED
@@ -0,0 +1,221 @@
+/**
+ * @fileoverview Base LLM Provider class for MUSUBI Replanning Engine
+ * @module llm-providers/base-provider
+ * @version 1.0.0
+ */
+
+'use strict';
+
+/**
+ * Abstract base class for LLM providers
+ * Provides a unified interface for different LLM APIs
+ */
+class LLMProvider {
+  /**
+   * Create an LLM provider instance
+   * @param {Object} config - Provider configuration
+   * @param {string} [config.model] - Model identifier
+   * @param {number} [config.maxTokens=1024] - Maximum tokens for completion
+   * @param {number} [config.temperature=0.7] - Sampling temperature
+   * @param {number} [config.timeout=30000] - Request timeout in milliseconds
+   */
+  constructor(config = {}) {
+    this.config = {
+      maxTokens: 1024,
+      temperature: 0.7,
+      timeout: 30000,
+      ...config
+    };
+    this.name = 'base';
+    this.isInitialized = false;
+  }
+
+  /**
+   * Initialize the provider
+   * @returns {Promise<void>}
+   */
+  async initialize() {
+    this.isInitialized = true;
+  }
+
+  /**
+   * Complete a prompt with the LLM
+   * @param {string} prompt - The prompt to complete
+   * @param {Object} [options={}] - Completion options
+   * @param {number} [options.maxTokens] - Override max tokens
+   * @param {number} [options.temperature] - Override temperature
+   * @param {string} [options.systemPrompt] - System prompt
+   * @returns {Promise<LLMCompletionResult>} Completion result
+   * @abstract
+   */
+  async complete(prompt, options = {}) {
+    throw new Error('LLMProvider.complete() must be implemented by subclass');
+  }
+
+  /**
+   * Complete a structured prompt with JSON output
+   * @param {string} prompt - The prompt
+   * @param {Object} schema - JSON schema for expected output
+   * @param {Object} [options={}] - Completion options
+   * @returns {Promise<Object>} Parsed JSON result
+   */
+  async completeJSON(prompt, schema, options = {}) {
+    const jsonPrompt = `${prompt}
+
+Respond with valid JSON matching this schema:
+${JSON.stringify(schema, null, 2)}
+
+Output only the JSON, no explanation.`;
+
+    const result = await this.complete(jsonPrompt, {
+      ...options,
+      temperature: 0.3 // Lower temperature for structured output
+    });
+
+    try {
+      // Extract JSON from the response
+      const jsonMatch = result.content.match(/\{[\s\S]*\}/);
+      if (!jsonMatch) {
+        throw new Error('No JSON found in response');
+      }
+      return JSON.parse(jsonMatch[0]);
+    } catch (error) {
+      throw new Error(`Failed to parse JSON response: ${error.message}`);
+    }
+  }
+
+  /**
+   * Generate embeddings for text
+   * @param {string} text - Text to embed
+   * @returns {Promise<number[]>} Embedding vector
+   * @abstract
+   */
+  async embed(text) {
+    throw new Error('LLMProvider.embed() must be implemented by subclass');
+  }
+
+  /**
+   * Check if the provider is available and properly configured
+   * @returns {Promise<boolean>} Availability status
+   * @abstract
+   */
+  async isAvailable() {
+    throw new Error('LLMProvider.isAvailable() must be implemented by subclass');
+  }
+
+  /**
+   * Get provider information
+   * @returns {ProviderInfo} Provider information
+   */
+  getInfo() {
+    return {
+      name: this.name,
+      model: this.config.model,
+      isInitialized: this.isInitialized,
+      capabilities: {
+        completion: true,
+        embedding: false,
+        streaming: false,
+        functionCalling: false
+      }
+    };
+  }
+
+  /**
+   * Format messages for chat completion
+   * @param {string} systemPrompt - System prompt
+   * @param {string} userPrompt - User prompt
+   * @returns {Array<Message>} Formatted messages
+   * @protected
+   */
+  formatMessages(systemPrompt, userPrompt) {
+    const messages = [];
+
+    if (systemPrompt) {
+      messages.push({ role: 'system', content: systemPrompt });
+    }
+
+    messages.push({ role: 'user', content: userPrompt });
+
+    return messages;
+  }
+
+  /**
+   * Create a rate limiter for API calls
+   * @param {number} requestsPerMinute - Rate limit
+   * @returns {Function} Rate limited function wrapper
+   * @protected
+   */
+  createRateLimiter(requestsPerMinute) {
+    const minInterval = 60000 / requestsPerMinute;
+    let lastCall = 0;
+
+    return async (fn) => {
+      const now = Date.now();
+      const timeSinceLastCall = now - lastCall;
+
+      if (timeSinceLastCall < minInterval) {
+        await new Promise(resolve =>
+          setTimeout(resolve, minInterval - timeSinceLastCall)
+        );
+      }
+
+      lastCall = Date.now();
+      return fn();
+    };
+  }
+
+  /**
+   * Retry a function with exponential backoff
+   * @param {Function} fn - Function to retry
+   * @param {number} [maxRetries=3] - Maximum retry attempts
+   * @param {number} [baseDelay=1000] - Base delay in milliseconds
+   * @returns {Promise<*>} Function result
+   * @protected
+   */
+  async retryWithBackoff(fn, maxRetries = 3, baseDelay = 1000) {
+    let lastError;
+
+    for (let attempt = 0; attempt <= maxRetries; attempt++) {
+      try {
+        return await fn();
+      } catch (error) {
+        lastError = error;
+
+        if (attempt < maxRetries) {
+          const delay = baseDelay * Math.pow(2, attempt);
+          await new Promise(resolve => setTimeout(resolve, delay));
+        }
+      }
+    }
+
+    throw lastError;
+  }
+}
+
+/**
+ * @typedef {Object} LLMCompletionResult
+ * @property {string} content - Completion content
+ * @property {string} model - Model used
+ * @property {Object} usage - Token usage
+ * @property {number} usage.promptTokens - Prompt tokens
+ * @property {number} usage.completionTokens - Completion tokens
+ * @property {number} usage.totalTokens - Total tokens
+ * @property {string} [finishReason] - Reason for completion finish
+ */
+
+/**
+ * @typedef {Object} ProviderInfo
+ * @property {string} name - Provider name
+ * @property {string} model - Model identifier
+ * @property {boolean} isInitialized - Initialization status
+ * @property {Object} capabilities - Provider capabilities
+ */
+
+/**
+ * @typedef {Object} Message
+ * @property {string} role - Message role (system, user, assistant)
+ * @property {string} content - Message content
+ */
+
+module.exports = { LLMProvider };
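
Since complete(), embed(), and isAvailable() are abstract, a concrete provider overrides them while inheriting the rate limiter, retry, and JSON helpers. A minimal sketch, with EchoProvider as a hypothetical stand-in for an API-backed subclass, showing the inherited completeJSON(), which appends the schema to the prompt, lowers the temperature to 0.3, and parses the first {...} block from the response:

const { LLMProvider } = require('./base-provider');

class EchoProvider extends LLMProvider {
  constructor(config = {}) {
    super(config);
    this.name = 'echo';
  }

  // A real subclass would call an LLM API here; this stub returns canned JSON.
  async complete(prompt, options = {}) {
    return {
      content: '{"analysis": "stub", "goal": "demo", "alternatives": []}',
      model: 'echo-1',
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
      finishReason: 'stop'
    };
  }

  async isAvailable() {
    return true;
  }
}

new EchoProvider()
  .completeJSON('Replan the failed task', { type: 'object' })
  .then(obj => console.log(obj.analysis)); // -> "stub"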
package/src/llm-providers/copilot-provider.js ADDED
@@ -0,0 +1,264 @@
+/**
+ * @fileoverview GitHub Copilot LM API Provider for MUSUBI Replanning Engine
+ * @module llm-providers/copilot-provider
+ * @version 1.0.0
+ */
+
+'use strict';
+
+const { LLMProvider } = require('./base-provider');
+
+/**
+ * GitHub Copilot Language Model API Provider
+ * Primary provider for MUSUBI when running in VS Code with GitHub Copilot
+ */
+class CopilotLMProvider extends LLMProvider {
+  /**
+   * Create a GitHub Copilot LM provider
+   * @param {Object} config - Provider configuration
+   * @param {Object} [config.vscode] - VS Code API reference
+   * @param {string} [config.model='claude-sonnet-4'] - Model to use
+   */
+  constructor(config = {}) {
+    super(config);
+    this.name = 'github-copilot';
+    this.vscode = config.vscode || null;
+    this.config.model = config.model || 'claude-sonnet-4';
+    this.languageModelAPI = null;
+  }
+
+  /**
+   * Initialize the Copilot provider
+   * @returns {Promise<void>}
+   */
+  async initialize() {
+    if (this.vscode) {
+      // When running in VS Code extension context
+      this.languageModelAPI = this.vscode.lm;
+    } else {
+      // Try to get vscode module dynamically (for VS Code extension context)
+      try {
+        // This will only work when running inside VS Code
+        const vscode = require('vscode');
+        this.vscode = vscode;
+        this.languageModelAPI = vscode.lm;
+      } catch (e) {
+        // Not running in VS Code context - use fallback or mock
+        this.languageModelAPI = null;
+      }
+    }
+
+    this.isInitialized = true;
+  }
+
+  /**
+   * Complete a prompt using GitHub Copilot LM API
+   * @param {string} prompt - The prompt to complete
+   * @param {Object} [options={}] - Completion options
+   * @returns {Promise<LLMCompletionResult>} Completion result
+   */
+  async complete(prompt, options = {}) {
+    if (!this.isInitialized) {
+      await this.initialize();
+    }
+
+    const systemPrompt = options.systemPrompt || this.getDefaultSystemPrompt();
+    const messages = this.formatMessages(systemPrompt, prompt);
+
+    // Use VS Code Language Model API if available
+    if (this.languageModelAPI) {
+      return this.completeWithVSCodeAPI(messages, options);
+    }
+
+    // Fallback: Use REST API if token is available
+    if (process.env.GITHUB_COPILOT_TOKEN) {
+      return this.completeWithRestAPI(messages, options);
+    }
+
+    throw new Error('GitHub Copilot LM API not available. Run inside VS Code with Copilot extension or provide GITHUB_COPILOT_TOKEN.');
+  }
+
+  /**
+   * Complete using VS Code Language Model API
+   * @param {Array<Message>} messages - Chat messages
+   * @param {Object} options - Completion options
+   * @returns {Promise<LLMCompletionResult>}
+   * @private
+   */
+  async completeWithVSCodeAPI(messages, options) {
+    const vscode = this.vscode;
+
+    // Select the appropriate model
+    const models = await vscode.lm.selectChatModels({
+      vendor: 'copilot',
+      family: this.config.model
+    });
+
+    if (models.length === 0) {
+      throw new Error(`No Copilot model found matching: ${this.config.model}`);
+    }
+
+    const model = models[0];
+
+    // Convert messages to VS Code format
+    const chatMessages = messages.map(msg => {
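+      // The stable VS Code LM API exposes only User and Assistant message roles,
+      // so the system prompt is forwarded as a user message.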
+      if (msg.role === 'system') {
+        return vscode.LanguageModelChatMessage.User(msg.content);
+      } else if (msg.role === 'user') {
+        return vscode.LanguageModelChatMessage.User(msg.content);
+      } else {
+        return vscode.LanguageModelChatMessage.Assistant(msg.content);
+      }
+    });
+
+    // Create request options
+    const requestOptions = {
+      justification: 'MUSUBI Replanning Engine alternative path generation'
+    };
+
+    // Send request
+    const response = await model.sendRequest(
+      chatMessages,
+      requestOptions,
+      new vscode.CancellationTokenSource().token
+    );
+
+    // Collect response
+    let content = '';
+    for await (const fragment of response.text) {
+      content += fragment;
+    }
+
+    return {
+      content,
+      model: model.id,
+      usage: {
+        promptTokens: 0, // VS Code API doesn't expose token counts
+        completionTokens: 0,
+        totalTokens: 0
+      },
+      finishReason: 'stop'
+    };
+  }
+
+  /**
+   * Complete using REST API (fallback)
+   * @param {Array<Message>} messages - Chat messages
+   * @param {Object} options - Completion options
+   * @returns {Promise<LLMCompletionResult>}
+   * @private
+   */
+  async completeWithRestAPI(messages, options) {
+    const endpoint = process.env.GITHUB_COPILOT_ENDPOINT || 'https://api.githubcopilot.com/chat/completions';
+    const token = process.env.GITHUB_COPILOT_TOKEN;
+
+    const response = await fetch(endpoint, {
+      method: 'POST',
+      headers: {
+        'Authorization': `Bearer ${token}`,
+        'Content-Type': 'application/json',
+        'Editor-Version': 'MUSUBI/1.0.0',
+        'Editor-Plugin-Version': 'MUSUBI/1.0.0'
+      },
+      body: JSON.stringify({
+        model: this.config.model,
+        messages,
+        max_tokens: options.maxTokens || this.config.maxTokens,
+        temperature: options.temperature || this.config.temperature
+      }),
+      signal: AbortSignal.timeout(this.config.timeout)
+    });
+
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`GitHub Copilot API error: ${response.status} - ${error}`);
+    }
+
+    const data = await response.json();
+
+    return {
+      content: data.choices[0].message.content,
+      model: data.model,
+      usage: {
+        promptTokens: data.usage?.prompt_tokens || 0,
+        completionTokens: data.usage?.completion_tokens || 0,
+        totalTokens: data.usage?.total_tokens || 0
+      },
+      finishReason: data.choices[0].finish_reason
+    };
+  }
+
+  /**
+   * Generate embeddings (not supported by Copilot LM API)
+   * @param {string} text - Text to embed
+   * @returns {Promise<number[]>}
+   */
+  async embed(text) {
+    throw new Error('Embedding not supported by GitHub Copilot LM API');
+  }
+
+  /**
+   * Check if the provider is available
+   * @returns {Promise<boolean>}
+   */
+  async isAvailable() {
+    try {
+      if (!this.isInitialized) {
+        await this.initialize();
+      }
+
+      // Check VS Code LM API
+      if (this.languageModelAPI) {
+        const models = await this.vscode.lm.selectChatModels({
+          vendor: 'copilot'
+        });
+        return models.length > 0;
+      }
+
+      // Check REST API token
+      return !!process.env.GITHUB_COPILOT_TOKEN;
+    } catch (e) {
+      return false;
+    }
+  }
+
+  /**
+   * Get provider information
+   * @returns {ProviderInfo}
+   */
+  getInfo() {
+    return {
+      name: this.name,
+      model: this.config.model,
+      isInitialized: this.isInitialized,
+      capabilities: {
+        completion: true,
+        embedding: false,
+        streaming: true,
+        functionCalling: false
+      },
+      context: this.languageModelAPI ? 'vscode' : 'rest'
+    };
+  }
+
+  /**
+   * Get default system prompt for replanning
+   * @returns {string}
+   * @private
+   */
+  getDefaultSystemPrompt() {
+    return `You are an AI assistant helping with task replanning in a software development workflow.
+Your role is to analyze failed tasks, understand the goal, and generate alternative approaches.
+
+Guidelines:
+1. Be concise and specific in your recommendations
+2. Prioritize practical, actionable alternatives
+3. Consider resource constraints and dependencies
+4. Provide confidence scores for each alternative
+5. Explain the reasoning behind each suggestion`;
+  }
+}
+
+module.exports = { CopilotLMProvider };
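
A minimal fallback sketch combining the two concrete providers, assuming both files sit in the same directory; the selectProvider helper is illustrative, not an export of the package:

const { CopilotLMProvider } = require('./copilot-provider');
const { AnthropicLMProvider } = require('./anthropic-provider');

// Prefer Copilot (VS Code LM API or GITHUB_COPILOT_TOKEN), then fall back to Anthropic.
async function selectProvider() {
  for (const provider of [new CopilotLMProvider(), new AnthropicLMProvider()]) {
    if (await provider.isAvailable()) {
      return provider;
    }
  }
  throw new Error('No LLM provider available: need VS Code with Copilot, GITHUB_COPILOT_TOKEN, or ANTHROPIC_API_KEY');
}

selectProvider()
  .then(provider => console.log(`Using provider: ${provider.getInfo().name}`))
  .catch(console.error);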