@mcpilotx/intentorch 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +545 -0
- package/dist/ai/ai.d.ts +205 -0
- package/dist/ai/ai.js +1200 -0
- package/dist/ai/cloud-intent-engine.d.ts +270 -0
- package/dist/ai/cloud-intent-engine.js +956 -0
- package/dist/ai/command.d.ts +59 -0
- package/dist/ai/command.js +285 -0
- package/dist/ai/config.d.ts +66 -0
- package/dist/ai/config.js +211 -0
- package/dist/ai/enhanced-intent.d.ts +17 -0
- package/dist/ai/enhanced-intent.js +32 -0
- package/dist/ai/index.d.ts +29 -0
- package/dist/ai/index.js +44 -0
- package/dist/ai/intent.d.ts +16 -0
- package/dist/ai/intent.js +30 -0
- package/dist/core/ai-config.d.ts +25 -0
- package/dist/core/ai-config.js +326 -0
- package/dist/core/config-manager.d.ts +36 -0
- package/dist/core/config-manager.js +400 -0
- package/dist/core/config-validator.d.ts +9 -0
- package/dist/core/config-validator.js +184 -0
- package/dist/core/constants.d.ts +34 -0
- package/dist/core/constants.js +37 -0
- package/dist/core/error-ai.d.ts +23 -0
- package/dist/core/error-ai.js +217 -0
- package/dist/core/error-handler.d.ts +197 -0
- package/dist/core/error-handler.js +467 -0
- package/dist/core/index.d.ts +13 -0
- package/dist/core/index.js +17 -0
- package/dist/core/logger.d.ts +27 -0
- package/dist/core/logger.js +108 -0
- package/dist/core/performance-monitor.d.ts +74 -0
- package/dist/core/performance-monitor.js +260 -0
- package/dist/core/providers.d.ts +36 -0
- package/dist/core/providers.js +304 -0
- package/dist/core/retry-manager.d.ts +41 -0
- package/dist/core/retry-manager.js +204 -0
- package/dist/core/types.d.ts +155 -0
- package/dist/core/types.js +2 -0
- package/dist/daemon/index.d.ts +10 -0
- package/dist/daemon/index.js +15 -0
- package/dist/daemon/intent-engine.d.ts +22 -0
- package/dist/daemon/intent-engine.js +50 -0
- package/dist/daemon/orchestrator.d.ts +24 -0
- package/dist/daemon/orchestrator.js +100 -0
- package/dist/daemon/pm.d.ts +33 -0
- package/dist/daemon/pm.js +127 -0
- package/dist/daemon/process.d.ts +11 -0
- package/dist/daemon/process.js +49 -0
- package/dist/daemon/server.d.ts +17 -0
- package/dist/daemon/server.js +435 -0
- package/dist/daemon/service.d.ts +36 -0
- package/dist/daemon/service.js +278 -0
- package/dist/index.d.ts +30 -0
- package/dist/index.js +36 -0
- package/dist/mcp/client.d.ts +51 -0
- package/dist/mcp/client.js +276 -0
- package/dist/mcp/index.d.ts +162 -0
- package/dist/mcp/index.js +199 -0
- package/dist/mcp/tool-registry.d.ts +71 -0
- package/dist/mcp/tool-registry.js +308 -0
- package/dist/mcp/transport.d.ts +83 -0
- package/dist/mcp/transport.js +515 -0
- package/dist/mcp/types.d.ts +136 -0
- package/dist/mcp/types.js +31 -0
- package/dist/runtime/adapter-advanced.d.ts +184 -0
- package/dist/runtime/adapter-advanced.js +160 -0
- package/dist/runtime/adapter.d.ts +9 -0
- package/dist/runtime/adapter.js +2 -0
- package/dist/runtime/detector-advanced.d.ts +59 -0
- package/dist/runtime/detector-advanced.js +487 -0
- package/dist/runtime/detector.d.ts +5 -0
- package/dist/runtime/detector.js +56 -0
- package/dist/runtime/docker-adapter.d.ts +18 -0
- package/dist/runtime/docker-adapter.js +170 -0
- package/dist/runtime/docker.d.ts +17 -0
- package/dist/runtime/docker.js +71 -0
- package/dist/runtime/executable-analyzer.d.ts +56 -0
- package/dist/runtime/executable-analyzer.js +391 -0
- package/dist/runtime/go-adapter.d.ts +19 -0
- package/dist/runtime/go-adapter.js +190 -0
- package/dist/runtime/index.d.ts +9 -0
- package/dist/runtime/index.js +10 -0
- package/dist/runtime/node-adapter.d.ts +10 -0
- package/dist/runtime/node-adapter.js +23 -0
- package/dist/runtime/node.d.ts +20 -0
- package/dist/runtime/node.js +86 -0
- package/dist/runtime/python-adapter.d.ts +11 -0
- package/dist/runtime/python-adapter.js +102 -0
- package/dist/runtime/python.d.ts +17 -0
- package/dist/runtime/python.js +72 -0
- package/dist/runtime/rust-adapter.d.ts +21 -0
- package/dist/runtime/rust-adapter.js +267 -0
- package/dist/sdk.d.ts +500 -0
- package/dist/sdk.js +904 -0
- package/docs/README.ZH_CN.md +545 -0
- package/docs/api.md +888 -0
- package/docs/architecture.md +731 -0
- package/docs/development.md +744 -0
- package/package.json +112 -0
package/dist/ai/ai.js
ADDED
|
@@ -0,0 +1,1200 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Simplified AI Core Service
|
|
3
|
+
* Focused on converting natural language to MCP tool calls
|
|
4
|
+
*/
|
|
5
|
+
import chalk from 'chalk';
|
|
6
|
+
import { logger } from '../core/logger.js';
|
|
7
|
+
// Simplified AI error
export class AIError extends Error {
    code;
    message;
    category;
    suggestions;
    /**
     * Domain error for the AI service.
     *
     * @param {string} code - Machine-readable error code (e.g. 'AI_CONFIG_ERROR').
     * @param {string} message - Human-readable description.
     * @param {string} category - Error category (e.g. 'config', 'connection').
     * @param {string[]} [suggestions] - Remediation hints shown to the user.
     */
    constructor(code, message, category, suggestions = []) {
        super(message);
        this.name = 'AIError';
        this.code = code;
        this.message = message;
        this.category = category;
        this.suggestions = suggestions;
    }
}
|
|
22
|
+
/**
|
|
23
|
+
* Simplified AI Core Service
|
|
24
|
+
*/
|
|
25
|
+
export class SimpleAI {
|
|
26
|
+
    // Active provider configuration (set by configure()); null until configured.
    config = null;
    // True only after configure() succeeds with a usable provider.
    enabled = false;
    // Lightweight provider descriptor ({ provider, config, endpoint, ... }); null until initialized.
    client = null;
    /** Create the service in an unconfigured state; call configure() before ask(). */
    constructor() {
        logger.info('[AI] Initializing simplified AI service');
    }
|
|
32
|
+
/**
|
|
33
|
+
* Configure AI service
|
|
34
|
+
*/
|
|
35
|
+
async configure(config) {
|
|
36
|
+
logger.info(`[AI] Configuring AI provider: ${config.provider}`);
|
|
37
|
+
// Provider-specific validation
|
|
38
|
+
switch (config.provider) {
|
|
39
|
+
case 'openai':
|
|
40
|
+
case 'anthropic':
|
|
41
|
+
case 'google':
|
|
42
|
+
case 'azure':
|
|
43
|
+
case 'deepseek': {
|
|
44
|
+
if (!config.apiKey) {
|
|
45
|
+
throw new AIError('AI_CONFIG_ERROR', `${config.provider} requires API key`, 'config', [
|
|
46
|
+
`Run: mcp ai configure ${config.provider} YOUR_API_KEY`,
|
|
47
|
+
`Get ${config.provider} API key from their official website`,
|
|
48
|
+
]);
|
|
49
|
+
}
|
|
50
|
+
break;
|
|
51
|
+
}
|
|
52
|
+
case 'ollama':
|
|
53
|
+
// Ollama can work without API key (local)
|
|
54
|
+
break;
|
|
55
|
+
case 'none':
|
|
56
|
+
// No validation needed
|
|
57
|
+
break;
|
|
58
|
+
default:
|
|
59
|
+
throw new AIError('AI_CONFIG_ERROR', `Unsupported provider: ${config.provider}`, 'config', [
|
|
60
|
+
'Supported providers: openai, anthropic, google, azure, deepseek, ollama, none',
|
|
61
|
+
]);
|
|
62
|
+
}
|
|
63
|
+
this.config = config;
|
|
64
|
+
// Initialize client
|
|
65
|
+
await this.initializeClient();
|
|
66
|
+
this.enabled = true;
|
|
67
|
+
logger.info(`[AI] ${config.provider} configuration completed`);
|
|
68
|
+
}
|
|
69
|
+
/**
|
|
70
|
+
* Initialize AI client
|
|
71
|
+
*/
|
|
72
|
+
async initializeClient() {
|
|
73
|
+
if (!this.config || this.config.provider === 'none') {
|
|
74
|
+
this.enabled = false;
|
|
75
|
+
return;
|
|
76
|
+
}
|
|
77
|
+
try {
|
|
78
|
+
switch (this.config.provider) {
|
|
79
|
+
case 'openai': {
|
|
80
|
+
// Simplified OpenAI client
|
|
81
|
+
this.client = {
|
|
82
|
+
provider: 'openai',
|
|
83
|
+
config: this.config,
|
|
84
|
+
endpoint: 'https://api.openai.com/v1',
|
|
85
|
+
};
|
|
86
|
+
break;
|
|
87
|
+
}
|
|
88
|
+
case 'anthropic': {
|
|
89
|
+
// Simplified Anthropic client
|
|
90
|
+
this.client = {
|
|
91
|
+
provider: 'anthropic',
|
|
92
|
+
config: this.config,
|
|
93
|
+
endpoint: 'https://api.anthropic.com/v1',
|
|
94
|
+
};
|
|
95
|
+
break;
|
|
96
|
+
}
|
|
97
|
+
case 'google': {
|
|
98
|
+
// Simplified Google (Gemini) client
|
|
99
|
+
this.client = {
|
|
100
|
+
provider: 'google',
|
|
101
|
+
config: this.config,
|
|
102
|
+
endpoint: 'https://generativelanguage.googleapis.com/v1',
|
|
103
|
+
};
|
|
104
|
+
break;
|
|
105
|
+
}
|
|
106
|
+
case 'azure': {
|
|
107
|
+
// Simplified Azure OpenAI client
|
|
108
|
+
const azureEndpoint = this.config.endpoint || 'https://YOUR_RESOURCE.openai.azure.com';
|
|
109
|
+
this.client = {
|
|
110
|
+
provider: 'azure',
|
|
111
|
+
config: this.config,
|
|
112
|
+
endpoint: azureEndpoint,
|
|
113
|
+
apiVersion: this.config.apiVersion || '2024-02-15-preview',
|
|
114
|
+
};
|
|
115
|
+
break;
|
|
116
|
+
}
|
|
117
|
+
case 'deepseek': {
|
|
118
|
+
// Simplified DeepSeek client
|
|
119
|
+
this.client = {
|
|
120
|
+
provider: 'deepseek',
|
|
121
|
+
config: this.config,
|
|
122
|
+
endpoint: 'https://api.deepseek.com/v1',
|
|
123
|
+
};
|
|
124
|
+
break;
|
|
125
|
+
}
|
|
126
|
+
case 'ollama': {
|
|
127
|
+
// Simplified Ollama client
|
|
128
|
+
this.client = {
|
|
129
|
+
provider: 'ollama',
|
|
130
|
+
endpoint: this.config.endpoint || 'http://localhost:11434',
|
|
131
|
+
config: this.config,
|
|
132
|
+
};
|
|
133
|
+
break;
|
|
134
|
+
}
|
|
135
|
+
default:
|
|
136
|
+
this.enabled = false;
|
|
137
|
+
return;
|
|
138
|
+
}
|
|
139
|
+
// Test connection
|
|
140
|
+
await this.testConnection();
|
|
141
|
+
}
|
|
142
|
+
catch (error) {
|
|
143
|
+
logger.warn(`[AI] Client initialization failed: ${error.message}`);
|
|
144
|
+
this.enabled = false;
|
|
145
|
+
throw new AIError('AI_INIT_ERROR', `AI initialization failed: ${error.message}`, 'connection', [
|
|
146
|
+
'Check network connection',
|
|
147
|
+
'Verify configuration',
|
|
148
|
+
'Run: mcp ai test to test connection',
|
|
149
|
+
]);
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
/**
|
|
153
|
+
* Test AI connection
|
|
154
|
+
*/
|
|
155
|
+
async testConnection() {
|
|
156
|
+
if (!this.config || this.config.provider === 'none') {
|
|
157
|
+
return {
|
|
158
|
+
success: false,
|
|
159
|
+
message: 'AI not configured',
|
|
160
|
+
};
|
|
161
|
+
}
|
|
162
|
+
try {
|
|
163
|
+
switch (this.config.provider) {
|
|
164
|
+
case 'openai': {
|
|
165
|
+
// Simple OpenAI connection test
|
|
166
|
+
const openaiTest = await this.testOpenAIConnection();
|
|
167
|
+
return openaiTest;
|
|
168
|
+
}
|
|
169
|
+
case 'anthropic': {
|
|
170
|
+
// Simple Anthropic connection test
|
|
171
|
+
const anthropicTest = await this.testAnthropicConnection();
|
|
172
|
+
return anthropicTest;
|
|
173
|
+
}
|
|
174
|
+
case 'google': {
|
|
175
|
+
// Simple Google connection test
|
|
176
|
+
const googleTest = await this.testGoogleConnection();
|
|
177
|
+
return googleTest;
|
|
178
|
+
}
|
|
179
|
+
case 'azure': {
|
|
180
|
+
// Simple Azure connection test
|
|
181
|
+
const azureTest = await this.testAzureConnection();
|
|
182
|
+
return azureTest;
|
|
183
|
+
}
|
|
184
|
+
case 'deepseek': {
|
|
185
|
+
// Simple DeepSeek connection test
|
|
186
|
+
const deepseekTest = await this.testDeepSeekConnection();
|
|
187
|
+
return deepseekTest;
|
|
188
|
+
}
|
|
189
|
+
case 'ollama': {
|
|
190
|
+
// Simple Ollama connection test
|
|
191
|
+
const ollamaTest = await this.testOllamaConnection();
|
|
192
|
+
return ollamaTest;
|
|
193
|
+
}
|
|
194
|
+
default:
|
|
195
|
+
return {
|
|
196
|
+
success: false,
|
|
197
|
+
message: `Unsupported provider: ${this.config.provider}`,
|
|
198
|
+
};
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
catch (error) {
|
|
202
|
+
return {
|
|
203
|
+
success: false,
|
|
204
|
+
message: `Connection test failed: ${error.message}`,
|
|
205
|
+
};
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
/**
|
|
209
|
+
* Test OpenAI connection
|
|
210
|
+
*/
|
|
211
|
+
async testOpenAIConnection() {
|
|
212
|
+
if (!this.config?.apiKey) {
|
|
213
|
+
return {
|
|
214
|
+
success: false,
|
|
215
|
+
message: 'Missing API key',
|
|
216
|
+
};
|
|
217
|
+
}
|
|
218
|
+
try {
|
|
219
|
+
// Simple HTTP request test
|
|
220
|
+
const response = await fetch('https://api.openai.com/v1/models', {
|
|
221
|
+
headers: {
|
|
222
|
+
'Authorization': `Bearer ${this.config.apiKey}`,
|
|
223
|
+
'Content-Type': 'application/json',
|
|
224
|
+
},
|
|
225
|
+
});
|
|
226
|
+
if (response.ok) {
|
|
227
|
+
return {
|
|
228
|
+
success: true,
|
|
229
|
+
message: 'OpenAI connection OK',
|
|
230
|
+
};
|
|
231
|
+
}
|
|
232
|
+
else {
|
|
233
|
+
return {
|
|
234
|
+
success: false,
|
|
235
|
+
message: `API returned error: ${response.status}`,
|
|
236
|
+
};
|
|
237
|
+
}
|
|
238
|
+
}
|
|
239
|
+
catch (error) {
|
|
240
|
+
return {
|
|
241
|
+
success: false,
|
|
242
|
+
message: `Network error: ${error.message}`,
|
|
243
|
+
};
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
/**
|
|
247
|
+
* Test Anthropic connection
|
|
248
|
+
*/
|
|
249
|
+
async testAnthropicConnection() {
|
|
250
|
+
if (!this.config?.apiKey) {
|
|
251
|
+
return {
|
|
252
|
+
success: false,
|
|
253
|
+
message: 'Missing API key',
|
|
254
|
+
};
|
|
255
|
+
}
|
|
256
|
+
try {
|
|
257
|
+
const response = await fetch('https://api.anthropic.com/v1/messages', {
|
|
258
|
+
method: 'POST',
|
|
259
|
+
headers: {
|
|
260
|
+
'x-api-key': this.config.apiKey,
|
|
261
|
+
'anthropic-version': '2023-06-01',
|
|
262
|
+
'Content-Type': 'application/json',
|
|
263
|
+
},
|
|
264
|
+
body: JSON.stringify({
|
|
265
|
+
model: this.config.model || 'claude-3-haiku-20240307',
|
|
266
|
+
max_tokens: 10,
|
|
267
|
+
messages: [{ role: 'user', content: 'Hello' }],
|
|
268
|
+
}),
|
|
269
|
+
});
|
|
270
|
+
if (response.ok) {
|
|
271
|
+
return {
|
|
272
|
+
success: true,
|
|
273
|
+
message: 'Anthropic connection OK',
|
|
274
|
+
};
|
|
275
|
+
}
|
|
276
|
+
else {
|
|
277
|
+
return {
|
|
278
|
+
success: false,
|
|
279
|
+
message: `API returned error: ${response.status}`,
|
|
280
|
+
};
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
catch (error) {
|
|
284
|
+
return {
|
|
285
|
+
success: false,
|
|
286
|
+
message: `Network error: ${error.message}`,
|
|
287
|
+
};
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
/**
|
|
291
|
+
* Test Google (Gemini) connection
|
|
292
|
+
*/
|
|
293
|
+
async testGoogleConnection() {
|
|
294
|
+
if (!this.config?.apiKey) {
|
|
295
|
+
return {
|
|
296
|
+
success: false,
|
|
297
|
+
message: 'Missing API key',
|
|
298
|
+
};
|
|
299
|
+
}
|
|
300
|
+
try {
|
|
301
|
+
const response = await fetch(`https://generativelanguage.googleapis.com/v1/models?key=${this.config.apiKey}`);
|
|
302
|
+
if (response.ok) {
|
|
303
|
+
return {
|
|
304
|
+
success: true,
|
|
305
|
+
message: 'Google Gemini connection OK',
|
|
306
|
+
};
|
|
307
|
+
}
|
|
308
|
+
else {
|
|
309
|
+
return {
|
|
310
|
+
success: false,
|
|
311
|
+
message: `API returned error: ${response.status}`,
|
|
312
|
+
};
|
|
313
|
+
}
|
|
314
|
+
}
|
|
315
|
+
catch (error) {
|
|
316
|
+
return {
|
|
317
|
+
success: false,
|
|
318
|
+
message: `Network error: ${error.message}`,
|
|
319
|
+
};
|
|
320
|
+
}
|
|
321
|
+
}
|
|
322
|
+
/**
|
|
323
|
+
* Test Azure OpenAI connection
|
|
324
|
+
*/
|
|
325
|
+
async testAzureConnection() {
|
|
326
|
+
if (!this.config?.apiKey || !this.config?.endpoint) {
|
|
327
|
+
return {
|
|
328
|
+
success: false,
|
|
329
|
+
message: 'Missing API key or endpoint',
|
|
330
|
+
};
|
|
331
|
+
}
|
|
332
|
+
try {
|
|
333
|
+
const apiVersion = this.config.apiVersion || '2024-02-15-preview';
|
|
334
|
+
const endpoint = this.config.endpoint.replace(/\/$/, '');
|
|
335
|
+
const url = `${endpoint}/openai/deployments?api-version=${apiVersion}`;
|
|
336
|
+
const response = await fetch(url, {
|
|
337
|
+
headers: {
|
|
338
|
+
'api-key': this.config.apiKey,
|
|
339
|
+
'Content-Type': 'application/json',
|
|
340
|
+
},
|
|
341
|
+
});
|
|
342
|
+
if (response.ok) {
|
|
343
|
+
return {
|
|
344
|
+
success: true,
|
|
345
|
+
message: 'Azure OpenAI connection OK',
|
|
346
|
+
};
|
|
347
|
+
}
|
|
348
|
+
else {
|
|
349
|
+
return {
|
|
350
|
+
success: false,
|
|
351
|
+
message: `API returned error: ${response.status}`,
|
|
352
|
+
};
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
catch (error) {
|
|
356
|
+
return {
|
|
357
|
+
success: false,
|
|
358
|
+
message: `Network error: ${error.message}`,
|
|
359
|
+
};
|
|
360
|
+
}
|
|
361
|
+
}
|
|
362
|
+
/**
|
|
363
|
+
* Test DeepSeek connection
|
|
364
|
+
*/
|
|
365
|
+
async testDeepSeekConnection() {
|
|
366
|
+
if (!this.config?.apiKey) {
|
|
367
|
+
return {
|
|
368
|
+
success: false,
|
|
369
|
+
message: 'Missing API key',
|
|
370
|
+
};
|
|
371
|
+
}
|
|
372
|
+
try {
|
|
373
|
+
const response = await fetch('https://api.deepseek.com/v1/models', {
|
|
374
|
+
headers: {
|
|
375
|
+
'Authorization': `Bearer ${this.config.apiKey}`,
|
|
376
|
+
'Content-Type': 'application/json',
|
|
377
|
+
},
|
|
378
|
+
});
|
|
379
|
+
if (response.ok) {
|
|
380
|
+
return {
|
|
381
|
+
success: true,
|
|
382
|
+
message: 'DeepSeek connection OK',
|
|
383
|
+
};
|
|
384
|
+
}
|
|
385
|
+
else {
|
|
386
|
+
return {
|
|
387
|
+
success: false,
|
|
388
|
+
message: `API returned error: ${response.status}`,
|
|
389
|
+
};
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
catch (error) {
|
|
393
|
+
return {
|
|
394
|
+
success: false,
|
|
395
|
+
message: `Network error: ${error.message}`,
|
|
396
|
+
};
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
/**
|
|
400
|
+
* Test Ollama connection
|
|
401
|
+
*/
|
|
402
|
+
async testOllamaConnection() {
|
|
403
|
+
const endpoint = this.config?.endpoint || 'http://localhost:11434';
|
|
404
|
+
try {
|
|
405
|
+
const response = await fetch(`${endpoint}/api/tags`, {
|
|
406
|
+
method: 'GET',
|
|
407
|
+
headers: {
|
|
408
|
+
'Content-Type': 'application/json',
|
|
409
|
+
},
|
|
410
|
+
});
|
|
411
|
+
if (response.ok) {
|
|
412
|
+
return {
|
|
413
|
+
success: true,
|
|
414
|
+
message: `Ollama connection OK (${endpoint})`,
|
|
415
|
+
};
|
|
416
|
+
}
|
|
417
|
+
else {
|
|
418
|
+
return {
|
|
419
|
+
success: false,
|
|
420
|
+
message: `Ollama service error: ${response.status}`,
|
|
421
|
+
};
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
catch (error) {
|
|
425
|
+
return {
|
|
426
|
+
success: false,
|
|
427
|
+
message: `Cannot connect to Ollama: ${error.message}`,
|
|
428
|
+
};
|
|
429
|
+
}
|
|
430
|
+
}
|
|
431
|
+
/**
|
|
432
|
+
* Process natural language query
|
|
433
|
+
*/
|
|
434
|
+
async ask(query) {
|
|
435
|
+
logger.info(`[AI] Processing query: "${query}"`);
|
|
436
|
+
// Check if AI is enabled
|
|
437
|
+
if (!this.enabled || !this.config || this.config.provider === 'none') {
|
|
438
|
+
throw new AIError('AI_NOT_CONFIGURED', 'AI provider not configured. Please call configureAI() with a valid API key.', 'config', [
|
|
439
|
+
'Run: mcpilot.configureAI({ provider: "openai", apiKey: "YOUR_API_KEY" })',
|
|
440
|
+
'Get OpenAI API key: https://platform.openai.com/api-keys',
|
|
441
|
+
'Or use Ollama: mcpilot.configureAI({ provider: "ollama", endpoint: "http://localhost:11434" })',
|
|
442
|
+
]);
|
|
443
|
+
}
|
|
444
|
+
try {
|
|
445
|
+
// 1. Analyze intent
|
|
446
|
+
const intent = await this.analyzeIntent(query);
|
|
447
|
+
// 2. Map to tool call
|
|
448
|
+
const toolCall = this.mapIntentToTool(intent);
|
|
449
|
+
// 3. Return tool call
|
|
450
|
+
return {
|
|
451
|
+
type: 'tool_call',
|
|
452
|
+
tool: toolCall,
|
|
453
|
+
confidence: intent.confidence,
|
|
454
|
+
};
|
|
455
|
+
}
|
|
456
|
+
catch (error) {
|
|
457
|
+
logger.warn(`[AI] Intent analysis failed: ${error.message}`);
|
|
458
|
+
// Fallback to command suggestions when AI fails
|
|
459
|
+
return this.getFallbackSuggestions(query);
|
|
460
|
+
}
|
|
461
|
+
}
|
|
462
|
+
/**
|
|
463
|
+
* Analyze intent (simplified version)
|
|
464
|
+
*/
|
|
465
|
+
async analyzeIntent(query) {
|
|
466
|
+
// Simplified intent analysis: keyword matching
|
|
467
|
+
const queryLower = query.toLowerCase();
|
|
468
|
+
// Common intent patterns
|
|
469
|
+
const patterns = [
|
|
470
|
+
// File operations
|
|
471
|
+
{
|
|
472
|
+
regex: /(list|show|display).*(file|directory|folder)/i,
|
|
473
|
+
action: 'list',
|
|
474
|
+
target: 'files',
|
|
475
|
+
confidence: 0.8,
|
|
476
|
+
},
|
|
477
|
+
{
|
|
478
|
+
regex: /(read|view|open).*file/i,
|
|
479
|
+
action: 'read',
|
|
480
|
+
target: 'file',
|
|
481
|
+
confidence: 0.7,
|
|
482
|
+
},
|
|
483
|
+
// Service operations
|
|
484
|
+
{
|
|
485
|
+
regex: /(start|launch|run).*service/i,
|
|
486
|
+
action: 'start',
|
|
487
|
+
target: 'service',
|
|
488
|
+
confidence: 0.9,
|
|
489
|
+
},
|
|
490
|
+
{
|
|
491
|
+
regex: /(stop|halt|terminate).*service/i,
|
|
492
|
+
action: 'stop',
|
|
493
|
+
target: 'service',
|
|
494
|
+
confidence: 0.9,
|
|
495
|
+
},
|
|
496
|
+
{
|
|
497
|
+
regex: /(status|check).*service/i,
|
|
498
|
+
action: 'status',
|
|
499
|
+
target: 'service',
|
|
500
|
+
confidence: 0.8,
|
|
501
|
+
},
|
|
502
|
+
// General queries
|
|
503
|
+
{
|
|
504
|
+
regex: /(help|what can you do)/i,
|
|
505
|
+
action: 'help',
|
|
506
|
+
target: 'general',
|
|
507
|
+
confidence: 0.9,
|
|
508
|
+
},
|
|
509
|
+
];
|
|
510
|
+
// Find matching pattern
|
|
511
|
+
for (const pattern of patterns) {
|
|
512
|
+
if (pattern.regex.test(query)) {
|
|
513
|
+
return {
|
|
514
|
+
action: pattern.action,
|
|
515
|
+
target: pattern.target,
|
|
516
|
+
params: this.extractParams(query),
|
|
517
|
+
confidence: pattern.confidence,
|
|
518
|
+
};
|
|
519
|
+
}
|
|
520
|
+
}
|
|
521
|
+
// If no match, use LLM analysis (if available)
|
|
522
|
+
if (this.config?.provider !== 'none' && this.client) {
|
|
523
|
+
return await this.analyzeWithLLM(query);
|
|
524
|
+
}
|
|
525
|
+
// Default intent
|
|
526
|
+
return {
|
|
527
|
+
action: 'unknown',
|
|
528
|
+
target: 'unknown',
|
|
529
|
+
params: {},
|
|
530
|
+
confidence: 0.3,
|
|
531
|
+
};
|
|
532
|
+
}
|
|
533
|
+
/**
|
|
534
|
+
* Analyze intent with LLM (optional)
|
|
535
|
+
*/
|
|
536
|
+
async analyzeWithLLM(query) {
|
|
537
|
+
if (!this.config || !this.client) {
|
|
538
|
+
throw new AIError('AI_NOT_CONFIGURED', 'AI not configured for LLM analysis', 'config');
|
|
539
|
+
}
|
|
540
|
+
logger.info(`[AI] Analyzing intent with ${this.config.provider}`);
|
|
541
|
+
try {
|
|
542
|
+
// Call actual AI API based on provider
|
|
543
|
+
const response = await this.callAIAPI(query);
|
|
544
|
+
// Parse response to extract intent
|
|
545
|
+
const intent = this.parseAIResponse(response, query);
|
|
546
|
+
return intent;
|
|
547
|
+
}
|
|
548
|
+
catch (error) {
|
|
549
|
+
logger.warn(`[AI] LLM analysis failed: ${error.message}`);
|
|
550
|
+
// Fallback to default intent
|
|
551
|
+
return {
|
|
552
|
+
action: 'analyze',
|
|
553
|
+
target: 'query',
|
|
554
|
+
params: { query },
|
|
555
|
+
confidence: 0.3,
|
|
556
|
+
};
|
|
557
|
+
}
|
|
558
|
+
}
|
|
559
|
+
/**
|
|
560
|
+
* Call AI API based on provider
|
|
561
|
+
*/
|
|
562
|
+
async callAIAPI(query) {
|
|
563
|
+
if (!this.config || !this.client) {
|
|
564
|
+
throw new AIError('AI_NOT_CONFIGURED', 'AI not configured', 'config');
|
|
565
|
+
}
|
|
566
|
+
const provider = this.config.provider;
|
|
567
|
+
const apiKey = this.config.apiKey;
|
|
568
|
+
const model = this.config.model || this.getDefaultModel(provider);
|
|
569
|
+
switch (provider) {
|
|
570
|
+
case 'openai':
|
|
571
|
+
return await this.callOpenAI(query, apiKey, model);
|
|
572
|
+
case 'anthropic':
|
|
573
|
+
return await this.callAnthropic(query, apiKey, model);
|
|
574
|
+
case 'google':
|
|
575
|
+
return await this.callGoogle(query, apiKey, model);
|
|
576
|
+
case 'azure':
|
|
577
|
+
return await this.callAzure(query, apiKey, model);
|
|
578
|
+
case 'deepseek':
|
|
579
|
+
return await this.callDeepSeek(query, apiKey, model);
|
|
580
|
+
case 'ollama':
|
|
581
|
+
return await this.callOllama(query, model);
|
|
582
|
+
default:
|
|
583
|
+
throw new AIError('UNSUPPORTED_PROVIDER', `Unsupported provider: ${provider}`, 'config');
|
|
584
|
+
}
|
|
585
|
+
}
|
|
586
|
+
/**
|
|
587
|
+
* Get default model for provider
|
|
588
|
+
*/
|
|
589
|
+
getDefaultModel(provider) {
|
|
590
|
+
switch (provider) {
|
|
591
|
+
case 'openai': return 'gpt-3.5-turbo';
|
|
592
|
+
case 'anthropic': return 'claude-3-haiku-20240307';
|
|
593
|
+
case 'google': return 'gemini-pro';
|
|
594
|
+
case 'azure': return 'gpt-35-turbo';
|
|
595
|
+
case 'deepseek': return 'deepseek-chat';
|
|
596
|
+
case 'ollama': return 'llama2';
|
|
597
|
+
default: return 'unknown';
|
|
598
|
+
}
|
|
599
|
+
}
|
|
600
|
+
/**
|
|
601
|
+
* Call OpenAI API
|
|
602
|
+
*/
|
|
603
|
+
async callOpenAI(query, apiKey, model) {
|
|
604
|
+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
|
|
605
|
+
method: 'POST',
|
|
606
|
+
headers: {
|
|
607
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
608
|
+
'Content-Type': 'application/json',
|
|
609
|
+
},
|
|
610
|
+
body: JSON.stringify({
|
|
611
|
+
model,
|
|
612
|
+
messages: [
|
|
613
|
+
{
|
|
614
|
+
role: 'system',
|
|
615
|
+
content: 'You are an intent analyzer. Extract action, target, and parameters from user queries.',
|
|
616
|
+
},
|
|
617
|
+
{
|
|
618
|
+
role: 'user',
|
|
619
|
+
content: query,
|
|
620
|
+
},
|
|
621
|
+
],
|
|
622
|
+
max_tokens: 100,
|
|
623
|
+
temperature: 0.1,
|
|
624
|
+
}),
|
|
625
|
+
});
|
|
626
|
+
if (!response.ok) {
|
|
627
|
+
throw new Error(`OpenAI API error: ${response.status}`);
|
|
628
|
+
}
|
|
629
|
+
return await response.json();
|
|
630
|
+
}
|
|
631
|
+
/**
|
|
632
|
+
* Call Anthropic API
|
|
633
|
+
*/
|
|
634
|
+
async callAnthropic(query, apiKey, model) {
|
|
635
|
+
const response = await fetch('https://api.anthropic.com/v1/messages', {
|
|
636
|
+
method: 'POST',
|
|
637
|
+
headers: {
|
|
638
|
+
'x-api-key': apiKey,
|
|
639
|
+
'anthropic-version': '2023-06-01',
|
|
640
|
+
'Content-Type': 'application/json',
|
|
641
|
+
},
|
|
642
|
+
body: JSON.stringify({
|
|
643
|
+
model,
|
|
644
|
+
max_tokens: 100,
|
|
645
|
+
messages: [
|
|
646
|
+
{
|
|
647
|
+
role: 'user',
|
|
648
|
+
content: `Analyze this query for intent: ${query}`,
|
|
649
|
+
},
|
|
650
|
+
],
|
|
651
|
+
}),
|
|
652
|
+
});
|
|
653
|
+
if (!response.ok) {
|
|
654
|
+
throw new Error(`Anthropic API error: ${response.status}`);
|
|
655
|
+
}
|
|
656
|
+
return await response.json();
|
|
657
|
+
}
|
|
658
|
+
/**
|
|
659
|
+
* Call Google (Gemini) API
|
|
660
|
+
*/
|
|
661
|
+
async callGoogle(query, apiKey, model) {
|
|
662
|
+
const response = await fetch(`https://generativelanguage.googleapis.com/v1/models/${model}:generateContent?key=${apiKey}`, {
|
|
663
|
+
method: 'POST',
|
|
664
|
+
headers: {
|
|
665
|
+
'Content-Type': 'application/json',
|
|
666
|
+
},
|
|
667
|
+
body: JSON.stringify({
|
|
668
|
+
contents: [
|
|
669
|
+
{
|
|
670
|
+
parts: [
|
|
671
|
+
{
|
|
672
|
+
text: `Analyze this query for intent: ${query}`,
|
|
673
|
+
},
|
|
674
|
+
],
|
|
675
|
+
},
|
|
676
|
+
],
|
|
677
|
+
generationConfig: {
|
|
678
|
+
maxOutputTokens: 100,
|
|
679
|
+
temperature: 0.1,
|
|
680
|
+
},
|
|
681
|
+
}),
|
|
682
|
+
});
|
|
683
|
+
if (!response.ok) {
|
|
684
|
+
throw new Error(`Google API error: ${response.status}`);
|
|
685
|
+
}
|
|
686
|
+
return await response.json();
|
|
687
|
+
}
|
|
688
|
+
/**
|
|
689
|
+
* Call Azure OpenAI API
|
|
690
|
+
*/
|
|
691
|
+
async callAzure(query, apiKey, model) {
|
|
692
|
+
const endpoint = this.config?.endpoint || 'https://YOUR_RESOURCE.openai.azure.com';
|
|
693
|
+
const apiVersion = this.config?.apiVersion || '2024-02-15-preview';
|
|
694
|
+
const url = `${endpoint}/openai/deployments/${model}/chat/completions?api-version=${apiVersion}`;
|
|
695
|
+
const response = await fetch(url, {
|
|
696
|
+
method: 'POST',
|
|
697
|
+
headers: {
|
|
698
|
+
'api-key': apiKey,
|
|
699
|
+
'Content-Type': 'application/json',
|
|
700
|
+
},
|
|
701
|
+
body: JSON.stringify({
|
|
702
|
+
messages: [
|
|
703
|
+
{
|
|
704
|
+
role: 'system',
|
|
705
|
+
content: 'You are an intent analyzer. Extract action, target, and parameters from user queries.',
|
|
706
|
+
},
|
|
707
|
+
{
|
|
708
|
+
role: 'user',
|
|
709
|
+
content: query,
|
|
710
|
+
},
|
|
711
|
+
],
|
|
712
|
+
max_tokens: 100,
|
|
713
|
+
temperature: 0.1,
|
|
714
|
+
}),
|
|
715
|
+
});
|
|
716
|
+
if (!response.ok) {
|
|
717
|
+
throw new Error(`Azure OpenAI API error: ${response.status}`);
|
|
718
|
+
}
|
|
719
|
+
return await response.json();
|
|
720
|
+
}
|
|
721
|
+
/**
|
|
722
|
+
* Call DeepSeek API
|
|
723
|
+
*/
|
|
724
|
+
async callDeepSeek(query, apiKey, model) {
|
|
725
|
+
const response = await fetch('https://api.deepseek.com/v1/chat/completions', {
|
|
726
|
+
method: 'POST',
|
|
727
|
+
headers: {
|
|
728
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
729
|
+
'Content-Type': 'application/json',
|
|
730
|
+
},
|
|
731
|
+
body: JSON.stringify({
|
|
732
|
+
model,
|
|
733
|
+
messages: [
|
|
734
|
+
{
|
|
735
|
+
role: 'system',
|
|
736
|
+
content: 'You are an intent analyzer. Extract action, target, and parameters from user queries.',
|
|
737
|
+
},
|
|
738
|
+
{
|
|
739
|
+
role: 'user',
|
|
740
|
+
content: query,
|
|
741
|
+
},
|
|
742
|
+
],
|
|
743
|
+
max_tokens: 100,
|
|
744
|
+
temperature: 0.1,
|
|
745
|
+
}),
|
|
746
|
+
});
|
|
747
|
+
if (!response.ok) {
|
|
748
|
+
throw new Error(`DeepSeek API error: ${response.status}`);
|
|
749
|
+
}
|
|
750
|
+
return await response.json();
|
|
751
|
+
}
|
|
752
|
+
/**
|
|
753
|
+
* Call Ollama API
|
|
754
|
+
*/
|
|
755
|
+
async callOllama(query, model) {
|
|
756
|
+
const endpoint = this.config?.endpoint || 'http://localhost:11434';
|
|
757
|
+
const response = await fetch(`${endpoint}/api/generate`, {
|
|
758
|
+
method: 'POST',
|
|
759
|
+
headers: {
|
|
760
|
+
'Content-Type': 'application/json',
|
|
761
|
+
},
|
|
762
|
+
body: JSON.stringify({
|
|
763
|
+
model,
|
|
764
|
+
prompt: `Analyze this query for intent: ${query}`,
|
|
765
|
+
stream: false,
|
|
766
|
+
options: {
|
|
767
|
+
temperature: 0.1,
|
|
768
|
+
},
|
|
769
|
+
}),
|
|
770
|
+
});
|
|
771
|
+
if (!response.ok) {
|
|
772
|
+
throw new Error(`Ollama API error: ${response.status}`);
|
|
773
|
+
}
|
|
774
|
+
return await response.json();
|
|
775
|
+
}
|
|
776
|
+
/**
|
|
777
|
+
* Parse AI response to extract intent
|
|
778
|
+
*/
|
|
779
|
+
parseAIResponse(response, query) {
|
|
780
|
+
// Default intent
|
|
781
|
+
const defaultIntent = {
|
|
782
|
+
action: 'analyze',
|
|
783
|
+
target: 'query',
|
|
784
|
+
params: { query },
|
|
785
|
+
confidence: 0.5,
|
|
786
|
+
};
|
|
787
|
+
if (!response) {
|
|
788
|
+
return defaultIntent;
|
|
789
|
+
}
|
|
790
|
+
try {
|
|
791
|
+
// Extract text from different provider responses
|
|
792
|
+
let text = '';
|
|
793
|
+
if (response.choices && response.choices[0]?.message?.content) {
|
|
794
|
+
// OpenAI, Azure, DeepSeek format
|
|
795
|
+
text = response.choices[0].message.content;
|
|
796
|
+
}
|
|
797
|
+
else if (response.content && response.content[0]?.text) {
|
|
798
|
+
// Anthropic format
|
|
799
|
+
text = response.content[0].text;
|
|
800
|
+
}
|
|
801
|
+
else if (response.candidates && response.candidates[0]?.content?.parts?.[0]?.text) {
|
|
802
|
+
// Google format
|
|
803
|
+
text = response.candidates[0].content.parts[0].text;
|
|
804
|
+
}
|
|
805
|
+
else if (response.response) {
|
|
806
|
+
// Ollama format
|
|
807
|
+
text = response.response;
|
|
808
|
+
}
|
|
809
|
+
if (!text) {
|
|
810
|
+
return defaultIntent;
|
|
811
|
+
}
|
|
812
|
+
// Simple parsing - in real implementation, this would be more sophisticated
|
|
813
|
+
const textLower = text.toLowerCase();
|
|
814
|
+
// Extract action
|
|
815
|
+
let action = 'analyze';
|
|
816
|
+
if (textLower.includes('list') || textLower.includes('show')) {
|
|
817
|
+
action = 'list';
|
|
818
|
+
}
|
|
819
|
+
else if (textLower.includes('read') || textLower.includes('view')) {
|
|
820
|
+
action = 'read';
|
|
821
|
+
}
|
|
822
|
+
else if (textLower.includes('start') || textLower.includes('launch')) {
|
|
823
|
+
action = 'start';
|
|
824
|
+
}
|
|
825
|
+
else if (textLower.includes('stop') || textLower.includes('terminate')) {
|
|
826
|
+
action = 'stop';
|
|
827
|
+
}
|
|
828
|
+
else if (textLower.includes('status') || textLower.includes('check')) {
|
|
829
|
+
action = 'status';
|
|
830
|
+
}
|
|
831
|
+
else if (textLower.includes('help')) {
|
|
832
|
+
action = 'help';
|
|
833
|
+
}
|
|
834
|
+
// Extract target
|
|
835
|
+
let target = 'query';
|
|
836
|
+
if (textLower.includes('file') || textLower.includes('directory')) {
|
|
837
|
+
target = 'files';
|
|
838
|
+
}
|
|
839
|
+
else if (textLower.includes('service')) {
|
|
840
|
+
target = 'service';
|
|
841
|
+
}
|
|
842
|
+
// Extract parameters
|
|
843
|
+
const params = this.extractParams(query);
|
|
844
|
+
// Calculate confidence based on response quality
|
|
845
|
+
const confidence = text.length > 20 ? 0.7 : 0.4;
|
|
846
|
+
return {
|
|
847
|
+
action,
|
|
848
|
+
target,
|
|
849
|
+
params,
|
|
850
|
+
confidence,
|
|
851
|
+
};
|
|
852
|
+
}
|
|
853
|
+
catch (error) {
|
|
854
|
+
logger.warn(`[AI] Failed to parse AI response: ${error}`);
|
|
855
|
+
return defaultIntent;
|
|
856
|
+
}
|
|
857
|
+
}
|
|
858
|
+
/**
|
|
859
|
+
* Extract parameters from query
|
|
860
|
+
*/
|
|
861
|
+
extractParams(query) {
|
|
862
|
+
const params = {};
|
|
863
|
+
// Extract path parameter
|
|
864
|
+
const pathMatch = query.match(/(\/[^\s]+|\.[^\s]+)/);
|
|
865
|
+
if (pathMatch) {
|
|
866
|
+
params.path = pathMatch[0];
|
|
867
|
+
}
|
|
868
|
+
// Extract service name
|
|
869
|
+
const serviceMatch = query.match(/([a-zA-Z0-9_-]+)\s+service/i);
|
|
870
|
+
if (serviceMatch) {
|
|
871
|
+
params.service = serviceMatch[1];
|
|
872
|
+
}
|
|
873
|
+
return params;
|
|
874
|
+
}
|
|
875
|
+
/**
|
|
876
|
+
* Map intent to tool call
|
|
877
|
+
*/
|
|
878
|
+
mapIntentToTool(intent) {
|
|
879
|
+
// Simplified mapping logic
|
|
880
|
+
switch (intent.action) {
|
|
881
|
+
case 'list':
|
|
882
|
+
return {
|
|
883
|
+
service: 'filesystem',
|
|
884
|
+
tool: 'list_directory',
|
|
885
|
+
params: { path: intent.params.path || '.' },
|
|
886
|
+
};
|
|
887
|
+
case 'read':
|
|
888
|
+
return {
|
|
889
|
+
service: 'filesystem',
|
|
890
|
+
tool: 'read_file',
|
|
891
|
+
params: { path: intent.params.path || 'README.md' },
|
|
892
|
+
};
|
|
893
|
+
case 'start':
|
|
894
|
+
return {
|
|
895
|
+
service: 'service_manager',
|
|
896
|
+
tool: 'start_service',
|
|
897
|
+
params: { name: intent.params.service || 'default' },
|
|
898
|
+
};
|
|
899
|
+
case 'stop':
|
|
900
|
+
return {
|
|
901
|
+
service: 'service_manager',
|
|
902
|
+
tool: 'stop_service',
|
|
903
|
+
params: { name: intent.params.service || 'default' },
|
|
904
|
+
};
|
|
905
|
+
case 'status':
|
|
906
|
+
return {
|
|
907
|
+
service: 'service_manager',
|
|
908
|
+
tool: 'get_status',
|
|
909
|
+
params: { name: intent.params.service },
|
|
910
|
+
};
|
|
911
|
+
case 'help':
|
|
912
|
+
return {
|
|
913
|
+
service: 'system',
|
|
914
|
+
tool: 'show_help',
|
|
915
|
+
params: {},
|
|
916
|
+
};
|
|
917
|
+
default:
|
|
918
|
+
return {
|
|
919
|
+
service: 'system',
|
|
920
|
+
tool: 'unknown',
|
|
921
|
+
params: { intent },
|
|
922
|
+
};
|
|
923
|
+
}
|
|
924
|
+
}
|
|
925
|
+
/**
|
|
926
|
+
* Get fallback suggestions (when AI is not available)
|
|
927
|
+
*/
|
|
928
|
+
getFallbackSuggestions(query) {
|
|
929
|
+
const suggestions = [];
|
|
930
|
+
// Analyze query to provide traditional command suggestions
|
|
931
|
+
const queryLower = query.toLowerCase();
|
|
932
|
+
if (queryLower.includes('file') || queryLower.includes('directory')) {
|
|
933
|
+
suggestions.push('mcp service list');
|
|
934
|
+
suggestions.push('List files: ls or dir');
|
|
935
|
+
}
|
|
936
|
+
if (queryLower.includes('service') && queryLower.includes('start')) {
|
|
937
|
+
suggestions.push('mcp service start <service-name>');
|
|
938
|
+
}
|
|
939
|
+
if (queryLower.includes('service') && queryLower.includes('stop')) {
|
|
940
|
+
suggestions.push('mcp service stop <service-name>');
|
|
941
|
+
}
|
|
942
|
+
if (queryLower.includes('status') || queryLower.includes('check')) {
|
|
943
|
+
suggestions.push('mcp service status');
|
|
944
|
+
}
|
|
945
|
+
if (suggestions.length === 0) {
|
|
946
|
+
suggestions.push('mcp --help to see all commands');
|
|
947
|
+
suggestions.push('mcp service --help to see service commands');
|
|
948
|
+
}
|
|
949
|
+
return {
|
|
950
|
+
type: 'suggestions',
|
|
951
|
+
message: 'AI feature not enabled or configured incorrectly',
|
|
952
|
+
suggestions,
|
|
953
|
+
help: 'You can:',
|
|
954
|
+
};
|
|
955
|
+
}
|
|
956
|
+
/**
|
|
957
|
+
* Get AI status
|
|
958
|
+
*/
|
|
959
|
+
getStatus() {
|
|
960
|
+
return {
|
|
961
|
+
enabled: this.enabled,
|
|
962
|
+
provider: this.config?.provider || 'none',
|
|
963
|
+
configured: !!this.config && this.config.provider !== 'none',
|
|
964
|
+
};
|
|
965
|
+
}
|
|
966
|
+
/**
|
|
967
|
+
* Call raw LLM API with custom messages and options
|
|
968
|
+
* This method supports advanced use cases like function calling, JSON mode, etc.
|
|
969
|
+
*/
|
|
970
|
+
async callRawAPI(options) {
|
|
971
|
+
// Check if AI is enabled
|
|
972
|
+
if (!this.enabled || !this.config || this.config.provider === 'none') {
|
|
973
|
+
throw new AIError('AI_NOT_CONFIGURED', 'AI provider not configured. Please call configure() with a valid API key.', 'config', [
|
|
974
|
+
'Run: mcpilot.configureAI({ provider: "openai", apiKey: "YOUR_API_KEY" })',
|
|
975
|
+
'Get OpenAI API key: https://platform.openai.com/api-keys',
|
|
976
|
+
'Or use Ollama: mcpilot.configureAI({ provider: "ollama", endpoint: "http://localhost:11434" })',
|
|
977
|
+
]);
|
|
978
|
+
}
|
|
979
|
+
try {
|
|
980
|
+
const provider = this.config.provider;
|
|
981
|
+
const apiKey = this.config.apiKey;
|
|
982
|
+
const model = this.config.model || this.getDefaultModel(provider);
|
|
983
|
+
// Prepare request based on provider
|
|
984
|
+
switch (provider) {
|
|
985
|
+
case 'openai':
|
|
986
|
+
return await this.callOpenAIRaw(options, apiKey, model);
|
|
987
|
+
case 'anthropic':
|
|
988
|
+
return await this.callAnthropicRaw(options, apiKey, model);
|
|
989
|
+
case 'google':
|
|
990
|
+
return await this.callGoogleRaw(options, apiKey, model);
|
|
991
|
+
case 'azure':
|
|
992
|
+
return await this.callAzureRaw(options, apiKey, model);
|
|
993
|
+
case 'deepseek':
|
|
994
|
+
return await this.callDeepSeekRaw(options, apiKey, model);
|
|
995
|
+
case 'ollama':
|
|
996
|
+
return await this.callOllamaRaw(options, model);
|
|
997
|
+
default:
|
|
998
|
+
throw new AIError('UNSUPPORTED_PROVIDER', `Raw API calls not supported for provider: ${provider}`, 'execution');
|
|
999
|
+
}
|
|
1000
|
+
}
|
|
1001
|
+
catch (error) {
|
|
1002
|
+
logger.error(`[AI] Raw API call failed: ${error.message}`);
|
|
1003
|
+
throw new AIError('API_CALL_FAILED', `Raw API call failed: ${error.message}`, 'execution');
|
|
1004
|
+
}
|
|
1005
|
+
}
|
|
1006
|
+
/**
|
|
1007
|
+
* Call OpenAI raw API
|
|
1008
|
+
*/
|
|
1009
|
+
async callOpenAIRaw(options, apiKey, model) {
|
|
1010
|
+
const requestBody = {
|
|
1011
|
+
model,
|
|
1012
|
+
messages: options.messages,
|
|
1013
|
+
temperature: options.temperature || 0.1,
|
|
1014
|
+
max_tokens: options.maxTokens || 1024,
|
|
1015
|
+
};
|
|
1016
|
+
// Add response format if specified
|
|
1017
|
+
if (options.responseFormat) {
|
|
1018
|
+
requestBody.response_format = options.responseFormat;
|
|
1019
|
+
}
|
|
1020
|
+
// Add functions if specified
|
|
1021
|
+
if (options.functions && options.functions.length > 0) {
|
|
1022
|
+
requestBody.functions = options.functions;
|
|
1023
|
+
requestBody.function_call = options.functionCall || 'auto';
|
|
1024
|
+
}
|
|
1025
|
+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
|
|
1026
|
+
method: 'POST',
|
|
1027
|
+
headers: {
|
|
1028
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
1029
|
+
'Content-Type': 'application/json',
|
|
1030
|
+
},
|
|
1031
|
+
body: JSON.stringify(requestBody),
|
|
1032
|
+
});
|
|
1033
|
+
if (!response.ok) {
|
|
1034
|
+
throw new Error(`OpenAI API error: ${response.status}`);
|
|
1035
|
+
}
|
|
1036
|
+
return await response.json();
|
|
1037
|
+
}
|
|
1038
|
+
/**
|
|
1039
|
+
* Call Anthropic raw API
|
|
1040
|
+
*/
|
|
1041
|
+
async callAnthropicRaw(options, apiKey, model) {
|
|
1042
|
+
// Anthropic has different API structure
|
|
1043
|
+
const requestBody = {
|
|
1044
|
+
model,
|
|
1045
|
+
max_tokens: options.maxTokens || 1024,
|
|
1046
|
+
messages: options.messages,
|
|
1047
|
+
temperature: options.temperature || 0.1,
|
|
1048
|
+
};
|
|
1049
|
+
const response = await fetch('https://api.anthropic.com/v1/messages', {
|
|
1050
|
+
method: 'POST',
|
|
1051
|
+
headers: {
|
|
1052
|
+
'x-api-key': apiKey,
|
|
1053
|
+
'anthropic-version': '2023-06-01',
|
|
1054
|
+
'Content-Type': 'application/json',
|
|
1055
|
+
},
|
|
1056
|
+
body: JSON.stringify(requestBody),
|
|
1057
|
+
});
|
|
1058
|
+
if (!response.ok) {
|
|
1059
|
+
throw new Error(`Anthropic API error: ${response.status}`);
|
|
1060
|
+
}
|
|
1061
|
+
return await response.json();
|
|
1062
|
+
}
|
|
1063
|
+
/**
|
|
1064
|
+
* Call Google raw API
|
|
1065
|
+
*/
|
|
1066
|
+
async callGoogleRaw(options, apiKey, model) {
|
|
1067
|
+
// Google Gemini API structure
|
|
1068
|
+
const requestBody = {
|
|
1069
|
+
contents: options.messages.map((msg) => ({
|
|
1070
|
+
parts: [{ text: msg.content }],
|
|
1071
|
+
role: msg.role === 'user' ? 'user' : 'model',
|
|
1072
|
+
})),
|
|
1073
|
+
generationConfig: {
|
|
1074
|
+
temperature: options.temperature || 0.1,
|
|
1075
|
+
maxOutputTokens: options.maxTokens || 1024,
|
|
1076
|
+
},
|
|
1077
|
+
};
|
|
1078
|
+
const response = await fetch(`https://generativelanguage.googleapis.com/v1/models/${model}:generateContent?key=${apiKey}`, {
|
|
1079
|
+
method: 'POST',
|
|
1080
|
+
headers: {
|
|
1081
|
+
'Content-Type': 'application/json',
|
|
1082
|
+
},
|
|
1083
|
+
body: JSON.stringify(requestBody),
|
|
1084
|
+
});
|
|
1085
|
+
if (!response.ok) {
|
|
1086
|
+
throw new Error(`Google API error: ${response.status}`);
|
|
1087
|
+
}
|
|
1088
|
+
return await response.json();
|
|
1089
|
+
}
|
|
1090
|
+
/**
|
|
1091
|
+
* Call Azure OpenAI raw API
|
|
1092
|
+
*/
|
|
1093
|
+
async callAzureRaw(options, apiKey, model) {
|
|
1094
|
+
const endpoint = this.config?.endpoint || 'https://YOUR_RESOURCE.openai.azure.com';
|
|
1095
|
+
const apiVersion = this.config?.apiVersion || '2024-02-15-preview';
|
|
1096
|
+
const url = `${endpoint}/openai/deployments/${model}/chat/completions?api-version=${apiVersion}`;
|
|
1097
|
+
const requestBody = {
|
|
1098
|
+
messages: options.messages,
|
|
1099
|
+
temperature: options.temperature || 0.1,
|
|
1100
|
+
max_tokens: options.maxTokens || 1024,
|
|
1101
|
+
};
|
|
1102
|
+
// Azure OpenAI supports functions
|
|
1103
|
+
if (options.functions && options.functions.length > 0) {
|
|
1104
|
+
requestBody.functions = options.functions;
|
|
1105
|
+
requestBody.function_call = options.functionCall || 'auto';
|
|
1106
|
+
}
|
|
1107
|
+
const response = await fetch(url, {
|
|
1108
|
+
method: 'POST',
|
|
1109
|
+
headers: {
|
|
1110
|
+
'api-key': apiKey,
|
|
1111
|
+
'Content-Type': 'application/json',
|
|
1112
|
+
},
|
|
1113
|
+
body: JSON.stringify(requestBody),
|
|
1114
|
+
});
|
|
1115
|
+
if (!response.ok) {
|
|
1116
|
+
throw new Error(`Azure OpenAI API error: ${response.status}`);
|
|
1117
|
+
}
|
|
1118
|
+
return await response.json();
|
|
1119
|
+
}
|
|
1120
|
+
/**
|
|
1121
|
+
* Call DeepSeek raw API
|
|
1122
|
+
*/
|
|
1123
|
+
async callDeepSeekRaw(options, apiKey, model) {
|
|
1124
|
+
const requestBody = {
|
|
1125
|
+
model,
|
|
1126
|
+
messages: options.messages,
|
|
1127
|
+
temperature: options.temperature || 0.1,
|
|
1128
|
+
max_tokens: options.maxTokens || 1024,
|
|
1129
|
+
};
|
|
1130
|
+
// DeepSeek supports response format
|
|
1131
|
+
if (options.responseFormat) {
|
|
1132
|
+
requestBody.response_format = options.responseFormat;
|
|
1133
|
+
}
|
|
1134
|
+
const response = await fetch('https://api.deepseek.com/v1/chat/completions', {
|
|
1135
|
+
method: 'POST',
|
|
1136
|
+
headers: {
|
|
1137
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
1138
|
+
'Content-Type': 'application/json',
|
|
1139
|
+
},
|
|
1140
|
+
body: JSON.stringify(requestBody),
|
|
1141
|
+
});
|
|
1142
|
+
if (!response.ok) {
|
|
1143
|
+
throw new Error(`DeepSeek API error: ${response.status}`);
|
|
1144
|
+
}
|
|
1145
|
+
return await response.json();
|
|
1146
|
+
}
|
|
1147
|
+
/**
|
|
1148
|
+
* Call Ollama raw API
|
|
1149
|
+
*/
|
|
1150
|
+
async callOllamaRaw(options, model) {
|
|
1151
|
+
const endpoint = this.config?.endpoint || 'http://localhost:11434';
|
|
1152
|
+
// Ollama has different API structure
|
|
1153
|
+
const requestBody = {
|
|
1154
|
+
model,
|
|
1155
|
+
prompt: options.messages[options.messages.length - 1]?.content || '',
|
|
1156
|
+
stream: false,
|
|
1157
|
+
options: {
|
|
1158
|
+
temperature: options.temperature || 0.1,
|
|
1159
|
+
num_predict: options.maxTokens || 1024,
|
|
1160
|
+
},
|
|
1161
|
+
};
|
|
1162
|
+
const response = await fetch(`${endpoint}/api/generate`, {
|
|
1163
|
+
method: 'POST',
|
|
1164
|
+
headers: {
|
|
1165
|
+
'Content-Type': 'application/json',
|
|
1166
|
+
},
|
|
1167
|
+
body: JSON.stringify(requestBody),
|
|
1168
|
+
});
|
|
1169
|
+
if (!response.ok) {
|
|
1170
|
+
throw new Error(`Ollama API error: ${response.status}`);
|
|
1171
|
+
}
|
|
1172
|
+
return await response.json();
|
|
1173
|
+
}
|
|
1174
|
+
/**
|
|
1175
|
+
* Reset configuration
|
|
1176
|
+
*/
|
|
1177
|
+
reset() {
|
|
1178
|
+
this.config = null;
|
|
1179
|
+
this.enabled = false;
|
|
1180
|
+
this.client = null;
|
|
1181
|
+
logger.info('[AI] Configuration reset');
|
|
1182
|
+
}
|
|
1183
|
+
/**
|
|
1184
|
+
* Get friendly error message
|
|
1185
|
+
*/
|
|
1186
|
+
static getFriendlyError(error) {
|
|
1187
|
+
const lines = [
|
|
1188
|
+
chalk.red(`❌ ${error.message}`),
|
|
1189
|
+
chalk.gray(`Error code: ${error.code}`),
|
|
1190
|
+
];
|
|
1191
|
+
if (error.suggestions.length > 0) {
|
|
1192
|
+
lines.push(chalk.yellow('\n🔧 Fix suggestions:'));
|
|
1193
|
+
error.suggestions.forEach((suggestion, i) => {
|
|
1194
|
+
lines.push(` ${i + 1}. ${suggestion}`);
|
|
1195
|
+
});
|
|
1196
|
+
}
|
|
1197
|
+
return lines.join('\n');
|
|
1198
|
+
}
|
|
1199
|
+
}
|
|
1200
|
+
//# sourceMappingURL=ai.js.map
|