@mcp-use/cli 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54):
  1. package/dist/InputPrompt.d.ts +13 -0
  2. package/dist/InputPrompt.js +188 -0
  3. package/dist/MultilineInput.d.ts +13 -0
  4. package/dist/MultilineInput.js +154 -0
  5. package/dist/MultilineTextInput.d.ts +11 -0
  6. package/dist/MultilineTextInput.js +97 -0
  7. package/dist/PasteAwareInput.d.ts +13 -0
  8. package/dist/PasteAwareInput.js +183 -0
  9. package/dist/SimpleMultilineInput.d.ts +11 -0
  10. package/dist/SimpleMultilineInput.js +125 -0
  11. package/dist/app.d.ts +1 -5
  12. package/dist/app.js +291 -186
  13. package/dist/cli.js +2 -5
  14. package/dist/commands.d.ts +15 -30
  15. package/dist/commands.js +308 -568
  16. package/dist/components/AsciiLogo.d.ts +2 -0
  17. package/dist/components/AsciiLogo.js +7 -0
  18. package/dist/components/Footer.d.ts +5 -0
  19. package/dist/components/Footer.js +19 -0
  20. package/dist/components/InputPrompt.d.ts +13 -0
  21. package/dist/components/InputPrompt.js +188 -0
  22. package/dist/components/Messages.d.ts +21 -0
  23. package/dist/components/Messages.js +80 -0
  24. package/dist/components/ServerStatus.d.ts +7 -0
  25. package/dist/components/ServerStatus.js +36 -0
  26. package/dist/components/Spinner.d.ts +16 -0
  27. package/dist/components/Spinner.js +63 -0
  28. package/dist/components/ToolStatus.d.ts +8 -0
  29. package/dist/components/ToolStatus.js +33 -0
  30. package/dist/components/textInput.d.ts +1 -0
  31. package/dist/components/textInput.js +1 -0
  32. package/dist/logger.d.ts +10 -0
  33. package/dist/logger.js +48 -0
  34. package/dist/mcp-service.d.ts +5 -4
  35. package/dist/mcp-service.js +98 -207
  36. package/dist/services/agent-service.d.ts +56 -0
  37. package/dist/services/agent-service.js +203 -0
  38. package/dist/services/cli-service.d.ts +132 -0
  39. package/dist/services/cli-service.js +591 -0
  40. package/dist/services/index.d.ts +4 -0
  41. package/dist/services/index.js +4 -0
  42. package/dist/services/llm-service.d.ts +174 -0
  43. package/dist/services/llm-service.js +567 -0
  44. package/dist/services/mcp-config-service.d.ts +69 -0
  45. package/dist/services/mcp-config-service.js +426 -0
  46. package/dist/services/mcp-service.d.ts +1 -0
  47. package/dist/services/mcp-service.js +1 -0
  48. package/dist/services/utility-service.d.ts +47 -0
  49. package/dist/services/utility-service.js +208 -0
  50. package/dist/storage.js +4 -4
  51. package/dist/types.d.ts +30 -0
  52. package/dist/types.js +1 -0
  53. package/package.json +22 -8
  54. package/readme.md +68 -39
@@ -0,0 +1,567 @@
1
+ import { ChatOpenAI, AzureChatOpenAI } from '@langchain/openai';
2
+ import { ChatAnthropic } from '@langchain/anthropic';
3
+ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
4
+ import { ChatVertexAI } from '@langchain/google-vertexai';
5
+ import { ChatMistralAI } from '@langchain/mistralai';
6
+ import { ChatGroq } from '@langchain/groq';
7
+ import { ChatCohere } from '@langchain/cohere';
8
+ import { ChatFireworks } from '@langchain/community/chat_models/fireworks';
9
+ import { ChatPerplexity } from '@langchain/community/chat_models/perplexity';
10
+ import { ChatOllama } from '@langchain/ollama';
11
+ import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
12
+ import { ChatDeepSeek } from '@langchain/deepseek';
13
+ import { ChatXAI } from '@langchain/xai';
14
+ import { SecureStorage } from '../storage.js';
15
// Registry of every supported LLM provider.
//
// NOTE: key order is significant — it is the fallback search order used by
// LLMService.initializeDefaultProvider() when auto-detecting a provider
// from available API keys (via Object.entries iteration order).
//
// Each entry supplies:
//   envVar       - environment variable / stored-key name holding the credential
//   defaultModel - model chosen when the user names only the provider
//   factory      - builds the LangChain chat model from (apiKey, config)
const PROVIDERS = {
    openai: {
        envVar: 'OPENAI_API_KEY',
        defaultModel: 'gpt-4o',
        factory: (apiKey, config) => new ChatOpenAI({ openAIApiKey: apiKey, modelName: config.model }),
    },
    azureopenai: {
        envVar: 'AZURE_OPENAI_API_KEY',
        defaultModel: 'gpt-4o',
        factory: (apiKey, config) => new AzureChatOpenAI({ azureOpenAIApiKey: apiKey, modelName: config.model }),
    },
    anthropic: {
        envVar: 'ANTHROPIC_API_KEY',
        defaultModel: 'claude-3-5-sonnet-20240620',
        factory: (apiKey, config) => new ChatAnthropic({ anthropicApiKey: apiKey, modelName: config.model }),
    },
    gemini: {
        envVar: 'GOOGLE_API_KEY',
        defaultModel: 'gemini-1.5-pro',
        factory: (apiKey, config) => new ChatGoogleGenerativeAI({ apiKey: apiKey, model: config.model }),
    },
    vertex: {
        // Vertex authenticates via application-default credentials, not a key.
        envVar: 'GOOGLE_APPLICATION_CREDENTIALS',
        defaultModel: 'gemini-1.5-flash',
        factory: (_apiKey, config) => new ChatVertexAI({ model: config.model }),
    },
    mistral: {
        envVar: 'MISTRAL_API_KEY',
        defaultModel: 'mistral-large-latest',
        factory: (apiKey, config) => new ChatMistralAI({ apiKey: apiKey, modelName: config.model }),
    },
    groq: {
        envVar: 'GROQ_API_KEY',
        defaultModel: 'llama-3.1-70b-versatile',
        factory: (apiKey, config) => new ChatGroq({ apiKey: apiKey, model: config.model }),
    },
    cohere: {
        envVar: 'COHERE_API_KEY',
        defaultModel: 'command-r-plus',
        factory: (apiKey, config) => new ChatCohere({ apiKey: apiKey, model: config.model }),
    },
    fireworks: {
        envVar: 'FIREWORKS_API_KEY',
        defaultModel: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
        factory: (apiKey, config) => new ChatFireworks({ apiKey: apiKey, model: config.model }),
    },
    perplexity: {
        envVar: 'PERPLEXITY_API_KEY',
        defaultModel: 'pplx-70b-online',
        factory: (apiKey, config) => new ChatPerplexity({ apiKey: apiKey, model: config.model }),
    },
    ollama: {
        // Local runtime: needs a reachable host, not an API key.
        envVar: 'OLLAMA_HOST',
        defaultModel: 'llama3',
        factory: (_apiKey, config) => new ChatOllama({ baseUrl: process.env['OLLAMA_HOST'], model: config.model }),
    },
    together: {
        envVar: 'TOGETHER_API_KEY',
        defaultModel: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
        factory: (apiKey, config) => new ChatTogetherAI({ apiKey: apiKey, model: config.model }),
    },
    deepseek: {
        envVar: 'DEEPSEEK_API_KEY',
        defaultModel: 'deepseek-chat',
        factory: (apiKey, config) => new ChatDeepSeek({ apiKey: apiKey, model: config.model }),
    },
    xai: {
        envVar: 'XAI_API_KEY',
        defaultModel: 'grok-1.5',
        factory: (apiKey, config) => new ChatXAI({ apiKey: apiKey, model: config.model }),
    },
};
87
/**
 * Manages LLM provider/model selection, API-key storage, and chat-model
 * construction for the CLI.
 *
 * API-key lookup precedence (see getApiKey): session keys (this process
 * only) > persistently stored keys > process environment variables.
 */
export class LLMService {
    constructor() {
        // Active provider/model selection, or null when nothing is configured.
        this.currentLLMConfig = null;
        // Keys entered for this session only; never written to disk.
        this.sessionApiKeys = {};
        // Persistent config (stored API keys + last selected model).
        this.persistentConfig = SecureStorage.loadConfig();
        this.initializeDefaultProvider();
    }
    /**
     * Chooses an initial provider: prefer the last persisted selection when
     * its credential is still available, otherwise the first provider (in
     * PROVIDERS declaration order) with a usable key. Leaves the config
     * null when no provider is usable.
     */
    initializeDefaultProvider() {
        const lastModel = this.persistentConfig.lastModel;
        if (lastModel) {
            const providerInfo = PROVIDERS[lastModel.provider];
            if (providerInfo && this.getApiKey(providerInfo.envVar)) {
                this.currentLLMConfig = {
                    provider: lastModel.provider,
                    model: lastModel.model,
                    temperature: lastModel.temperature ?? 0.7,
                    maxTokens: lastModel.maxTokens,
                };
                return;
            }
        }
        for (const [providerName, providerInfo] of Object.entries(PROVIDERS)) {
            if (this.getApiKey(providerInfo.envVar)) {
                this.currentLLMConfig = {
                    provider: providerName,
                    model: providerInfo.defaultModel,
                    temperature: 0.7,
                };
                return;
            }
        }
        this.currentLLMConfig = null;
    }
    /** Resolves a credential with session > stored > environment precedence. */
    getApiKey(envVar) {
        return (this.sessionApiKeys[envVar] ||
            this.persistentConfig.apiKeys[envVar] ||
            process.env[envVar]);
    }
    /** Names of providers whose credential (or host, for ollama) is available. */
    getAvailableProviders() {
        return Object.entries(PROVIDERS)
            .filter(([, providerInfo]) => this.getApiKey(providerInfo.envVar))
            .map(([providerName]) => providerName);
    }
    /** True when at least one provider has a usable credential. */
    isAnyProviderAvailable() {
        return this.getAvailableProviders().length > 0;
    }
    /**
     * Lists known default models. Returns an array for a recognized
     * provider, otherwise the full provider -> models map.
     */
    getAvailableModels(provider) {
        const models = {};
        for (const [key, value] of Object.entries(PROVIDERS)) {
            models[key] = [value.defaultModel];
        }
        if (provider && models[provider]) {
            return models[provider];
        }
        return models;
    }
    /** Returns a defensive copy of the active config, or null when unset. */
    getCurrentConfig() {
        return this.currentLLMConfig ? { ...this.currentLLMConfig } : null;
    }
    /**
     * Switches the active provider/model and persists the selection.
     * Reports requiresApiKey (plus envVar) when the provider has no
     * usable credential yet so callers can prompt for one.
     */
    setModel(provider, model) {
        if (!Object.keys(PROVIDERS).includes(provider)) {
            return { success: false, message: `Unknown provider: ${provider}` };
        }
        const availableProviders = this.getAvailableProviders();
        if (!availableProviders.includes(provider)) {
            const providerInfo = PROVIDERS[provider];
            return {
                success: false,
                message: `API key not found for ${provider}`,
                requiresApiKey: true,
                envVar: providerInfo.envVar,
            };
        }
        this.currentLLMConfig = {
            provider: provider,
            model,
            // FIX: use ?? instead of || so an explicit temperature of 0
            // survives a model switch (|| treated 0 as unset and reset it
            // to 0.7; createLLM already used ?? for the same value).
            temperature: this.currentLLMConfig?.temperature ?? 0.7,
            maxTokens: this.currentLLMConfig?.maxTokens,
        };
        this.persistentConfig.lastModel = this.currentLLMConfig;
        SecureStorage.saveConfig(this.persistentConfig);
        return { success: true, message: `Switched to ${provider} ${model}` };
    }
    /** Sets the sampling temperature (0.0-2.0) and persists the change. */
    setTemperature(temperature) {
        if (!this.currentLLMConfig) {
            return { success: false, message: 'No model configured' };
        }
        if (temperature < 0 || temperature > 2) {
            return {
                success: false,
                message: 'Temperature must be between 0.0 and 2.0',
            };
        }
        this.currentLLMConfig.temperature = temperature;
        // FIX: persist like setModel does — previously the new temperature
        // was lost on restart.
        this.persistentConfig.lastModel = { ...this.currentLLMConfig };
        SecureStorage.saveConfig(this.persistentConfig);
        return { success: true, message: `Temperature set to ${temperature}` };
    }
    /** Sets the max-token limit (positive integer) and persists the change. */
    setMaxTokens(maxTokens) {
        if (!this.currentLLMConfig) {
            return { success: false, message: 'No model configured' };
        }
        // FIX: also reject non-integers, matching the error message's claim.
        if (!Number.isInteger(maxTokens) || maxTokens < 1) {
            return { success: false, message: 'Max tokens must be a positive integer' };
        }
        this.currentLLMConfig.maxTokens = maxTokens;
        // FIX: persist like setModel does — previously lost on restart.
        this.persistentConfig.lastModel = { ...this.currentLLMConfig };
        SecureStorage.saveConfig(this.persistentConfig);
        return { success: true, message: `Max tokens set to ${maxTokens}` };
    }
    /** Minimal key validation; per-provider format checks could be added. */
    validateApiKey(_provider, apiKey) {
        if (!apiKey || apiKey.trim().length === 0) {
            return { valid: false, message: 'API key cannot be empty' };
        }
        // Basic validation can be improved per provider if needed
        return { valid: true, message: '' };
    }
    /**
     * Stores an API key persistently. When shouldAutoSelect is true and no
     * model is configured yet, also selects the provider's default model
     * (returned as autoSelected).
     */
    setApiKey(provider, apiKey, shouldAutoSelect = false) {
        const validationResult = this.validateApiKey(provider, apiKey);
        if (!validationResult.valid) {
            return { success: false, message: validationResult.message };
        }
        const providerInfo = PROVIDERS[provider];
        if (!providerInfo) {
            return { success: false, message: `Invalid provider: ${provider}` };
        }
        this.persistentConfig.apiKeys[providerInfo.envVar] = apiKey;
        let autoSelected;
        if (shouldAutoSelect && !this.currentLLMConfig) {
            this.currentLLMConfig = {
                provider: provider,
                model: providerInfo.defaultModel,
                temperature: 0.7,
            };
            autoSelected = this.currentLLMConfig;
            // FIX: persist the auto-selection so the next launch restores it,
            // exactly as a manual setModel() would.
            this.persistentConfig.lastModel = this.currentLLMConfig;
        }
        SecureStorage.saveConfig(this.persistentConfig);
        return {
            success: true,
            message: `${provider} API key set`,
            autoSelected,
        };
    }
    /** Wipes session + stored keys and clears the active selection. */
    clearApiKeys() {
        this.sessionApiKeys = {};
        this.persistentConfig.apiKeys = {};
        this.persistentConfig.lastModel = undefined;
        this.currentLLMConfig = null;
        SecureStorage.saveConfig(this.persistentConfig);
    }
    /**
     * Masks a key for display: first/last 4 characters with at most 12
     * stars between; keys of 8 chars or fewer are fully starred.
     */
    maskApiKey(apiKey) {
        if (apiKey.length <= 8) {
            return '*'.repeat(apiKey.length);
        }
        const start = apiKey.substring(0, 4);
        const end = apiKey.substring(apiKey.length - 4);
        const middle = '*'.repeat(Math.min(12, apiKey.length - 8));
        return `${start}${middle}${end}`;
    }
    /**
     * Per-provider key status: { status: 'set'|'not set', source:
     * 'session'|'stored'|'env'|'none', masked }. Source reflects the same
     * precedence getApiKey() uses.
     */
    getApiKeyStatus() {
        const status = {};
        for (const [providerName, providerInfo] of Object.entries(PROVIDERS)) {
            const envVar = providerInfo.envVar;
            const sessionKey = this.sessionApiKeys[envVar];
            const persistentKey = this.persistentConfig.apiKeys[envVar];
            const envKey = process.env[envVar];
            if (sessionKey) {
                status[providerName] = {
                    status: 'set',
                    source: 'session',
                    masked: this.maskApiKey(sessionKey),
                };
            }
            else if (persistentKey) {
                status[providerName] = {
                    status: 'set',
                    source: 'stored',
                    masked: this.maskApiKey(persistentKey),
                };
            }
            else if (envKey) {
                status[providerName] = {
                    status: 'set',
                    source: 'env',
                    masked: this.maskApiKey(envKey),
                };
            }
            else {
                status[providerName] = {
                    status: 'not set',
                    source: 'none',
                    masked: '',
                };
            }
        }
        return status;
    }
    /**
     * Instantiates a LangChain chat model for the active configuration.
     * @throws Error when no model is configured or the credential is
     *         missing (ollama is exempt — it needs only OLLAMA_HOST).
     */
    createLLM() {
        if (!this.currentLLMConfig) {
            throw new Error('No LLM configured. Use /model command to select a provider and model.');
        }
        const config = this.currentLLMConfig;
        const providerInfo = PROVIDERS[config.provider];
        if (!providerInfo) {
            throw new Error(`Unsupported provider: ${config.provider}`);
        }
        const apiKey = this.getApiKey(providerInfo.envVar);
        if (!apiKey && providerInfo.envVar !== 'OLLAMA_HOST') {
            throw new Error(`${providerInfo.envVar} is required for ${config.provider} models. Use /setkey ${config.provider} <your-key>`);
        }
        const llm = providerInfo.factory(apiKey, config);
        // Applied post-construction so every factory stays a two-arg builder.
        llm.temperature = config.temperature ?? 0.7;
        llm.maxTokens = config.maxTokens;
        return llm;
    }
    /**
     * Handles the /model command to select a provider and model.
     * @param args - Array of arguments where args[0] is provider and args[1] is model
     * @returns A CommandResult with success/error status and prompts for API key if needed
     */
    handleModelCommand(args) {
        if (args.length === 0) {
            return {
                type: 'info',
                message: 'Usage: /model <provider> <model>\n\nUse /models to see all available options.',
            };
        }
        const provider = args[0];
        let model = args[1];
        if (args.length < 2) {
            // Only a provider was given: fall back to its default model.
            // (FIX: no longer mutates the caller's args array via push.)
            const providerInfo = PROVIDERS[provider];
            if (!providerInfo) {
                return {
                    type: 'error',
                    message: `Unknown provider: ${provider}. Use /models to see available providers.`,
                };
            }
            model = providerInfo.defaultModel;
        }
        if (!provider || !model) {
            return {
                type: 'error',
                message: 'Both provider and model are required',
            };
        }
        if (!Object.keys(PROVIDERS).includes(provider)) {
            return {
                type: 'error',
                message: `Unknown provider: ${provider}\nAvailable providers: ${Object.keys(PROVIDERS).join(', ')}`,
            };
        }
        const result = this.setModel(provider, model);
        if (!result.success) {
            if (result.requiresApiKey) {
                return {
                    type: 'prompt_key',
                    message: `Please enter your ${provider.toUpperCase()} API key:`,
                    data: {
                        provider,
                        model,
                        envVar: result.envVar,
                    },
                };
            }
            return {
                type: 'error',
                message: result.message,
            };
        }
        return {
            type: 'success',
            message: `āœ… ${result.message}`,
            data: { llmConfig: this.getCurrentConfig() },
        };
    }
    /**
     * Handles the /models command to list available models.
     * @param args - Optional array with provider name to filter models
     * @returns A CommandResult with the list of available models
     */
    handleListModelsCommand(_args) {
        const currentConfig = this.getCurrentConfig();
        let modelList = 'šŸ“‹ Available models by provider:\n\n';
        for (const [provider, info] of Object.entries(PROVIDERS)) {
            const current = provider === currentConfig?.provider ? ' ← current provider' : '';
            modelList += `šŸ”ø ${provider}${current}:\n`;
            const model = info.defaultModel;
            const currentModel = current && model === currentConfig?.model ? ' ← current model' : '';
            modelList += `  • ${model} (default)${currentModel}\n`;
            modelList += '\n';
        }
        modelList += `\nNote: This list shows default models. Most providers support many more models that can be used with the /model command.\nDon't see your model/provider? Submit a PR to add it at https://github.com/mcp-use/mcp-use-cli/`;
        return {
            type: 'info',
            message: modelList.trim(),
        };
    }
    /**
     * Handles the /config command to adjust temperature and max tokens.
     * @param args - Array where args[0] is setting name and args[1] is value
     * @returns A CommandResult with success/error status
     */
    handleConfigCommand(args) {
        if (args.length < 2) {
            return {
                type: 'error',
                message: 'Usage: /config <setting> <value>\nAvailable settings: temp, tokens',
            };
        }
        const setting = args[0];
        const value = args[1];
        if (!value) {
            return {
                type: 'error',
                message: 'Value is required',
            };
        }
        if (!this.getCurrentConfig()) {
            return {
                type: 'error',
                message: 'No model configured. Use /model to select a provider and model first.',
            };
        }
        // Case bodies are braced so the const declarations are properly
        // scoped per case (previously they leaked across the whole switch).
        switch (setting) {
            case 'temp':
            case 'temperature': {
                const temp = parseFloat(value);
                if (isNaN(temp)) {
                    return {
                        type: 'error',
                        message: 'Temperature must be a number',
                    };
                }
                const tempResult = this.setTemperature(temp);
                if (!tempResult.success) {
                    return {
                        type: 'error',
                        message: tempResult.message,
                    };
                }
                return {
                    type: 'success',
                    message: `āœ… ${tempResult.message}`,
                    data: { llmConfig: this.getCurrentConfig() },
                };
            }
            case 'tokens':
            case 'max-tokens': {
                const tokens = parseInt(value, 10);
                if (isNaN(tokens)) {
                    return {
                        type: 'error',
                        message: 'Max tokens must be a number',
                    };
                }
                const tokensResult = this.setMaxTokens(tokens);
                if (!tokensResult.success) {
                    return {
                        type: 'error',
                        message: tokensResult.message,
                    };
                }
                return {
                    type: 'success',
                    message: `āœ… ${tokensResult.message}`,
                    data: { llmConfig: this.getCurrentConfig() },
                };
            }
            default:
                return {
                    type: 'error',
                    message: `Unknown setting: ${setting}\nAvailable settings: temp, tokens`,
                };
        }
    }
    /**
     * Handles the /setkey command to manually set API keys.
     * @param args - Array where args[0] is provider and args[1] is API key
     * @returns A CommandResult with success/error status
     */
    handleSetKeyCommand(args) {
        if (args.length < 2) {
            return {
                type: 'error',
                message: 'Usage: /setkey <provider> <api_key>\n\nSupported providers: openai, anthropic, google, mistral\n\nExample:\n/setkey openai sk-1234567890abcdef...',
            };
        }
        const provider = args[0]?.toLowerCase();
        const apiKey = args[1];
        if (!provider || !apiKey) {
            return {
                type: 'error',
                message: 'Both provider and API key are required',
            };
        }
        const validProviders = Object.keys(PROVIDERS);
        if (!validProviders.includes(provider)) {
            return {
                type: 'error',
                message: `Invalid provider: ${provider}\nSupported providers: ${validProviders.join(', ')}`,
            };
        }
        // Auto-select the provider only when nothing is configured yet.
        const shouldAutoSelect = !this.getCurrentConfig();
        const result = this.setApiKey(provider, apiKey, shouldAutoSelect);
        if (!result.success) {
            return {
                type: 'error',
                message: result.message,
            };
        }
        const maskedKey = this.maskApiKey(apiKey);
        let message = `āœ… ${provider} API key set (${maskedKey})`;
        if (result.autoSelected) {
            message += `\nšŸ¤– Auto-selected ${result.autoSelected.provider}/${result.autoSelected.model}`;
        }
        return {
            type: 'success',
            message,
            data: result.autoSelected ? { llmConfig: result.autoSelected } : undefined,
        };
    }
    /**
     * Handles the /clearkeys command to clear all stored API keys.
     * @returns A CommandResult indicating success
     */
    handleClearKeysCommand() {
        this.clearApiKeys();
        return {
            type: 'success',
            message: 'āœ… All API keys cleared from storage.\n\nUse /setkey or /model to set up a new provider.',
            data: { llmConfig: null },
        };
    }
    /**
     * Handles API key input when prompted during model selection.
     * @param apiKey - The API key entered by the user
     * @param provider - The provider for the API key
     * @param model - The model to select after setting the key
     * @returns A CommandResult with success/error status
     */
    handleApiKeyInput(apiKey, provider, model) {
        const keyResult = this.setApiKey(provider, apiKey, false);
        if (!keyResult.success) {
            return {
                type: 'error',
                message: keyResult.message,
            };
        }
        const modelResult = this.setModel(provider, model);
        if (!modelResult.success) {
            return {
                type: 'error',
                message: modelResult.message,
            };
        }
        const maskedKey = this.maskApiKey(apiKey);
        return {
            type: 'success',
            message: `āœ… ${provider} API key set (${maskedKey})\nšŸ¤– Switched to ${provider}/${model}`,
            data: { llmConfig: this.getCurrentConfig() },
        };
    }
}
@@ -0,0 +1,69 @@
1
+ import type { CommandResult } from '../types.js';
2
/** Launch configuration for a single MCP server process. */
export interface MCPServerConfig {
    command: string;
    args?: string[];
    env?: Record<string, string>;
}
/** Result envelope returned by server-configuration operations. */
export interface MCPServerConfigResult {
    success: boolean;
    message: string;
    data?: any;
}
/**
 * Stores and manages MCP server configurations and implements the
 * related slash commands (/server, /servers, /test-server).
 */
export declare class MCPConfigService {
    private persistentConfig;
    constructor();
    /** Returns all configured servers keyed by name. */
    getConfiguredServers(): Record<string, MCPServerConfig>;
    /** True when a server with the given name exists in the configuration. */
    isServerConfigured(serverName: string): boolean;
    /** Adds a server parsed from a raw JSON configuration string. */
    addServerFromJSON(jsonConfig: string): MCPServerConfigResult;
    /** Adds a named server configuration. */
    addServer(name: string, config: MCPServerConfig): MCPServerConfigResult;
    /**
     * Gets the configuration for a specific server.
     * @param serverName - Name of the server
     * @returns The server configuration or null if not found
     */
    getServerConfig(serverName: string): MCPServerConfig | null;
    /**
     * Gets all configured servers with their configurations.
     * Note: Connection status should be determined by the caller using AgentService.
     * @returns Array of server configurations
     */
    getAllServers(): Array<{
        name: string;
        config: MCPServerConfig;
    }>;
    /** Builds a command string for testing the named server's configuration. */
    getServerTestCommand(serverName: string): {
        success: boolean;
        command?: string;
        message?: string;
    };
    /** Validates a proposed server name. */
    validateServerName(name: string): {
        valid: boolean;
        message?: string;
    };
    /** Parses environment-variable assignments from a single input string. */
    parseEnvironmentVariables(envString: string): Record<string, string>;
    /**
     * Handles the /server command and its subcommands.
     * @param args - Array of arguments where args[0] is the subcommand
     * @returns A CommandResult with the appropriate response
     */
    handleServerCommand(args: string[]): CommandResult;
    /**
     * Handles the /servers command to list all servers and their connection status.
     * @returns A CommandResult with the server list
     */
    handleListServersCommand(): CommandResult;
    /**
     * Handles the /test-server command to test server configuration.
     * @param args - Array where args[0] is the server name
     * @returns A CommandResult with test information
     */
    handleTestServerCommand(args: string[]): CommandResult;
    /**
     * Handles server configuration input during the interactive setup flow.
     * @param input - User input string
     * @param step - Current step in the configuration flow
     * @param serverConfig - Partial server configuration being built
     * @returns A CommandResult to continue or complete the flow
     */
    handleServerConfigInput(input: string, step: string, serverConfig?: any): CommandResult;
}