converse-mcp-server 2.3.1 → 2.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42):
  1. package/README.md +771 -738
  2. package/docs/API.md +10 -1
  3. package/docs/PROVIDERS.md +8 -4
  4. package/package.json +12 -12
  5. package/src/async/asyncJobStore.js +82 -52
  6. package/src/async/eventBus.js +25 -20
  7. package/src/async/fileCache.js +121 -40
  8. package/src/async/jobRunner.js +65 -39
  9. package/src/async/providerStreamNormalizer.js +203 -117
  10. package/src/config.js +374 -102
  11. package/src/continuationStore.js +32 -24
  12. package/src/index.js +45 -25
  13. package/src/prompts/helpPrompt.js +328 -305
  14. package/src/providers/anthropic.js +303 -119
  15. package/src/providers/codex.js +103 -45
  16. package/src/providers/deepseek.js +24 -8
  17. package/src/providers/google.js +337 -93
  18. package/src/providers/index.js +1 -1
  19. package/src/providers/interface.js +16 -11
  20. package/src/providers/mistral.js +179 -69
  21. package/src/providers/openai-compatible.js +231 -94
  22. package/src/providers/openai.js +1094 -914
  23. package/src/providers/openrouter-endpoints-client.js +220 -216
  24. package/src/providers/openrouter.js +426 -381
  25. package/src/providers/xai.js +153 -56
  26. package/src/resources/helpResource.js +70 -67
  27. package/src/router.js +95 -67
  28. package/src/services/summarizationService.js +51 -24
  29. package/src/systemPrompts.js +89 -89
  30. package/src/tools/cancelJob.js +31 -19
  31. package/src/tools/chat.js +997 -883
  32. package/src/tools/checkStatus.js +86 -65
  33. package/src/tools/consensus.js +400 -234
  34. package/src/tools/index.js +39 -16
  35. package/src/transport/httpTransport.js +82 -55
  36. package/src/utils/contextProcessor.js +54 -37
  37. package/src/utils/errorHandler.js +95 -45
  38. package/src/utils/fileValidator.js +107 -98
  39. package/src/utils/formatStatus.js +122 -64
  40. package/src/utils/logger.js +459 -449
  41. package/src/utils/pathUtils.js +2 -2
  42. package/src/utils/tokenLimiter.js +216 -216
@@ -20,8 +20,16 @@ const SUPPORTED_MODELS = {
20
20
  supportsTemperature: true,
21
21
  supportsWebSearch: true,
22
22
  timeout: 300000, // 5 minutes
23
- description: 'GROK-4 (256K context) - Latest advanced model from X.AI with image support and live search',
24
- aliases: ['grok', 'grok4', 'grok-4', 'grok-4-latest', 'grok 4', 'grok 4 latest']
23
+ description:
24
+ 'GROK-4 (256K context) - Latest advanced model from X.AI with image support and live search',
25
+ aliases: [
26
+ 'grok',
27
+ 'grok4',
28
+ 'grok-4',
29
+ 'grok-4-latest',
30
+ 'grok 4',
31
+ 'grok 4 latest',
32
+ ],
25
33
  },
26
34
  'grok-4-fast-reasoning': {
27
35
  modelName: 'grok-4-fast-reasoning',
@@ -36,8 +44,14 @@ const SUPPORTED_MODELS = {
36
44
  supportsFunctionCalling: true,
37
45
  supportsStructuredOutputs: true,
38
46
  timeout: 300000, // 5 minutes
39
- description: 'GROK-4 Fast Reasoning (2M context) - Cost-efficient reasoning model with function calling and structured outputs',
40
- aliases: ['grok-4-fast', 'grok-4-fast-reasoning-latest', 'grok 4 fast', 'grok 4 fast reasoning']
47
+ description:
48
+ 'GROK-4 Fast Reasoning (2M context) - Cost-efficient reasoning model with function calling and structured outputs',
49
+ aliases: [
50
+ 'grok-4-fast',
51
+ 'grok-4-fast-reasoning-latest',
52
+ 'grok 4 fast',
53
+ 'grok 4 fast reasoning',
54
+ ],
41
55
  },
42
56
  'grok-4-fast-non-reasoning': {
43
57
  modelName: 'grok-4-fast-non-reasoning',
@@ -52,8 +66,9 @@ const SUPPORTED_MODELS = {
52
66
  supportsFunctionCalling: true,
53
67
  supportsStructuredOutputs: true,
54
68
  timeout: 300000, // 5 minutes
55
- description: 'GROK-4 Fast Non-Reasoning (2M context) - Fast, cost-efficient model without reasoning for quick responses',
56
- aliases: ['grok-4-fast-non-reasoning-latest', 'grok 4 fast non-reasoning']
69
+ description:
70
+ 'GROK-4 Fast Non-Reasoning (2M context) - Fast, cost-efficient model without reasoning for quick responses',
71
+ aliases: ['grok-4-fast-non-reasoning-latest', 'grok 4 fast non-reasoning'],
57
72
  },
58
73
  'grok-code-fast-1': {
59
74
  modelName: 'grok-code-fast-1',
@@ -65,8 +80,14 @@ const SUPPORTED_MODELS = {
65
80
  supportsTemperature: true,
66
81
  supportsWebSearch: false,
67
82
  timeout: 300000, // 5 minutes
68
- description: 'GROK Code Fast 1 (256K context) - Speedy and economical reasoning model that excels at agentic coding',
69
- aliases: ['grok-code-fast', 'grok-code-fast-1-0825', 'grok code fast', 'grok code fast 1']
83
+ description:
84
+ 'GROK Code Fast 1 (256K context) - Speedy and economical reasoning model that excels at agentic coding',
85
+ aliases: [
86
+ 'grok-code-fast',
87
+ 'grok-code-fast-1-0825',
88
+ 'grok code fast',
89
+ 'grok code fast 1',
90
+ ],
70
91
  },
71
92
  };
72
93
 
@@ -132,17 +153,26 @@ function convertMessages(messages) {
132
153
 
133
154
  return messages.map((msg, index) => {
134
155
  if (!msg || typeof msg !== 'object') {
135
- throw new XAIProviderError(`Message at index ${index} must be an object`, 'INVALID_MESSAGE');
156
+ throw new XAIProviderError(
157
+ `Message at index ${index} must be an object`,
158
+ 'INVALID_MESSAGE',
159
+ );
136
160
  }
137
161
 
138
162
  const { role, content } = msg;
139
163
 
140
164
  if (!role || !['system', 'user', 'assistant'].includes(role)) {
141
- throw new XAIProviderError(`Invalid role "${role}" at message index ${index}`, 'INVALID_ROLE');
165
+ throw new XAIProviderError(
166
+ `Invalid role "${role}" at message index ${index}`,
167
+ 'INVALID_ROLE',
168
+ );
142
169
  }
143
170
 
144
171
  if (!content) {
145
- throw new XAIProviderError(`Message content is required at index ${index}`, 'MISSING_CONTENT');
172
+ throw new XAIProviderError(
173
+ `Message content is required at index ${index}`,
174
+ 'MISSING_CONTENT',
175
+ );
146
176
  }
147
177
 
148
178
  // Handle complex content structure (array with text and images)
@@ -153,7 +183,7 @@ function convertMessages(messages) {
153
183
  if (item.type === 'text') {
154
184
  convertedContent.push({
155
185
  type: 'text',
156
- text: item.text
186
+ text: item.text,
157
187
  });
158
188
  } else if (item.type === 'image' && item.source) {
159
189
  // Convert Anthropic/Claude format to OpenAI format for XAI
@@ -161,10 +191,12 @@ function convertMessages(messages) {
161
191
  type: 'image_url',
162
192
  image_url: {
163
193
  url: `data:${item.source.media_type};base64,${item.source.data}`,
164
- detail: 'high'
165
- }
194
+ detail: 'high',
195
+ },
166
196
  });
167
- debugLog(`[XAI] Converting image: ${item.source.media_type}, data length: ${item.source.data.length}`);
197
+ debugLog(
198
+ `[XAI] Converting image: ${item.source.media_type}, data length: ${item.source.data.length}`,
199
+ );
168
200
  }
169
201
  }
170
202
 
@@ -201,11 +233,17 @@ export const xaiProvider = {
201
233
 
202
234
  // Validate API key
203
235
  if (!config?.apiKeys?.xai) {
204
- throw new XAIProviderError('XAI API key not configured', 'MISSING_API_KEY');
236
+ throw new XAIProviderError(
237
+ 'XAI API key not configured',
238
+ 'MISSING_API_KEY',
239
+ );
205
240
  }
206
241
 
207
242
  if (!validateApiKey(config.apiKeys.xai)) {
208
- throw new XAIProviderError('Invalid XAI API key format', 'INVALID_API_KEY');
243
+ throw new XAIProviderError(
244
+ 'Invalid XAI API key format',
245
+ 'INVALID_API_KEY',
246
+ );
209
247
  }
210
248
 
211
249
  // Get base URL from config or use default
@@ -225,14 +263,15 @@ export const xaiProvider = {
225
263
  const xaiMessages = convertMessages(messages);
226
264
 
227
265
  // Filter out unsupported parameters for XAI/Grok models
228
- const { reasoning_effort: _unused_reasoning_effort, ...supportedOptions } = otherOptions;
266
+ const { reasoning_effort: _unused_reasoning_effort, ...supportedOptions } =
267
+ otherOptions;
229
268
 
230
269
  // Build request payload
231
270
  const requestPayload = {
232
271
  model: resolvedModel,
233
272
  messages: xaiMessages,
234
273
  stream,
235
- ...supportedOptions
274
+ ...supportedOptions,
236
275
  };
237
276
 
238
277
  // Add temperature (all Grok models support temperature)
@@ -242,13 +281,16 @@ export const xaiProvider = {
242
281
 
243
282
  // Add max tokens if specified
244
283
  if (maxTokens) {
245
- requestPayload.max_tokens = Math.min(maxTokens, modelConfig.maxOutputTokens || 256000);
284
+ requestPayload.max_tokens = Math.min(
285
+ maxTokens,
286
+ modelConfig.maxOutputTokens || 256000,
287
+ );
246
288
  }
247
289
 
248
290
  // Add web search parameters if requested and model supports it
249
291
  if (use_websearch && modelConfig.supportsWebSearch) {
250
292
  requestPayload.search_parameters = {
251
- mode: 'auto' // Let the model decide when to use web search
293
+ mode: 'auto', // Let the model decide when to use web search
252
294
  };
253
295
  }
254
296
 
@@ -259,20 +301,31 @@ export const xaiProvider = {
259
301
 
260
302
  // If streaming is requested and model doesn't support it, fall back to non-streaming
261
303
  if (stream && modelConfig.supportsStreaming === false) {
262
- debugLog(`[XAI] Model ${resolvedModel} doesn't support streaming, falling back to non-streaming mode`);
304
+ debugLog(
305
+ `[XAI] Model ${resolvedModel} doesn't support streaming, falling back to non-streaming mode`,
306
+ );
263
307
  requestPayload.stream = false;
264
308
  }
265
309
 
266
310
  // Handle streaming requests
267
311
  if (stream && requestPayload.stream !== false) {
268
- return this._createStreamingGenerator(openai, requestPayload, resolvedModel, modelConfig, use_websearch, signal);
312
+ return this._createStreamingGenerator(
313
+ openai,
314
+ requestPayload,
315
+ resolvedModel,
316
+ modelConfig,
317
+ use_websearch,
318
+ signal,
319
+ );
269
320
  }
270
321
 
271
322
  // Note: XAI/Grok models don't currently support reasoning_effort parameter
272
323
  // We silently ignore it for API consistency (no need to log warnings in tests)
273
324
 
274
325
  try {
275
- debugLog(`[XAI] Calling ${resolvedModel} with ${xaiMessages.length} messages${use_websearch && modelConfig.supportsWebSearch ? ' (with live search)' : ''}`);
326
+ debugLog(
327
+ `[XAI] Calling ${resolvedModel} with ${xaiMessages.length} messages${use_websearch && modelConfig.supportsWebSearch ? ' (with live search)' : ''}`,
328
+ );
276
329
 
277
330
  // Check if already aborted before making request
278
331
  if (signal?.aborted) {
@@ -294,12 +347,18 @@ export const xaiProvider = {
294
347
  // Extract response data
295
348
  const choice = response.choices[0];
296
349
  if (!choice) {
297
- throw new XAIProviderError('No response choice received from XAI', 'NO_RESPONSE_CHOICE');
350
+ throw new XAIProviderError(
351
+ 'No response choice received from XAI',
352
+ 'NO_RESPONSE_CHOICE',
353
+ );
298
354
  }
299
355
 
300
356
  const content = choice.message?.content;
301
357
  if (!content) {
302
- throw new XAIProviderError('No content in response from XAI', 'NO_RESPONSE_CONTENT');
358
+ throw new XAIProviderError(
359
+ 'No content in response from XAI',
360
+ 'NO_RESPONSE_CONTENT',
361
+ );
303
362
  }
304
363
 
305
364
  // Extract usage information
@@ -315,38 +374,61 @@ export const xaiProvider = {
315
374
  usage: {
316
375
  input_tokens: usage.prompt_tokens || 0,
317
376
  output_tokens: usage.completion_tokens || 0,
318
- total_tokens: usage.total_tokens || 0
377
+ total_tokens: usage.total_tokens || 0,
319
378
  },
320
379
  response_time_ms: responseTime,
321
380
  finish_reason: choice.finish_reason,
322
381
  provider: 'xai',
323
- web_search_used: use_websearch && modelConfig.supportsWebSearch
324
- }
382
+ web_search_used: use_websearch && modelConfig.supportsWebSearch,
383
+ },
325
384
  };
326
-
327
385
  } catch (error) {
328
386
  debugError('[XAI] Error during API call:', error);
329
387
 
330
388
  // Handle specific XAI/OpenAI compatible errors
331
389
  if (error.code === 'insufficient_quota') {
332
- throw new XAIProviderError('XAI API quota exceeded', 'QUOTA_EXCEEDED', error);
390
+ throw new XAIProviderError(
391
+ 'XAI API quota exceeded',
392
+ 'QUOTA_EXCEEDED',
393
+ error,
394
+ );
333
395
  } else if (error.code === 'invalid_api_key') {
334
- throw new XAIProviderError('Invalid XAI API key', 'INVALID_API_KEY', error);
396
+ throw new XAIProviderError(
397
+ 'Invalid XAI API key',
398
+ 'INVALID_API_KEY',
399
+ error,
400
+ );
335
401
  } else if (error.code === 'model_not_found') {
336
- throw new XAIProviderError(`Model ${resolvedModel} not found`, 'MODEL_NOT_FOUND', error);
402
+ throw new XAIProviderError(
403
+ `Model ${resolvedModel} not found`,
404
+ 'MODEL_NOT_FOUND',
405
+ error,
406
+ );
337
407
  } else if (error.code === 'context_length_exceeded') {
338
- throw new XAIProviderError('Context length exceeded for model', 'CONTEXT_LENGTH_EXCEEDED', error);
408
+ throw new XAIProviderError(
409
+ 'Context length exceeded for model',
410
+ 'CONTEXT_LENGTH_EXCEEDED',
411
+ error,
412
+ );
339
413
  } else if (error.type === 'invalid_request_error') {
340
- throw new XAIProviderError(`Invalid request: ${error.message}`, 'INVALID_REQUEST', error);
414
+ throw new XAIProviderError(
415
+ `Invalid request: ${error.message}`,
416
+ 'INVALID_REQUEST',
417
+ error,
418
+ );
341
419
  } else if (error.type === 'rate_limit_error') {
342
- throw new XAIProviderError('XAI rate limit exceeded', 'RATE_LIMIT_EXCEEDED', error);
420
+ throw new XAIProviderError(
421
+ 'XAI rate limit exceeded',
422
+ 'RATE_LIMIT_EXCEEDED',
423
+ error,
424
+ );
343
425
  }
344
426
 
345
427
  // Generic error handling
346
428
  throw new XAIProviderError(
347
429
  `XAI API error: ${error.message || 'Unknown error'}`,
348
430
  'API_ERROR',
349
- error
431
+ error,
350
432
  );
351
433
  }
352
434
  },
@@ -361,10 +443,22 @@ export const xaiProvider = {
361
443
  * @param {boolean} use_websearch - Whether web search is enabled
362
444
  * @returns {AsyncGenerator} - Streaming generator yielding events
363
445
  */
364
- async *_createStreamingGenerator(openai, requestPayload, resolvedModel, modelConfig, use_websearch, signal) {
365
- const searchInfo = (use_websearch && modelConfig.supportsWebSearch) ? ' (with live search)' : '';
366
-
367
- debugLog(`[XAI] Starting streaming for ${resolvedModel} with ${requestPayload.messages?.length} messages${searchInfo}`);
446
+ async *_createStreamingGenerator(
447
+ openai,
448
+ requestPayload,
449
+ resolvedModel,
450
+ modelConfig,
451
+ use_websearch,
452
+ signal,
453
+ ) {
454
+ const searchInfo =
455
+ use_websearch && modelConfig.supportsWebSearch
456
+ ? ' (with live search)'
457
+ : '';
458
+
459
+ debugLog(
460
+ `[XAI] Starting streaming for ${resolvedModel} with ${requestPayload.messages?.length} messages${searchInfo}`,
461
+ );
368
462
 
369
463
  const startTime = Date.now();
370
464
  let totalContent = '';
@@ -385,7 +479,7 @@ export const xaiProvider = {
385
479
  type: 'start',
386
480
  timestamp: new Date().toISOString(),
387
481
  model: resolvedModel,
388
- provider: 'xai'
482
+ provider: 'xai',
389
483
  };
390
484
 
391
485
  // Create stream using OpenAI SDK with XAI base URL and abort signal support
@@ -400,7 +494,9 @@ export const xaiProvider = {
400
494
  try {
401
495
  // Check for cancellation during stream processing
402
496
  if (signal?.aborted) {
403
- debugLog(`[XAI] Stream aborted during processing: ${signal.reason || 'Cancelled'}`);
497
+ debugLog(
498
+ `[XAI] Stream aborted during processing: ${signal.reason || 'Cancelled'}`,
499
+ );
404
500
  break;
405
501
  }
406
502
  // Handle Chat Completions API streaming format (XAI uses OpenAI-compatible format)
@@ -412,7 +508,7 @@ export const xaiProvider = {
412
508
  yield {
413
509
  type: 'delta',
414
510
  content,
415
- timestamp: new Date().toISOString()
511
+ timestamp: new Date().toISOString(),
416
512
  };
417
513
  }
418
514
 
@@ -446,15 +542,17 @@ export const xaiProvider = {
446
542
  error: {
447
543
  message: `Chunk processing error: ${chunkError.message}`,
448
544
  code: 'CHUNK_PROCESSING_ERROR',
449
- recoverable: true
545
+ recoverable: true,
450
546
  },
451
- timestamp: new Date().toISOString()
547
+ timestamp: new Date().toISOString(),
452
548
  };
453
549
  }
454
550
  }
455
551
 
456
552
  const responseTime = Date.now() - startTime;
457
- debugLog(`[XAI] Streaming completed in ${responseTime}ms${searchSourcesUsed > 0 ? ` (used ${searchSourcesUsed} search sources)` : ''}`);
553
+ debugLog(
554
+ `[XAI] Streaming completed in ${responseTime}ms${searchSourcesUsed > 0 ? ` (used ${searchSourcesUsed} search sources)` : ''}`,
555
+ );
458
556
 
459
557
  // Yield usage information if available
460
558
  if (lastUsage) {
@@ -463,9 +561,9 @@ export const xaiProvider = {
463
561
  usage: {
464
562
  input_tokens: lastUsage.prompt_tokens || 0,
465
563
  output_tokens: lastUsage.completion_tokens || 0,
466
- total_tokens: lastUsage.total_tokens || 0
564
+ total_tokens: lastUsage.total_tokens || 0,
467
565
  },
468
- timestamp: new Date().toISOString()
566
+ timestamp: new Date().toISOString(),
469
567
  };
470
568
 
471
569
  // Add search-specific usage information
@@ -486,12 +584,12 @@ export const xaiProvider = {
486
584
  usage: {
487
585
  input_tokens: lastUsage?.prompt_tokens || 0,
488
586
  output_tokens: lastUsage?.completion_tokens || 0,
489
- total_tokens: lastUsage?.total_tokens || 0
587
+ total_tokens: lastUsage?.total_tokens || 0,
490
588
  },
491
589
  response_time_ms: responseTime,
492
590
  finish_reason: finishReason || 'stop',
493
591
  provider: 'xai',
494
- web_search_used: webSearchUsed
592
+ web_search_used: webSearchUsed,
495
593
  };
496
594
 
497
595
  // Add search-specific metadata
@@ -510,9 +608,8 @@ export const xaiProvider = {
510
608
  content: totalContent,
511
609
  stop_reason: finishReason || 'stop',
512
610
  metadata,
513
- timestamp: new Date().toISOString()
611
+ timestamp: new Date().toISOString(),
514
612
  };
515
-
516
613
  } catch (error) {
517
614
  debugError('[XAI] Streaming error:', error);
518
615
 
@@ -549,9 +646,9 @@ export const xaiProvider = {
549
646
  message: errorMessage,
550
647
  code: errorCode,
551
648
  recoverable,
552
- originalError: error.message
649
+ originalError: error.message,
553
650
  },
554
- timestamp: new Date().toISOString()
651
+ timestamp: new Date().toISOString(),
555
652
  };
556
653
 
557
654
  // Re-throw to maintain error propagation
@@ -593,5 +690,5 @@ export const xaiProvider = {
593
690
  getModelConfig(modelName) {
594
691
  const resolved = resolveModelName(modelName);
595
692
  return SUPPORTED_MODELS[resolved] || null;
596
- }
693
+ },
597
694
  };
@@ -1,67 +1,70 @@
1
- /**
2
- * Help Resource Handler
3
- *
4
- * Exposes comprehensive documentation and server information as an MCP resource.
5
- * Provides the same content as the help prompt plus version information.
6
- */
7
-
8
- import { generateHelpContent } from '../prompts/helpPrompt.js';
9
- import { readFileSync } from 'fs';
10
- import { fileURLToPath } from 'url';
11
- import { dirname, join } from 'path';
12
-
13
- const __filename = fileURLToPath(import.meta.url);
14
- const __dirname = dirname(__filename);
15
-
16
- /**
17
- * Get the current server version from package.json
18
- * @returns {string} Server version
19
- */
20
- function getServerVersion() {
21
- try {
22
- const packagePath = join(__dirname, '../../package.json');
23
- const packageJson = JSON.parse(readFileSync(packagePath, 'utf8'));
24
- return packageJson.version || 'unknown';
25
- } catch (error) {
26
- return 'unknown';
27
- }
28
- }
29
-
30
- /**
31
- * Resource metadata for the help documentation
32
- */
33
- export const helpResourceMetadata = {
34
- uri: 'converse://help',
35
- name: 'Help Documentation',
36
- description: 'Comprehensive guide for the Converse MCP Server including all tools, parameters, providers, and models',
37
- mimeType: 'text/plain'
38
- };
39
-
40
- /**
41
- * Handler for reading the help resource
42
- * @param {object} config - Configuration object (optional)
43
- * @returns {object} Resource content
44
- */
45
- export async function helpResourceHandler(config = null) {
46
- const helpContent = generateHelpContent(config);
47
- const version = getServerVersion();
48
-
49
- // Add version information to the help content
50
- const contentWithVersion = `${helpContent}\n\n## Server Information\n\n- **Version**: ${version}\n- **Protocol**: MCP (Model Context Protocol)\n- **Server Type**: HTTP Transport\n- **Default Port**: 3157\n`;
51
-
52
- return {
53
- contents: [{
54
- uri: helpResourceMetadata.uri,
55
- mimeType: helpResourceMetadata.mimeType,
56
- text: contentWithVersion
57
- }]
58
- };
59
- }
60
-
61
- /**
62
- * Get list of all available resources
63
- * @returns {array} List of resource metadata
64
- */
65
- export function listResources() {
66
- return [helpResourceMetadata];
67
- }
1
+ /**
2
+ * Help Resource Handler
3
+ *
4
+ * Exposes comprehensive documentation and server information as an MCP resource.
5
+ * Provides the same content as the help prompt plus version information.
6
+ */
7
+
8
+ import { generateHelpContent } from '../prompts/helpPrompt.js';
9
+ import { readFileSync } from 'fs';
10
+ import { fileURLToPath } from 'url';
11
+ import { dirname, join } from 'path';
12
+
13
+ const __filename = fileURLToPath(import.meta.url);
14
+ const __dirname = dirname(__filename);
15
+
16
+ /**
17
+ * Get the current server version from package.json
18
+ * @returns {string} Server version
19
+ */
20
+ function getServerVersion() {
21
+ try {
22
+ const packagePath = join(__dirname, '../../package.json');
23
+ const packageJson = JSON.parse(readFileSync(packagePath, 'utf8'));
24
+ return packageJson.version || 'unknown';
25
+ } catch (error) {
26
+ return 'unknown';
27
+ }
28
+ }
29
+
30
+ /**
31
+ * Resource metadata for the help documentation
32
+ */
33
+ export const helpResourceMetadata = {
34
+ uri: 'converse://help',
35
+ name: 'Help Documentation',
36
+ description:
37
+ 'Comprehensive guide for the Converse MCP Server including all tools, parameters, providers, and models',
38
+ mimeType: 'text/plain',
39
+ };
40
+
41
+ /**
42
+ * Handler for reading the help resource
43
+ * @param {object} config - Configuration object (optional)
44
+ * @returns {object} Resource content
45
+ */
46
+ export async function helpResourceHandler(config = null) {
47
+ const helpContent = generateHelpContent(config);
48
+ const version = getServerVersion();
49
+
50
+ // Add version information to the help content
51
+ const contentWithVersion = `${helpContent}\n\n## Server Information\n\n- **Version**: ${version}\n- **Protocol**: MCP (Model Context Protocol)\n- **Server Type**: HTTP Transport\n- **Default Port**: 3157\n`;
52
+
53
+ return {
54
+ contents: [
55
+ {
56
+ uri: helpResourceMetadata.uri,
57
+ mimeType: helpResourceMetadata.mimeType,
58
+ text: contentWithVersion,
59
+ },
60
+ ],
61
+ };
62
+ }
63
+
64
+ /**
65
+ * Get list of all available resources
66
+ * @returns {array} List of resource metadata
67
+ */
68
+ export function listResources() {
69
+ return [helpResourceMetadata];
70
+ }