converse-mcp-server 1.5.4 → 1.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "converse-mcp-server",
-  "version": "1.5.4",
+  "version": "1.5.5",
   "description": "Converse MCP Server - Converse with other LLMs with chat and consensus tools",
   "type": "module",
   "main": "src/index.js",
@@ -364,7 +364,9 @@ export const anthropicProvider = {
       apiKey: config.apiKeys.anthropic,
       defaultHeaders: {
         'anthropic-beta': betaHeaders.join(',')
-      }
+      },
+      // Increase timeout to 20 minutes for thinking models that may take longer
+      timeout: 20 * 60 * 1000
     });
 
     // Convert messages to Anthropic format (system messages are always cached)
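The functional change in this hunk is the client-level timeout option. Below is a minimal sketch of the resulting client construction, assuming the standard @anthropic-ai/sdk constructor options; the config and betaHeaders values are illustrative stand-ins for the package's surrounding code, not its actual configuration.

import Anthropic from '@anthropic-ai/sdk';

// Illustrative stand-ins for the package's own config plumbing
const config = { apiKeys: { anthropic: process.env.ANTHROPIC_API_KEY } };
const betaHeaders = ['prompt-caching-2024-07-31']; // example beta flag only

const client = new Anthropic({
  apiKey: config.apiKeys.anthropic,
  defaultHeaders: {
    'anthropic-beta': betaHeaders.join(',')
  },
  // Per-client timeout in milliseconds; the SDK aborts requests once it
  // elapses. 20 minutes gives long-running thinking models extra headroom.
  timeout: 20 * 60 * 1000
});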
@@ -383,15 +385,13 @@ export const anthropicProvider = {
       requestPayload.system = systemPrompt;
     }
 
-    // Add max tokens only if explicitly requested
-    // For Claude 4 series models, let the SDK use its defaults (32k for opus, 64k for sonnet)
+    // Set max tokens - API requires this field
     if (maxTokens) {
       requestPayload.max_tokens = Math.min(maxTokens, modelConfig.maxOutputTokens || 8192);
-    } else if (!resolvedModel.includes('claude-opus-4') && !resolvedModel.includes('claude-sonnet-4')) {
-      // For non-4 series models, we still need to set max_tokens
+    } else {
+      // Use model's default max output tokens
       requestPayload.max_tokens = modelConfig.maxOutputTokens || 8192;
     }
-    // For 4 series models without explicit maxTokens, don't set max_tokens - let SDK use defaults
 
     // Add thinking configuration for models that support it
     if (modelConfig.supportsThinking && reasoning_effort) {
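Net effect of this hunk: max_tokens is now always populated, since the API requires the field. A hypothetical helper isolating the new logic (resolveMaxTokens and its parameter names are illustrative, not part of the package's API):

// Clamp a caller-supplied value to the model's ceiling, or fall back to the
// ceiling itself, so max_tokens is always present in the request payload.
function resolveMaxTokens(maxTokens, modelConfig) {
  const ceiling = modelConfig.maxOutputTokens || 8192;
  return maxTokens ? Math.min(maxTokens, ceiling) : ceiling;
}

// resolveMaxTokens(100000, { maxOutputTokens: 64000 }) -> 64000 (clamped)
// resolveMaxTokens(undefined, {})                      -> 8192  (fallback)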
@@ -523,15 +523,6 @@ export const anthropicProvider = {
       debugError(`[Anthropic] Error message:`, error.message);
       debugError(`[Anthropic] Error response:`, error.response);
       throw new AnthropicProviderError(`Context length exceeded for model: ${error.message}`, ErrorCodes.CONTEXT_LENGTH_EXCEEDED, error);
-    } else if (error.message?.includes('Streaming is strongly recommended')) {
-      // This is just a warning from the SDK about long requests
-      debugLog(`[Anthropic] SDK streaming recommendation warning`);
-      debugError(`[Anthropic] Full error object:`, error);
-      // Check if there's an actual error response
-      if (error.response || error.status) {
-        debugError(`[Anthropic] Error response status:`, error.status);
-        debugError(`[Anthropic] Error response data:`, error.response);
-      }
     }
 
     // Generic error handling
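With this branch removed, errors whose message contains the SDK's "Streaming is strongly recommended" text are no longer intercepted and logged; they fall through to the generic handling like any other failure. A self-contained sketch of the resulting control flow; isContextLengthError, its message check, and the simplified error class are hypothetical stand-ins, not the package's actual code.

// Simplified stand-in for the package's error type
class AnthropicProviderError extends Error {}

// Hypothetical predicate; the real check lives in the context lines above
const isContextLengthError = (e) => e.message?.includes('prompt is too long');

function handleProviderError(error) {
  if (isContextLengthError(error)) {
    throw new AnthropicProviderError(`Context length exceeded for model: ${error.message}`);
  }
  // The deleted branch matched error.message?.includes('Streaming is strongly
  // recommended') here and only logged it; such errors now reach the generic
  // path below directly.
  throw error; // generic error handling
}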