converse-mcp-server 1.7.3 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -129,7 +129,9 @@ Get multiple AI models to analyze the same question simultaneously. Each model c
  - **o3**: Strong reasoning (200K context)
  - **o3-mini**: Fast O3 variant (200K context)
  - **o3-pro**: Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE
+ - **o3-deep-research**: Deep research model (200K context) - 30-90 min runtime
  - **o4-mini**: Latest reasoning model (200K context)
+ - **o4-mini-deep-research**: Fast deep research model (200K context) - 15-60 min runtime
  - **gpt-4.1**: Advanced reasoning (1M context)
  - **gpt-4o**: Multimodal flagship (128K context)
  - **gpt-4o-mini**: Fast multimodal (128K context)
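
For orientation, the two new models would typically be reached through the server's chat tool and selected by one of the aliases listed above. A minimal client-side sketch using the MCP SDK follows; the tool name `chat` and the `model`/`prompt` argument names are assumptions inferred from the package description, not confirmed by this diff.

```js
// Hypothetical usage sketch: the tool name ('chat') and argument names
// ('model', 'prompt') are assumptions, not confirmed by this diff.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

const transport = new StdioClientTransport({
  command: 'npx',
  args: ['converse-mcp-server'],
});
const client = new Client({ name: 'example-client', version: '1.0.0' });
await client.connect(transport);

// 'o3-deep-research' is one of the aliases added in this release; the
// server maps it to the dated model ID. Expect a 30-90 minute runtime.
const result = await client.callTool({
  name: 'chat',
  arguments: {
    model: 'o3-deep-research',
    prompt: 'Produce a literature survey on retrieval-augmented generation.',
  },
});
console.log(result.content);
```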
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "converse-mcp-server",
- "version": "1.7.3",
+ "version": "1.8.0",
  "description": "Converse MCP Server - Converse with other LLMs with chat and consensus tools",
  "type": "module",
  "main": "src/index.js",
@@ -95,20 +95,20 @@
  ],
  "dependencies": {
  "@anthropic-ai/sdk": "^0.57.0",
- "@google/genai": "^1.11.0",
+ "@google/genai": "^1.12.0",
  "@mistralai/mistralai": "^1.7.5",
- "@modelcontextprotocol/sdk": "^1.17.0",
+ "@modelcontextprotocol/sdk": "^1.17.1",
  "cors": "^2.8.5",
  "dotenv": "^17.2.1",
  "express": "^5.1.0",
- "openai": "^5.0.0",
+ "openai": "^5.11.0",
  "vite": "^7.0.6"
  },
  "devDependencies": {
  "@vitest/coverage-v8": "^3.2.4",
  "cross-env": "^10.0.0",
  "eslint": "^9.32.0",
- "prettier": "^3.0.0",
+ "prettier": "^3.6.2",
  "rimraf": "^6.0.1",
  "vitest": "^3.2.4"
  }
@@ -107,6 +107,36 @@ const SUPPORTED_MODELS = {
  timeout: 120000,
  description: 'GPT-4o-mini (128K context) - Fast and efficient multimodal model',
  aliases: ['gpt4o-mini', 'gpt 4o mini', '4o mini', '4o-mini']
+ },
+ 'o3-deep-research-2025-06-26': {
+ modelName: 'o3-deep-research-2025-06-26',
+ friendlyName: 'OpenAI (O3 Deep Research)',
+ contextWindow: 200000,
+ maxOutputTokens: 100000,
+ supportsStreaming: true,
+ supportsImages: true,
+ supportsTemperature: false,
+ supportsWebSearch: true,
+ supportsResponsesAPI: true,
+ supportsDeepResearch: true,
+ timeout: 5400000, // 90 minutes for deep research
+ description: 'Deep research model (200K context) - In-depth synthesis, comprehensive reports, multi-source analysis (30-90 min runtime)',
+ aliases: ['o3-deep-research', 'o3-research', 'o3 deep research', 'deep-research-o3']
+ },
+ 'o4-mini-deep-research-2025-06-26': {
+ modelName: 'o4-mini-deep-research-2025-06-26',
+ friendlyName: 'OpenAI (O4-mini Deep Research)',
+ contextWindow: 200000,
+ maxOutputTokens: 100000,
+ supportsStreaming: true,
+ supportsImages: true,
+ supportsTemperature: false,
+ supportsWebSearch: true,
+ supportsResponsesAPI: true,
+ supportsDeepResearch: true,
+ timeout: 3600000, // 60 minutes for faster deep research
+ description: 'Fast deep research model (200K context) - Lightweight research, faster results, latency-sensitive analysis (15-60 min runtime)',
+ aliases: ['o4-mini-deep-research', 'o4-mini-research', 'o4-research', 'o4 mini deep research', 'deep-research-o4-mini', 'o4-deep-research']
  }
  };
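
Both new entries follow the shape of the existing SUPPORTED_MODELS records: dated model IDs as keys, long timeouts, and a list of human-friendly aliases. The provider's actual alias-resolution code is not part of this diff; the sketch below only illustrates how a lookup against a table of this shape could work.

```js
// Illustrative only - not the package's actual resolver.
// Resolves either a model ID or one of its aliases to a config entry.
function resolveModel(requested, supportedModels) {
  const normalized = requested.trim().toLowerCase();
  for (const [id, config] of Object.entries(supportedModels)) {
    if (id.toLowerCase() === normalized) return { id, config };
    if ((config.aliases || []).some((alias) => alias.toLowerCase() === normalized)) {
      return { id, config };
    }
  }
  return null;
}

// Example: resolveModel('o4 mini deep research', SUPPORTED_MODELS) would
// return the 'o4-mini-deep-research-2025-06-26' entry with its
// 60-minute (3600000 ms) timeout.
```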
 
@@ -304,7 +334,8 @@ export const openaiProvider = {

  // Add web search tools only if requested and model supports it
  if (use_websearch && modelConfig.supportsWebSearch) {
- requestPayload.tools = [{ type: 'web_search' }];
+ // Use web_search_preview tool for all models in Responses API
+ requestPayload.tools = [{ type: 'web_search_preview' }];
  }

  // Add temperature if model supports it
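
The effect of this hunk is that any web-search request routed through the Responses API now carries the `web_search_preview` tool type rather than `web_search`. A hedged sketch of the equivalent request made with the openai Node SDK directly, where the model and input values are placeholders and only the tools entry mirrors the change above:

```js
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Placeholder model and input; the tools entry mirrors the change above.
const response = await openai.responses.create({
  model: 'gpt-4o',
  input: 'What changed in the latest Node.js LTS release?',
  tools: [{ type: 'web_search_preview' }],
});

console.log(response.output_text);
```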
@@ -348,7 +379,8 @@ export const openaiProvider = {

  try {
  const apiType = shouldUseResponsesAPI ? 'Responses API' : 'Chat Completions API';
- debugLog(`[OpenAI] Calling ${resolvedModel} via ${apiType} with ${openaiMessages.length} messages${use_websearch && modelConfig.supportsWebSearch ? ' (with web search)' : ''}`);
+ const searchInfo = (use_websearch && modelConfig.supportsWebSearch) ? ' (with web search)' : '';
+ debugLog(`[OpenAI] Calling ${resolvedModel} via ${apiType} with ${openaiMessages.length} messages${searchInfo}`);

  const startTime = Date.now();

@@ -389,6 +421,10 @@ export const openaiProvider = {
  usage = response.usage || {};
  }

+ // Determine web search usage
+ const webSearchUsed = use_websearch && modelConfig.supportsWebSearch;
+ const webSearchType = webSearchUsed ? 'web_search_preview' : null;
+
  // Return unified response format
  return {
  content,
@@ -405,7 +441,8 @@ export const openaiProvider = {
  finish_reason: stopReason,
  provider: 'openai',
  api_type: apiType,
- web_search_used: use_websearch && modelConfig.supportsWebSearch
+ web_search_used: webSearchUsed,
+ web_search_type: webSearchType
  }
  };
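
Combined with the earlier hunk that computes webSearchUsed and webSearchType, consumers of the provider's unified response can now see both whether web search ran and which tool type was used. The diff shows these fields inside a nested block of the returned object whose key is not visible here, so the flat layout below is only an approximation:

```js
// Approximate shape inferred from the diff; actual nesting may differ.
const exampleUnifiedResponse = {
  content: '...model output...',
  // ...other fields elided...
  finish_reason: 'stop',
  provider: 'openai',
  api_type: 'Responses API',
  web_search_used: true, // use_websearch && modelConfig.supportsWebSearch
  web_search_type: 'web_search_preview', // null when web search was not used
};
```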