converse-mcp-server 1.8.3 → 1.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -1
- package/bin/converse.js +30 -30
- package/docs/API.md +8 -2
- package/docs/EXAMPLES.md +46 -1
- package/package.json +3 -2
- package/src/prompts/helpPrompt.js +7 -6
- package/src/providers/openai.js +58 -5
- package/src/tools/chat.js +11 -3
package/README.md CHANGED
@@ -113,7 +113,7 @@ Get multiple AI models to analyze the same question simultaneously. Each model c
 {
   "prompt": "Should we use microservices or monolith architecture for our e-commerce platform?",
   "models": [
-    {"model": "
+    {"model": "gpt-5"},
     {"model": "gemini-2.5-flash"},
     {"model": "grok-4-0709"}
   ],
@@ -126,6 +126,9 @@ Get multiple AI models to analyze the same question simultaneously. Each model c
 ## 📊 Supported Models
 
 ### OpenAI Models
+- **gpt-5**: Latest flagship model (400K context, 128K output) - Superior reasoning, code generation, and analysis
+- **gpt-5-mini**: Faster, cost-efficient GPT-5 (400K context, 128K output) - Well-defined tasks, precise prompts
+- **gpt-5-nano**: Fastest, most cost-efficient GPT-5 (400K context, 128K output) - Summarization, classification
 - **o3**: Strong reasoning (200K context)
 - **o3-mini**: Fast O3 variant (200K context)
 - **o3-pro**: Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE
package/bin/converse.js CHANGED
@@ -1,32 +1,32 @@
 #!/usr/bin/env node
-
-/**
- * Converse MCP Server - CLI Entry Point
- *
- * This script allows the MCP server to be run via npx/pnpm dlx for easy installation and execution.
- */
-
-import { fileURLToPath, pathToFileURL } from 'url';
-import { dirname, join } from 'path';
-import { createRequire } from 'module';
-
-// Get the directory of this script
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = dirname(__filename);
-
-// Get the project root (parent of bin directory)
-const projectRoot = dirname(__dirname);
-
-// Import and start the server
-try {
-  const indexPath = join(projectRoot, 'src/index.js');
-  const { main } = await import(pathToFileURL(indexPath).href);
-
-  // The main function will handle all logging appropriately based on transport type
-  await main();
-} catch (error) {
-  // For stdio transport, we must not output anything to stdout
-  // For http transport, this will be logged by the error handler in main
-  // Just exit with error code
-  process.exit(1);
+
+/**
+ * Converse MCP Server - CLI Entry Point
+ *
+ * This script allows the MCP server to be run via npx/pnpm dlx for easy installation and execution.
+ */
+
+import { fileURLToPath, pathToFileURL } from 'url';
+import { dirname, join } from 'path';
+import { createRequire } from 'module';
+
+// Get the directory of this script
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+// Get the project root (parent of bin directory)
+const projectRoot = dirname(__dirname);
+
+// Import and start the server
+try {
+  const indexPath = join(projectRoot, 'src/index.js');
+  const { main } = await import(pathToFileURL(indexPath).href);
+
+  // The main function will handle all logging appropriately based on transport type
+  await main();
+} catch (error) {
+  // For stdio transport, we must not output anything to stdout
+  // For http transport, this will be logged by the error handler in main
+  // Just exit with error code
+  process.exit(1);
 }
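The removed and re-added lines in this file are textually identical, so the hunk most likely reflects a line-ending change only (CRLF normalized to LF), consistent with the new prepublishOnly fix-line-endings script added to package.json below.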
package/docs/API.md CHANGED
@@ -55,7 +55,7 @@ MCP_TRANSPORT=stdio npm start
     },
     "model": {
       "type": "string",
-      "description": "AI model to use. Examples: 'auto' (recommended), 'gemini-2.5-flash', '
+      "description": "AI model to use. Examples: 'auto' (recommended), 'gemini-2.5-flash', 'gpt-5', 'grok-4-0709'. Default: 'auto'"
     },
     "files": {
       "type": "array",
@@ -82,7 +82,13 @@ MCP_TRANSPORT=stdio npm start
       "type": "string",
       "enum": ["minimal", "low", "medium", "high", "max"],
       "default": "medium",
-      "description": "Reasoning depth for thinking models. Examples: 'minimal' (
+      "description": "Reasoning depth for thinking models. Examples: 'minimal' (fastest, few reasoning tokens), 'low' (light analysis), 'medium' (balanced), 'high' (complex analysis)"
+    },
+    "verbosity": {
+      "type": "string",
+      "enum": ["low", "medium", "high"],
+      "default": "medium",
+      "description": "Output verbosity for GPT-5 models. Examples: 'low' (concise answers), 'medium' (balanced), 'high' (thorough explanations)"
     },
     "use_websearch": {
       "type": "boolean",
package/docs/EXAMPLES.md CHANGED
@@ -48,6 +48,51 @@
 }
 ```
 
+## 🚀 GPT-5 Advanced Features
+
+### Using Minimal Reasoning for Fast Responses
+
+```json
+{
+  "tool": "chat",
+  "arguments": {
+    "prompt": "Write a simple SQL query to get all users created today",
+    "model": "gpt-5",
+    "reasoning_effort": "minimal",
+    "verbosity": "low"
+  }
+}
+```
+
+### High Verbosity for Detailed Explanations
+
+```json
+{
+  "tool": "chat",
+  "arguments": {
+    "prompt": "Explain the architecture of this authentication system and suggest improvements",
+    "model": "gpt-5",
+    "files": ["/c/Users/username/project/src/auth.js"],
+    "reasoning_effort": "high",
+    "verbosity": "high"
+  }
+}
+```
+
+### Cost-Efficient with GPT-5-nano
+
+```json
+{
+  "tool": "chat",
+  "arguments": {
+    "prompt": "Summarize the main points from this document",
+    "model": "gpt-5-nano",
+    "files": ["/c/Users/username/docs/report.md"],
+    "verbosity": "low"
+  }
+}
+```
+
 ## 🔧 Code Analysis Examples
 
 ### Single File Analysis
@@ -181,7 +226,7 @@
   "arguments": {
     "prompt": "Given our current system architecture, what's the best approach for implementing real-time notifications?",
     "models": [
-      {"model": "
+      {"model": "gpt-5"},          // Most intelligent: Superior reasoning
       {"model": "grok-4"},         // Most intelligent: Advanced analysis
       {"model": "gemini-2.5-pro"}  // Most intelligent: Deep thinking
     ],
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "converse-mcp-server",
-  "version": "1.8.3",
+  "version": "1.9.0",
   "description": "Converse MCP Server - Converse with other LLMs with chat and consensus tools",
   "type": "module",
   "main": "src/index.js",
@@ -63,7 +63,8 @@
     "validate": "node scripts/validate.js",
     "validate:fix": "node scripts/validate.js --fix",
     "validate:fast": "node scripts/validate.js --skip-tests --skip-lint",
-    "precommit": "npm run validate"
+    "precommit": "npm run validate",
+    "prepublishOnly": "node scripts/fix-line-endings.js"
   },
   "keywords": [
     "mcp",
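The new prepublishOnly hook runs scripts/fix-line-endings.js before every publish. That script's contents are not part of this diff; the sketch below is only an assumption of what such a normalization step typically looks like, and the file list and log message are illustrative rather than the package's actual code.

```js
// Hypothetical sketch - scripts/fix-line-endings.js itself is not shown in this diff.
// Typical prepublish normalization: rewrite CRLF as LF so the published
// bin/converse.js keeps a Unix-style shebang line.
import { readFileSync, writeFileSync } from 'fs';

// Illustrative target list; the real script may walk the whole package.
const targets = ['bin/converse.js'];

for (const file of targets) {
  const original = readFileSync(file, 'utf8');
  const normalized = original.replace(/\r\n/g, '\n');
  if (normalized !== original) {
    writeFileSync(file, normalized);
    // Log to stderr so stdout stays clean for stdio transports.
    console.error(`fix-line-endings: normalized ${file}`);
  }
}
```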
package/src/prompts/helpPrompt.js CHANGED
@@ -134,21 +134,22 @@ ${formatProviderModels('OpenRouter', allModels.openrouter)}
 ## Model Selection Tips
 
 ### For Complex Reasoning Tasks
-- **Most Intelligent**:
-- **Fast & Smart**: o3-mini, o4-mini, gemini-2.5-flash
-- **Budget-Friendly**: gpt-4o-mini, gemini-2.0-flash-lite
+- **Most Intelligent**: gpt-5, o3-pro, gemini-2.5-pro, grok-4
+- **Fast & Smart**: gpt-5-mini, o3-mini, o4-mini, gemini-2.5-flash
+- **Budget-Friendly**: gpt-5-nano, gpt-4o-mini, gemini-2.0-flash-lite
 
 ### For Quick Responses
-- **Ultra-Fast**: gemini-2.5-flash, gemini-2.0-flash, gpt-4o-mini
-- **Good Balance**: o4-mini, grok-3-fast
+- **Ultra-Fast**: gpt-5-nano, gemini-2.5-flash, gemini-2.0-flash, gpt-4o-mini
+- **Good Balance**: gpt-5-mini, o4-mini, grok-3-fast
 
 ### For Large Context Windows
 - **1M+ Tokens**: gpt-4.1 (1M), all Gemini models (1M)
+- **400K Tokens**: gpt-5 family (gpt-5, gpt-5-mini, gpt-5-nano)
 - **256K Tokens**: grok-4 series
 - **200K Tokens**: o3 series, o4-mini
 
 ### Special Features
-- **Web Search**: o3 series, o4-mini, gpt-4 series, gemini models with grounding, grok-4
+- **Web Search**: gpt-5, gpt-5-mini, o3 series, o4-mini, gpt-4 series, gemini models with grounding, grok-4
 - **Thinking Mode**: o3 series (reasoning_effort), gemini models (thinking budget)
 - **Image Support**: All models except gemini-2.0-flash-lite and grok-3 series
 
package/src/providers/openai.js CHANGED
@@ -10,6 +10,48 @@ import { debugLog, debugError } from '../utils/console.js';
 
 // Define supported models with their capabilities
 const SUPPORTED_MODELS = {
+  'gpt-5': {
+    modelName: 'gpt-5',
+    friendlyName: 'OpenAI (GPT-5)',
+    contextWindow: 400000,
+    maxOutputTokens: 128000,
+    supportsStreaming: true,
+    supportsImages: true,
+    supportsTemperature: false, // GPT-5 doesn't support temperature
+    supportsWebSearch: true,
+    supportsResponsesAPI: true,
+    timeout: 300000, // 5 minutes
+    description: 'Latest flagship model (400K context, 128K output) - Superior reasoning, code generation, and analysis',
+    aliases: ['gpt5', 'gpt 5', 'gpt-5-2025-08-07']
+  },
+  'gpt-5-mini': {
+    modelName: 'gpt-5-mini',
+    friendlyName: 'OpenAI (GPT-5-mini)',
+    contextWindow: 400000,
+    maxOutputTokens: 128000,
+    supportsStreaming: true,
+    supportsImages: true,
+    supportsTemperature: false, // GPT-5 models don't support temperature
+    supportsWebSearch: true,
+    supportsResponsesAPI: true,
+    timeout: 180000, // 3 minutes
+    description: 'Faster, cost-efficient GPT-5 (400K context, 128K output) - Well-defined tasks, precise prompts',
+    aliases: ['gpt5-mini', 'gpt-5mini', 'gpt 5 mini', 'gpt-5-mini-2025-08-07']
+  },
+  'gpt-5-nano': {
+    modelName: 'gpt-5-nano',
+    friendlyName: 'OpenAI (GPT-5-nano)',
+    contextWindow: 400000,
+    maxOutputTokens: 128000,
+    supportsStreaming: true,
+    supportsImages: true,
+    supportsTemperature: false, // GPT-5 models don't support temperature
+    supportsWebSearch: false, // GPT-5-nano doesn't support web search
+    supportsResponsesAPI: true,
+    timeout: 120000, // 2 minutes
+    description: 'Fastest, most cost-efficient GPT-5 (400K context, 128K output) - Summarization, classification',
+    aliases: ['gpt5-nano', 'gpt-5nano', 'gpt 5 nano', 'gpt-5-nano-2025-08-07']
+  },
   'o3': {
     modelName: 'o3',
     friendlyName: 'OpenAI (O3)',
@@ -290,6 +332,7 @@ export const openaiProvider = {
     maxTokens = null,
     stream = false,
     reasoning_effort = 'medium',
+    verbosity = 'medium',
     use_websearch = false,
     config,
     ...otherOptions
@@ -343,13 +386,18 @@ export const openaiProvider = {
       requestPayload.temperature = Math.max(0, Math.min(2, temperature));
     }
 
-    // Add reasoning effort for thinking models (o3 series)
-    if (resolvedModel.startsWith('o3') && reasoning_effort) {
+    // Add reasoning effort for thinking models (o3 series and GPT-5 family)
+    if ((resolvedModel.startsWith('o3') || resolvedModel.startsWith('gpt-5')) && reasoning_effort) {
       requestPayload.reasoning = { effort: reasoning_effort };
     }
+
+    // Add verbosity for GPT-5 models
+    if (resolvedModel.startsWith('gpt-5') && verbosity) {
+      requestPayload.text = { verbosity: verbosity };
+    }
   } else {
     // Build Chat Completions API payload
-    const { reasoning_effort: _unused, ...cleanOptions } = otherOptions;
+    const { reasoning_effort: _unused, verbosity: _unused2, ...cleanOptions } = otherOptions;
     requestPayload = {
       model: resolvedModel,
       messages: openaiMessages,
@@ -362,10 +410,15 @@ export const openaiProvider = {
       requestPayload.temperature = Math.max(0, Math.min(2, temperature));
     }
 
-    // Add reasoning effort for thinking models (o3 series)
-    if (resolvedModel.startsWith('o3') && reasoning_effort) {
+    // Add reasoning effort for thinking models (o3 series and GPT-5 family)
+    if ((resolvedModel.startsWith('o3') || resolvedModel.startsWith('gpt-5')) && reasoning_effort) {
       requestPayload.reasoning_effort = reasoning_effort;
     }
+
+    // Add verbosity for GPT-5 models
+    if (resolvedModel.startsWith('gpt-5') && verbosity) {
+      requestPayload.verbosity = verbosity;
+    }
   }
 
   // Add max tokens if specified (both APIs)
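The provider hunks above route the same two options into different payload shapes depending on which OpenAI API path is taken: the Responses API path nests them as reasoning/text objects, while the Chat Completions path uses flat fields. A minimal standalone sketch of that mapping, assuming only what the diff shows (buildTuningFields is a hypothetical helper introduced here for illustration, not part of the package):

```js
// Minimal sketch of the option mapping added in 1.9.0.
// buildTuningFields is a hypothetical name used only for this illustration.
function buildTuningFields(resolvedModel, { reasoning_effort, verbosity }, useResponsesAPI) {
  const fields = {};
  const isThinking = resolvedModel.startsWith('o3') || resolvedModel.startsWith('gpt-5');
  const isGpt5 = resolvedModel.startsWith('gpt-5');

  if (useResponsesAPI) {
    // Responses API: nested objects
    if (isThinking && reasoning_effort) fields.reasoning = { effort: reasoning_effort };
    if (isGpt5 && verbosity) fields.text = { verbosity };
  } else {
    // Chat Completions API: flat fields
    if (isThinking && reasoning_effort) fields.reasoning_effort = reasoning_effort;
    if (isGpt5 && verbosity) fields.verbosity = verbosity;
  }
  return fields;
}

// Example: gpt-5 via the Responses API
// => { reasoning: { effort: 'minimal' }, text: { verbosity: 'low' } }
console.log(buildTuningFields('gpt-5', { reasoning_effort: 'minimal', verbosity: 'low' }, true));
```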
package/src/tools/chat.js CHANGED
@@ -40,7 +40,8 @@ export async function chatTool(args, dependencies) {
     temperature = 0.5,
     use_websearch = false,
     images = [],
-    reasoning_effort = 'medium'
+    reasoning_effort = 'medium',
+    verbosity = 'medium'
   } = args;
 
   let conversationHistory = [];
@@ -177,6 +178,7 @@ export async function chatTool(args, dependencies) {
     model: resolvedModel,
     temperature,
     reasoning_effort,
+    verbosity,
     use_websearch,
     config
   };
@@ -369,7 +371,7 @@ chatTool.inputSchema = {
     },
     model: {
       type: 'string',
-      description: 'AI model to use. Examples: "auto" (recommended), "
+      description: 'AI model to use. Examples: "auto" (recommended), "gpt-5", "gemini-2.5-pro", "grok-4-0709". Defaults to auto-selection.',
     },
     files: {
       type: 'array',
@@ -395,7 +397,13 @@ chatTool.inputSchema = {
     reasoning_effort: {
       type: 'string',
       enum: ['minimal', 'low', 'medium', 'high', 'max'],
-      description: 'Reasoning depth for thinking models. Examples: "low" (light analysis), "medium" (balanced), "high" (complex analysis). Default: "medium"',
+      description: 'Reasoning depth for thinking models. Examples: "minimal" (fastest, few reasoning tokens), "low" (light analysis), "medium" (balanced), "high" (complex analysis). Default: "medium"',
+      default: 'medium'
+    },
+    verbosity: {
+      type: 'string',
+      enum: ['low', 'medium', 'high'],
+      description: 'Output verbosity for GPT-5 models. Examples: "low" (concise answers), "medium" (balanced), "high" (thorough explanations). Default: "medium"',
       default: 'medium'
     },
     use_websearch: {