converse-mcp-server 1.14.1 → 1.14.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -132,7 +132,7 @@ Get multiple AI models to analyze the same question simultaneously. Each model c
  // Synchronous consensus (default)
  {
  "prompt": "Should we use microservices or monolith architecture for our e-commerce platform?",
- "models": ["gpt-5", "gemini-2.5-flash", "grok-4-0709"],
+ "models": ["gpt-5", "gemini-2.5-flash", "grok-4"],
  "files": ["/path/to/requirements.md"],
  "enable_cross_feedback": true,
  "temperature": 0.2
@@ -231,9 +231,8 @@ SUMMARIZATION_MODEL=gpt-5-nano # Default: gpt-5-nano
  - **gemini-2.0-flash-lite**: Lightweight fast model, text-only

  ### X.AI/Grok Models
- - **grok-4-0709** (alias: `grok`): Latest advanced model (256K context)
- - **grok-3**: Previous generation (131K context)
- - **grok-3-fast**: Higher performance variant
+ - **grok-4-0709** (aliases: `grok`, `grok-4`): Latest advanced model (256K context)
+ - **grok-code-fast-1**: Speedy and economical reasoning model that excels at agentic coding (256K context)

  ### Anthropic Models
  - **claude-opus-4.1**: Highest intelligence with extended thinking (200K context)
@@ -342,6 +341,7 @@ Use `"auto"` for automatic model selection, or specify exact models:
  "flash" // -> gemini-2.5-flash
  "pro" // -> gemini-2.5-pro
  "grok" // -> grok-4-0709
+ "grok-4" // -> grok-4-0709
  ```

  **Auto Model Behavior:**
@@ -351,7 +351,7 @@ Use `"auto"` for automatic model selection, or specify exact models:
  Provider priority order (requires corresponding API key):
  1. OpenAI (`gpt-5`)
  2. Google (`gemini-2.5-pro`)
- 3. XAI (`grok-4-0709`)
+ 3. XAI (`grok-4`)
  4. Anthropic (`claude-sonnet-4-20250514`)
  5. Mistral (`magistral-medium-2506`)
  6. DeepSeek (`deepseek-reasoner`)
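
The README changes above center on the new `grok-4` alias for `grok-4-0709`. As a rough sketch of the lookup this implies (the `resolveModel` helper and the trimmed-down model table below are illustrative, not the package's actual code; the alias lists are taken from the updated README and provider-config hunks in this diff):

```js
// Hypothetical sketch: resolve a requested model name or alias to its
// canonical id, mirroring the aliases documented in the README hunks above.
const XAI_MODELS = {
  'grok-4-0709': { aliases: ['grok', 'grok-4'] },
  'grok-code-fast-1': { aliases: ['grok-code-fast'] },
};

function resolveModel(requested) {
  const name = requested.trim().toLowerCase();
  for (const [canonical, meta] of Object.entries(XAI_MODELS)) {
    if (canonical === name || meta.aliases.includes(name)) {
      return canonical;
    }
  }
  return null; // unknown model; caller would fall back to other providers or "auto"
}

console.log(resolveModel('grok-4')); // -> 'grok-4-0709'
console.log(resolveModel('grok'));   // -> 'grok-4-0709'
```
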
package/docs/API.md CHANGED
@@ -59,7 +59,7 @@ MCP_TRANSPORT=stdio npm start
  },
  "model": {
  "type": "string",
- "description": "AI model to use. Examples: 'auto' (recommended), 'gemini-2.5-flash', 'gpt-5', 'grok-4-0709'. Default: 'auto'"
+ "description": "AI model to use. Examples: 'auto' (recommended), 'gemini-2.5-flash', 'gpt-5', 'grok-4'. Default: 'auto'"
  },
  "files": {
  "type": "array",
@@ -178,7 +178,7 @@ MCP_TRANSPORT=stdio npm start
  "type": "array",
  "items": {"type": "string"},
  "minItems": 1,
- "description": "List of models to consult. Example: ['o3', 'gemini-2.5-flash', 'grok-4-0709']"
+ "description": "List of models to consult. Example: ['o3', 'gemini-2.5-flash', 'grok-4']"
  },
  "files": {
  "type": "array",
@@ -274,7 +274,7 @@ MCP_TRANSPORT=stdio npm start
  "settings": {
  "enable_cross_feedback": true,
  "temperature": 0.2,
- "models_requested": ["o3", "gemini-2.5-flash", "grok-4-0709"]
+ "models_requested": ["o3", "gemini-2.5-flash", "grok-4"]
  }
  }
  ```
@@ -304,7 +304,7 @@ MCP_TRANSPORT=stdio npm start
  "models": [
  {"model": "o3"},
  {"model": "gemini-2.5-pro"},
- {"model": "grok-4-0709"}
+ {"model": "grok-4"}
  ],
  "files": ["/docs/requirements.md", "/docs/current_architecture.md"],
  "enable_cross_feedback": true,
@@ -337,9 +337,8 @@ MCP_TRANSPORT=stdio npm start

  | Model | Alias | Context | Tokens | Features | Use Cases |
  |-------|-------|---------|--------|----------|-----------|
- | `grok-4-0709` | `grok` | 256K | 256K | Advanced | Latest capabilities |
- | `grok-3` | `grok3` | 131K | 131K | Previous gen | Stable reasoning |
- | `grok-3-fast` | - | 131K | 131K | High perf | Faster processing |
+ | `grok-4-0709` | `grok`, `grok-4` | 256K | 256K | Advanced | Latest capabilities |
+ | `grok-code-fast-1` | `grok-code-fast` | 256K | 256K | Code optimization | Agentic coding |

  ### Anthropic Models

@@ -397,6 +396,7 @@ Use `"auto"` for automatic selection or specify exact models:
  {"model": "flash"} // -> gemini-2.5-flash
  {"model": "pro"} // -> gemini-2.5-pro
  {"model": "grok"} // -> grok-4-0709
+ {"model": "grok-4"} // -> grok-4-0709
  ```

  ## Configuration
@@ -725,7 +725,7 @@ LOG_LEVEL=error # Errors only
  "models": [
  {"model": "o3"},
  {"model": "gemini-2.5-pro"},
- {"model": "grok-4-0709"}
+ {"model": "grok-4"}
  ],
  "files": [
  "/docs/requirements.md",
package/docs/PROVIDERS.md CHANGED
@@ -29,7 +29,7 @@ This guide documents all supported AI providers in the Converse MCP Server and t
  - **Environment Variable**: `XAI_API_KEY`
  - **Supported Models**:
  - `grok-4-0709` - Latest with image support and web search
- - `grok-3`, `grok-3-fast` - Previous generation
+ - `grok-code-fast-1` - Speedy and economical reasoning model that excels at agentic coding

  ### Anthropic (Claude)
  - **API Key Format**: `sk-ant-...` (starts with `sk-ant-`)
@@ -173,7 +173,7 @@ When using the chat or consensus tools, specify models using their identifiers:
  "model": "claude-opus-4", // Anthropic (keyword match, auto-resolves)
  "model": "sonnet", // Anthropic (keyword match)
  "model": "gemini-2.5-pro", // Google (keyword match)
- "model": "grok-4-0709", // X.AI (keyword match)
+ "model": "grok-4", // X.AI (keyword match)
  "model": "mistral-large", // Mistral (keyword match)
  "model": "deepseek-chat", // DeepSeek (keyword match)
  "model": "anthropic/claude-3.5-sonnet", // OpenRouter (slash format, not in Anthropic)
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "converse-mcp-server",
- "version": "1.14.1",
+ "version": "1.14.3",
  "description": "Converse MCP Server - Converse with other LLMs with chat and consensus tools",
  "type": "module",
  "main": "src/index.js",
@@ -89,7 +89,7 @@ export function generateHelpContent() {
  return `\`\`\`json
  {
  "prompt": "Should we use microservices architecture for our new project?",
- "models": ["gpt-5", "gemini-2.5-pro", "grok-4-0709"],
+ "models": ["gpt-5", "gemini-2.5-pro", "grok-4"],
  "files": ["./requirements.md", "C:\\\\Users\\\\username\\\\architecture.md"],
  "enable_cross_feedback": true,
  "temperature": 0.3
@@ -136,7 +136,7 @@ ${formatProviderModels('OpenRouter', allModels.openrouter)}

  ### For Quick Responses
  - **Ultra-Fast**: gpt-5-nano, gemini-2.5-flash, gemini-2.0-flash, gpt-4o-mini
- - **Good Balance**: gpt-5-mini, o4-mini, grok-3-fast
+ - **Good Balance**: gpt-5-mini, o4-mini, grok-code-fast-1

  ### For Large Context Windows
  - **1M+ Tokens**: gpt-4.1 (1M), all Gemini models (1M)
@@ -147,7 +147,7 @@ ${formatProviderModels('OpenRouter', allModels.openrouter)}
  ### Special Features
  - **Web Search**: gpt-5, gpt-5-mini, gpt-5 series, o4-mini, gpt-4 series, gemini models with grounding, grok-4
  - **Thinking Mode**: gpt-5 series (reasoning_effort), gemini models (thinking budget)
- - **Image Support**: All models except gemini-2.0-flash-lite and grok-3 series
+ - **Image Support**: All models except gemini-2.0-flash-lite and grok-code-fast-1

  ## Configuration Tips

@@ -20,7 +20,7 @@ const SUPPORTED_MODELS = {
  supportsTemperature: false, // GPT-5 doesn't support temperature
  supportsWebSearch: true,
  supportsResponsesAPI: true,
- timeout: 300000, // 5 minutes
+ timeout: 3600000, // 1 hour
  description: 'Latest flagship model (400K context, 128K output) - Superior reasoning, code generation, and analysis',
  aliases: ['gpt5', 'gpt 5', 'gpt-5-2025-08-07']
  },
@@ -34,7 +34,7 @@ const SUPPORTED_MODELS = {
  supportsTemperature: false, // GPT-5 models don't support temperature
  supportsWebSearch: true,
  supportsResponsesAPI: true,
- timeout: 180000, // 3 minutes
+ timeout: 1800000, // 30 minutes
  description: 'Faster, cost-efficient GPT-5 (400K context, 128K output) - Well-defined tasks, precise prompts',
  aliases: ['gpt5-mini', 'gpt-5mini', 'gpt 5 mini', 'gpt-5-mini-2025-08-07']
  },
@@ -48,7 +48,7 @@ const SUPPORTED_MODELS = {
  supportsTemperature: false, // GPT-5 models don't support temperature
  supportsWebSearch: false, // GPT-5-nano doesn't support web search
  supportsResponsesAPI: true,
- timeout: 120000, // 2 minutes
+ timeout: 600000, // 10 minutes
  description: 'Fastest, most cost-efficient GPT-5 (400K context, 128K output) - Summarization, classification',
  aliases: ['gpt5-nano', 'gpt-5nano', 'gpt 5 nano', 'gpt-5-nano-2025-08-07']
  },
@@ -62,7 +62,7 @@ const SUPPORTED_MODELS = {
  supportsTemperature: false,
  supportsWebSearch: true,
  supportsResponsesAPI: true,
- timeout: 300000, // 5 minutes
+ timeout: 600000, // 10 minutes
  description: 'Strong reasoning (200K context) - Logical problems, code generation, systematic analysis',
  aliases: ['o3-2025-01-31']
  },
@@ -90,7 +90,7 @@ const SUPPORTED_MODELS = {
  supportsTemperature: false,
  supportsWebSearch: true,
  supportsResponsesAPI: true,
- timeout: 1800000, // 30 minutes
+ timeout: 3600000, // 60 minutes
  description: 'Professional-grade reasoning (200K context) - EXTREMELY EXPENSIVE: Only for the most complex problems',
  aliases: ['o3-pro', 'o3pro', 'o3 pro']
  },
@@ -161,7 +161,7 @@ const SUPPORTED_MODELS = {
  supportsWebSearch: true,
  supportsResponsesAPI: true,
  supportsDeepResearch: true,
- timeout: 5400000, // 90 minutes for deep research
+ timeout: 7200000, // 120 minutes for deep research
  description: 'Deep research model (200K context) - In-depth synthesis, comprehensive reports, multi-source analysis (30-90 min runtime)',
  aliases: ['o3-deep-research', 'o3-research', 'o3 deep research', 'deep-research-o3']
  },
@@ -36,32 +36,6 @@ const SUPPORTED_MODELS = {
  description: 'GROK Code Fast 1 (256K context) - Speedy and economical reasoning model that excels at agentic coding',
  aliases: ['grok-code-fast', 'grok-code-fast-1-0825', 'grok code fast', 'grok code fast 1']
  },
- 'grok-3': {
- modelName: 'grok-3',
- friendlyName: 'X.AI (Grok 3)',
- contextWindow: 131072,
- maxOutputTokens: 131072,
- supportsStreaming: true,
- supportsImages: false,
- supportsTemperature: true,
- supportsWebSearch: false,
- timeout: 300000,
- description: 'GROK-3 (131K context) - Previous generation reasoning model from X.AI',
- aliases: ['grok3', 'grok 3']
- },
- 'grok-3-fast': {
- modelName: 'grok-3-fast',
- friendlyName: 'X.AI (Grok 3 Fast)',
- contextWindow: 131072,
- maxOutputTokens: 131072,
- supportsStreaming: true,
- supportsImages: false,
- supportsTemperature: true,
- supportsWebSearch: false,
- timeout: 300000,
- description: 'GROK-3 Fast (131K context) - Higher performance variant, faster processing but more expensive',
- aliases: ['grok3fast', 'grok3-fast', 'grok 3 fast']
- }
  };

  /**
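
After this last hunk, the X.AI model table is left with the two entries referenced elsewhere in the diff, `grok-4-0709` and `grok-code-fast-1`. A rough reconstruction of its resulting shape follows; only the description and aliases of `grok-code-fast-1` appear verbatim above, so every other value is an assumption inferred from the context lines and the removed entries' field layout, and may differ from the published package:

```js
// Assumed post-change shape of the X.AI SUPPORTED_MODELS table (sketch only).
const SUPPORTED_MODELS = {
  'grok-4-0709': {
    modelName: 'grok-4-0709',
    friendlyName: 'X.AI (Grok 4)',       // inferred naming pattern
    contextWindow: 262144,               // "256K context" per the README hunk
    supportsImages: true,                // "image support" per PROVIDERS.md
    supportsWebSearch: true,             // "web search" per PROVIDERS.md
    aliases: ['grok', 'grok-4']          // aliases documented in the README hunk
  },
  'grok-code-fast-1': {
    modelName: 'grok-code-fast-1',
    friendlyName: 'X.AI (Grok Code Fast 1)',
    contextWindow: 262144,               // "256K context" per the description above
    supportsImages: false,               // excluded from image support per the help-text hunk
    description: 'GROK Code Fast 1 (256K context) - Speedy and economical reasoning model that excels at agentic coding',
    aliases: ['grok-code-fast', 'grok-code-fast-1-0825', 'grok code fast', 'grok code fast 1']
  }
};
```
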