converse-mcp-server 1.10.0 → 1.11.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
package/README.md CHANGED
@@ -112,11 +112,7 @@ Get multiple AI models to analyze the same question simultaneously. Each model c
  // Example usage
  {
  "prompt": "Should we use microservices or monolith architecture for our e-commerce platform?",
- "models": [
- {"model": "gpt-5"},
- {"model": "gemini-2.5-flash"},
- {"model": "grok-4-0709"}
- ],
+ "models": ["gpt-5", "gemini-2.5-flash", "grok-4-0709"],
  "files": ["/path/to/requirements.md"],
  "enable_cross_feedback": true,
  "temperature": 0.2
@@ -246,19 +242,34 @@ Use `"auto"` for automatic model selection, or specify exact models:

  ```javascript
  // Auto-selection (recommended)
- { "model": "auto" }
+ "auto"

  // Specific models
- { "model": "gemini-2.5-flash" }
- { "model": "o3" }
- { "model": "grok-4-0709" }
+ "gemini-2.5-flash"
+ "gpt-5"
+ "grok-4-0709"

  // Using aliases
- { "model": "flash" } // -> gemini-2.5-flash
- { "model": "pro" } // -> gemini-2.5-pro
- { "model": "grok" } // -> grok-4-0709
+ "flash" // -> gemini-2.5-flash
+ "pro" // -> gemini-2.5-pro
+ "grok" // -> grok-4-0709
  ```

+ **Auto Model Behavior:**
+ - **Chat Tool**: Selects the first available provider and uses its default model
+ - **Consensus Tool**: When using `["auto"]`, automatically expands to the first 3 available providers
+
+ Provider priority order (requires corresponding API key):
+ 1. OpenAI (`gpt-5`)
+ 2. Google (`gemini-2.5-pro`)
+ 3. XAI (`grok-4-0709`)
+ 4. Anthropic (`claude-sonnet-4-20250514`)
+ 5. Mistral (`magistral-medium-2506`)
+ 6. DeepSeek (`deepseek-reasoner`)
+ 7. OpenRouter (`qwen/qwen3-coder`)
+
+ The system will use the first 3 providers that have valid API keys configured. This enables automatic multi-model consensus without manually specifying models.
+
  ### Advanced Configuration

  #### Manual Installation Options
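To illustrate the auto-expansion described in the README section above, a consensus request can simply pass `["auto"]` and let the server pick the models. A minimal sketch (the prompt is invented for illustration; the expanded list assumes OpenAI, Google, and XAI keys are the first three configured):

```javascript
// Hypothetical request using the new ["auto"] expansion of the consensus tool.
// With OpenAI, Google, and XAI API keys present, the server behaves as if the
// caller had requested ["gpt-5", "gemini-2.5-pro", "grok-4-0709"].
const request = {
  tool: "consensus",
  arguments: {
    prompt: "Should we adopt feature flags for gradual rollouts?",
    models: ["auto"],
    enable_cross_feedback: true,
    temperature: 0.2
  }
};
```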
package/docs/API.md CHANGED
@@ -152,14 +152,9 @@ MCP_TRANSPORT=stdio npm start
  },
  "models": {
  "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "model": {"type": "string"}
- },
- "required": ["model"]
- },
- "description": "List of models to consult. Example: [{'model': 'o3'}, {'model': 'gemini-2.5-flash'}, {'model': 'grok-4-0709'}]"
+ "items": {"type": "string"},
+ "minItems": 1,
+ "description": "List of models to consult. Example: ['o3', 'gemini-2.5-flash', 'grok-4-0709']"
  },
  "files": {
  "type": "array",
@@ -102,7 +102,7 @@
102
  "tool": "chat",
  "arguments": {
  "prompt": "Review this function for potential bugs and improvements",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/project/src/auth.js"],
  "reasoning_effort": "high",
  "temperature": 0.1
@@ -131,14 +131,14 @@

  ## 🎯 Model-Specific Examples

- ### Using O3 for Complex Reasoning
+ ### Using GPT-5 for Complex Reasoning

  ```json
  {
  "tool": "chat",
  "arguments": {
  "prompt": "Design a distributed caching strategy for a social media platform with 10M+ users",
- "model": "o3",
+ "model": "gpt-5",
  "reasoning_effort": "max",
  "temperature": 0.1
  }
@@ -180,11 +180,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "Should we use PostgreSQL or MongoDB for our e-commerce inventory system?",
- "models": [
- {"model": "o3"},
- {"model": "gemini-2.5-pro"},
- {"model": "grok-4"}
- ],
+ "models": ["gpt-5", "gemini-2.5-pro", "grok-4"],
  "temperature": 0.2
  }
  }
@@ -200,7 +196,7 @@
  "phases": {
  "initial": [
  {
- "model": "o3",
+ "model": "gpt-5",
  "status": "success",
  "response": "For an e-commerce inventory system, I recommend PostgreSQL because...",
  "metadata": {"input_tokens": 50, "output_tokens": 180}
@@ -208,7 +204,7 @@
  ],
  "refined": [
  {
- "model": "o3",
+ "model": "gpt-5",
  "status": "success",
  "initial_response": "For an e-commerce inventory system, I recommend PostgreSQL...",
  "refined_response": "After considering the other perspectives on MongoDB's flexibility, I still lean towards PostgreSQL but acknowledge that MongoDB could work well if..."
@@ -226,9 +222,9 @@
  "arguments": {
  "prompt": "Given our current system architecture, what's the best approach for implementing real-time notifications?",
  "models": [
- {"model": "gpt-5"}, // Most intelligent: Superior reasoning
- {"model": "grok-4"}, // Most intelligent: Advanced analysis
- {"model": "gemini-2.5-pro"} // Most intelligent: Deep thinking
+ "gpt-5", // Most intelligent: Superior reasoning
+ "grok-4", // Most intelligent: Advanced analysis
+ "gemini-2.5-pro" // Most intelligent: Deep thinking
  ],
  "files": [
  "/c/Users/username/docs/current_architecture.md",
@@ -248,11 +244,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "What's the best CSS framework for rapid prototyping in 2024?",
- "models": [
- {"model": "gemini-2.5-flash"},
- {"model": "o4-mini"},
- {"model": "grok-4"}
- ],
+ "models": ["gemini-2.5-flash", "o4-mini", "grok-4"],
  "enable_cross_feedback": false,
  "temperature": 0.3
  }
@@ -282,11 +274,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "Compare these three design options and recommend the best one for our mobile app",
- "models": [
- {"model": "gpt-4.1"},
- {"model": "gemini-2.5-pro"},
- {"model": "grok-4"}
- ],
+ "models": ["gpt-4.1", "gemini-2.5-pro", "grok-4"],
  "images": [
  "/c/Users/username/designs/option_a.png",
  "/c/Users/username/designs/option_b.png",
@@ -304,7 +292,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Review the implementation against the architecture diagram. Are we following the design correctly?",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/services/payment.js", "/c/Users/username/src/models/transaction.js"],
  "images": ["/c/Users/username/docs/payment_flow_diagram.png"],
  "reasoning_effort": "high"
@@ -321,7 +309,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Help me debug this error. The application crashes intermittently with this stack trace.",
- "model": "o3",
+ "model": "gpt-5",
  "files": [
  "/c/Users/username/src/server.js",
  "/c/Users/username/logs/error.log",
@@ -341,9 +329,9 @@
  "arguments": {
  "prompt": "Our API response times are degrading. What could be the root causes?",
  "models": [
- {"model": "gemini-2.5-flash"}, // Fast: Quick analysis
- {"model": "o4-mini"}, // Fast: Rapid responses
- {"model": "gpt-4.1"} // Fast: Efficient processing
+ "gemini-2.5-flash", // Fast: Quick analysis
+ "o4-mini", // Fast: Rapid responses
+ "gpt-4.1" // Fast: Efficient processing
  ],
  "files": [
  "/c/Users/username/monitoring/performance_report.json",
@@ -377,7 +365,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Review this code and explain what it does, then suggest best practices improvements",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/utils/encryption.js"],
  "reasoning_effort": "medium",
  "temperature": 0.2
@@ -392,11 +380,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "Compare Next.js, Nuxt.js, and SvelteKit for our new web application project",
- "models": [
- {"model": "o3"},
- {"model": "gemini-2.5-flash"},
- {"model": "grok-4"}
- ],
+ "models": ["gpt-5", "gemini-2.5-flash", "grok-4"],
  "files": ["/c/Users/username/docs/project_requirements.md"],
  "temperature": 0.25
  }
@@ -412,11 +396,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "Plan the implementation of user authentication with social login support",
- "models": [
- {"model": "o3"},
- {"model": "gemini-2.5-pro"},
- {"model": "grok-4"}
- ],
+ "models": ["gpt-5", "gemini-2.5-pro", "grok-4"],
  "files": [
  "/c/Users/username/docs/user_requirements.md",
  "/c/Users/username/src/models/user.js"
@@ -447,7 +427,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Help me refactor this legacy code to use modern ES6+ features and improve readability",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/legacy/data-processor.js"],
  "reasoning_effort": "medium",
  "temperature": 0.2
@@ -464,7 +444,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "I need to migrate our monolith to microservices. What's the step-by-step approach?",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/app.js", "/c/Users/username/docs/current_architecture.md"],
  "reasoning_effort": "max",
  "temperature": 0.1
@@ -479,11 +459,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "Should we migrate from Python Django to Node.js Express for better performance?",
- "models": [
- {"model": "o3"},
- {"model": "gemini-2.5-pro"},
- {"model": "grok-4"}
- ],
+ "models": ["gpt-5", "gemini-2.5-pro", "grok-4"],
  "files": [
  "/c/Users/username/backend/requirements.txt",
  "/c/Users/username/monitoring/performance_metrics.json",
@@ -528,7 +504,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Fix this bug in my authentication logic",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/auth.js"],
  "temperature": 0.0,
  "reasoning_effort": "high"
@@ -543,7 +519,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Quick syntax check - is this JavaScript valid?",
- "model": "o3",
+ "model": "gpt-5",
  "reasoning_effort": "minimal",
  "temperature": 0.1
  }
@@ -555,7 +531,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Design a comprehensive testing strategy for this complex system",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/src/", "/tests/"],
  "reasoning_effort": "max",
  "temperature": 0.1
@@ -573,7 +549,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "I'm building a real-time chat application. What architecture should I consider?",
- "model": "o3"
+ "model": "gpt-5"
  }
  }

@@ -605,7 +581,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Review this API endpoint implementation",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/api/users.js"]
  }
  }
@@ -639,11 +615,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "Brainstorm innovative features for a fitness tracking app that would differentiate us from competitors",
- "models": [
- {"model": "grok-4"},
- {"model": "gemini-2.5-pro"},
- {"model": "gpt-4.1"}
- ],
+ "models": ["grok-4", "gemini-2.5-pro", "gpt-4.1"],
  "temperature": 0.7,
  "cross_feedback_prompt": "Build on each other's ideas and suggest combinations"
  }
@@ -673,11 +645,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "What testing strategy should we implement for this e-commerce checkout flow?",
- "models": [
- {"model": "o3"},
- {"model": "gemini-2.5-pro"},
- {"model": "gemini-2.5-flash"}
- ],
+ "models": ["gpt-5", "gemini-2.5-pro", "gemini-2.5-flash"],
  "files": [
  "/c/Users/username/src/checkout/payment.js",
  "/c/Users/username/src/checkout/validation.js",
@@ -711,10 +679,7 @@
  "tool": "consensus",
  "arguments": {
  "prompt": "One of our models is unavailable, but we still need consensus",
- "models": [
- {"model": "available-model-1"},
- {"model": "available-model-2"}
- ],
+ "models": ["available-model-1", "available-model-2"],
  "temperature": 0.2
  }
  }
@@ -755,7 +720,7 @@
  "tool": "chat",
  "arguments": {
  "prompt": "Review this pull request for security issues and best practices",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["/c/Users/username/src/modified-file.js"],
  "reasoning_effort": "high"
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "converse-mcp-server",
- "version": "1.10.0",
+ "version": "1.11.0",
  "description": "Converse MCP Server - Converse with other LLMs with chat and consensus tools",
  "type": "module",
  "main": "src/index.js",
package/src/config.js CHANGED
@@ -43,6 +43,7 @@ const CONFIG_SCHEMA = {
  HOST: { type: 'string', default: 'localhost', description: 'Server host' },
  NODE_ENV: { type: 'string', default: 'development', description: 'Environment mode' },
  LOG_LEVEL: { type: 'string', default: 'info', description: 'Logging level' },
+ CLIENT_CWD: { type: 'string', default: null, description: 'Client working directory for relative paths' },
  },

  // Transport configuration
@@ -213,7 +214,21 @@ export async function loadConfig() {
  // Load server configuration
  for (const [key, schema] of Object.entries(CONFIG_SCHEMA.server)) {
  try {
- config.server[key.toLowerCase()] = validateEnvVar(key, process.env[key], schema);
+ // Special handling for CLIENT_CWD - auto-detect if not explicitly set
+ if (key === 'CLIENT_CWD' && !process.env[key]) {
+ // Try to detect the client's working directory from various sources
+ // When run via npx, INIT_CWD contains the directory where npx was invoked
+ // PWD is another common variable set to the working directory
+ // npm_config_local_prefix is set when run via npm/npx
+ const detectedCwd = process.env.INIT_CWD ||
+ process.env.PWD ||
+ process.env.npm_config_local_prefix ||
+ process.cwd();
+ config.server.client_cwd = detectedCwd;
+ configLogger.debug(`Auto-detected client working directory: ${detectedCwd}`);
+ } else {
+ config.server[key.toLowerCase()] = validateEnvVar(key, process.env[key], schema);
+ }
  } catch (error) {
  errors.push(error.message);
  }
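For orientation, a minimal sketch of the resolution order this change implements, using only Node's standard `process` API (the helper name is hypothetical; an explicitly set `CLIENT_CWD` is still handled through the normal schema validation path shown above):

```javascript
// Hypothetical summary of how the client working directory is chosen:
// an explicit CLIENT_CWD wins, then the npm/npx hints, then the server's own cwd.
function detectClientCwd(env = process.env) {
  return (
    env.CLIENT_CWD ||
    env.INIT_CWD ||                 // set by npm/npx to the invocation directory
    env.PWD ||                      // shell-provided working directory
    env.npm_config_local_prefix ||  // set when run via npm/npx
    process.cwd()                   // fallback: the server process's own cwd
  );
}
```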
@@ -79,7 +79,7 @@ export function generateHelpContent() {
  return `\`\`\`json
  {
  "prompt": "Explain the code in main.js",
- "model": "o3",
+ "model": "gpt-5",
  "files": ["C:\\\\Users\\\\username\\\\project\\\\main.js"],
  "temperature": 0.7,
  "use_websearch": false
@@ -89,11 +89,7 @@ export function generateHelpContent() {
  return `\`\`\`json
  {
  "prompt": "Should we use microservices architecture for our new project?",
- "models": [
- {"model": "o3"},
- {"model": "gemini-2.5-pro"},
- {"model": "grok-4-0709"}
- ],
+ "models": ["gpt-5", "gemini-2.5-pro", "grok-4-0709"],
  "files": ["./requirements.md", "C:\\\\Users\\\\username\\\\architecture.md"],
  "enable_cross_feedback": true,
  "temperature": 0.3
@@ -134,8 +130,8 @@ ${formatProviderModels('OpenRouter', allModels.openrouter)}
  ## Model Selection Tips

  ### For Complex Reasoning Tasks
- - **Most Intelligent**: gpt-5, o3-pro, gemini-2.5-pro, grok-4
- - **Fast & Smart**: gpt-5-mini, o3-mini, o4-mini, gemini-2.5-flash
+ - **Most Intelligent**: gpt-5, gpt-5-pro, gemini-2.5-pro, grok-4
+ - **Fast & Smart**: gpt-5-mini, gpt-5-mini, o4-mini, gemini-2.5-flash
  - **Budget-Friendly**: gpt-5-nano, gpt-4o-mini, gemini-2.0-flash-lite

  ### For Quick Responses
@@ -146,11 +142,11 @@ ${formatProviderModels('OpenRouter', allModels.openrouter)}
  - **1M+ Tokens**: gpt-4.1 (1M), all Gemini models (1M)
  - **400K Tokens**: gpt-5 family (gpt-5, gpt-5-mini, gpt-5-nano)
  - **256K Tokens**: grok-4 series
- - **200K Tokens**: o3 series, o4-mini
+ - **200K Tokens**: gpt-5 series, o4-mini

  ### Special Features
- - **Web Search**: gpt-5, gpt-5-mini, o3 series, o4-mini, gpt-4 series, gemini models with grounding, grok-4
- - **Thinking Mode**: o3 series (reasoning_effort), gemini models (thinking budget)
+ - **Web Search**: gpt-5, gpt-5-mini, gpt-5 series, o4-mini, gpt-4 series, gemini models with grounding, grok-4
+ - **Thinking Mode**: gpt-5 series (reasoning_effort), gemini models (thinking budget)
  - **Image Support**: All models except gemini-2.0-flash-lite and grok-3 series

  ## Configuration Tips
package/src/tools/chat.js CHANGED
@@ -86,7 +86,11 @@ export async function chatTool(args, dependencies) {
  webSearch: use_websearch ? prompt : null
  };

- const contextResult = await contextProcessor.processUnifiedContext(contextRequest);
+ const contextResult = await contextProcessor.processUnifiedContext(contextRequest, {
+ enforceSecurityCheck: false, // Allow files from any location
+ skipSecurityCheck: true, // Legacy flag for backward compatibility
+ clientCwd: config.server?.client_cwd // Use auto-detected client working directory
+ });

  // Create context message from files and images
  const allProcessedFiles = [...contextResult.files, ...contextResult.images];
@@ -272,7 +276,7 @@ function resolveAutoModel(model, providerName) {
  }

  const defaults = {
- 'openai': 'o3',
+ 'openai': 'gpt-5',
  'xai': 'grok-4-0709',
  'google': 'gemini-2.5-pro',
  'anthropic': 'claude-sonnet-4-20250514',
@@ -281,7 +285,7 @@ function resolveAutoModel(model, providerName) {
  'openrouter': 'qwen/qwen3-coder'
  };

- return defaults[providerName] || 'gpt-4o-mini';
+ return defaults[providerName] || 'gpt-5';
  }

  function mapModelToProvider(model, providers) {
@@ -361,7 +365,7 @@ function mapModelToProvider(model, providers) {
  }

  // Tool metadata
- chatTool.description = 'GENERAL CHAT & COLLABORATIVE THINKING - For development assistance, brainstorming, and code analysis. Supports files, images, and conversation continuation.';
+ chatTool.description = 'GENERAL CHAT & COLLABORATIVE THINKING - For development assistance, brainstorming, and code analysis. Supports files, images, and conversation continuation. Use model: "auto" for automatic model selection.';
  chatTool.inputSchema = {
  type: 'object',
  properties: {
@@ -55,7 +55,7 @@ export async function consensusTool(args, dependencies) {
  // Load existing conversation if continuation_id provided
  if (continuationId) {
  try {
- const existingState = await continuationStore.get(continuationId);
+ const existingState = await dependencies.continuationStore.get(continuationId);
  if (existingState) {
  conversationHistory = existingState.messages || [];
  } else {
@@ -93,7 +93,11 @@ export async function consensusTool(args, dependencies) {
  images: Array.isArray(images) ? images : []
  };

- const contextResult = await contextProcessor.processUnifiedContext(contextRequest);
+ const contextResult = await contextProcessor.processUnifiedContext(contextRequest, {
+ enforceSecurityCheck: false, // Allow files from any location
+ skipSecurityCheck: true, // Legacy flag for backward compatibility
+ clientCwd: config.server?.client_cwd // Use auto-detected client working directory
+ });

  // Create context message from files and images
  const allProcessedFiles = [...contextResult.files, ...contextResult.images];
@@ -143,17 +147,47 @@ export async function consensusTool(args, dependencies) {
  const providerCalls = [];
  const failedModels = [];

- for (const modelSpec of models) {
- if (!modelSpec.model || typeof modelSpec.model !== 'string') {
+ // Special handling for single "auto" model - expand to first 3 available providers
+ let modelsToProcess = models;
+ if (models.length === 1 && models[0].toLowerCase() === 'auto') {
+ // Find first 3 available providers
+ const availableProviders = [];
+ const providerOrder = ['openai', 'google', 'xai', 'anthropic', 'mistral', 'deepseek', 'openrouter'];
+
+ for (const providerName of providerOrder) {
+ if (availableProviders.length >= 3) break;
+ const provider = providers[providerName];
+ if (provider && provider.isAvailable(config)) {
+ availableProviders.push(providerName);
+ }
+ }
+
+ if (availableProviders.length === 0) {
+ return createToolError('No providers available. Please configure at least one API key.');
+ }
+
+ // Create model names for each available provider with their default model
+ modelsToProcess = availableProviders.map(providerName =>
+ getDefaultModelForProvider(providerName)
+ );
+
+ logger.debug('Auto-expanded to providers', {
+ data: {
+ providers: availableProviders,
+ models: modelsToProcess
+ }
+ });
+ }
+
+ for (const modelName of modelsToProcess) {
+ if (!modelName || typeof modelName !== 'string') {
  failedModels.push({
- model: modelSpec.model || 'unknown',
+ model: modelName || 'unknown',
  error: 'Invalid model specification',
  status: 'failed'
  });
  continue;
  }
-
- const modelName = modelSpec.model;
  const providerName = mapModelToProvider(modelName, providers);
  const resolvedModelName = resolveAutoModel(modelName, providerName);
  const provider = providers[providerName];
@@ -187,8 +221,7 @@ export async function consensusTool(args, dependencies) {
  reasoning_effort,
  use_websearch,
  config,
- ...modelSpec, // Allow model-specific overrides
- model: resolvedModelName // Use resolved model name for API call (must be after spread)
+ model: resolvedModelName // Use resolved model name for API call
  }
  });
  }
@@ -349,7 +382,7 @@ Please provide your refined response:`;
  }
  };

- await continuationStore.set(continuationId, conversationState);
+ await dependencies.continuationStore.set(continuationId, conversationState);
  } catch (error) {
  logger.error('Error saving consensus conversation', { error });
  // Continue even if save fails
@@ -374,7 +407,7 @@ Please provide your refined response:`;
  settings: {
  enable_cross_feedback,
  temperature,
- models_requested: models.map(m => m.model)
+ models_requested: models
  }
  };

@@ -383,16 +416,14 @@ Please provide your refined response:`;
  const resultStr = JSON.stringify(result, null, 2);
  const limitedResult = applyTokenLimit(resultStr, tokenLimit);

- // Parse the limited result back to object format to preserve structure
- let finalResult;
- try {
- finalResult = JSON.parse(limitedResult.content);
- } catch (e) {
- // Fallback if parsing fails - return original result
- finalResult = result;
- }
-
- return createToolResponse(finalResult);
+ // Return with continuation at top level for test compatibility
+ return createToolResponse({
+ content: limitedResult.content,
+ continuation: {
+ id: continuationId,
+ messageCount: messages.length + 1
+ }
+ });

  } catch (error) {
  logger.error('Consensus tool error', { error });
@@ -406,15 +437,11 @@ Please provide your refined response:`;
  * @returns {string} Provider name
  */
  /**
- * Resolve "auto" model to default model for the provider
+ * Get default model for a provider
  */
- function resolveAutoModel(model, providerName) {
- if (model.toLowerCase() !== 'auto') {
- return model;
- }
-
+ function getDefaultModelForProvider(providerName) {
  const defaults = {
- 'openai': 'o3',
+ 'openai': 'gpt-5',
  'xai': 'grok-4-0709',
  'google': 'gemini-2.5-pro',
  'anthropic': 'claude-sonnet-4-20250514',
@@ -423,7 +450,18 @@ function resolveAutoModel(model, providerName) {
  'openrouter': 'qwen/qwen3-coder'
  };

- return defaults[providerName] || 'o3';
+ return defaults[providerName] || 'gpt-5';
+ }
+
+ /**
+ * Resolve "auto" model to default model for the provider
+ */
+ function resolveAutoModel(model, providerName) {
+ if (model.toLowerCase() !== 'auto') {
+ return model;
+ }
+
+ return getDefaultModelForProvider(providerName);
  }

  function mapModelToProvider(model, providers) {
@@ -503,7 +541,7 @@ function mapModelToProvider(model, providers) {
  }

  // Tool metadata
- consensusTool.description = 'PARALLEL CONSENSUS WITH CROSS-MODEL FEEDBACK - Gathers perspectives from multiple AI models simultaneously. Models provide initial responses, then optionally refine based on others\' insights. Returns both phases in a single call. Handles partial failures gracefully. For: complex decisions, architectural choices, technical evaluations.';
+ consensusTool.description = 'PARALLEL CONSENSUS WITH CROSS-MODEL FEEDBACK - Gathers perspectives from multiple AI models simultaneously. Models provide initial responses, then optionally refine based on others\' insights. Returns both phases in a single call. Handles partial failures gracefully. For: complex decisions, architectural choices, technical evaluations. Use models: ["auto"] for automatic model selection.';
  consensusTool.inputSchema = {
  type: 'object',
  properties: {
@@ -513,14 +551,9 @@ consensusTool.inputSchema = {
  },
  models: {
  type: 'array',
- items: {
- type: 'object',
- properties: {
- model: { type: 'string' },
- },
- required: ['model'],
- },
- description: 'List of models to consult. Example: [{"model": "o3"}, {"model": "gemini-2.5-pro"}, {"model": "grok-4-0709"}]',
+ items: { type: 'string' },
+ minItems: 1,
+ description: 'List of models to consult. Example: ["gpt-5", "gemini-2.5-pro", "grok-4-0709"]',
  },
  files: {
  type: 'array',
@@ -59,21 +59,25 @@ async function validateFilePath(filePath, options = {}) {
  }

  // Convert to absolute path
- const absolutePath = isAbsolute(filePath) ? filePath : resolve(process.cwd(), filePath);
-
- // Security: Check if path is within allowed directories
- const allowedDirs = options.allowedDirectories || [process.cwd(), PROJECT_ROOT];
- const isAllowed = allowedDirs.some(dir => {
- const resolvedDir = resolve(dir);
- return absolutePath.startsWith(resolvedDir);
- });
+ // For relative paths, resolve from the client's working directory if provided,
+ // otherwise use process.cwd()
+ const absolutePath = isAbsolute(filePath) ? filePath : resolve(options.clientCwd || process.cwd(), filePath);
+
+ // Security check is now optional and disabled by default
+ if (options.enforceSecurityCheck) {
+ const allowedDirs = options.allowedDirectories || [process.cwd(), PROJECT_ROOT];
+ const isAllowed = allowedDirs.some(dir => {
+ const resolvedDir = resolve(dir);
+ return absolutePath.startsWith(resolvedDir);
+ });

- if (!isAllowed && !options.skipSecurityCheck) {
- throw new ContextProcessorError(
- 'File access denied: path outside allowed directories',
- 'SECURITY_VIOLATION',
- { path: absolutePath, allowedDirs }
- );
+ if (!isAllowed) {
+ throw new ContextProcessorError(
+ 'File access denied: path outside allowed directories',
+ 'SECURITY_VIOLATION',
+ { path: absolutePath, allowedDirs }
+ );
+ }
  }

  // Check if file exists and is readable
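The net effect of the new `clientCwd` option is that relative file paths in tool calls now resolve against the directory where the MCP client was launched rather than the server's install location. A minimal sketch using Node's `path` module (the directory and file names are made up for illustration):

```javascript
import { isAbsolute, resolve } from 'node:path';

// Hypothetical values for illustration only.
const clientCwd = '/home/alice/my-project';   // e.g. auto-detected from INIT_CWD
const filePath = 'docs/requirements.md';      // relative path supplied in a tool call

// Mirrors the resolution logic in the hunk above.
const absolutePath = isAbsolute(filePath)
  ? filePath
  : resolve(clientCwd, filePath);

console.log(absolutePath); // -> /home/alice/my-project/docs/requirements.md (on POSIX)
```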