converse-mcp-server 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,478 @@
1
+ /**
2
+ * Consensus Tool
3
+ *
4
+ * Multi-provider parallel execution with response aggregation.
5
+ * Calls all available providers simultaneously and aggregates responses.
6
+ */
7
+
8
+ import { createToolResponse, createToolError } from './index.js';
9
+ import { processUnifiedContext, createFileContext } from '../utils/contextProcessor.js';
10
+ import { generateContinuationId, addMessageToHistory } from '../continuationStore.js';
11
+ import { debugLog, debugError } from '../utils/console.js';
12
+ import { CONSENSUS_PROMPT } from '../systemPrompts.js';
13
+ import { applyTokenLimit, getTokenLimit } from '../utils/tokenLimiter.js';
14
+
15
/**
 * Consensus tool implementation.
 *
 * Phase 1 invokes every requested model in parallel; phase 2 (optional,
 * controlled by enable_cross_feedback) lets each successful model refine its
 * answer after seeing the other models' initial responses. Failures are
 * collected per model instead of aborting the whole consensus run.
 *
 * @param {object} args - Tool arguments (see consensusTool.inputSchema)
 * @param {object} dependencies - Injected dependencies (config, providers, continuationStore, contextProcessor)
 * @returns {object} MCP tool response
 */
export async function consensusTool(args, dependencies) {
  try {
    const { config, providers, continuationStore, contextProcessor } = dependencies;

    // Validate required arguments
    if (!args.prompt || typeof args.prompt !== 'string') {
      return createToolError('Prompt is required and must be a string');
    }

    if (!args.models || !Array.isArray(args.models) || args.models.length === 0) {
      return createToolError('Models array is required and must contain at least one model');
    }

    // Extract arguments with defaults
    const {
      prompt,
      models,
      relevant_files = [],
      images = [],
      continuation_id,
      enable_cross_feedback = true,
      cross_feedback_prompt,
      temperature = 0.2,
      reasoning_effort = 'medium'
    } = args;

    let conversationHistory = [];
    let continuationId = continuation_id;

    // Load existing conversation if continuation_id provided
    if (continuationId) {
      try {
        const existingState = await continuationStore.get(continuationId);
        if (existingState) {
          conversationHistory = existingState.messages || [];
        } else {
          // Invalid continuation ID - start fresh
          continuationId = generateContinuationId();
        }
      } catch (error) {
        // FIX: was console.error; use debugError like the rest of this module
        debugError('Error loading conversation:', error);
        // Continue with fresh conversation on error
        continuationId = generateContinuationId();
      }
    } else {
      // Generate new continuation ID for new conversation
      continuationId = generateContinuationId();
    }

    // Process context (files and images)
    let contextMessage = null;
    if (relevant_files.length > 0 || images.length > 0) {
      try {
        const contextRequest = {
          files: Array.isArray(relevant_files) ? relevant_files : [],
          images: Array.isArray(images) ? images : []
        };

        const contextResult = await contextProcessor.processUnifiedContext(contextRequest);

        // Create context message from files
        if (contextResult.files.length > 0) {
          contextMessage = createFileContext(contextResult.files, {
            includeMetadata: true,
            includeErrors: true
          });
        }

      } catch (error) {
        // FIX: was console.error; use debugError like the rest of this module
        debugError('Error processing context:', error);
        // Continue without context if processing fails
      }
    }

    // Build message array for providers:
    // system prompt -> prior history -> optional file context -> user prompt
    const messages = [];

    messages.push({
      role: 'system',
      content: CONSENSUS_PROMPT
    });

    messages.push(...conversationHistory);

    if (contextMessage) {
      messages.push(contextMessage);
    }

    messages.push({
      role: 'user',
      content: prompt
    });

    // Resolve model specifications to provider calls
    const providerCalls = [];
    const failedModels = [];

    for (const modelSpec of models) {
      if (!modelSpec.model || typeof modelSpec.model !== 'string') {
        failedModels.push({
          model: modelSpec.model || 'unknown',
          error: 'Invalid model specification',
          status: 'failed'
        });
        continue;
      }

      const modelName = modelSpec.model;
      const providerName = mapModelToProvider(modelName);
      const resolvedModelName = resolveAutoModel(modelName, providerName);
      const provider = providers[providerName];

      if (!provider) {
        failedModels.push({
          model: modelName,
          provider: providerName,
          error: `Provider not found: ${providerName}`,
          status: 'failed'
        });
        continue;
      }

      if (!provider.isAvailable(config)) {
        failedModels.push({
          model: modelName,
          provider: providerName,
          error: `Provider ${providerName} not available (check API key)`,
          status: 'failed'
        });
        continue;
      }

      providerCalls.push({
        model: modelName, // Keep original model name for display
        provider: providerName,
        providerInstance: provider,
        // BUG FIX: spread modelSpec BEFORE setting the resolved model name.
        // Previously `...modelSpec` came last, so its `model` field (e.g.
        // "auto") clobbered the resolved name and the literal string "auto"
        // was sent to the provider API. Per-model overrides (temperature,
        // reasoning_effort, ...) still apply because they are spread after
        // the shared defaults.
        options: {
          temperature,
          reasoning_effort,
          config,
          ...modelSpec, // Allow model-specific overrides
          model: resolvedModelName // Use resolved model name for API call
        }
      });
    }

    if (providerCalls.length === 0) {
      return createToolError(
        `No valid providers available for the specified models. Failed models: ${failedModels.map(f => f.model).join(', ')}`
      );
    }

    // Phase 1: Initial parallel provider calls
    debugLog(`Consensus: Calling ${providerCalls.length} providers in parallel...`);
    const initialResults = await Promise.allSettled(
      providerCalls.map(async (call) => {
        try {
          const response = await call.providerInstance.invoke(messages, call.options);
          return {
            model: call.model,
            provider: call.provider,
            status: 'success',
            response: response.content,
            metadata: response.metadata || {},
            rawResponse: response.rawResponse
          };
        } catch (error) {
          return {
            model: call.model,
            provider: call.provider,
            status: 'failed',
            error: error.message,
            metadata: {}
          };
        }
      })
    );

    // Partition initial results into successes and failures
    const initialPhase = {
      successful: [],
      failed: []
    };

    initialResults.forEach((result, index) => {
      if (result.status === 'fulfilled') {
        if (result.value.status === 'success') {
          initialPhase.successful.push(result.value);
        } else {
          initialPhase.failed.push(result.value);
        }
      } else {
        initialPhase.failed.push({
          model: providerCalls[index].model,
          provider: providerCalls[index].provider,
          status: 'failed',
          // FIX: rejection reason may not be an Error; guard property access
          error: result.reason?.message || 'Unknown error',
          metadata: {}
        });
      }
    });

    // Add pre-failed models (bad spec / missing or unavailable provider)
    initialPhase.failed.push(...failedModels);

    let refinedPhase = null;

    // Phase 2: Cross-feedback (if enabled and we have multiple successful responses)
    if (enable_cross_feedback && initialPhase.successful.length > 1) {
      debugLog(`Consensus: Running cross-feedback phase with ${initialPhase.successful.length} responses...`);

      // Create cross-feedback prompt (caller override or built-in default)
      const feedbackPrompt = cross_feedback_prompt ||
        `Based on the other AI responses below, please refine your answer to the original question. Consider different perspectives and provide your final response:

Original Question: ${prompt}

Other AI Responses:
${initialPhase.successful.map((r, i) => `${i + 1}. ${r.model}: ${r.response}`).join('\n\n')}

Please provide your refined response:`;

      // Build feedback messages: original conversation plus the feedback prompt
      const feedbackMessages = [...messages];
      feedbackMessages.push({
        role: 'user',
        content: feedbackPrompt
      });

      // Run refinement calls in parallel
      const refinementResults = await Promise.allSettled(
        initialPhase.successful.map(async (initialResult) => {
          try {
            // Look up the original call config; a (theoretical) miss throws
            // here and is converted to a 'partial' result below.
            const call = providerCalls.find(c => c.model === initialResult.model);
            const response = await call.providerInstance.invoke(feedbackMessages, call.options);

            return {
              ...initialResult,
              refined_response: response.content,
              refined_metadata: response.metadata || {},
              initial_response: initialResult.response,
              status: 'success'
            };
          } catch (error) {
            return {
              ...initialResult,
              refined_response: null,
              refined_error: error.message,
              initial_response: initialResult.response,
              status: 'partial' // Had initial success but refinement failed
            };
          }
        })
      );

      // Process refinement results
      refinedPhase = [];
      refinementResults.forEach((result) => {
        if (result.status === 'fulfilled') {
          refinedPhase.push(result.value);
        } else {
          // This shouldn't happen with our error handling, but just in case
          const originalResult = result.value || {};
          refinedPhase.push({
            ...originalResult,
            refined_response: null,
            refined_error: 'Refinement phase failed unexpectedly',
            status: 'partial'
          });
        }
      });
    }

    // Save conversation state (best-effort; the response is still returned
    // even if persistence fails)
    try {
      const consensusMessage = {
        role: 'assistant',
        content: `Consensus completed with ${initialPhase.successful.length} successful responses` +
          (refinedPhase ? ` and ${refinedPhase.filter(r => r.status === 'success').length} refined responses` : '')
      };

      const conversationState = {
        messages: [...messages, consensusMessage],
        type: 'consensus',
        lastUpdated: Date.now(),
        consensusData: {
          modelsRequested: models.length,
          providersSuccessful: initialPhase.successful.length,
          providersFailed: initialPhase.failed.length,
          crossFeedbackEnabled: enable_cross_feedback
        }
      };

      await continuationStore.set(continuationId, conversationState);
    } catch (error) {
      debugError('Error saving consensus conversation:', error);
      // Continue even if save fails
    }

    // Build result object
    const result = {
      status: 'consensus_complete',
      models_consulted: models.length,
      successful_initial_responses: initialPhase.successful.length,
      failed_responses: initialPhase.failed.length,
      refined_responses: refinedPhase ? refinedPhase.filter(r => r.status === 'success').length : 0,
      phases: {
        initial: initialPhase.successful,
        ...(refinedPhase !== null && { refined: refinedPhase }),
        failed: initialPhase.failed
      },
      continuation: {
        id: continuationId,
        messageCount: messages.length + 1
      },
      settings: {
        enable_cross_feedback,
        temperature,
        models_requested: models.map(m => m.model)
      }
    };

    // Apply token limiting to the final response
    const tokenLimit = getTokenLimit(config);
    const resultStr = JSON.stringify(result, null, 2);
    const limitedResult = applyTokenLimit(resultStr, tokenLimit);

    // Parse the limited result back to object format to preserve structure
    let finalResult;
    try {
      finalResult = JSON.parse(limitedResult.content);
    } catch (e) {
      // Fallback if parsing fails (e.g. truncation broke the JSON) - return original result
      finalResult = result;
    }

    return createToolResponse(finalResult);

  } catch (error) {
    debugError('Consensus tool error:', error);
    return createToolError('Consensus tool failed', error);
  }
}
368
+
369
/**
 * Resolve the special "auto" model name to the provider's default model.
 * Any name other than "auto" (case-insensitive) is returned unchanged.
 *
 * @param {string} model - Requested model name (may be "auto")
 * @param {string} providerName - Provider key ('openai' | 'xai' | 'google')
 * @returns {string} Concrete model name to send to the provider API
 */
function resolveAutoModel(model, providerName) {
  if (model.toLowerCase() !== 'auto') {
    return model;
  }

  // Default model per provider; unknown providers fall back to gpt-4o-mini.
  const DEFAULT_MODELS = {
    openai: 'gpt-4o-mini',
    xai: 'grok-4-0709',
    google: 'gemini-2.5-flash'
  };

  return DEFAULT_MODELS[providerName] ?? 'gpt-4o-mini';
}
390
+
391
/**
 * Map a model name to its provider key using substring heuristics.
 * Rule order matters: OpenAI patterns are tested before XAI and Google,
 * and any unrecognized name falls back to 'openai'.
 *
 * @param {string} model - Model name (e.g. "gpt-4o", "grok-4", "gemini-2.5-flash")
 * @returns {string} Provider key: 'openai' | 'xai' | 'google'
 */
function mapModelToProvider(model) {
  const name = model.toLowerCase();

  // "auto" defaults to OpenAI.
  if (name === 'auto') {
    return 'openai';
  }

  // Ordered [provider, predicate] rules; first match wins.
  const rules = [
    ['openai', (n) => ['gpt', 'o1', 'o3', 'o4'].some((s) => n.includes(s))],
    ['xai', (n) => n.includes('grok')],
    ['google', (n) => ['gemini', 'flash', 'pro'].some((s) => n.includes(s)) || n === 'google']
  ];

  for (const [provider, matches] of rules) {
    if (matches(name)) {
      return provider;
    }
  }

  // Default fallback
  return 'openai';
}
419
+
420
// Tool metadata
// One-line capability summary surfaced to MCP clients in tools/list.
consensusTool.description = 'PARALLEL CONSENSUS WITH CROSS-MODEL FEEDBACK - Gathers perspectives from multiple AI models simultaneously. Models provide initial responses, then optionally refine based on others\' insights. Returns both phases in a single call. Handles partial failures gracefully. For: complex decisions, architectural choices, technical evaluations.';
// JSON Schema describing the arguments consensusTool accepts.
consensusTool.inputSchema = {
  type: 'object',
  properties: {
    prompt: {
      type: 'string',
      description: 'The problem or proposal to gather consensus on. Include context and specific questions. Example: "Should we use microservices or monolith architecture for our e-commerce platform with 100k users?"',
    },
    // Each entry is an object so per-model overrides can ride along with the name.
    models: {
      type: 'array',
      items: {
        type: 'object',
        properties: {
          model: { type: 'string' },
        },
        required: ['model'],
      },
      description: 'List of models to consult. Example: [{"model": "o3"}, {"model": "gemini-2.5-flash"}, {"model": "grok-4-0709"}]',
    },
    relevant_files: {
      type: 'array',
      items: { type: 'string' },
      description: 'File paths for additional context (absolute paths). Example: ["/path/to/architecture.md", "/path/to/requirements.txt"]',
    },
    images: {
      type: 'array',
      items: { type: 'string' },
      description: 'Image paths for visual context (absolute paths or base64). Example: ["/path/to/current_architecture.png", "/path/to/user_flow.jpg"]',
    },
    continuation_id: {
      type: 'string',
      description: 'Thread continuation ID for multi-turn conversations. Example: "consensus_1703123456789_xyz789"',
    },
    enable_cross_feedback: {
      type: 'boolean',
      description: 'Enable refinement phase where models see others\' responses and can improve their answers. Example: true (recommended), false (faster single-phase only). Default: true',
      default: true,
    },
    cross_feedback_prompt: {
      type: 'string',
      description: 'Custom prompt for refinement phase. Example: "Focus on scalability trade-offs in your refinement" or leave empty for default cross-feedback prompt',
    },
    temperature: {
      type: 'number',
      description: 'Response randomness (0.0-1.0). Examples: 0.1 (very focused), 0.2 (analytical - default), 0.5 (balanced). Default: 0.2',
      minimum: 0.0,
      maximum: 1.0,
      default: 0.2,
    },
    reasoning_effort: {
      type: 'string',
      enum: ['minimal', 'low', 'medium', 'high', 'max'],
      description: 'Reasoning depth for thinking models. Examples: "medium" (balanced - default), "high" (complex analysis), "max" (thorough evaluation). Default: "medium"',
      default: 'medium'
    },
  },
  required: ['prompt', 'models'],
};
@@ -0,0 +1,156 @@
1
+ /**
2
+ * Tool Registry
3
+ *
4
+ * Central registry for all MCP tools following functional architecture.
5
+ * Each tool receives dependencies via injection and returns MCP-compatible responses.
6
+ */
7
+
8
+ // Import individual tools
9
+ import { chatTool } from './chat.js';
10
+ import { consensusTool } from './consensus.js';
11
+
12
/**
 * Tool registry map
 * Each tool must implement: async function(args, dependencies) => mcpResponse
 * Tools also have metadata: description, inputSchema
 * NOTE: mutable by design — registerTool() adds entries at runtime.
 */
const tools = {
  chat: chatTool,
  consensus: consensusTool,
};
21
+
22
/**
 * Get all available tools
 * @returns {object} Map of tool name to tool implementation. This is the live
 *   registry object, so tools registered later via registerTool() are visible
 *   to callers holding this reference.
 */
export function getTools() {
  return tools;
}
29
+
30
/**
 * Look up a tool implementation by name.
 * @param {string} name - Tool name
 * @returns {object|null} Tool implementation, or null when not registered
 */
export function getTool(name) {
  const handler = tools[name];
  return handler || null;
}
38
+
39
/**
 * Register a new tool in the registry.
 * @param {string} name - Tool name
 * @param {function} toolHandler - Tool implementation function
 * @param {object} metadata - Tool metadata (description, inputSchema)
 * @throws {Error} When toolHandler is not a function
 */
export function registerTool(name, toolHandler, metadata = {}) {
  // Reject anything that is not callable before touching the registry.
  if (typeof toolHandler !== 'function') {
    throw new Error(`Tool ${name} must be a function`);
  }

  const { description, inputSchema } = metadata;

  // MCP metadata is attached directly to the handler function, with
  // fallbacks so every registered tool is listable.
  toolHandler.description = description || `${name} tool`;
  toolHandler.inputSchema = inputSchema || {
    type: 'object',
    properties: {},
  };

  tools[name] = toolHandler;
}
60
+
61
/**
 * Get list of available tool names
 * @returns {string[]} Array of tool names registered at call time (a fresh
 *   array snapshot; later registrations do not mutate it)
 */
export function getAvailableTools() {
  return Object.keys(tools);
}
68
+
69
/**
 * Create MCP-compatible tool response.
 *
 * Accepts three shapes of input:
 *  - a string (or any non-object), wrapped directly as a text block;
 *  - a fully-formed MCP response (object with a `content` array), passed through;
 *  - a tool-result object whose `content`/`continuation`/`metadata` fields are
 *    lifted into MCP shape; any other object is serialized to pretty JSON.
 *
 * @param {string|object} content - Response content (string) or full response object
 * @param {boolean} isError - Whether this is an error response
 * @param {object} additionalFields - Additional fields merged into the response
 * @returns {object} MCP tool response
 */
export function createToolResponse(content, isError = false, additionalFields = {}) {
  const asTextBlock = (text) => [{ type: 'text', text }];

  const isPlainObject =
    typeof content === 'object' && content !== null && !Array.isArray(content);

  // Strings (and any non-object values, including arrays) wrap directly.
  if (!isPlainObject) {
    return {
      content: asTextBlock(content),
      isError,
      ...additionalFields
    };
  }

  // Already a complete MCP response (has a content array): pass through.
  if (content.content && Array.isArray(content.content)) {
    return {
      ...content,
      isError: isError || content.isError || false,
      ...additionalFields
    };
  }

  // Tool-result object: lift continuation/metadata to the top level.
  if (content.continuation || content.metadata || content.content) {
    const mcpResponse = {
      content: asTextBlock(content.content || JSON.stringify(content, null, 2)),
      isError: isError || content.isError || false,
      ...additionalFields
    };

    if (content.continuation) {
      mcpResponse.continuation = content.continuation;
    }
    if (content.metadata) {
      mcpResponse.metadata = content.metadata;
    }

    return mcpResponse;
  }

  // Any other object: serialize to readable JSON.
  return {
    content: asTextBlock(JSON.stringify(content, null, 2)),
    isError,
    ...additionalFields
  };
}
137
+
138
/**
 * Create MCP-compatible tool error response.
 * @param {string} message - Error message
 * @param {Error|null} error - Optional original error; its message is appended
 * @returns {object} MCP error response with a structured `error` object attached
 */
export function createToolError(message, error = null) {
  let errorText = message;
  if (error) {
    errorText = `${message}: ${error.message}`;
  }

  const response = createToolResponse(errorText, true);

  // Structured error object alongside the text content (kept for test compatibility).
  response.error = {
    message: errorText,
    type: 'ToolError',
    timestamp: new Date().toISOString()
  };

  return response;
}