converse-mcp-server 2.5.3 → 2.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/docs/API.md CHANGED
@@ -109,6 +109,11 @@ MCP_TRANSPORT=stdio npm start
109
109
  "type": "boolean",
110
110
  "default": false,
111
111
  "description": "Execute in background mode. Returns continuation_id immediately for status monitoring. Example: true for long-running analysis"
112
+ },
113
+ "export": {
114
+ "type": "boolean",
115
+ "default": false,
116
+ "description": "Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Example: true to save for documentation"
112
117
  }
113
118
  },
114
119
  "required": ["prompt"]
@@ -156,6 +161,7 @@ MCP_TRANSPORT=stdio npm start
156
161
 
157
162
  #### Example Usage
158
163
 
164
+ **Basic query:**
159
165
  ```json
160
166
  {
161
167
  "prompt": "Review this authentication function for security issues",
@@ -166,6 +172,26 @@ MCP_TRANSPORT=stdio npm start
166
172
  }
167
173
  ```
168
174
 
175
+ **With conversation export:**
176
+ ```json
177
+ {
178
+ "prompt": "Help me design a scalable architecture for our system",
179
+ "model": "gpt-5",
180
+ "export": true,
181
+ "continuation_id": "conv_architecture_design"
182
+ }
183
+ ```
184
+
185
+ When export is enabled, the conversation will be saved to disk in the following structure:
186
+ ```
187
+ conv_architecture_design/
188
+ ├── 1_request.txt # First user prompt
189
+ ├── 1_response.txt # First AI response
190
+ ├── 2_request.txt # Second user prompt (if continuing)
191
+ ├── 2_response.txt # Second AI response
192
+ └── metadata.json # Conversation metadata and settings
193
+ ```
194
+
169
195
  ### Consensus Tool
170
196
 
171
197
  **Description**: Multi-provider parallel execution with cross-model feedback for gathering perspectives from multiple AI models.
@@ -226,6 +252,11 @@ MCP_TRANSPORT=stdio npm start
226
252
  "type": "boolean",
227
253
  "default": false,
228
254
  "description": "Execute in background mode with per-provider progress tracking. Returns continuation_id immediately for monitoring."
255
+ },
256
+ "export": {
257
+ "type": "boolean",
258
+ "default": false,
259
+ "description": "Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Example: true to save consensus results"
229
260
  }
230
261
  },
231
262
  "required": ["prompt", "models"]
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "converse-mcp-server",
3
- "version": "2.5.3",
3
+ "version": "2.6.1",
4
4
  "description": "Converse MCP Server - Converse with other LLMs with chat and consensus tools",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
package/src/tools/chat.js CHANGED
@@ -20,6 +20,7 @@ import { CHAT_PROMPT } from '../systemPrompts.js';
20
20
  import { applyTokenLimit, getTokenLimit } from '../utils/tokenLimiter.js';
21
21
  import { validateAllPaths } from '../utils/fileValidator.js';
22
22
  import { SummarizationService } from '../services/summarizationService.js';
23
+ import { exportConversation } from '../utils/conversationExporter.js';
23
24
 
24
25
  const logger = createLogger('chat');
25
26
 
@@ -57,6 +58,7 @@ export async function chatTool(args, dependencies) {
57
58
  reasoning_effort = 'medium',
58
59
  verbosity = 'medium',
59
60
  async = false,
61
+ export: shouldExport = false,
60
62
  } = args;
61
63
 
62
64
  // Handle async execution mode
@@ -356,6 +358,27 @@ export async function chatTool(args, dependencies) {
356
358
  // Continue even if save fails
357
359
  }
358
360
 
361
+ // Export conversation if requested
362
+ if (shouldExport) {
363
+ await exportConversation({
364
+ messages: updatedMessages,
365
+ provider: providerName,
366
+ model,
367
+ lastUpdated: Date.now(),
368
+ codexThreadId: response.metadata?.threadId,
369
+ }, {
370
+ clientCwd: config.server?.client_cwd,
371
+ continuation_id: continuationId,
372
+ model,
373
+ temperature,
374
+ reasoning_effort,
375
+ verbosity,
376
+ use_websearch,
377
+ files,
378
+ images,
379
+ });
380
+ }
381
+
359
382
  // Create unified status line (similar to async status display)
360
383
  const statusLine =
361
384
  config.environment?.nodeEnv !== 'test'
@@ -577,6 +600,7 @@ async function executeChatWithStreaming(args, dependencies, context) {
577
600
  images = [],
578
601
  reasoning_effort = 'medium',
579
602
  verbosity = 'medium',
603
+ export: shouldExport = false,
580
604
  } = args;
581
605
 
582
606
  // Initialize SummarizationService
@@ -916,6 +940,27 @@ async function executeChatWithStreaming(args, dependencies, context) {
916
940
  // Continue even if save fails
917
941
  }
918
942
 
943
+ // Export conversation if requested
944
+ if (shouldExport) {
945
+ await exportConversation({
946
+ messages: updatedMessages,
947
+ provider: providerName,
948
+ model,
949
+ lastUpdated: Date.now(),
950
+ codexThreadId: response.metadata?.threadId,
951
+ }, {
952
+ clientCwd: config.server?.client_cwd,
953
+ continuation_id: continuationId,
954
+ model,
955
+ temperature,
956
+ reasoning_effort,
957
+ verbosity,
958
+ use_websearch,
959
+ files,
960
+ images,
961
+ });
962
+ }
963
+
919
964
  // Return complete result for job completion
920
965
  return {
921
966
  content: response.content,
@@ -1000,6 +1045,12 @@ chatTool.inputSchema = {
1000
1045
  'Execute chat in background. When true, returns continuation_id immediately and processes request asynchronously. Default: false',
1001
1046
  default: false,
1002
1047
  },
1048
+ export: {
1049
+ type: 'boolean',
1050
+ description:
1051
+ 'Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Default: false',
1052
+ default: false,
1053
+ },
1003
1054
  prompt: {
1004
1055
  type: 'string',
1005
1056
  description:
@@ -24,6 +24,7 @@ import { CONSENSUS_PROMPT } from '../systemPrompts.js';
24
24
  import { applyTokenLimit, getTokenLimit } from '../utils/tokenLimiter.js';
25
25
  import { validateAllPaths } from '../utils/fileValidator.js';
26
26
  import { SummarizationService } from '../services/summarizationService.js';
27
+ import { exportConversation } from '../utils/conversationExporter.js';
27
28
 
28
29
  const logger = createLogger('consensus');
29
30
 
@@ -72,6 +73,7 @@ export async function consensusTool(args, dependencies) {
72
73
  reasoning_effort = 'medium',
73
74
  use_websearch = false,
74
75
  async = false,
76
+ export: shouldExport = false,
75
77
  } = args;
76
78
 
77
79
  // Handle async execution mode
@@ -509,17 +511,43 @@ Please provide your refined response:`;
509
511
  }
510
512
 
511
513
  // Save conversation state
514
+ let conversationState;
512
515
  try {
516
+ // Build formatted consensus response with actual model responses
517
+ let formattedContent = '';
518
+
519
+ // Add initial responses
520
+ formattedContent += '## Initial Responses\n\n';
521
+ for (const result of initialPhase.successful) {
522
+ formattedContent += `### ${result.model} (initial response):\n${result.response}\n\n---\n\n`;
523
+ }
524
+
525
+ // Add refined responses if cross-feedback was enabled
526
+ if (refinedPhase) {
527
+ formattedContent += '## Refined Responses\n\n';
528
+ for (const result of refinedPhase) {
529
+ if (result.status === 'success' && result.refined_response) {
530
+ formattedContent += `### ${result.model} (refined response):\n${result.refined_response}\n\n---\n\n`;
531
+ } else if (result.status === 'partial') {
532
+ // Show initial response for models that failed refinement
533
+ formattedContent += `### ${result.model} (refinement failed, showing initial):\n${result.initial_response}\n\n---\n\n`;
534
+ }
535
+ }
536
+ }
537
+
538
+ // Add summary at the end
539
+ formattedContent += `\n**Summary:** Consensus completed with ${initialPhase.successful.length} successful initial responses`;
540
+ if (refinedPhase) {
541
+ const successfulRefinements = refinedPhase.filter(r => r.status === 'success').length;
542
+ formattedContent += ` and ${successfulRefinements} successful refined responses`;
543
+ }
544
+ formattedContent += '.';
513
545
  const consensusMessage = {
514
546
  role: 'assistant',
515
- content:
516
- `Consensus completed with ${initialPhase.successful.length} successful responses` +
517
- (refinedPhase
518
- ? ` and ${refinedPhase.filter((r) => r.status === 'success').length} refined responses`
519
- : ''),
547
+ content: formattedContent,
520
548
  };
521
549
 
522
- const conversationState = {
550
+ conversationState = {
523
551
  messages: [...messages, consensusMessage],
524
552
  type: 'consensus',
525
553
  lastUpdated: Date.now(),
@@ -540,6 +568,21 @@ Please provide your refined response:`;
540
568
  // Continue even if save fails
541
569
  }
542
570
 
571
+ // Export conversation if requested
572
+ if (shouldExport && conversationState) {
573
+ await exportConversation(conversationState, {
574
+ clientCwd: dependencies.config.server?.client_cwd,
575
+ continuation_id: continuationId,
576
+ models,
577
+ temperature,
578
+ reasoning_effort,
579
+ use_websearch,
580
+ files,
581
+ images,
582
+ enable_cross_feedback,
583
+ });
584
+ }
585
+
543
586
  const consensusExecutionTime = (Date.now() - consensusStartTime) / 1000; // Convert to seconds
544
587
 
545
588
  // Calculate final success count and collect failure details
@@ -826,6 +869,7 @@ async function executeConsensusWithStreaming(args, dependencies, context) {
826
869
  temperature = 0.2,
827
870
  reasoning_effort = 'medium',
828
871
  use_websearch = false,
872
+ export: shouldExport = false,
829
873
  } = args;
830
874
 
831
875
  let conversationHistory = [];
@@ -1166,17 +1210,43 @@ Please provide your refined response:`;
1166
1210
  }
1167
1211
 
1168
1212
  // Save conversation state
1213
+ let conversationState;
1169
1214
  try {
1215
+ // Build formatted consensus response with actual model responses
1216
+ let formattedContent = '';
1217
+
1218
+ // Add initial responses
1219
+ formattedContent += '## Initial Responses\n\n';
1220
+ for (const result of initialPhase.successful) {
1221
+ formattedContent += `### ${result.model} (initial response):\n${result.response}\n\n---\n\n`;
1222
+ }
1223
+
1224
+ // Add refined responses if cross-feedback was enabled
1225
+ if (refinedPhase) {
1226
+ formattedContent += '## Refined Responses\n\n';
1227
+ for (const result of refinedPhase) {
1228
+ if (result.status === 'success' && result.refined_response) {
1229
+ formattedContent += `### ${result.model} (refined response):\n${result.refined_response}\n\n---\n\n`;
1230
+ } else if (result.status === 'partial') {
1231
+ // Show initial response for models that failed refinement
1232
+ formattedContent += `### ${result.model} (refinement failed, showing initial):\n${result.initial_response}\n\n---\n\n`;
1233
+ }
1234
+ }
1235
+ }
1236
+
1237
+ // Add summary at the end
1238
+ formattedContent += `\n**Summary:** Consensus completed with ${initialPhase.successful.length} successful initial responses`;
1239
+ if (refinedPhase) {
1240
+ const successfulRefinements = refinedPhase.filter(r => r.status === 'success').length;
1241
+ formattedContent += ` and ${successfulRefinements} successful refined responses`;
1242
+ }
1243
+ formattedContent += '.';
1170
1244
  const consensusMessage = {
1171
1245
  role: 'assistant',
1172
- content:
1173
- `Consensus completed with ${initialPhase.successful.length} successful responses` +
1174
- (refinedPhase
1175
- ? ` and ${refinedPhase.filter((r) => r.status === 'success').length} refined responses`
1176
- : ''),
1246
+ content: formattedContent,
1177
1247
  };
1178
1248
 
1179
- const conversationState = {
1249
+ conversationState = {
1180
1250
  messages: [...messages, consensusMessage],
1181
1251
  type: 'consensus',
1182
1252
  lastUpdated: Date.now(),
@@ -1193,6 +1263,21 @@ Please provide your refined response:`;
1193
1263
  logger.error('Error saving consensus conversation', { error });
1194
1264
  }
1195
1265
 
1266
+ // Export conversation if requested
1267
+ if (shouldExport && conversationState) {
1268
+ await exportConversation(conversationState, {
1269
+ clientCwd: config.server?.client_cwd,
1270
+ continuation_id: continuationId,
1271
+ models,
1272
+ temperature,
1273
+ reasoning_effort,
1274
+ use_websearch,
1275
+ files,
1276
+ images,
1277
+ enable_cross_feedback,
1278
+ });
1279
+ }
1280
+
1196
1281
  const consensusExecutionTime = (Date.now() - consensusStartTime) / 1000;
1197
1282
 
1198
1283
  // Generate final summary from combined responses
@@ -1575,6 +1660,12 @@ consensusTool.inputSchema = {
1575
1660
  'Execute consensus in background with detailed progress tracking. When true, returns continuation_id immediately and processes request asynchronously with per-provider status updates. Default: false',
1576
1661
  default: false,
1577
1662
  },
1663
+ export: {
1664
+ type: 'boolean',
1665
+ description:
1666
+ 'Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Default: false',
1667
+ default: false,
1668
+ },
1578
1669
  prompt: {
1579
1670
  type: 'string',
1580
1671
  description:
@@ -0,0 +1,278 @@
1
+ /**
2
+ * Conversation Exporter
3
+ *
4
+ * Exports chat conversations to disk with incremental file writes
5
+ * and atomic metadata updates. Follows functional architecture patterns.
6
+ */
7
+
8
+ import { promises as fs } from 'fs';
9
+ import path from 'path';
10
+ import { createLogger } from './logger.js';
11
+
12
+ const logger = createLogger('conversationExporter');
13
+
14
/**
 * Write file only if it doesn't exist (write-if-missing pattern).
 *
 * Uses the 'wx' open flag so the existence check and the write happen as a
 * single atomic operation, instead of the check-then-write (TOCTOU) race of
 * fs.access followed by fs.writeFile.
 *
 * @param {string} filePath - Path to file
 * @param {string} content - Content to write
 * @returns {Promise<boolean>} True if written, false if skipped or failed
 */
async function writeIfMissing(filePath, content) {
  try {
    // 'wx' opens for writing but fails with EEXIST if the file is already there
    await fs.writeFile(filePath, content, { encoding: 'utf8', flag: 'wx' });
    return true;
  } catch (error) {
    if (error.code === 'EEXIST') {
      // File already exists — skip silently, matching the documented contract
      return false;
    }
    logger.warn(`Failed to write file ${filePath}`, { error });
    return false;
  }
}
37
+
38
/**
 * Write file atomically (write to a unique temp file, then rename).
 *
 * The temp name includes pid + timestamp so two concurrent exporters
 * targeting the same file never stomp each other's in-flight temp file
 * (the original fixed `${filePath}.tmp` suffix raced under concurrency).
 *
 * @param {string} filePath - Target file path
 * @param {string} content - Content to write
 * @returns {Promise<void>}
 * @throws Re-throws the write/rename error after best-effort temp cleanup
 */
async function writeAtomic(filePath, content) {
  // Unique per-process, per-call suffix to avoid concurrent-writer collisions
  const tempPath = `${filePath}.${process.pid}.${Date.now()}.tmp`;
  try {
    await fs.writeFile(tempPath, content, 'utf8');
    // rename replaces the target in one step on the same filesystem
    await fs.rename(tempPath, filePath);
  } catch (error) {
    // Try to clean up temp file if write or rename failed
    try {
      await fs.unlink(tempPath);
    } catch {
      // Ignore cleanup error
    }
    throw error;
  }
}
59
+
60
/**
 * Pair conversation messages into {user, assistant} turns.
 *
 * System messages are ignored. A user message opens a turn; the next
 * assistant message closes it. A user message that never receives an
 * assistant reply is still emitted as a turn with assistant: null.
 * Assistant messages with no preceding user message are dropped.
 *
 * @param {Array} messages - Conversation messages array
 * @returns {Array} Array of {user, assistant} turn pairs
 */
function extractTurns(messages) {
  const turns = [];
  let pending = null;

  for (const message of messages) {
    if (message.role === 'user') {
      // A new user message flushes any unanswered previous turn first
      if (pending) {
        turns.push(pending);
      }
      pending = { user: message, assistant: null };
    } else if (message.role === 'assistant') {
      // Only pair the reply when a user message has opened a turn
      if (pending) {
        pending.assistant = message;
        turns.push(pending);
        pending = null;
      }
    }
    // All other roles (e.g. 'system') are skipped
  }

  // Emit a trailing user message that never got a reply
  if (pending) {
    turns.push(pending);
  }

  return turns;
}
95
+
96
/**
 * Flatten a message's content into a plain string.
 *
 * String content is returned as-is. Array content (mixed text / file /
 * image parts) is reduced to its text parts plus bracketed placeholders
 * for file and image attachments, joined by blank lines. Anything else
 * (missing message, unknown content shape) yields ''.
 *
 * @param {object} message - Message object
 * @returns {string} Extracted content
 */
function extractMessageContent(message) {
  const content = message?.content;

  // Simple string content — nothing to flatten
  if (typeof content === 'string') {
    return content;
  }

  if (!Array.isArray(content)) {
    return '';
  }

  const pieces = [];
  for (const part of content) {
    if (part.type === 'text' && part.text) {
      pieces.push(part.text);
    } else if (part.type === 'file' && part.file_content) {
      // File payloads are not inlined — record a reference only
      pieces.push(`[File: ${part.file_name || 'unknown'}]`);
    } else if (part.type === 'image' && part.image_url) {
      // Base64 data URLs get a generic label instead of the raw data
      const label = part.image_url.startsWith('data:')
        ? 'embedded image'
        : path.basename(part.image_url);
      pieces.push(`[Image: ${label}]`);
    }
  }

  return pieces.join('\n\n');
}
135
+
136
/**
 * Generate metadata for the conversation export.
 *
 * @param {object} conversationState - Current conversation state
 * @param {number} totalTurns - Total number of turns
 * @param {object} params - Additional parameters from the chat/consensus tool
 * @returns {object} Metadata object (plain JSON-serializable data)
 */
function generateMetadata(conversationState, totalTurns, params) {
  const metadata = {
    continuation_id: params.continuation_id,
    model: params.model || 'auto',
    provider: conversationState.provider,
    // ?? (not ||) so an explicit temperature of 0 is preserved instead of
    // silently becoming the 0.5 default
    temperature: params.temperature ?? 0.5,
    total_turns: totalTurns,
    // Nullish checks (not truthiness) so an epoch-0 timestamp is kept
    created_at: conversationState.createdAt != null
      ? new Date(conversationState.createdAt).toISOString()
      : new Date().toISOString(),
    last_updated: new Date(conversationState.lastUpdated ?? Date.now()).toISOString(),
  };

  // Add optional parameters if present
  if (params.reasoning_effort) {
    metadata.reasoning_effort = params.reasoning_effort;
  }
  if (params.verbosity) {
    metadata.verbosity = params.verbosity;
  }
  if (params.use_websearch !== undefined) {
    metadata.use_websearch = params.use_websearch;
  }
  if (params.files && params.files.length > 0) {
    metadata.files = params.files;
  }
  if (params.images && params.images.length > 0) {
    // Don't store base64 data, just file paths or indicators
    metadata.images = params.images.map(img =>
      img.startsWith('data:') ? '[base64 image]' : img
    );
  }
  // Consensus-specific metadata
  if (params.models) {
    metadata.models = params.models;
  }
  if (params.enable_cross_feedback !== undefined) {
    metadata.enable_cross_feedback = params.enable_cross_feedback;
  }

  return metadata;
}
185
+
186
/**
 * Export conversation to disk.
 *
 * Creates <clientCwd>/<continuation_id>/ containing numbered
 * N_request.txt / N_response.txt files (existing files are never
 * overwritten, so re-exports only add new turns) plus an atomically
 * rewritten metadata.json. All failures are logged and swallowed so an
 * export can never interrupt the conversation itself.
 *
 * @param {object} conversationState - Conversation state from continuation store
 * @param {object} options - Export options (continuation_id is required)
 * @returns {Promise<void>}
 */
export async function exportConversation(conversationState, options = {}) {
  const {
    clientCwd,
    continuation_id,
    model,
    temperature,
    reasoning_effort,
    verbosity,
    use_websearch,
    files,
    images,
    models,
    enable_cross_feedback,
  } = options;

  if (!continuation_id) {
    logger.warn('Export skipped: no continuation_id provided');
    return;
  }

  try {
    // 1. Sanitize continuation ID for the folder name. basename strips any
    //    directory components, but '.' and '..' would still escape (or
    //    collide with) the export root, so reject those outright.
    const safeId = path.basename(continuation_id);
    if (!safeId || safeId === '.' || safeId === '..') {
      logger.warn('Export skipped: continuation_id is not a safe folder name', {
        continuation_id,
      });
      return;
    }
    const exportDir = path.resolve(clientCwd || process.cwd(), safeId);

    // 2. Ensure directory exists
    await fs.mkdir(exportDir, { recursive: true });
    logger.debug(`Export directory created/verified: ${exportDir}`);

    // 3. Extract turns from conversation
    const turns = extractTurns(conversationState.messages);
    if (turns.length === 0) {
      logger.debug('No turns to export');
      return;
    }

    // 4. Write request/response files (skip existing)
    let filesWritten = 0;
    for (const [index, turn] of turns.entries()) {
      const turnNum = index + 1;

      if (turn.user) {
        const requestPath = path.join(exportDir, `${turnNum}_request.txt`);
        const written = await writeIfMissing(requestPath, extractMessageContent(turn.user));
        if (written) filesWritten++;
      }

      if (turn.assistant) {
        const responsePath = path.join(exportDir, `${turnNum}_response.txt`);
        const written = await writeIfMissing(responsePath, extractMessageContent(turn.assistant));
        if (written) filesWritten++;
      }
    }

    // 5. Always rewrite metadata atomically — it changes on every turn
    const metadata = generateMetadata(conversationState, turns.length, {
      continuation_id,
      model,
      temperature,
      reasoning_effort,
      verbosity,
      use_websearch,
      files,
      images,
      models,
      enable_cross_feedback,
    });

    const metadataPath = path.join(exportDir, 'metadata.json');
    await writeAtomic(metadataPath, JSON.stringify(metadata, null, 2));

    logger.info('Conversation exported', {
      continuation_id,
      exportDir,
      totalTurns: turns.length,
      filesWritten,
    });
  } catch (error) {
    // Log error but don't interrupt conversation
    logger.error('Export failed', { error, continuation_id });
  }
}