converse-mcp-server 2.5.2 → 2.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docs/API.md +31 -0
- package/package.json +1 -1
- package/src/providers/gemini-cli.js +15 -12
- package/src/tools/chat.js +51 -0
- package/src/tools/consensus.js +43 -2
- package/src/utils/conversationExporter.js +274 -0
package/docs/API.md
CHANGED
|
@@ -109,6 +109,11 @@ MCP_TRANSPORT=stdio npm start
|
|
|
109
109
|
"type": "boolean",
|
|
110
110
|
"default": false,
|
|
111
111
|
"description": "Execute in background mode. Returns continuation_id immediately for status monitoring. Example: true for long-running analysis"
|
|
112
|
+
},
|
|
113
|
+
"export": {
|
|
114
|
+
"type": "boolean",
|
|
115
|
+
"default": false,
|
|
116
|
+
"description": "Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Example: true to save for documentation"
|
|
112
117
|
}
|
|
113
118
|
},
|
|
114
119
|
"required": ["prompt"]
|
|
@@ -156,6 +161,7 @@ MCP_TRANSPORT=stdio npm start
|
|
|
156
161
|
|
|
157
162
|
#### Example Usage
|
|
158
163
|
|
|
164
|
+
**Basic query:**
|
|
159
165
|
```json
|
|
160
166
|
{
|
|
161
167
|
"prompt": "Review this authentication function for security issues",
|
|
@@ -166,6 +172,26 @@ MCP_TRANSPORT=stdio npm start
|
|
|
166
172
|
}
|
|
167
173
|
```
|
|
168
174
|
|
|
175
|
+
**With conversation export:**
|
|
176
|
+
```json
|
|
177
|
+
{
|
|
178
|
+
"prompt": "Help me design a scalable architecture for our system",
|
|
179
|
+
"model": "gpt-5",
|
|
180
|
+
"export": true,
|
|
181
|
+
"continuation_id": "conv_architecture_design"
|
|
182
|
+
}
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
When export is enabled, the conversation will be saved to disk in the following structure:
|
|
186
|
+
```
|
|
187
|
+
conv_architecture_design/
|
|
188
|
+
├── 1_request.txt # First user prompt
|
|
189
|
+
├── 1_response.txt # First AI response
|
|
190
|
+
├── 2_request.txt # Second user prompt (if continuing)
|
|
191
|
+
├── 2_response.txt # Second AI response
|
|
192
|
+
└── metadata.json # Conversation metadata and settings
|
|
193
|
+
```
|
|
194
|
+
|
|
169
195
|
### Consensus Tool
|
|
170
196
|
|
|
171
197
|
**Description**: Multi-provider parallel execution with cross-model feedback for gathering perspectives from multiple AI models.
|
|
@@ -226,6 +252,11 @@ MCP_TRANSPORT=stdio npm start
|
|
|
226
252
|
"type": "boolean",
|
|
227
253
|
"default": false,
|
|
228
254
|
"description": "Execute in background mode with per-provider progress tracking. Returns continuation_id immediately for monitoring."
|
|
255
|
+
},
|
|
256
|
+
"export": {
|
|
257
|
+
"type": "boolean",
|
|
258
|
+
"default": false,
|
|
259
|
+
"description": "Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Example: true to save consensus results"
|
|
229
260
|
}
|
|
230
261
|
},
|
|
231
262
|
"required": ["prompt", "models"]
|
package/src/providers/gemini-cli.js
CHANGED
|
@@ -203,19 +203,22 @@ function mapFinishReason(finishReason) {
|
|
|
203
203
|
}
|
|
204
204
|
|
|
205
205
|
/**
|
|
206
|
-
* Convert messages from Converse internal format to
|
|
206
|
+
* Convert messages from Converse internal format to AI SDK v5 ModelMessage format
|
|
207
207
|
*
|
|
208
208
|
* Converse format (used by other providers like Anthropic):
|
|
209
209
|
* - Images: { type: 'image', source: { type: 'base64', media_type: '...', data: '...' } }
|
|
210
210
|
*
|
|
211
|
-
*
|
|
212
|
-
* - Images: { type: 'image',
|
|
211
|
+
* AI SDK v5 ModelMessage format (required by generateText/streamText):
|
|
212
|
+
* - Images: { type: 'image', image: '...' } (base64 string, Buffer, or URL)
|
|
213
213
|
* - Text: { type: 'text', text: '...' }
|
|
214
214
|
*
|
|
215
|
+
* Note: The AI SDK validates ModelMessage format before passing to providers.
|
|
216
|
+
* We must use 'image' property (not 'data') for the AI SDK to accept the message.
|
|
217
|
+
*
|
|
215
218
|
* @param {Array} messages - Messages in Converse internal format
|
|
216
|
-
* @returns {Array} Messages in
|
|
219
|
+
* @returns {Array} Messages in AI SDK v5 ModelMessage format
|
|
217
220
|
*/
|
|
218
|
-
function
|
|
221
|
+
function convertToModelMessages(messages) {
|
|
219
222
|
return messages.map((message) => {
|
|
220
223
|
// If content is a string, no conversion needed
|
|
221
224
|
if (typeof message.content === 'string') {
|
|
@@ -230,20 +233,20 @@ function convertToGeminiCliMessages(messages) {
|
|
|
230
233
|
return part;
|
|
231
234
|
}
|
|
232
235
|
|
|
233
|
-
// Convert image from Converse format to
|
|
236
|
+
// Convert image from Converse format to AI SDK v5 ModelMessage format
|
|
234
237
|
if (part.type === 'image' && part.source) {
|
|
235
238
|
return {
|
|
236
239
|
type: 'image',
|
|
237
|
-
|
|
240
|
+
image: part.source.data, // AI SDK v5 expects 'image' property (not 'data')
|
|
238
241
|
};
|
|
239
242
|
}
|
|
240
243
|
|
|
241
|
-
// If already in
|
|
242
|
-
if (part.type === 'image' && part.
|
|
244
|
+
// If already in AI SDK v5 format, return as-is
|
|
245
|
+
if (part.type === 'image' && part.image) {
|
|
243
246
|
return part;
|
|
244
247
|
}
|
|
245
248
|
|
|
246
|
-
// Handle file parts (
|
|
249
|
+
// Handle file parts (already in correct format)
|
|
247
250
|
if (part.type === 'file' && part.data) {
|
|
248
251
|
return part;
|
|
249
252
|
}
|
|
@@ -326,8 +329,8 @@ export const geminiCliProvider = {
|
|
|
326
329
|
// Create model instance with SDK model name
|
|
327
330
|
const modelInstance = gemini(sdkModelName);
|
|
328
331
|
|
|
329
|
-
// Convert messages from Converse format to
|
|
330
|
-
const convertedMessages =
|
|
332
|
+
// Convert messages from Converse format to AI SDK v5 ModelMessage format
|
|
333
|
+
const convertedMessages = convertToModelMessages(messages);
|
|
331
334
|
|
|
332
335
|
// Build AI SDK options
|
|
333
336
|
const aiOptions = {
|
package/src/tools/chat.js
CHANGED
|
@@ -20,6 +20,7 @@ import { CHAT_PROMPT } from '../systemPrompts.js';
|
|
|
20
20
|
import { applyTokenLimit, getTokenLimit } from '../utils/tokenLimiter.js';
|
|
21
21
|
import { validateAllPaths } from '../utils/fileValidator.js';
|
|
22
22
|
import { SummarizationService } from '../services/summarizationService.js';
|
|
23
|
+
import { exportConversation } from '../utils/conversationExporter.js';
|
|
23
24
|
|
|
24
25
|
const logger = createLogger('chat');
|
|
25
26
|
|
|
@@ -57,6 +58,7 @@ export async function chatTool(args, dependencies) {
|
|
|
57
58
|
reasoning_effort = 'medium',
|
|
58
59
|
verbosity = 'medium',
|
|
59
60
|
async = false,
|
|
61
|
+
export: shouldExport = false,
|
|
60
62
|
} = args;
|
|
61
63
|
|
|
62
64
|
// Handle async execution mode
|
|
@@ -356,6 +358,27 @@ export async function chatTool(args, dependencies) {
|
|
|
356
358
|
// Continue even if save fails
|
|
357
359
|
}
|
|
358
360
|
|
|
361
|
+
// Export conversation if requested
|
|
362
|
+
if (shouldExport) {
|
|
363
|
+
await exportConversation({
|
|
364
|
+
messages: updatedMessages,
|
|
365
|
+
provider: providerName,
|
|
366
|
+
model,
|
|
367
|
+
lastUpdated: Date.now(),
|
|
368
|
+
codexThreadId: response.metadata?.threadId,
|
|
369
|
+
}, {
|
|
370
|
+
clientCwd: config.server?.client_cwd,
|
|
371
|
+
continuation_id: continuationId,
|
|
372
|
+
model,
|
|
373
|
+
temperature,
|
|
374
|
+
reasoning_effort,
|
|
375
|
+
verbosity,
|
|
376
|
+
use_websearch,
|
|
377
|
+
files,
|
|
378
|
+
images,
|
|
379
|
+
});
|
|
380
|
+
}
|
|
381
|
+
|
|
359
382
|
// Create unified status line (similar to async status display)
|
|
360
383
|
const statusLine =
|
|
361
384
|
config.environment?.nodeEnv !== 'test'
|
|
@@ -577,6 +600,7 @@ async function executeChatWithStreaming(args, dependencies, context) {
|
|
|
577
600
|
images = [],
|
|
578
601
|
reasoning_effort = 'medium',
|
|
579
602
|
verbosity = 'medium',
|
|
603
|
+
export: shouldExport = false,
|
|
580
604
|
} = args;
|
|
581
605
|
|
|
582
606
|
// Initialize SummarizationService
|
|
@@ -916,6 +940,27 @@ async function executeChatWithStreaming(args, dependencies, context) {
|
|
|
916
940
|
// Continue even if save fails
|
|
917
941
|
}
|
|
918
942
|
|
|
943
|
+
// Export conversation if requested
|
|
944
|
+
if (shouldExport) {
|
|
945
|
+
await exportConversation({
|
|
946
|
+
messages: updatedMessages,
|
|
947
|
+
provider: providerName,
|
|
948
|
+
model,
|
|
949
|
+
lastUpdated: Date.now(),
|
|
950
|
+
codexThreadId: response.metadata?.threadId,
|
|
951
|
+
}, {
|
|
952
|
+
clientCwd: config.server?.client_cwd,
|
|
953
|
+
continuation_id: continuationId,
|
|
954
|
+
model,
|
|
955
|
+
temperature,
|
|
956
|
+
reasoning_effort,
|
|
957
|
+
verbosity,
|
|
958
|
+
use_websearch,
|
|
959
|
+
files,
|
|
960
|
+
images,
|
|
961
|
+
});
|
|
962
|
+
}
|
|
963
|
+
|
|
919
964
|
// Return complete result for job completion
|
|
920
965
|
return {
|
|
921
966
|
content: response.content,
|
|
@@ -1000,6 +1045,12 @@ chatTool.inputSchema = {
|
|
|
1000
1045
|
'Execute chat in background. When true, returns continuation_id immediately and processes request asynchronously. Default: false',
|
|
1001
1046
|
default: false,
|
|
1002
1047
|
},
|
|
1048
|
+
export: {
|
|
1049
|
+
type: 'boolean',
|
|
1050
|
+
description:
|
|
1051
|
+
'Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Default: false',
|
|
1052
|
+
default: false,
|
|
1053
|
+
},
|
|
1003
1054
|
prompt: {
|
|
1004
1055
|
type: 'string',
|
|
1005
1056
|
description:
|
package/src/tools/consensus.js
CHANGED
|
@@ -24,6 +24,7 @@ import { CONSENSUS_PROMPT } from '../systemPrompts.js';
|
|
|
24
24
|
import { applyTokenLimit, getTokenLimit } from '../utils/tokenLimiter.js';
|
|
25
25
|
import { validateAllPaths } from '../utils/fileValidator.js';
|
|
26
26
|
import { SummarizationService } from '../services/summarizationService.js';
|
|
27
|
+
import { exportConversation } from '../utils/conversationExporter.js';
|
|
27
28
|
|
|
28
29
|
const logger = createLogger('consensus');
|
|
29
30
|
|
|
@@ -72,6 +73,7 @@ export async function consensusTool(args, dependencies) {
|
|
|
72
73
|
reasoning_effort = 'medium',
|
|
73
74
|
use_websearch = false,
|
|
74
75
|
async = false,
|
|
76
|
+
export: shouldExport = false,
|
|
75
77
|
} = args;
|
|
76
78
|
|
|
77
79
|
// Handle async execution mode
|
|
@@ -509,6 +511,7 @@ Please provide your refined response:`;
|
|
|
509
511
|
}
|
|
510
512
|
|
|
511
513
|
// Save conversation state
|
|
514
|
+
let conversationState;
|
|
512
515
|
try {
|
|
513
516
|
const consensusMessage = {
|
|
514
517
|
role: 'assistant',
|
|
@@ -519,7 +522,7 @@ Please provide your refined response:`;
|
|
|
519
522
|
: ''),
|
|
520
523
|
};
|
|
521
524
|
|
|
522
|
-
|
|
525
|
+
conversationState = {
|
|
523
526
|
messages: [...messages, consensusMessage],
|
|
524
527
|
type: 'consensus',
|
|
525
528
|
lastUpdated: Date.now(),
|
|
@@ -540,6 +543,21 @@ Please provide your refined response:`;
|
|
|
540
543
|
// Continue even if save fails
|
|
541
544
|
}
|
|
542
545
|
|
|
546
|
+
// Export conversation if requested
|
|
547
|
+
if (shouldExport && conversationState) {
|
|
548
|
+
await exportConversation(conversationState, {
|
|
549
|
+
clientCwd: dependencies.config.server?.client_cwd,
|
|
550
|
+
continuation_id: continuationId,
|
|
551
|
+
models: models.join(','),
|
|
552
|
+
temperature,
|
|
553
|
+
reasoning_effort,
|
|
554
|
+
use_websearch,
|
|
555
|
+
files,
|
|
556
|
+
images,
|
|
557
|
+
enable_cross_feedback,
|
|
558
|
+
});
|
|
559
|
+
}
|
|
560
|
+
|
|
543
561
|
const consensusExecutionTime = (Date.now() - consensusStartTime) / 1000; // Convert to seconds
|
|
544
562
|
|
|
545
563
|
// Calculate final success count and collect failure details
|
|
@@ -826,6 +844,7 @@ async function executeConsensusWithStreaming(args, dependencies, context) {
|
|
|
826
844
|
temperature = 0.2,
|
|
827
845
|
reasoning_effort = 'medium',
|
|
828
846
|
use_websearch = false,
|
|
847
|
+
export: shouldExport = false,
|
|
829
848
|
} = args;
|
|
830
849
|
|
|
831
850
|
let conversationHistory = [];
|
|
@@ -1166,6 +1185,7 @@ Please provide your refined response:`;
|
|
|
1166
1185
|
}
|
|
1167
1186
|
|
|
1168
1187
|
// Save conversation state
|
|
1188
|
+
let conversationState;
|
|
1169
1189
|
try {
|
|
1170
1190
|
const consensusMessage = {
|
|
1171
1191
|
role: 'assistant',
|
|
@@ -1176,7 +1196,7 @@ Please provide your refined response:`;
|
|
|
1176
1196
|
: ''),
|
|
1177
1197
|
};
|
|
1178
1198
|
|
|
1179
|
-
|
|
1199
|
+
conversationState = {
|
|
1180
1200
|
messages: [...messages, consensusMessage],
|
|
1181
1201
|
type: 'consensus',
|
|
1182
1202
|
lastUpdated: Date.now(),
|
|
@@ -1193,6 +1213,21 @@ Please provide your refined response:`;
|
|
|
1193
1213
|
logger.error('Error saving consensus conversation', { error });
|
|
1194
1214
|
}
|
|
1195
1215
|
|
|
1216
|
+
// Export conversation if requested
|
|
1217
|
+
if (shouldExport && conversationState) {
|
|
1218
|
+
await exportConversation(conversationState, {
|
|
1219
|
+
clientCwd: config.server?.client_cwd,
|
|
1220
|
+
continuation_id: continuationId,
|
|
1221
|
+
models: models.join(','),
|
|
1222
|
+
temperature,
|
|
1223
|
+
reasoning_effort,
|
|
1224
|
+
use_websearch,
|
|
1225
|
+
files,
|
|
1226
|
+
images,
|
|
1227
|
+
enable_cross_feedback,
|
|
1228
|
+
});
|
|
1229
|
+
}
|
|
1230
|
+
|
|
1196
1231
|
const consensusExecutionTime = (Date.now() - consensusStartTime) / 1000;
|
|
1197
1232
|
|
|
1198
1233
|
// Generate final summary from combined responses
|
|
@@ -1575,6 +1610,12 @@ consensusTool.inputSchema = {
|
|
|
1575
1610
|
'Execute consensus in background with detailed progress tracking. When true, returns continuation_id immediately and processes request asynchronously with per-provider status updates. Default: false',
|
|
1576
1611
|
default: false,
|
|
1577
1612
|
},
|
|
1613
|
+
export: {
|
|
1614
|
+
type: 'boolean',
|
|
1615
|
+
description:
|
|
1616
|
+
'Export conversation to disk. Creates folder with continuation_id name containing numbered request/response files and metadata. Default: false',
|
|
1617
|
+
default: false,
|
|
1618
|
+
},
|
|
1578
1619
|
prompt: {
|
|
1579
1620
|
type: 'string',
|
|
1580
1621
|
description:
|
|
package/src/utils/conversationExporter.js
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Conversation Exporter
|
|
3
|
+
*
|
|
4
|
+
* Exports chat conversations to disk with incremental file writes
|
|
5
|
+
* and atomic metadata updates. Follows functional architecture patterns.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { promises as fs } from 'fs';
|
|
9
|
+
import path from 'path';
|
|
10
|
+
import { createLogger } from './logger.js';
|
|
11
|
+
|
|
12
|
+
const logger = createLogger('conversationExporter');
|
|
13
|
+
|
|
14
|
+
/**
 * Write file only if it doesn't exist (write-if-missing pattern)
 *
 * Uses the 'wx' open flag so the existence check and the write happen in a
 * single atomic open(2) call. The previous fs.access() + fs.writeFile()
 * sequence had a check-then-write race: two concurrent exports for the same
 * continuation_id could both pass the access check and overwrite each other.
 *
 * @param {string} filePath - Path to file
 * @param {string} content - Content to write
 * @returns {Promise<boolean>} True if written, false if skipped or failed
 */
async function writeIfMissing(filePath, content) {
  try {
    // 'wx' = write, fail with EEXIST if the file already exists
    await fs.writeFile(filePath, content, { encoding: 'utf8', flag: 'wx' });
    return true;
  } catch (error) {
    if (error.code === 'EEXIST') {
      // File already exists — skip silently (expected on re-export)
      return false;
    }
    // Unexpected write failure (permissions, disk full, ...): log and continue
    logger.warn(`Failed to write file ${filePath}`, { error });
    return false;
  }
}
|
|
37
|
+
|
|
38
|
+
/**
 * Write file atomically (stage into a temp file, then rename into place)
 *
 * Readers of filePath never observe a partially-written file: rename on the
 * same filesystem replaces the target in one step.
 *
 * @param {string} filePath - Target file path
 * @param {string} content - Content to write
 * @returns {Promise<void>}
 * @throws Propagates the original write/rename failure after best-effort cleanup
 */
async function writeAtomic(filePath, content) {
  const stagingPath = `${filePath}.tmp`;
  try {
    await fs.writeFile(stagingPath, content, 'utf8');
    await fs.rename(stagingPath, filePath);
  } catch (writeError) {
    // Best-effort removal of the staging file; cleanup failures are ignored
    await fs.unlink(stagingPath).catch(() => {});
    throw writeError;
  }
}
|
|
59
|
+
|
|
60
|
+
/**
 * Pair conversation messages into {user, assistant} turns.
 *
 * System messages are ignored. An assistant message with no preceding user
 * message is dropped; a user message that never received a reply still
 * produces a turn whose assistant is null.
 *
 * @param {Array} messages - Conversation messages array
 * @returns {Array} Array of {user, assistant} turn pairs
 */
function extractTurns(messages) {
  const turns = [];
  let pending = null;

  for (const message of messages) {
    switch (message.role) {
      case 'user':
        // A new user message opens a turn; flush any unanswered one first
        if (pending !== null) {
          turns.push(pending);
        }
        pending = { user: message, assistant: null };
        break;
      case 'assistant':
        // Close the open turn; orphan assistant messages are dropped
        if (pending !== null) {
          pending.assistant = message;
          turns.push(pending);
          pending = null;
        }
        break;
      default:
        // 'system' (or any other role) is not part of a turn
        break;
    }
  }

  // A trailing user message without a reply still counts as a turn
  if (pending !== null) {
    turns.push(pending);
  }

  return turns;
}
|
|
95
|
+
|
|
96
|
+
/**
 * Extract a plain-text rendering of a message (string or parts array).
 *
 * Text parts are joined with blank lines; file and image parts are replaced
 * with short placeholders so the export stays readable.
 *
 * @param {object} message - Message object
 * @returns {string} Extracted content ('' when nothing is extractable)
 */
function extractMessageContent(message) {
  if (!message) return '';

  const content = message.content;

  // Simple string content needs no processing
  if (typeof content === 'string') {
    return content;
  }

  // Complex content: array of text / file / image parts
  if (Array.isArray(content)) {
    const textParts = [];

    for (const part of content) {
      if (part.type === 'text' && part.text) {
        textParts.push(part.text);
      } else if (part.type === 'file' && part.file_content) {
        // Reference the file by name rather than inlining its content
        textParts.push(`[File: ${part.file_name || 'unknown'}]`);
      } else if (part.type === 'image' && part.image_url) {
        // URL-style image part (data: URIs are embedded base64 payloads)
        const imageName = part.image_url.startsWith('data:')
          ? 'embedded image'
          : path.basename(part.image_url);
        textParts.push(`[Image: ${imageName}]`);
      } else if (part.type === 'image' && (part.source || part.image)) {
        // Converse-internal image parts use { source: { data } } and AI SDK
        // parts use { image } (see gemini-cli provider); both carry raw
        // base64 with no filename, so emit a generic placeholder.
        textParts.push('[Image: embedded image]');
      }
    }

    return textParts.join('\n\n');
  }

  return '';
}
|
|
135
|
+
|
|
136
|
+
/**
 * Generate metadata describing the exported conversation.
 *
 * @param {object} conversationState - Current conversation state
 * @param {number} totalTurns - Total number of turns
 * @param {object} params - Additional parameters from chat/consensus tool
 * @returns {object} Metadata object
 */
function generateMetadata(conversationState, totalTurns, params) {
  const metadata = {
    continuation_id: params.continuation_id,
    // ?? (not ||) so explicit falsy values survive — e.g. temperature 0
    // must not silently become the 0.5 default
    model: params.model ?? 'auto',
    provider: conversationState.provider,
    temperature: params.temperature ?? 0.5,
    total_turns: totalTurns,
    created_at: conversationState.createdAt
      ? new Date(conversationState.createdAt).toISOString()
      : new Date().toISOString(),
    last_updated: new Date(conversationState.lastUpdated || Date.now()).toISOString(),
  };

  // Optional parameters are added only when present
  if (params.reasoning_effort) {
    metadata.reasoning_effort = params.reasoning_effort;
  }
  if (params.verbosity) {
    metadata.verbosity = params.verbosity;
  }
  if (params.use_websearch !== undefined) {
    metadata.use_websearch = params.use_websearch;
  }
  if (params.files && params.files.length > 0) {
    metadata.files = params.files;
  }
  if (params.images && params.images.length > 0) {
    // Don't store base64 data, just file paths or indicators
    metadata.images = params.images.map((img) =>
      img.startsWith('data:') ? '[base64 image]' : img
    );
  }
  // Consensus-specific metadata
  if (params.models) {
    metadata.models = params.models;
  }
  if (params.enable_cross_feedback !== undefined) {
    metadata.enable_cross_feedback = params.enable_cross_feedback;
  }

  return metadata;
}
|
|
185
|
+
|
|
186
|
+
/**
 * Export conversation to disk.
 *
 * Creates <clientCwd>/<continuation_id>/ containing N_request.txt and
 * N_response.txt per turn (existing files are never overwritten) plus an
 * atomically-rewritten metadata.json. All failures are logged and swallowed
 * so export can never break the conversation itself.
 *
 * @param {object} conversationState - Conversation state from continuation store
 * @param {object} options - Export options (chat params plus, for consensus,
 *   `models` and `enable_cross_feedback`)
 * @returns {Promise<void>}
 */
export async function exportConversation(conversationState, options = {}) {
  const {
    clientCwd,
    continuation_id,
    model,
    temperature,
    reasoning_effort,
    verbosity,
    use_websearch,
    files,
    images,
  } = options;

  if (!continuation_id) {
    logger.warn('Export skipped: no continuation_id provided');
    return;
  }

  try {
    // 1. Sanitize continuation ID for use as a folder name.
    //    basename() alone is not enough: basename('..') is '..', which would
    //    resolve outside the export root, and characters like ':' are invalid
    //    on some filesystems — restrict to a safe character set.
    const safeId = path.basename(continuation_id).replace(/[^\w.-]/g, '_');
    if (!safeId || safeId === '.' || safeId === '..') {
      logger.warn('Export skipped: unusable continuation_id', { continuation_id });
      return;
    }
    const exportDir = path.resolve(clientCwd || process.cwd(), safeId);

    // 2. Ensure directory exists
    await fs.mkdir(exportDir, { recursive: true });
    logger.debug(`Export directory created/verified: ${exportDir}`);

    // 3. Extract turns from conversation
    const turns = extractTurns(conversationState.messages);

    if (turns.length === 0) {
      logger.debug('No turns to export');
      return;
    }

    // 4. Write request/response files. Existing files are skipped so earlier
    //    turns are never clobbered when the same conversation is re-exported.
    let filesWritten = 0;
    for (const [index, turn] of turns.entries()) {
      const turnNum = index + 1;

      if (turn.user) {
        const requestPath = path.join(exportDir, `${turnNum}_request.txt`);
        const userContent = extractMessageContent(turn.user);
        if (await writeIfMissing(requestPath, userContent)) filesWritten++;
      }

      if (turn.assistant) {
        const responsePath = path.join(exportDir, `${turnNum}_response.txt`);
        const assistantContent = extractMessageContent(turn.assistant);
        if (await writeIfMissing(responsePath, assistantContent)) filesWritten++;
      }
    }

    // 5. Always update metadata atomically (temp file + rename) so a crash
    //    mid-write never leaves a truncated metadata.json.
    //    Consensus callers pass `models` / `enable_cross_feedback` in options;
    //    forward them so generateMetadata can record them (previously they
    //    were dropped by the destructuring above).
    const metadata = generateMetadata(conversationState, turns.length, {
      continuation_id,
      model,
      temperature,
      reasoning_effort,
      verbosity,
      use_websearch,
      files,
      images,
      models: options.models,
      enable_cross_feedback: options.enable_cross_feedback,
    });

    const metadataPath = path.join(exportDir, 'metadata.json');
    await writeAtomic(metadataPath, JSON.stringify(metadata, null, 2));

    logger.info('Conversation exported', {
      continuation_id,
      exportDir,
      totalTurns: turns.length,
      filesWritten,
    });
  } catch (error) {
    // Log error but don't interrupt conversation — export is best-effort
    logger.error('Export failed', { error, continuation_id });
  }
}
|