ydc-mcp-server 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +140 -0
- package/README_JA.md +138 -0
- package/README_ZH_CN.md +138 -0
- package/README_ZH_TW.md +138 -0
- package/index.js +604 -0
- package/lib/advanced-versions.js +113 -0
- package/lib/api-client.js +134 -0
- package/lib/auth-middleware.js +44 -0
- package/lib/conversation-store.js +271 -0
- package/lib/openai-mapper.js +215 -0
- package/lib/routes/chat.js +199 -0
- package/lib/routes/conversations.js +94 -0
- package/lib/routes/health.js +31 -0
- package/lib/routes/models.js +111 -0
- package/openai-server.js +93 -0
- package/package.json +62 -0
package/index.js
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
|
|
4
|
+
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
|
|
5
|
+
import {
|
|
6
|
+
CallToolRequestSchema,
|
|
7
|
+
ListToolsRequestSchema,
|
|
8
|
+
} from '@modelcontextprotocol/sdk/types.js';
|
|
9
|
+
import { randomUUID } from 'crypto';
|
|
10
|
+
import { spawn } from 'child_process';
|
|
11
|
+
import { fileURLToPath } from 'url';
|
|
12
|
+
import { dirname, join } from 'path';
|
|
13
|
+
|
|
14
|
+
// Import shared modules
|
|
15
|
+
import { AGENT_TYPES, callYouApi, extractText, buildConversationInput, buildAgentRequest } from './lib/api-client.js';
|
|
16
|
+
|
|
17
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
18
|
+
const __dirname = dirname(__filename);
|
|
19
|
+
|
|
20
|
+
// ============ CLI MODE CHECK ============
const args = process.argv.slice(2);

// Value-taking CLI flags and the environment variable each one populates.
const FLAG_ENV_MAP = {
  '--api-key': 'YDC_API_KEY',
  '--api-keys': 'YDC_API_KEYS',
  '--port': 'YDC_OPENAI_PORT',
  '-p': 'YDC_OPENAI_PORT',
  '--access-token': 'YDC_OPENAI_ACCESS_TOKENS',
  '--key-mode': 'YDC_KEY_MODE',
};

// Parse CLI arguments. Each value-taking flag consumes the following token;
// the index is advanced past the consumed value so it cannot be re-parsed
// as a flag itself (the previous version left the value in the scan).
for (let i = 0; i < args.length; i++) {
  const flag = args[i];
  const value = args[i + 1];
  if (!value) continue; // flag at end of argv, or empty value: nothing to consume

  if (Object.hasOwn(FLAG_ENV_MAP, flag)) {
    process.env[FLAG_ENV_MAP[flag]] = value;
    i++; // skip consumed value
  } else if (flag === '--agent') {
    // Format: name:id or just id (name defaults to id). Repeatable: values
    // accumulate into a comma-separated list.
    const existing = process.env.YDC_CUSTOM_AGENTS || '';
    process.env.YDC_CUSTOM_AGENTS = existing ? `${existing},${value}` : value;
    i++; // skip consumed value
  }
}

const isOpenAIMode = args.includes('--openai') || args.includes('openai');
|
|
48
|
+
|
|
49
|
+
if (isOpenAIMode) {
  // Start OpenAI-compatible HTTP server using spawn. Use process.execPath
  // (the node binary currently executing this script) instead of a bare
  // 'node' so the child runs even when node is not resolvable on PATH or a
  // different version would be picked up.
  const openaiServerPath = join(__dirname, 'openai-server.js');
  const child = spawn(process.execPath, [openaiServerPath], {
    stdio: 'inherit',
    env: process.env
  });
  // Mirror the child's exit code. A null code means the child was killed by
  // a signal; report failure (1) rather than implicitly exiting 0.
  child.on('exit', (code) => process.exit(code ?? 1));
} else if (args.includes('--help') || args.includes('-h')) {
  console.log(`
ydc-mcp-server - MCP server for You.com AI agents

Usage:
  npx ydc-mcp-server                                Start MCP server (stdio)
  npx ydc-mcp-server --openai                       Start OpenAI-compatible HTTP server
  npx ydc-mcp-server --openai --api-key KEY         Start with single API key
  npx ydc-mcp-server --openai --api-keys K1,K2      Start with multiple API keys
  npx ydc-mcp-server --openai --port 3003           Start on custom port
  npx ydc-mcp-server --openai --access-token TOK    Require access token for HTTP server

Options:
  --openai                  Start OpenAI-compatible HTTP server
  --api-key KEY             Set single You.com API key
  --api-keys K1,K2,K3       Set multiple API keys (comma-separated)
  --key-mode MODE           Key rotation: round-robin (default) / sequential / random
  --port, -p PORT           Set HTTP server port (default: 3002)
  --access-token TOKEN      Set access token for HTTP server authentication
  --agent NAME:ID           Add custom agent to models list (can use multiple times)
  --help, -h                Show this help

Environment Variables:
  YDC_API_KEY               You.com API key (required)
  YDC_API_KEYS              Multiple keys (comma-separated)
  YDC_KEY_MODE              round-robin / sequential / random
  YDC_OPENAI_PORT           HTTP server port (default: 3002)
  YDC_CONVERSATION_STORE    sqlite / memory (default: sqlite)
  YDC_OPENAI_ACCESS_TOKENS  Allowed tokens (comma-separated)
  YDC_CUSTOM_AGENTS         Custom agents (name:id,name2:id2)
`);
  process.exit(0);
}
|
|
90
|
+
|
|
91
|
+
// Skip MCP server if in OpenAI mode
|
|
92
|
+
if (isOpenAIMode) {
|
|
93
|
+
// Wait forever, child process handles everything
|
|
94
|
+
setInterval(() => {}, 1000000);
|
|
95
|
+
} else {
|
|
96
|
+
|
|
97
|
+
// ============ OPENAI SERVER CONTROL ============
// Handle and bookkeeping for the optional OpenAI-compatible HTTP server,
// managed as a child process by the openai_server_control tool.
let openaiServerProcess = null;
let openaiServerPort = null;
// One of 'stopped' | 'starting' | 'running' | 'error' (set by controlOpenAIServer).
let openaiServerStatus = 'stopped';

// ============ MULTI-KEY CONFIGURATION ============
// YDC_API_KEYS (comma-separated) takes precedence over a single YDC_API_KEY.
const API_KEYS_RAW = process.env.YDC_API_KEYS || process.env.YDC_API_KEY || '';
// Trim each entry and drop blanks so ",key1, ,key2," yields ['key1', 'key2'].
const API_KEYS = API_KEYS_RAW.split(',').map(k => k.trim()).filter(k => k);
// Rotation strategy: 'round-robin' (default) | 'sequential' | 'random'.
const KEY_MODE = process.env.YDC_KEY_MODE || 'round-robin';
let currentKeyIndex = 0;
// Per-key counters keyed by the raw key string (see getNextApiKey/markKeyError).
const keyUsageCount = new Map();
const keyErrorCount = new Map();

// ============ OPENAI SERVER ENV PASSTHROUGH ============
// Defaults forwarded to the spawned HTTP server when the tool call does not
// override them explicitly.
const OPENAI_SERVER_STORE_TYPE = process.env.YDC_CONVERSATION_STORE || 'sqlite';
const OPENAI_SERVER_DB_PATH = process.env.YDC_CONVERSATION_DB_PATH || '';
const OPENAI_SERVER_ACCESS_TOKENS = process.env.YDC_OPENAI_ACCESS_TOKENS || '';

// ============ CONVERSATION STORE (Memory for MCP) ============
// In-memory conversation map: id -> { id, messages, createdAt, updatedAt }.
const conversationStore = new Map();
// Conversations idle longer than 24h are swept by the hourly cleanup interval.
const CONVERSATION_TTL = 24 * 60 * 60 * 1000;
// Hard cap per conversation; older messages are trimmed (system message kept).
const MAX_MESSAGES_PER_CONVERSATION = 100;
|
|
119
|
+
|
|
120
|
+
/**
 * Mint a fresh conversation identifier (RFC 4122 v4 UUID).
 * @returns {string} a new unique id
 */
function generateConversationId() {
  const id = randomUUID();
  return id;
}
|
|
123
|
+
|
|
124
|
+
/**
 * Look up a conversation by id, refreshing its last-activity timestamp.
 * @param {string|null} conversationId - id to look up (may be falsy)
 * @returns {object|null} the conversation record, or null when absent
 */
function getConversation(conversationId) {
  if (!conversationId) return null;
  const record = conversationStore.get(conversationId);
  if (record === undefined) return null;
  // Touch the record so the TTL sweeper treats it as recently used.
  record.updatedAt = Date.now();
  return record;
}
|
|
130
|
+
|
|
131
|
+
/**
 * Create and register a new empty conversation.
 * @param {string|null} [conversationId] - id to use; a fresh UUID when falsy
 * @returns {object} the newly stored conversation record
 */
function createConversation(conversationId = null) {
  const assignedId = conversationId || generateConversationId();
  const record = {
    id: assignedId,
    messages: [],
    createdAt: Date.now(),
    updatedAt: Date.now(),
  };
  conversationStore.set(assignedId, record);
  return record;
}
|
|
137
|
+
|
|
138
|
+
/**
 * Append a message to a conversation, creating the conversation on demand.
 * When the message cap is reached, older messages are trimmed but any system
 * message is preserved at the front so context instructions survive trimming.
 * @param {string} conversationId - target conversation (created if missing)
 * @param {string} role - 'system' | 'user' | 'assistant'
 * @param {string} content - message text
 * @returns {object} the updated conversation record
 */
function addMessageToConversation(conversationId, role, content) {
  let conv = getConversation(conversationId);
  if (!conv) conv = createConversation(conversationId);

  if (conv.messages.length >= MAX_MESSAGES_PER_CONVERSATION) {
    // Keep the first system message (if any) plus the most recent tail;
    // the slice offsets leave room for the system message and the push below.
    const systemMsg = conv.messages.find(m => m.role === 'system');
    conv.messages = systemMsg
      ? [systemMsg, ...conv.messages.slice(-MAX_MESSAGES_PER_CONVERSATION + 2)]
      : conv.messages.slice(-MAX_MESSAGES_PER_CONVERSATION + 1);
  }

  conv.messages.push({ role, content, timestamp: Date.now() });
  conv.updatedAt = Date.now();
  return conv;
}
|
|
153
|
+
|
|
154
|
+
// Cleanup expired conversations: hourly sweep that evicts any conversation
// whose last activity (updatedAt) is older than CONVERSATION_TTL (24h).
// Deleting while iterating a Map's entries() is safe in JavaScript.
setInterval(() => {
  const now = Date.now();
  for (const [id, conv] of conversationStore.entries()) {
    if (now - conv.updatedAt > CONVERSATION_TTL) conversationStore.delete(id);
  }
}, 60 * 60 * 1000);
|
|
161
|
+
|
|
162
|
+
// ============ API KEY MANAGEMENT ============
|
|
163
|
+
/**
 * Heuristic: does the script path suggest we were launched via npx / a
 * package-manager-installed copy rather than a local checkout?
 * @returns {boolean}
 */
function isRunningAsNpx() {
  const scriptPath = process.argv[1] || '';
  const markers = ['node_modules', '.npm/_npx', 'npx', 'pnpm/global'];
  return markers.some((marker) => scriptPath.includes(marker));
}
|
|
168
|
+
|
|
169
|
+
/**
 * Verify at least one API key is configured. On failure, print an actionable
 * mcp.json example (npx- or node-flavored depending on how we were launched).
 * @returns {boolean} true when at least one key is available
 */
function validateApiKeys() {
  if (API_KEYS.length > 0) return true;

  console.error('ERROR: No API keys configured!');
  console.error('Please set YDC_API_KEY or YDC_API_KEYS environment variable in mcp.json');

  // Tailor the example command to the launch style.
  const config = isRunningAsNpx()
    ? { command: "npx", args: ["-y", "ydc-mcp-server"] }
    : { command: "node", args: [process.argv[1] || "path/to/index.js"] };

  const example = {
    mcpServers: {
      "ydc-mcp-server": {
        ...config,
        env: { YDC_API_KEY: "your-api-key-here", YDC_KEY_MODE: "round-robin" }
      }
    }
  };

  console.error('\nExample mcp.json config:');
  console.error(JSON.stringify(example, null, 2));
  return false;
}
|
|
191
|
+
|
|
192
|
+
/**
 * Pick the next API key according to KEY_MODE and record its use.
 * - single key: always that key
 * - 'random': uniform pick
 * - 'sequential': stay on the current key (markKeyError advances it)
 * - 'round-robin' (default): advance after every pick
 * @returns {string} the selected key
 * @throws {Error} when no keys are configured
 */
function getNextApiKey() {
  if (API_KEYS.length === 0) throw new Error('No API keys configured');

  let selected;
  if (API_KEYS.length === 1) {
    selected = API_KEYS[0];
  } else if (KEY_MODE === 'sequential') {
    selected = API_KEYS[currentKeyIndex];
  } else if (KEY_MODE === 'random') {
    selected = API_KEYS[Math.floor(Math.random() * API_KEYS.length)];
  } else {
    // round-robin (default)
    selected = API_KEYS[currentKeyIndex];
    currentKeyIndex = (currentKeyIndex + 1) % API_KEYS.length;
  }

  const used = keyUsageCount.get(selected) || 0;
  keyUsageCount.set(selected, used + 1);
  return selected;
}
|
|
211
|
+
|
|
212
|
+
/**
 * Record a failure against a key. In sequential mode an error is the signal
 * to rotate to the next key; other modes rotate on their own schedule.
 * @param {string} key - the key that just failed
 */
function markKeyError(key) {
  const failures = keyErrorCount.get(key) || 0;
  keyErrorCount.set(key, failures + 1);
  if (KEY_MODE === 'sequential') {
    currentKeyIndex = (currentKeyIndex + 1) % API_KEYS.length;
  }
}
|
|
216
|
+
|
|
217
|
+
// ============ MCP SERVER ============
|
|
218
|
+
class YouAgentsServer {
|
|
219
|
+
constructor() {
|
|
220
|
+
this.server = new Server(
|
|
221
|
+
{ name: 'ydc-mcp-server', version: '1.5.1' },
|
|
222
|
+
{ capabilities: { tools: {} } }
|
|
223
|
+
);
|
|
224
|
+
this.setupToolHandlers();
|
|
225
|
+
this.server.onerror = (error) => console.error('[MCP Error]', error);
|
|
226
|
+
process.on('SIGINT', async () => { await this.server.close(); process.exit(0); });
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
  // Register the two MCP request handlers: the tool catalog (ListTools) and
  // the dispatcher that routes each tool invocation to its implementation.
  setupToolHandlers() {
    this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
      tools: [
        // Simple web/news search; thin alias over the express agent.
        {
          name: 'you_search',
          description: 'Web and news search via You.com',
          inputSchema: {
            type: 'object',
            properties: { query: { type: 'string', description: 'Search query' } },
            required: ['query'],
          },
        },
        // Fast single-shot answers (express agent).
        {
          name: 'you_express',
          description: 'Fast AI answers with web search',
          inputSchema: {
            type: 'object',
            properties: { input: { type: 'string', description: 'Query or prompt' } },
            required: ['input'],
          },
        },
        // Advanced agent with optional multi-turn context and tuning knobs.
        {
          name: 'you_advanced',
          description: 'Advanced AI agent for complex reasoning and research',
          inputSchema: {
            type: 'object',
            properties: {
              input: { type: 'string', description: 'Query or prompt' },
              conversation_id: { type: 'string', description: 'Optional conversation ID for multi-turn dialogue' },
              agent_type: {
                type: 'string',
                // Agent ids come from the shared api-client module.
                enum: Object.keys(AGENT_TYPES),
                default: 'advanced-3.0-high'
              },
              verbosity: { type: 'string', enum: ['medium', 'high'], default: 'high' },
              max_workflow_steps: { type: 'number', default: 15, minimum: 1, maximum: 20 },
            },
            required: ['input'],
          },
        },
        // OpenAI-style messages array with server-side conversation memory.
        {
          name: 'you_chat',
          description: 'OpenAI-compatible chat interface with conversation history',
          inputSchema: {
            type: 'object',
            properties: {
              messages: {
                type: 'array',
                items: {
                  type: 'object',
                  properties: {
                    role: { type: 'string', enum: ['system', 'user', 'assistant'] },
                    content: { type: 'string' }
                  },
                  required: ['role', 'content']
                }
              },
              conversation_id: { type: 'string' },
              model: {
                type: 'string',
                enum: Object.keys(AGENT_TYPES),
                default: 'advanced-3.0-high'
              },
            },
            required: ['messages'],
          },
        },
        // Conversation management helpers over the in-memory store.
        {
          name: 'you_conversation_list',
          description: 'List all active conversations',
          inputSchema: { type: 'object', properties: {} },
        },
        {
          name: 'you_conversation_get',
          description: 'Get conversation history by ID',
          inputSchema: {
            type: 'object',
            properties: { conversation_id: { type: 'string' } },
            required: ['conversation_id'],
          },
        },
        {
          name: 'you_conversation_delete',
          description: 'Delete a conversation',
          inputSchema: {
            type: 'object',
            properties: { conversation_id: { type: 'string' } },
            required: ['conversation_id'],
          },
        },
        // Introspection of key rotation counters.
        {
          name: 'you_key_status',
          description: 'Get API key usage status',
          inputSchema: { type: 'object', properties: {} },
        },
        // Lifecycle control of the companion OpenAI-compatible HTTP server.
        {
          name: 'openai_server_control',
          description: 'Control the OpenAI-compatible HTTP server (start/stop/restart/status)',
          inputSchema: {
            type: 'object',
            properties: {
              action: { type: 'string', enum: ['start', 'stop', 'restart', 'status'] },
              port: { type: 'number', default: 3002 },
              access_tokens: { type: 'array', items: { type: 'string' } },
              store_type: { type: 'string', enum: ['sqlite', 'memory'], default: 'sqlite' },
              db_path: { type: 'string' },
            },
            required: ['action'],
          },
        },
      ],
    }));

    // Dispatch tool calls; any thrown error is converted into a text result
    // rather than an MCP protocol error so the client always gets content.
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params;
      try {
        switch (name) {
          // you_search and you_express both route to the express agent.
          case 'you_search': return await this.callExpress(args.query);
          case 'you_express': return await this.callExpress(args.input);
          case 'you_advanced': return await this.callAdvanced(args);
          case 'you_chat': return await this.callChat(args);
          case 'you_conversation_list': return this.listConversations();
          case 'you_conversation_get': return this.getConversationHistory(args.conversation_id);
          case 'you_conversation_delete': return this.deleteConversation(args.conversation_id);
          case 'you_key_status': return this.getKeyStatus();
          case 'openai_server_control': return await this.controlOpenAIServer(args);
          default: throw new Error(`Unknown tool: ${name}`);
        }
      } catch (error) {
        return { content: [{ type: 'text', text: `Error: ${error.message}` }] };
      }
    });
  }
|
|
362
|
+
|
|
363
|
+
async callExpress(input) {
|
|
364
|
+
const apiKey = getNextApiKey();
|
|
365
|
+
try {
|
|
366
|
+
const response = await callYouApi(apiKey, { agent: 'express', input, stream: false });
|
|
367
|
+
const data = await response.json();
|
|
368
|
+
return { content: [{ type: 'text', text: extractText(data) }] };
|
|
369
|
+
} catch (error) {
|
|
370
|
+
markKeyError(apiKey);
|
|
371
|
+
throw error;
|
|
372
|
+
}
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
  /**
   * Run the advanced agent, optionally prefixing prior conversation turns.
   * History is flattened into the prompt as a "[Conversation History]" block
   * because the upstream API call here is single-input, not message-based.
   * @param {object} args - { input, conversation_id?, agent_type?, verbosity?, max_workflow_steps? }
   * @returns {Promise<object>} MCP content: answer text plus conversation metadata
   */
  async callAdvanced(args) {
    const { input, conversation_id, agent_type = 'advanced-3.0-high', verbosity, max_workflow_steps } = args;
    const apiKey = getNextApiKey();
    let conversationId = conversation_id || generateConversationId();
    let fullInput = input;

    // If the conversation already has turns, inline them ahead of the new
    // message so the agent sees the prior context.
    const conv = getConversation(conversationId);
    if (conv && conv.messages.length > 0) {
      const history = conv.messages.map(m => `${m.role === 'user' ? 'User' : 'Assistant'}: ${m.content}`).join('\n\n');
      fullInput = `[Conversation History]\n${history}\n\n[Current Message]\n${input}`;
    }

    // Record the raw user message (not the history-prefixed prompt).
    addMessageToConversation(conversationId, 'user', input);
    const requestBody = buildAgentRequest(agent_type, fullInput, { verbosity, max_workflow_steps });

    try {
      const response = await callYouApi(apiKey, requestBody);
      const data = await response.json();
      const resultText = extractText(data);
      addMessageToConversation(conversationId, 'assistant', resultText);

      return {
        content: [
          { type: 'text', text: resultText },
          // Trailer lets callers continue the same conversation next turn.
          { type: 'text', text: `\n\n---\nConversation ID: ${conversationId}\nAgent: ${agent_type}` }
        ],
      };
    } catch (error) {
      markKeyError(apiKey);
      throw error;
    }
  }
|
|
407
|
+
|
|
408
|
+
  /**
   * OpenAI-style chat: merge incoming messages with stored history, call the
   * selected agent, and persist both sides of the exchange.
   * Merge rules when stored history exists: the system message (incoming one
   * wins over stored) leads, then stored non-system turns, then the newest
   * incoming user message — skipped if an identical user message is already
   * present (dedup by exact content match).
   * @param {object} args - { messages, conversation_id?, model? }
   * @returns {Promise<object>} MCP content: answer text plus conversation id
   */
  async callChat(args) {
    const { messages, conversation_id, model = 'advanced-3.0-high' } = args;
    const apiKey = getNextApiKey();
    let conversationId = conversation_id || generateConversationId();
    let fullMessages = [...messages];

    const conv = getConversation(conversationId);
    if (conv && conv.messages.length > 0) {
      const storedMessages = conv.messages.map(m => ({ role: m.role, content: m.content }));
      // Prefer a system message supplied in this call; fall back to a stored one.
      const systemMsg = fullMessages.find(m => m.role === 'system') || storedMessages.find(m => m.role === 'system');
      fullMessages = systemMsg ? [systemMsg] : [];
      fullMessages.push(...storedMessages.filter(m => m.role !== 'system'));
      // Append the latest incoming user message unless it duplicates one
      // already in the merged list (guards against clients resending history).
      const lastNewUserMsg = messages.filter(m => m.role === 'user').pop();
      if (lastNewUserMsg && !fullMessages.some(m => m.role === 'user' && m.content === lastNewUserMsg.content)) {
        fullMessages.push(lastNewUserMsg);
      }
    }

    // Persist only the final user turn; assistant reply is stored after the call.
    const lastUserMsg = fullMessages.filter(m => m.role === 'user').pop();
    if (lastUserMsg) addMessageToConversation(conversationId, 'user', lastUserMsg.content);

    const input = buildConversationInput(fullMessages);
    const requestBody = buildAgentRequest(model, input);

    try {
      const response = await callYouApi(apiKey, requestBody);
      const data = await response.json();
      const resultText = extractText(data);
      addMessageToConversation(conversationId, 'assistant', resultText);

      return {
        content: [
          { type: 'text', text: resultText },
          { type: 'text', text: `\n\n---\nConversation ID: ${conversationId}` }
        ],
      };
    } catch (error) {
      markKeyError(apiKey);
      throw error;
    }
  }
|
|
449
|
+
|
|
450
|
+
listConversations() {
|
|
451
|
+
const conversations = [...conversationStore.entries()].map(([id, conv]) => ({
|
|
452
|
+
id,
|
|
453
|
+
message_count: conv.messages.length,
|
|
454
|
+
created_at: new Date(conv.createdAt).toISOString(),
|
|
455
|
+
updated_at: new Date(conv.updatedAt).toISOString(),
|
|
456
|
+
preview: conv.messages.slice(-1)[0]?.content?.substring(0, 100) || ''
|
|
457
|
+
})).sort((a, b) => new Date(b.updated_at) - new Date(a.updated_at));
|
|
458
|
+
|
|
459
|
+
return { content: [{ type: 'text', text: JSON.stringify({ total: conversations.length, conversations }, null, 2) }] };
|
|
460
|
+
}
|
|
461
|
+
|
|
462
|
+
getConversationHistory(conversationId) {
|
|
463
|
+
const conv = getConversation(conversationId);
|
|
464
|
+
if (!conv) return { content: [{ type: 'text', text: `Conversation not found: ${conversationId}` }] };
|
|
465
|
+
return {
|
|
466
|
+
content: [{
|
|
467
|
+
type: 'text',
|
|
468
|
+
text: JSON.stringify({
|
|
469
|
+
id: conv.id,
|
|
470
|
+
messages: conv.messages.map(m => ({ role: m.role, content: m.content, timestamp: new Date(m.timestamp).toISOString() })),
|
|
471
|
+
created_at: new Date(conv.createdAt).toISOString(),
|
|
472
|
+
updated_at: new Date(conv.updatedAt).toISOString()
|
|
473
|
+
}, null, 2)
|
|
474
|
+
}],
|
|
475
|
+
};
|
|
476
|
+
}
|
|
477
|
+
|
|
478
|
+
deleteConversation(conversationId) {
|
|
479
|
+
if (!conversationStore.has(conversationId)) {
|
|
480
|
+
return { content: [{ type: 'text', text: `Conversation not found: ${conversationId}` }] };
|
|
481
|
+
}
|
|
482
|
+
conversationStore.delete(conversationId);
|
|
483
|
+
return { content: [{ type: 'text', text: `Deleted conversation: ${conversationId}` }] };
|
|
484
|
+
}
|
|
485
|
+
|
|
486
|
+
getKeyStatus() {
|
|
487
|
+
const status = {
|
|
488
|
+
total_keys: API_KEYS.length,
|
|
489
|
+
key_mode: KEY_MODE,
|
|
490
|
+
current_key_index: currentKeyIndex,
|
|
491
|
+
keys: API_KEYS.map((key, index) => ({
|
|
492
|
+
index,
|
|
493
|
+
key_preview: `${key.substring(0, 8)}...${key.substring(key.length - 4)}`,
|
|
494
|
+
usage_count: keyUsageCount.get(key) || 0,
|
|
495
|
+
error_count: keyErrorCount.get(key) || 0,
|
|
496
|
+
is_current: index === currentKeyIndex
|
|
497
|
+
}))
|
|
498
|
+
};
|
|
499
|
+
return { content: [{ type: 'text', text: JSON.stringify(status, null, 2) }] };
|
|
500
|
+
}
|
|
501
|
+
|
|
502
|
+
  /**
   * Manage the companion OpenAI-compatible HTTP server child process.
   * Actions: 'start' | 'stop' | 'restart' | 'status'. State lives in the
   * module-level openaiServerProcess/Port/Status variables; every branch
   * returns an MCP text result wrapping a JSON payload.
   * @param {object} args - { action, port?, access_tokens?, store_type?, db_path? }
   * @returns {Promise<object>} MCP text content describing the outcome
   */
  async controlOpenAIServer(args) {
    const { action, port = 3002, access_tokens = [], store_type, db_path } = args;
    const openaiServerPath = join(__dirname, 'openai-server.js');

    // Helper: wrap any object as pretty-printed JSON tool output.
    const jsonResponse = (obj) => ({ content: [{ type: 'text', text: JSON.stringify(obj, null, 2) }] });

    switch (action) {
      case 'start':
        // Idempotence guard: refuse a second start while one is running.
        if (openaiServerProcess && openaiServerStatus === 'running') {
          return jsonResponse({ success: false, message: `Already running on port ${openaiServerPort}`, status: openaiServerStatus, port: openaiServerPort });
        }

        try {
          // Tool-call arguments win over env-derived defaults.
          const finalStoreType = store_type || OPENAI_SERVER_STORE_TYPE;
          const finalDbPath = db_path || OPENAI_SERVER_DB_PATH;
          const finalAccessTokens = access_tokens?.length > 0 ? access_tokens.join(',') : OPENAI_SERVER_ACCESS_TOKENS;

          // Forward configuration to the child entirely via environment.
          const env = {
            ...process.env,
            YDC_OPENAI_PORT: port.toString(),
            YDC_API_KEY: API_KEYS[0] || '',
            YDC_API_KEYS: API_KEYS.join(','),
            YDC_OPENAI_ACCESS_TOKENS: finalAccessTokens,
            YDC_CONVERSATION_STORE: finalStoreType,
            ...(finalDbPath && { YDC_CONVERSATION_DB_PATH: finalDbPath })
          };

          // NOTE(review): spawns a bare 'node' — relies on node being on
          // PATH; process.execPath would be more robust. Confirm intent.
          openaiServerProcess = spawn('node', [openaiServerPath], { env, stdio: ['ignore', 'pipe', 'pipe'], detached: false });
          openaiServerPort = port;
          openaiServerStatus = 'starting';

          // Wait for readiness: either the child prints "running" on stderr,
          // or a 2s timeout optimistically assumes it is up. Early exit with
          // a non-zero code while still 'starting' rejects instead.
          await new Promise((resolve, reject) => {
            const timeout = setTimeout(() => { openaiServerStatus = 'running'; resolve(); }, 2000);
            openaiServerProcess.stderr.on('data', (data) => {
              if (data.toString().includes('running')) { clearTimeout(timeout); openaiServerStatus = 'running'; resolve(); }
            });
            openaiServerProcess.on('error', (err) => { clearTimeout(timeout); openaiServerStatus = 'error'; reject(err); });
            openaiServerProcess.on('exit', (code) => {
              if (code !== 0 && openaiServerStatus === 'starting') { clearTimeout(timeout); openaiServerStatus = 'stopped'; reject(new Error(`Exit code ${code}`)); }
            });
          });

          return jsonResponse({
            success: true,
            message: `OpenAI server started on port ${port}`,
            status: openaiServerStatus,
            port: openaiServerPort,
            endpoint: `http://localhost:${port}/v1/chat/completions`,
            pid: openaiServerProcess.pid,
            storage: { store_type: finalStoreType, db_path: finalStoreType === 'sqlite' ? (finalDbPath || 'conversations.db') : null },
            api_keys: { passthrough: API_KEYS.length > 0, count: API_KEYS.length }
          });
        } catch (error) {
          openaiServerStatus = 'error';
          return jsonResponse({ success: false, message: `Failed to start: ${error.message}`, status: openaiServerStatus });
        }

      case 'stop':
        if (!openaiServerProcess || openaiServerStatus === 'stopped') {
          return jsonResponse({ success: false, message: 'Server is not running', status: openaiServerStatus });
        }
        try {
          // SIGTERM, then immediately forget the handle; we do not wait for
          // the child's actual exit event here.
          openaiServerProcess.kill('SIGTERM');
          const stoppedPort = openaiServerPort;
          openaiServerProcess = null;
          openaiServerPort = null;
          openaiServerStatus = 'stopped';
          return jsonResponse({ success: true, message: `Stopped (was on port ${stoppedPort})`, status: openaiServerStatus });
        } catch (error) {
          return jsonResponse({ success: false, message: `Failed to stop: ${error.message}`, status: openaiServerStatus });
        }

      case 'restart':
        // Best-effort stop (errors swallowed deliberately), brief settle
        // delay, then delegate to the 'start' branch with the same options.
        if (openaiServerProcess && openaiServerStatus === 'running') {
          try { openaiServerProcess.kill('SIGTERM'); openaiServerProcess = null; openaiServerStatus = 'stopped'; await new Promise(r => setTimeout(r, 1000)); } catch {}
        }
        return await this.controlOpenAIServer({ action: 'start', port, access_tokens, store_type, db_path });

      case 'status':
        return jsonResponse({
          status: openaiServerStatus,
          port: openaiServerPort,
          pid: openaiServerProcess?.pid || null,
          endpoint: openaiServerStatus === 'running' ? `http://localhost:${openaiServerPort}/v1/chat/completions` : null
        });

      default:
        return jsonResponse({ success: false, message: `Unknown action: ${action}` });
    }
  }
|
|
592
|
+
|
|
593
|
+
async run() {
|
|
594
|
+
if (!validateApiKeys()) process.exit(1);
|
|
595
|
+
const transport = new StdioServerTransport();
|
|
596
|
+
await this.server.connect(transport);
|
|
597
|
+
console.error(`You.com Agents MCP server v1.5.0 running on stdio`);
|
|
598
|
+
console.error(`API Keys: ${API_KEYS.length}, Mode: ${KEY_MODE}`);
|
|
599
|
+
}
|
|
600
|
+
}
|
|
601
|
+
|
|
602
|
+
// Boot the MCP server; any startup failure is reported on stderr.
const server = new YouAgentsServer();
server.run().catch((err) => console.error(err));
|
|
604
|
+
}
|