ydc-agent 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +217 -0
- package/README_JA.md +189 -0
- package/README_ZH_CN.md +189 -0
- package/README_ZH_TW.md +189 -0
- package/index.js +1160 -0
- package/lib/advanced-versions.js +113 -0
- package/lib/anthropic-mapper.js +255 -0
- package/lib/api-client.js +140 -0
- package/lib/auth-middleware.js +44 -0
- package/lib/conversation-store.js +358 -0
- package/lib/openai-mapper.js +215 -0
- package/lib/request-logger.js +132 -0
- package/lib/routes/anthropic-messages.js +269 -0
- package/lib/routes/chat.js +249 -0
- package/lib/routes/conversations.js +94 -0
- package/lib/routes/health.js +31 -0
- package/lib/routes/models.js +111 -0
- package/openai-server.js +99 -0
- package/package.json +70 -0
package/index.js
ADDED
|
@@ -0,0 +1,1160 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
|
|
4
|
+
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
|
|
5
|
+
import {
|
|
6
|
+
CallToolRequestSchema,
|
|
7
|
+
ListToolsRequestSchema,
|
|
8
|
+
} from '@modelcontextprotocol/sdk/types.js';
|
|
9
|
+
import { randomUUID } from 'crypto';
|
|
10
|
+
import { spawn } from 'child_process';
|
|
11
|
+
import { fileURLToPath } from 'url';
|
|
12
|
+
import { dirname, join } from 'path';
|
|
13
|
+
|
|
14
|
+
// Import shared modules
|
|
15
|
+
import { AGENT_TYPES, callYouApi, extractText, buildConversationInput, buildAgentRequest } from './lib/api-client.js';
|
|
16
|
+
|
|
17
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
18
|
+
const __dirname = dirname(__filename);
|
|
19
|
+
|
|
20
|
+
// ============ CLI MODE CHECK ============
const args = process.argv.slice(2);

// Simple "--flag VALUE" options and the environment variable each one sets.
// Environment variables are the single source of truth consumed both by this
// process and by the spawned OpenAI-compatible server.
const FLAG_ENV_TABLE = {
  '--api-key': 'YDC_API_KEY',
  '--api-keys': 'YDC_API_KEYS',
  '--port': 'YDC_OPENAI_PORT',
  '-p': 'YDC_OPENAI_PORT',
  '--access-token': 'YDC_OPENAI_ACCESS_TOKENS',
  '--key-mode': 'YDC_KEY_MODE',
};

// Parse CLI arguments into environment variables. Each argument is inspected
// independently (values are not skipped), matching a plain linear scan.
args.forEach((flag, i) => {
  const value = args[i + 1];
  const envName = FLAG_ENV_TABLE[flag];
  if (envName && value) {
    process.env[envName] = value;
  }
  if (flag === '--agent' && value) {
    // Format: name:id or just id (name defaults to id). Repeatable flag;
    // occurrences are accumulated as a comma-separated list.
    const existing = process.env.YDC_CUSTOM_AGENTS || '';
    process.env.YDC_CUSTOM_AGENTS = existing ? `${existing},${value}` : value;
  }
  if (flag === '--no-history') {
    process.env.YDC_NO_HISTORY = 'true';
  }
});
|
|
49
|
+
|
|
50
|
+
// OpenAI mode is triggered by either a "--openai" flag or a bare "openai" arg.
const isOpenAIMode = args.includes('--openai') || args.includes('openai');

if (isOpenAIMode) {
  // Start OpenAI-compatible HTTP server using spawn
  // stdio: 'inherit' streams the child's logs to this terminal; env carries
  // the variables populated by the CLI parsing above.
  const openaiServerPath = join(__dirname, 'openai-server.js');
  const child = spawn('node', [openaiServerPath], {
    stdio: 'inherit',
    env: process.env
  });
  // Mirror the child's exit code so calling scripts see the real status.
  child.on('exit', (code) => process.exit(code));
} else if (args.includes('--help') || args.includes('-h')) {
  // Usage text printed verbatim; process exits immediately afterwards.
  console.log(`
ydc-agent - MCP server for You.com AI agents

Usage:
  npx ydc-agent                             Start MCP server (stdio)
  npx ydc-agent --openai                    Start OpenAI-compatible HTTP server
  npx ydc-agent --openai --api-key KEY      Start with single API key
  npx ydc-agent --openai --api-keys K1,K2   Start with multiple API keys
  npx ydc-agent --openai --port 3003        Start on custom port
  npx ydc-agent --openai --access-token TOK Require access token for HTTP server

Options:
  --openai                  Start OpenAI-compatible HTTP server
  --api-key KEY             Set single You.com API key
  --api-keys K1,K2,K3       Set multiple API keys (comma-separated)
  --key-mode MODE           Key rotation: round-robin (default) / sequential / random
  --port, -p PORT           Set HTTP server port (default: 3002)
  --access-token TOKEN      Set access token for HTTP server authentication
  --agent NAME:ID           Add custom agent to models list (can use multiple times)
  --no-history              Minimal logging (one line per request/response)
  --help, -h                Show this help

Environment Variables:
  YDC_API_KEY               You.com API key (required)
  YDC_API_KEYS              Multiple keys (comma-separated)
  YDC_KEY_MODE              round-robin / sequential / random
  YDC_OPENAI_PORT           HTTP server port (default: 3002)
  YDC_CONVERSATION_STORE    sqlite / memory (default: sqlite)
  YDC_OPENAI_ACCESS_TOKENS  Allowed tokens (comma-separated)
  YDC_CUSTOM_AGENTS         Custom agents (name:id,name2:id2)
  YDC_NO_HISTORY            Set to 'true' for minimal logging
`);
  process.exit(0);
}
|
|
95
|
+
|
|
96
|
+
// Skip MCP server if in OpenAI mode
|
|
97
|
+
if (isOpenAIMode) {
|
|
98
|
+
// Wait forever, child process handles everything
|
|
99
|
+
setInterval(() => {}, 1000000);
|
|
100
|
+
} else {
|
|
101
|
+
|
|
102
|
+
// ============ OPENAI SERVER CONTROL ============
// State for the optional OpenAI-compatible HTTP server child process,
// managed via the openai_server_control tool.
let openaiServerProcess = null;
let openaiServerPort = null;
let openaiServerStatus = 'stopped';

// ============ MULTI-KEY CONFIGURATION ============
// YDC_API_KEYS (comma-separated) takes precedence over the single YDC_API_KEY.
const API_KEYS_RAW = process.env.YDC_API_KEYS || process.env.YDC_API_KEY || '';
const API_KEYS = API_KEYS_RAW.split(',').map(k => k.trim()).filter(k => k);
// Rotation strategy: round-robin (default) / sequential / random — see getNextApiKey().
const KEY_MODE = process.env.YDC_KEY_MODE || 'round-robin';
let currentKeyIndex = 0;
const keyUsageCount = new Map(); // key -> number of times selected
const keyErrorCount = new Map(); // key -> number of recorded failures

// ============ OPENAI SERVER ENV PASSTHROUGH ============
// Values forwarded (via env) to the spawned openai-server.js process.
const OPENAI_SERVER_STORE_TYPE = process.env.YDC_CONVERSATION_STORE || 'sqlite';
const OPENAI_SERVER_DB_PATH = process.env.YDC_CONVERSATION_DB_PATH || '';
const OPENAI_SERVER_ACCESS_TOKENS = process.env.YDC_OPENAI_ACCESS_TOKENS || '';

// ============ SUMMARY PREFERENCE ============
// When true, tool input schemas default to multi-call mode with summary=true.
const PREFER_SUMMARY = process.env.YDC_PREFER_SUMMARY === 'true';
const DEFAULT_SUMMARY_LANGUAGE = process.env.YDC_SUMMARY_LANGUAGE || 'en';

// ============ CUSTOM AGENT CONFIGURATION ============
// Entries are "name:id" or a bare "id"; the first entry provides the default id.
const CUSTOM_AGENTS_RAW = process.env.YDC_CUSTOM_AGENTS || '';
const CUSTOM_AGENTS = CUSTOM_AGENTS_RAW.split(',').map(a => a.trim()).filter(a => a);
const HAS_CUSTOM_AGENT = CUSTOM_AGENTS.length > 0;
// split(':').pop() extracts the id from "name:id" (a bare id passes through unchanged).
const DEFAULT_AGENT_ID = HAS_CUSTOM_AGENT ? CUSTOM_AGENTS[0].split(':').pop() : null;
|
|
129
|
+
|
|
130
|
+
// ============ YOU_AGENT FAILURE TRACKING ============
// Session-scoped failure counter for the custom you_agent tool. Once the
// threshold is reached the tool is flagged disabled so that the (dynamically
// rebuilt) tool descriptions can steer callers toward you_advanced instead.
let youAgentFailureCount = 0;
// Explicit radix (parseInt without one is an idiom hazard); falls back to 3
// when the env var is unset or not a number.
const YOU_AGENT_FAILURE_THRESHOLD = Number.parseInt(process.env.YDC_AGENT_FAILURE_THRESHOLD, 10) || 3;
let youAgentDisabled = false;

/**
 * Record one you_agent failure; marks the tool disabled when the count
 * reaches YOU_AGENT_FAILURE_THRESHOLD.
 * @returns {{count: number, disabled: boolean}} updated failure state
 */
function recordYouAgentFailure() {
  youAgentFailureCount++;
  if (youAgentFailureCount >= YOU_AGENT_FAILURE_THRESHOLD) {
    youAgentDisabled = true;
  }
  return { count: youAgentFailureCount, disabled: youAgentDisabled };
}

/** Clear the failure counter and re-enable you_agent. */
function resetYouAgentFailures() {
  youAgentFailureCount = 0;
  youAgentDisabled = false;
}

/**
 * Snapshot of the you_agent failure state (embedded in tool descriptions).
 * @returns {{failure_count: number, threshold: number, disabled: boolean}}
 */
function getYouAgentStatus() {
  return {
    failure_count: youAgentFailureCount,
    threshold: YOU_AGENT_FAILURE_THRESHOLD,
    disabled: youAgentDisabled
  };
}
|
|
155
|
+
|
|
156
|
+
// ============ CONVERSATION STORE (Memory for MCP) ============
// In-memory conversation history for MCP (stdio) mode. The HTTP server uses
// its own persistent store (lib/conversation-store.js).
const conversationStore = new Map();
const CONVERSATION_TTL = 24 * 60 * 60 * 1000; // drop conversations idle for 24h
const MAX_MESSAGES_PER_CONVERSATION = 100;

/** @returns {string} a fresh UUID-based conversation id */
function generateConversationId() {
  return randomUUID();
}

/**
 * Look up a conversation; touching it refreshes its TTL.
 * @param {string} conversationId
 * @returns {?object} the conversation record, or null when unknown
 */
function getConversation(conversationId) {
  if (!conversationId || !conversationStore.has(conversationId)) return null;
  const conv = conversationStore.get(conversationId);
  conv.updatedAt = Date.now();
  return conv;
}

/**
 * Create (and register) a conversation record.
 * @param {?string} conversationId - optional id; a UUID is generated when omitted
 * @returns {object} the new conversation record
 */
function createConversation(conversationId = null) {
  const id = conversationId || generateConversationId();
  const conv = { id, messages: [], createdAt: Date.now(), updatedAt: Date.now() };
  conversationStore.set(id, conv);
  return conv;
}

/**
 * Append a message, creating the conversation on demand. When the history is
 * full, older messages are trimmed while any system message stays pinned first.
 * @param {string} conversationId
 * @param {string} role - 'system' | 'user' | 'assistant'
 * @param {string} content
 * @returns {object} the updated conversation record
 */
function addMessageToConversation(conversationId, role, content) {
  let conv = getConversation(conversationId);
  if (!conv) conv = createConversation(conversationId);

  if (conv.messages.length >= MAX_MESSAGES_PER_CONVERSATION) {
    const systemMsg = conv.messages.find(m => m.role === 'system');
    if (systemMsg) {
      // Pin the system message first. Filter it out of the kept tail so it is
      // never duplicated when it happens to fall inside the sliced window
      // (the previous [systemMsg, ...slice] form could re-include it).
      const tail = conv.messages
        .filter(m => m !== systemMsg)
        .slice(-MAX_MESSAGES_PER_CONVERSATION + 2);
      conv.messages = [systemMsg, ...tail];
    } else {
      conv.messages = conv.messages.slice(-MAX_MESSAGES_PER_CONVERSATION + 1);
    }
  }

  conv.messages.push({ role, content, timestamp: Date.now() });
  conv.updatedAt = Date.now();
  return conv;
}
|
|
194
|
+
|
|
195
|
+
// Cleanup expired conversations
// Hourly sweep that drops any conversation idle longer than CONVERSATION_TTL.
// NOTE(review): the interval handle is never cleared or unref'd, so it keeps
// the event loop alive — intended for a long-running MCP server process.
setInterval(() => {
  const now = Date.now();
  for (const [id, conv] of conversationStore.entries()) {
    if (now - conv.updatedAt > CONVERSATION_TTL) conversationStore.delete(id);
  }
}, 60 * 60 * 1000);
|
|
202
|
+
|
|
203
|
+
// ============ API KEY MANAGEMENT ============

/**
 * Heuristic: true when this process looks like it was launched through a
 * package runner (npx / pnpm global) or from an installed node_modules tree,
 * based on the script path in process.argv[1]. Used only to pick the right
 * example mcp.json config in validateApiKeys().
 * @returns {boolean}
 */
function isRunningAsNpx() {
  const scriptPath = process.argv[1] || '';
  const runnerMarkers = ['node_modules', '.npm/_npx', 'npx', 'pnpm/global'];
  return runnerMarkers.some((marker) => scriptPath.includes(marker));
}
|
|
209
|
+
|
|
210
|
+
/**
 * Verify that at least one You.com API key is configured. On failure, print
 * a ready-to-paste example mcp.json configuration to stderr (stdout is the
 * MCP protocol channel and must stay clean).
 * @returns {boolean} true when at least one key is available
 */
function validateApiKeys() {
  if (API_KEYS.length > 0) return true;

  console.error('ERROR: No API keys configured!');
  console.error('Please set YDC_API_KEY or YDC_API_KEYS environment variable in mcp.json');

  // Tailor the launch command to how this process appears to have been started.
  const config = isRunningAsNpx()
    ? { command: "npx", args: ["-y", "ydc-agent"] }
    : { command: "node", args: [process.argv[1] || "path/to/index.js"] };

  console.error('\nExample mcp.json config:');
  const example = {
    mcpServers: {
      "ydc-agent": {
        ...config,
        env: { YDC_API_KEY: "your-api-key-here", YDC_KEY_MODE: "round-robin" }
      }
    }
  };
  console.error(JSON.stringify(example, null, 2));
  return false;
}
|
|
232
|
+
|
|
233
|
+
/**
 * Select the API key for the next outbound request according to KEY_MODE:
 * round-robin (default) advances on every call, sequential sticks with the
 * current key until markKeyError() rotates it, random picks uniformly.
 * Also increments the per-key usage counter.
 * @returns {string} the selected API key
 * @throws {Error} when no keys are configured
 */
function getNextApiKey() {
  if (API_KEYS.length === 0) throw new Error('No API keys configured');

  let selected;
  if (API_KEYS.length === 1) {
    // Only one key — nothing to rotate.
    selected = API_KEYS[0];
  } else if (KEY_MODE === 'sequential') {
    selected = API_KEYS[currentKeyIndex];
  } else if (KEY_MODE === 'random') {
    selected = API_KEYS[Math.floor(Math.random() * API_KEYS.length)];
  } else {
    // round-robin (default)
    selected = API_KEYS[currentKeyIndex];
    currentKeyIndex = (currentKeyIndex + 1) % API_KEYS.length;
  }

  keyUsageCount.set(selected, (keyUsageCount.get(selected) || 0) + 1);
  return selected;
}
|
|
252
|
+
|
|
253
|
+
/**
 * Record a failed request against a key. In sequential mode an error is the
 * signal to rotate to the next key (other modes rotate on their own).
 * @param {string} key - the API key that produced the error
 */
function markKeyError(key) {
  const previousErrors = keyErrorCount.get(key) || 0;
  keyErrorCount.set(key, previousErrors + 1);
  if (KEY_MODE === 'sequential') {
    currentKeyIndex = (currentKeyIndex + 1) % API_KEYS.length;
  }
}
|
|
257
|
+
|
|
258
|
+
// ============ MCP SERVER ============
|
|
259
|
+
class YouAgentsServer {
|
|
260
|
+
constructor() {
  // MCP server identity and capabilities; tool handlers are wired up below.
  this.server = new Server(
    { name: 'ydc-agent', version: '1.5.1' },
    { capabilities: { tools: {} } }
  );
  this.setupToolHandlers();
  // Report protocol/transport errors on stderr (stdout carries MCP traffic).
  this.server.onerror = (error) => console.error('[MCP Error]', error);
  // Graceful shutdown on Ctrl-C.
  process.on('SIGINT', async () => { await this.server.close(); process.exit(0); });
}
|
|
269
|
+
|
|
270
|
+
setupToolHandlers() {
  // Advertise the tool catalog. Several descriptions interpolate live module
  // state (PREFER_SUMMARY, youAgentFailureCount, ...), so the list is rebuilt
  // on every tools/list request.
  this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
    tools: [
      // --- you_search: express-agent fan-out for web/news search ---
      {
        name: 'you_search',
        description: `Web and news search via You.com. Supports multiple calls (1-20) with array results. [Performance: ~2s avg, comparable to GPT-3]

⚠️ AI CAPABILITY RULE: If you are GPT-3.5 level or above:
- Use call_count=5-20 for diverse search results as reference
- Integrate and verify results with YOUR own knowledge

[RECOMMENDED USAGE]
- Diverse search results → call_count=5-20, show_results=true
- Comparison/summary → call_count=3-5, summary=true${PREFER_SUMMARY ? '\n⚡ PREFERRED: Use call_count=3-5 with summary=true for consolidated insights (default enabled)' : ''}`,
        inputSchema: {
          type: 'object',
          properties: {
            query: { type: 'string', description: 'Search query' },
            call_count: { type: 'number', description: 'Number of API calls (1-20). Use 1 for single detailed result, 3-5 for comparison, 10-20 for diverse opinions', default: PREFER_SUMMARY ? 3 : 1, minimum: 1, maximum: 20 },
            summary: { type: 'boolean', description: 'Generate AI summary comparing results. Use with call_count>1. Returns comparison table, NOT full content', default: PREFER_SUMMARY },
            language: { type: 'string', description: 'Summary language (e.g., zh-TW, zh-CN, en, ja)', default: DEFAULT_SUMMARY_LANGUAGE },
            show_results: { type: 'boolean', description: 'Include full results. Set TRUE to get complete content, FALSE only returns summary/metadata', default: false }
          },
          required: ['query'],
        },
      },
      // --- you_express: fast AI answers (same backend fan-out as you_search) ---
      {
        name: 'you_express',
        description: `Fast AI answers with web search. Supports multiple calls (1-20) for diverse perspectives. [Performance: ~2s avg, comparable to GPT-3. Best for quick answers and multiple parallel calls]

⚠️ AI CAPABILITY RULE: If you are GPT-3.5 level or above:
- Use call_count=5-20 for diverse reference opinions, then integrate with YOUR own judgment
- Results are reference material, not authoritative answers

[RECOMMENDED USAGE]
- Quick diverse opinions → call_count=10-20, show_results=true
- Comparison/summary → call_count=5-10, summary=true${PREFER_SUMMARY ? '\n⚡ PREFERRED: Use call_count=5 with summary=true for consolidated insights (default enabled)' : ''}`,
        inputSchema: {
          type: 'object',
          properties: {
            input: { type: 'string', description: 'Query or prompt' },
            call_count: { type: 'number', description: 'Number of API calls (1-20). Use 1 for single detailed result, 3-5 for comparison, 10-20 for diverse opinions', default: PREFER_SUMMARY ? 5 : 1, minimum: 1, maximum: 20 },
            summary: { type: 'boolean', description: 'Generate AI summary comparing results. Use with call_count>1. Returns comparison table, NOT full content', default: PREFER_SUMMARY },
            language: { type: 'string', description: 'Summary language (e.g., zh-TW, zh-CN, en, ja)', default: DEFAULT_SUMMARY_LANGUAGE },
            show_results: { type: 'boolean', description: 'Include full results. Set TRUE to get complete content, FALSE only returns summary/metadata', default: false }
          },
          required: ['input'],
        },
      },
      // --- you_advanced: heavier reasoning/research agents; description
      //     embeds hints that change with you_agent failure state ---
      {
        name: 'you_advanced',
        description: `Advanced AI agent for complex reasoning and research. Supports multiple calls (1-10) for comprehensive analysis. [Performance Guide: basic=~10s (GPT-3.5), +compute=~120s (GPT-4o), +research=~120s (GPT-4o)]

⚠️ CRITICAL: AI CAPABILITY LEVEL RULE
If you are GPT-3.5 level or above (including GPT-4, Claude 3.5, etc.):
- you_advanced results are just REFERENCE MATERIAL, not authoritative answers
- Your own capability is equal or superior - use this tool for diverse perspectives only
- ✅ MUST USE: call_count=5+, summary=true, show_results=true → gather multiple viewpoints, then integrate with YOUR own judgment
- ❌ AVOID: call_count=1 for single answer (wastes the tool's multi-perspective capability)

[RECOMMENDED USAGE]
- Gathering diverse viewpoints → call_count=5-10, summary=true, show_results=true
- Discovering blind spots → call_count=5+, compare with your own ideas
- Creative inspiration → call_count=5+, extract unique aspects from each result
- Deep research with citations → agent_type="advanced-4.5-research", call_count=1 (use as data source)
⚠️ AVOID: call_count>1 with summary=true then calling again for full content (wasteful)${HAS_CUSTOM_AGENT && youAgentFailureCount === 0 ? '\n\n💡 TIP: Custom agent available! Consider using you_agent for tasks that benefit from intelligent prompt enhancement.' : ''}${youAgentFailureCount > 0 ? `\n\n💡 NOTE: you_agent has ${youAgentFailureCount} failures. This tool (you_advanced) is a reliable alternative.` : ''}${PREFER_SUMMARY ? '\n⚡ PREFERRED: Use call_count=5 with summary=true for consolidated comparison (default enabled)' : ''}`,
        inputSchema: {
          type: 'object',
          properties: {
            input: { type: 'string', description: 'Query or prompt' },
            conversation_id: { type: 'string', description: 'Optional conversation ID for multi-turn dialogue' },
            agent_type: {
              type: 'string',
              enum: Object.keys(AGENT_TYPES),
              default: 'advanced-3.0-high'
            },
            verbosity: { type: 'string', enum: ['medium', 'high'], default: 'high' },
            max_workflow_steps: { type: 'number', default: 15, minimum: 1, maximum: 20 },
            call_count: { type: 'number', description: 'Number of API calls (1-10). Use 1 for single detailed report, 2-3 for comparison (avoid higher to prevent timeout)', default: PREFER_SUMMARY ? 3 : 1, minimum: 1, maximum: 10 },
            summary: { type: 'boolean', description: 'Generate AI summary comparing results. Use with call_count>1. Returns comparison table, NOT full content. Set show_results=true if you also need full content', default: PREFER_SUMMARY },
            language: { type: 'string', description: 'Summary language (e.g., zh-TW, zh-CN, en, ja)', default: DEFAULT_SUMMARY_LANGUAGE },
            show_results: { type: 'boolean', description: 'Include full results. Set TRUE to get complete content, FALSE only returns summary/metadata. IMPORTANT: If you need detailed content, set this to TRUE', default: false }
          },
          required: ['input'],
        },
      },
      // --- you_agent: user-configured custom agent with prompt enhancement ---
      {
        name: 'you_agent',
        description: `Custom AI agent with intelligent prompt enhancement. Automatically detects if the target AI has outdated knowledge and enhances prompts with detailed guidance for better results.

[HOW IT WORKS]
1. First probes the agent's knowledge cutoff date
2. If knowledge is outdated (>6 months), auto-enhances prompt with:
   - Detailed requirements breakdown
   - Current best practices hints
   - Specific design/technical guidelines
3. Supports call_count for diverse results

[AI CALLING GUIDE - THINK before choosing call_count]
Ask yourself: "Does this task benefit from multiple perspectives or diverse results?"
- YES → call_count=3-10, show_results=true
- NO, just need one answer → call_count=1, show_results=true
- NEED comparison → call_count=2-3, summary=true${HAS_CUSTOM_AGENT ? `\n\n⚡ PREFERRED: Custom agent configured (${DEFAULT_AGENT_ID}). Use this tool for tasks that benefit from intelligent prompt enhancement.` : ''}${youAgentFailureCount > 0 ? `\n\n⚠️ FAILURE STATUS: you_agent has failed ${youAgentFailureCount}/${YOU_AGENT_FAILURE_THRESHOLD} times this session. Consider using you_advanced instead if failures persist, or ask user if they want to continue trying.` : ''}`,
        inputSchema: {
          type: 'object',
          properties: {
            input: { type: 'string', description: 'Your task or question' },
            agent_id: { type: 'string', description: `Custom agent ID (from You.com dashboard)${DEFAULT_AGENT_ID ? `. Default: ${DEFAULT_AGENT_ID}` : ''}`, default: DEFAULT_AGENT_ID },
            auto_enhance: { type: 'boolean', description: 'Auto-enhance prompt if AI knowledge is outdated', default: true },
            knowledge_threshold_months: { type: 'number', description: 'Months threshold to consider knowledge outdated', default: 6 },
            call_count: { type: 'number', description: 'Number of API calls (1-10) for diverse results', default: 1, minimum: 1, maximum: 10 },
            summary: { type: 'boolean', description: 'Generate summary comparing results (use with call_count>1)', default: false },
            language: { type: 'string', description: 'Response/summary language', default: DEFAULT_SUMMARY_LANGUAGE },
            show_results: { type: 'boolean', description: 'Include full results in output', default: true }
          },
          required: ['input', 'agent_id'],
        },
      },
      // --- you_chat: OpenAI-style messages array with server-side history ---
      {
        name: 'you_chat',
        description: 'OpenAI-compatible chat interface with conversation history',
        inputSchema: {
          type: 'object',
          properties: {
            messages: {
              type: 'array',
              items: {
                type: 'object',
                properties: {
                  role: { type: 'string', enum: ['system', 'user', 'assistant'] },
                  content: { type: 'string' }
                },
                required: ['role', 'content']
              }
            },
            conversation_id: { type: 'string' },
            model: {
              type: 'string',
              enum: Object.keys(AGENT_TYPES),
              default: 'advanced-3.0-high'
            },
          },
          required: ['messages'],
        },
      },
      // --- conversation management helpers ---
      {
        name: 'you_conversation_list',
        description: 'List all active conversations',
        inputSchema: { type: 'object', properties: {} },
      },
      {
        name: 'you_conversation_get',
        description: 'Get conversation history by ID',
        inputSchema: {
          type: 'object',
          properties: { conversation_id: { type: 'string' } },
          required: ['conversation_id'],
        },
      },
      {
        name: 'you_conversation_delete',
        description: 'Delete a conversation',
        inputSchema: {
          type: 'object',
          properties: { conversation_id: { type: 'string' } },
          required: ['conversation_id'],
        },
      },
      // --- diagnostics / control ---
      {
        name: 'you_key_status',
        description: 'Get API key usage status',
        inputSchema: { type: 'object', properties: {} },
      },
      {
        name: 'openai_server_control',
        description: 'Control the OpenAI-compatible HTTP server (start/stop/restart/status)',
        inputSchema: {
          type: 'object',
          properties: {
            action: { type: 'string', enum: ['start', 'stop', 'restart', 'status'] },
            port: { type: 'number', default: 3002 },
            access_tokens: { type: 'array', items: { type: 'string' } },
            store_type: { type: 'string', enum: ['sqlite', 'memory'], default: 'sqlite' },
            db_path: { type: 'string' },
          },
          required: ['action'],
        },
      },
    ],
  }));

  // Dispatch tool invocations. Any thrown error is converted into a plain
  // text content block rather than an MCP protocol error.
  this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
    const { name, arguments: args } = request.params;
    try {
      switch (name) {
        case 'you_search': return await this.callExpressMultiple(args.query, args.call_count || 1, { summary: args.summary, language: args.language, show_results: args.show_results });
        case 'you_express': return await this.callExpressMultiple(args.input, args.call_count || 1, { summary: args.summary, language: args.language, show_results: args.show_results });
        case 'you_advanced': return await this.callAdvancedMultiple(args);
        case 'you_chat': return await this.callChat(args);
        case 'you_conversation_list': return this.listConversations();
        case 'you_conversation_get': return this.getConversationHistory(args.conversation_id);
        case 'you_conversation_delete': return this.deleteConversation(args.conversation_id);
        case 'you_key_status': return this.getKeyStatus();
        case 'you_agent': return await this.callCustomAgent(args);
        case 'openai_server_control': return await this.controlOpenAIServer(args);
        default: throw new Error(`Unknown tool: ${name}`);
      }
    } catch (error) {
      return { content: [{ type: 'text', text: `Error: ${error.message}` }] };
    }
  });
}
|
|
482
|
+
|
|
483
|
+
async callExpress(input) {
|
|
484
|
+
const apiKey = getNextApiKey();
|
|
485
|
+
try {
|
|
486
|
+
const response = await callYouApi(apiKey, { agent: 'express', input, stream: false });
|
|
487
|
+
const data = await response.json();
|
|
488
|
+
return { content: [{ type: 'text', text: extractText(data) }] };
|
|
489
|
+
} catch (error) {
|
|
490
|
+
markKeyError(apiKey);
|
|
491
|
+
throw error;
|
|
492
|
+
}
|
|
493
|
+
}
|
|
494
|
+
|
|
495
|
+
/**
 * Fan out 1-20 express-agent calls in parallel and package the outcome as a
 * single JSON text block (counts, optional AI summary, optional raw results).
 * Per-call failures are captured in the result array rather than thrown.
 * @param {string} input - query or prompt sent to every call
 * @param {number} callCount - requested number of calls (clamped to 1-20)
 * @param {{summary?: boolean, language?: string, show_results?: boolean}} options
 * @returns {Promise<{content: Array<{type: string, text: string}>}>}
 */
async callExpressMultiple(input, callCount = 1, options = {}) {
  const { summary = false, language = 'en', show_results = false } = options;
  // Clamp call_count to 1-20
  const count = Math.max(1, Math.min(20, callCount));

  // For single call, use original method (summary not applicable for single result)
  if (count === 1) {
    return await this.callExpress(input);
  }

  // For multiple calls with summary=true and show_results=false, need at least 2 results to compare

  // Execute multiple calls in parallel
  // Each call draws its own key, so failures are attributed per key.
  const promises = Array.from({ length: count }, async (_, index) => {
    const apiKey = getNextApiKey();
    try {
      const response = await callYouApi(apiKey, { agent: 'express', input, stream: false });
      const data = await response.json();
      return {
        index: index + 1,
        success: true,
        result: extractText(data)
      };
    } catch (error) {
      markKeyError(apiKey);
      // Failures become result entries; Promise.all below never rejects.
      return {
        index: index + 1,
        success: false,
        error: error.message
      };
    }
  });

  const results = await Promise.all(promises);
  const successCount = results.filter(r => r.success).length;

  // Generate summary if requested and have multiple successful results
  let summaryText = null;
  if (summary && successCount > 1) {
    summaryText = await this.generateSummary(results, input, language);
  }

  // If summary requested but only 1 result, or no summary requested with show_results=false
  // We should still return something useful
  const needsResultsFallback = !show_results && !summaryText && successCount > 0;

  const output = {
    total_calls: count,
    successful: successCount,
    failed: count - successCount,
    ...(summaryText && { summary: summaryText }),
    ...(show_results && { results: results }),
    // Fallback: if no summary and no results shown, include first successful result
    ...(needsResultsFallback && {
      note: 'Summary requires call_count > 1. Showing first result.',
      result: results.find(r => r.success)?.result
    })
  };

  return {
    content: [{
      type: 'text',
      text: JSON.stringify(output, null, 2)
    }]
  };
}
|
|
561
|
+
|
|
562
|
+
/**
 * Single advanced-agent call with in-memory conversation continuity: prior
 * turns are prepended to the prompt as a "[Conversation History]" block, and
 * both the user input and the assistant reply are appended to the store.
 * @param {object} args - { input, conversation_id?, agent_type?, verbosity?, max_workflow_steps? }
 * @returns {Promise<{content: Array<{type: string, text: string}>}>} result text plus a trailer with the conversation id
 * @throws rethrows the API error after recording it against the key used
 */
async callAdvanced(args) {
  const { input, conversation_id, agent_type = 'advanced-3.0-high', verbosity, max_workflow_steps } = args;
  const apiKey = getNextApiKey();
  let conversationId = conversation_id || generateConversationId();
  let fullInput = input;

  // Inline the existing history (if any) into the prompt — the backend API
  // itself is stateless.
  const conv = getConversation(conversationId);
  if (conv && conv.messages.length > 0) {
    const history = conv.messages.map(m => `${m.role === 'user' ? 'User' : 'Assistant'}: ${m.content}`).join('\n\n');
    fullInput = `[Conversation History]\n${history}\n\n[Current Message]\n${input}`;
  }

  // Store the raw user input (not the history-augmented prompt).
  // NOTE(review): recorded before the API call, so a failed call still leaves
  // the user message in the history.
  addMessageToConversation(conversationId, 'user', input);
  const requestBody = buildAgentRequest(agent_type, fullInput, { verbosity, max_workflow_steps });

  try {
    const response = await callYouApi(apiKey, requestBody);
    const data = await response.json();
    const resultText = extractText(data);
    addMessageToConversation(conversationId, 'assistant', resultText);

    return {
      content: [
        { type: 'text', text: resultText },
        // Trailer so the caller can continue the same conversation.
        { type: 'text', text: `\n\n---\nConversation ID: ${conversationId}\nAgent: ${agent_type}` }
      ],
    };
  } catch (error) {
    markKeyError(apiKey);
    throw error;
  }
}
|
|
594
|
+
|
|
595
|
+
async callAdvancedMultiple(args) {
|
|
596
|
+
const { input, conversation_id, agent_type = 'advanced-3.0-high', verbosity, max_workflow_steps, call_count = 1, summary = false, language = 'en', show_results = false } = args;
|
|
597
|
+
|
|
598
|
+
// Clamp call_count to 1-10 for advanced (more resource intensive)
|
|
599
|
+
const count = Math.max(1, Math.min(10, call_count));
|
|
600
|
+
|
|
601
|
+
// For single call, use original method (summary not applicable for single result)
|
|
602
|
+
if (count === 1) {
|
|
603
|
+
return await this.callAdvanced(args);
|
|
604
|
+
}
|
|
605
|
+
|
|
606
|
+
// For multiple calls, we don't use conversation history to get diverse results
|
|
607
|
+
const requestBody = buildAgentRequest(agent_type, input, { verbosity, max_workflow_steps });
|
|
608
|
+
|
|
609
|
+
// Execute multiple calls in parallel with individual timeout handling
|
|
610
|
+
const promises = Array.from({ length: count }, async (_, index) => {
|
|
611
|
+
const apiKey = getNextApiKey();
|
|
612
|
+
try {
|
|
613
|
+
// Use shorter timeout for parallel calls (120s each)
|
|
614
|
+
const response = await callYouApi(apiKey, requestBody, { timeout: 120000 });
|
|
615
|
+
const data = await response.json();
|
|
616
|
+
return {
|
|
617
|
+
index: index + 1,
|
|
618
|
+
success: true,
|
|
619
|
+
agent_type,
|
|
620
|
+
result: extractText(data)
|
|
621
|
+
};
|
|
622
|
+
} catch (error) {
|
|
623
|
+
markKeyError(apiKey);
|
|
624
|
+
return {
|
|
625
|
+
index: index + 1,
|
|
626
|
+
success: false,
|
|
627
|
+
agent_type,
|
|
628
|
+
error: error.message
|
|
629
|
+
};
|
|
630
|
+
}
|
|
631
|
+
});
|
|
632
|
+
|
|
633
|
+
// Use Promise.allSettled to ensure we get results even if some fail
|
|
634
|
+
const settledResults = await Promise.allSettled(promises);
|
|
635
|
+
const results = settledResults.map((settled, idx) => {
|
|
636
|
+
if (settled.status === 'fulfilled') {
|
|
637
|
+
return settled.value;
|
|
638
|
+
}
|
|
639
|
+
return {
|
|
640
|
+
index: idx + 1,
|
|
641
|
+
success: false,
|
|
642
|
+
agent_type,
|
|
643
|
+
error: settled.reason?.message || 'Unknown error'
|
|
644
|
+
};
|
|
645
|
+
});
|
|
646
|
+
|
|
647
|
+
const successCount = results.filter(r => r.success).length;
|
|
648
|
+
|
|
649
|
+
// Optionally save to conversation if conversation_id provided
|
|
650
|
+
let conversationId = conversation_id;
|
|
651
|
+
if (conversationId) {
|
|
652
|
+
addMessageToConversation(conversationId, 'user', input);
|
|
653
|
+
const summaryText = `[Multiple Call Results: ${successCount}/${count} successful]\n\n` +
|
|
654
|
+
results.filter(r => r.success).map(r => `--- Result ${r.index} ---\n${r.result}`).join('\n\n');
|
|
655
|
+
addMessageToConversation(conversationId, 'assistant', summaryText);
|
|
656
|
+
}
|
|
657
|
+
|
|
658
|
+
// Generate summary if requested and have multiple successful results
|
|
659
|
+
let summaryText = null;
|
|
660
|
+
if (summary && successCount > 1) {
|
|
661
|
+
summaryText = await this.generateSummary(results, input, language);
|
|
662
|
+
}
|
|
663
|
+
|
|
664
|
+
// If summary requested but only 1 result, or no summary requested with show_results=false
|
|
665
|
+
// We should still return something useful
|
|
666
|
+
const needsResultsFallback = !show_results && !summaryText && successCount > 0;
|
|
667
|
+
|
|
668
|
+
const output = {
|
|
669
|
+
total_calls: count,
|
|
670
|
+
successful: successCount,
|
|
671
|
+
failed: count - successCount,
|
|
672
|
+
agent_type,
|
|
673
|
+
conversation_id: conversationId || null,
|
|
674
|
+
...(summaryText && { summary: summaryText }),
|
|
675
|
+
...(show_results && { results: results }),
|
|
676
|
+
// Fallback: if no summary and no results shown, include first successful result
|
|
677
|
+
...(needsResultsFallback && {
|
|
678
|
+
note: 'Summary requires call_count > 1. Showing first result.',
|
|
679
|
+
result: results.find(r => r.success)?.result
|
|
680
|
+
})
|
|
681
|
+
};
|
|
682
|
+
|
|
683
|
+
return {
|
|
684
|
+
content: [{
|
|
685
|
+
type: 'text',
|
|
686
|
+
text: JSON.stringify(output, null, 2)
|
|
687
|
+
}]
|
|
688
|
+
};
|
|
689
|
+
}
|
|
690
|
+
|
|
691
|
+
/**
 * Call a user-specified You.com custom agent, optionally probing its knowledge
 * cutoff first and enhancing the prompt when that knowledge looks stale.
 *
 * Flow: (1) probe cutoff date, (2) enhance prompt if outdated, (3) run the
 * request once or `call_count` times in parallel, with optional summary.
 *
 * @param {object} args
 * @param {string} args.input - The user prompt.
 * @param {string} args.agent_id - Identifier of the custom agent to call.
 * @param {boolean} [args.auto_enhance=true] - Probe the agent's cutoff date
 *   and enrich the prompt when it is older than the threshold.
 * @param {number} [args.knowledge_threshold_months=6] - Staleness threshold.
 * @param {number} [args.call_count=1] - Parallel calls, clamped to 1-10.
 * @param {boolean} [args.summary=false] - Summarize multi-call results.
 * @param {string} [args.language='en'] - Language for enhancement/summary.
 * @param {boolean} [args.show_results=true] - Include raw results in output.
 * @returns {Promise<{content: Array}>} MCP tool response with a JSON payload.
 */
async callCustomAgent(args) {
  const {
    input,
    agent_id,
    auto_enhance = true,
    knowledge_threshold_months = 6,
    call_count = 1,
    summary = false,
    language = 'en',
    show_results = true
  } = args;

  const count = Math.max(1, Math.min(10, call_count));
  let enhancedInput = input;
  let knowledgeInfo = null;

  // Step 1: Probe knowledge cutoff if auto_enhance is enabled
  if (auto_enhance) {
    const apiKey = getNextApiKey();
    try {
      // Short-timeout probe: ask the agent to self-report its cutoff date.
      const probeResponse = await callYouApi(apiKey, {
        agent: agent_id,
        input: 'What is your knowledge cutoff date? Reply with just the date in YYYY-MM format.',
        stream: false
      }, { timeout: 30000 });

      const probeData = await probeResponse.json();
      const probeText = extractText(probeData);

      // Parse knowledge cutoff date
      // First YYYY-MM occurrence anywhere in the reply is taken as the cutoff.
      const dateMatch = probeText.match(/(\d{4})-(\d{2})/);
      if (dateMatch) {
        // Date(y, m) uses a 0-indexed month, hence the -1.
        const cutoffDate = new Date(parseInt(dateMatch[1]), parseInt(dateMatch[2]) - 1);
        const now = new Date();
        const monthsDiff = (now.getFullYear() - cutoffDate.getFullYear()) * 12 + (now.getMonth() - cutoffDate.getMonth());

        knowledgeInfo = {
          cutoff_date: `${dateMatch[1]}-${dateMatch[2]}`,
          months_old: monthsDiff,
          is_outdated: monthsDiff > knowledge_threshold_months
        };

        // Step 2: Enhance prompt if knowledge is outdated
        if (knowledgeInfo.is_outdated) {
          enhancedInput = this.enhancePromptForOutdatedAI(input, knowledgeInfo, language);
        }
      }
    } catch (error) {
      // If probe fails, continue with original input
      // (probe is best-effort; the failure is surfaced in knowledge_info).
      knowledgeInfo = { error: error.message, probe_skipped: true };
    }
  }

  // Step 3: Execute the actual request(s)
  if (count === 1) {
    const apiKey = getNextApiKey();
    try {
      const response = await callYouApi(apiKey, {
        agent: agent_id,
        input: enhancedInput,
        stream: false
      });
      const data = await response.json();
      const result = extractText(data);

      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: true,
            agent_id,
            knowledge_info: knowledgeInfo,
            // True when the outdated-knowledge enhancement was applied.
            prompt_enhanced: enhancedInput !== input,
            result
          }, null, 2)
        }]
      };
    } catch (error) {
      markKeyError(apiKey);
      // NOTE(review): recordYouAgentFailure() appears to bump a module-level
      // failure counter and return its current state — confirm in its definition.
      const failureStatus = recordYouAgentFailure();
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: false,
            agent_id,
            knowledge_info: knowledgeInfo,
            error: error.message,
            failure_tracking: {
              current_failures: failureStatus.count,
              threshold: YOU_AGENT_FAILURE_THRESHOLD,
              suggestion: failureStatus.count >= YOU_AGENT_FAILURE_THRESHOLD
                ? 'Threshold reached. Recommend switching to you_advanced or asking user for guidance.'
                : `Failure ${failureStatus.count}/${YOU_AGENT_FAILURE_THRESHOLD}. You may retry or switch to you_advanced.`
            }
          }, null, 2)
        }]
      };
    }
  }

  // Multiple calls
  // Each task resolves (never rejects): failures become { success: false } entries.
  const promises = Array.from({ length: count }, async (_, index) => {
    const apiKey = getNextApiKey();
    try {
      const response = await callYouApi(apiKey, {
        agent: agent_id,
        input: enhancedInput,
        stream: false
      }, { timeout: 120000 });
      const data = await response.json();
      return {
        index: index + 1,
        success: true,
        result: extractText(data)
      };
    } catch (error) {
      markKeyError(apiKey);
      recordYouAgentFailure();
      return {
        index: index + 1,
        success: false,
        error: error.message
      };
    }
  });

  const results = await Promise.all(promises);
  const successCount = results.filter(r => r.success).length;

  // Summary only makes sense when there are at least two results to compare.
  let summaryText = null;
  if (summary && successCount > 1) {
    summaryText = await this.generateSummary(results, input, language);
  }

  const output = {
    total_calls: count,
    successful: successCount,
    failed: count - successCount,
    agent_id,
    knowledge_info: knowledgeInfo,
    prompt_enhanced: enhancedInput !== input,
    // Conditional keys: spreading a falsy value is a no-op, so these keys
    // only appear when their condition holds.
    ...(summaryText && { summary: summaryText }),
    ...(show_results && { results }),
    ...(count - successCount > 0 && {
      failure_tracking: {
        current_failures: youAgentFailureCount,
        threshold: YOU_AGENT_FAILURE_THRESHOLD,
        suggestion: youAgentFailureCount >= YOU_AGENT_FAILURE_THRESHOLD
          ? 'Threshold reached. Recommend switching to you_advanced or asking user for guidance.'
          : `Some calls failed. Current failure count: ${youAgentFailureCount}/${YOU_AGENT_FAILURE_THRESHOLD}.`
      }
    })
  };

  return {
    content: [{
      type: 'text',
      text: JSON.stringify(output, null, 2)
    }]
  };
}
|
|
853
|
+
|
|
854
|
+
enhancePromptForOutdatedAI(originalInput, knowledgeInfo, language) {
|
|
855
|
+
const langHints = {
|
|
856
|
+
'zh-TW': '請用繁體中文回答',
|
|
857
|
+
'zh-CN': '请用简体中文回答',
|
|
858
|
+
'ja': '日本語で回答してください',
|
|
859
|
+
'ko': '한국어로 답변해 주세요',
|
|
860
|
+
'en': ''
|
|
861
|
+
};
|
|
862
|
+
|
|
863
|
+
const langHint = langHints[language] || '';
|
|
864
|
+
const cutoffWarning = `[Note: Your knowledge cutoff is ${knowledgeInfo.cutoff_date}, which is ${knowledgeInfo.months_old} months old]`;
|
|
865
|
+
|
|
866
|
+
// Detect task type and add appropriate enhancements
|
|
867
|
+
const lowerInput = originalInput.toLowerCase();
|
|
868
|
+
let enhancements = [];
|
|
869
|
+
|
|
870
|
+
// Design-related tasks
|
|
871
|
+
if (lowerInput.includes('design') || lowerInput.includes('設計') || lowerInput.includes('ui') || lowerInput.includes('ux')) {
|
|
872
|
+
enhancements.push(`
|
|
873
|
+
[Design Requirements - Please address ALL of the following]:
|
|
874
|
+
1. Layout Structure: Describe the visual hierarchy, grid system, and component arrangement
|
|
875
|
+
2. Color Palette: Suggest primary, secondary, accent colors with HEX codes and their psychological effects
|
|
876
|
+
3. Typography: Recommend font families, sizes, weights for headings and body text
|
|
877
|
+
4. Spacing & Whitespace: Define margins, padding, and breathing room between elements
|
|
878
|
+
5. Interactive Elements: Describe buttons, hover states, animations, and micro-interactions
|
|
879
|
+
6. Responsive Considerations: How should this adapt to mobile, tablet, and desktop
|
|
880
|
+
7. Accessibility: Ensure WCAG 2.1 AA compliance (contrast ratios, focus states, alt text)`);
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
// Development-related tasks
|
|
884
|
+
if (lowerInput.includes('code') || lowerInput.includes('develop') || lowerInput.includes('implement') || lowerInput.includes('build')) {
|
|
885
|
+
enhancements.push(`
|
|
886
|
+
[Development Requirements]:
|
|
887
|
+
1. Use modern best practices and patterns
|
|
888
|
+
2. Include error handling and edge cases
|
|
889
|
+
3. Consider performance optimization
|
|
890
|
+
4. Add comments explaining complex logic
|
|
891
|
+
5. Follow clean code principles`);
|
|
892
|
+
}
|
|
893
|
+
|
|
894
|
+
// Research-related tasks
|
|
895
|
+
if (lowerInput.includes('research') || lowerInput.includes('analyze') || lowerInput.includes('compare') || lowerInput.includes('研究')) {
|
|
896
|
+
enhancements.push(`
|
|
897
|
+
[Research Requirements]:
|
|
898
|
+
1. Provide multiple perspectives and viewpoints
|
|
899
|
+
2. Include pros and cons analysis
|
|
900
|
+
3. Cite reasoning and logic behind conclusions
|
|
901
|
+
4. Consider recent trends and developments
|
|
902
|
+
5. Offer actionable recommendations`);
|
|
903
|
+
}
|
|
904
|
+
|
|
905
|
+
// If no specific enhancements detected, add general guidance
|
|
906
|
+
if (enhancements.length === 0) {
|
|
907
|
+
enhancements.push(`
|
|
908
|
+
[General Requirements]:
|
|
909
|
+
1. Be comprehensive and detailed in your response
|
|
910
|
+
2. Break down complex topics into clear sections
|
|
911
|
+
3. Provide specific examples where applicable
|
|
912
|
+
4. Consider multiple approaches or solutions
|
|
913
|
+
5. Highlight any assumptions or limitations`);
|
|
914
|
+
}
|
|
915
|
+
|
|
916
|
+
return `${cutoffWarning}
|
|
917
|
+
${langHint}
|
|
918
|
+
|
|
919
|
+
${originalInput}
|
|
920
|
+
|
|
921
|
+
${enhancements.join('\n')}
|
|
922
|
+
|
|
923
|
+
[Important: Since your knowledge may be outdated, focus on fundamental principles and timeless best practices rather than specific tool versions or recent trends.]`;
|
|
924
|
+
}
|
|
925
|
+
|
|
926
|
+
async generateSummary(results, originalInput, language = 'en') {
|
|
927
|
+
const successfulResults = results.filter(r => r.success);
|
|
928
|
+
if (successfulResults.length < 2) return null;
|
|
929
|
+
|
|
930
|
+
const languageMap = {
|
|
931
|
+
'zh-TW': '繁體中文',
|
|
932
|
+
'zh-CN': '简体中文',
|
|
933
|
+
'en': 'English',
|
|
934
|
+
'ja': '日本語',
|
|
935
|
+
'ko': '한국어'
|
|
936
|
+
};
|
|
937
|
+
const langName = languageMap[language] || language;
|
|
938
|
+
|
|
939
|
+
const summaryPrompt = `[Task]: Analyze and summarize the following ${successfulResults.length} different responses to the same query.
|
|
940
|
+
[Original Query]: ${originalInput}
|
|
941
|
+
[Language]: Respond in ${langName}
|
|
942
|
+
[Format]:
|
|
943
|
+
1. Create a comparison table highlighting KEY DIFFERENCES (not similarities)
|
|
944
|
+
2. List unique aspects of each result
|
|
945
|
+
3. Provide a recommendation based on use case
|
|
946
|
+
4. Keep it concise and actionable
|
|
947
|
+
|
|
948
|
+
[Results to Compare]:
|
|
949
|
+
${successfulResults.map((r, i) => `--- Result ${i + 1} ---\n${r.result.substring(0, 2000)}${r.result.length > 2000 ? '...(truncated)' : ''}`).join('\n\n')}
|
|
950
|
+
|
|
951
|
+
[Important]: Focus on DIFFERENCES and UNIQUE aspects. Do NOT just list similarities.`;
|
|
952
|
+
|
|
953
|
+
const apiKey = getNextApiKey();
|
|
954
|
+
try {
|
|
955
|
+
const response = await callYouApi(apiKey, { agent: 'express', input: summaryPrompt, stream: false });
|
|
956
|
+
const data = await response.json();
|
|
957
|
+
return extractText(data);
|
|
958
|
+
} catch (error) {
|
|
959
|
+
markKeyError(apiKey);
|
|
960
|
+
return `Summary generation failed: ${error.message}`;
|
|
961
|
+
}
|
|
962
|
+
}
|
|
963
|
+
|
|
964
|
+
/**
 * OpenAI-style chat entry point: merges incoming messages with any stored
 * conversation history, sends the flattened transcript to the agent, and
 * persists both the latest user turn and the assistant reply.
 *
 * @param {object} args
 * @param {Array<{role: string, content: string}>} args.messages - New messages.
 * @param {string} [args.conversation_id] - Existing conversation to continue;
 *   a fresh ID is generated when omitted.
 * @param {string} [args.model='advanced-3.0-high'] - Agent/model identifier.
 * @returns {Promise<{content: Array}>} MCP response: reply text plus a footer
 *   carrying the conversation ID.
 * @throws Re-throws API errors after marking the key as failed.
 */
async callChat(args) {
  const { messages, conversation_id, model = 'advanced-3.0-high' } = args;
  const apiKey = getNextApiKey();
  let conversationId = conversation_id || generateConversationId();
  let fullMessages = [...messages];

  const conv = getConversation(conversationId);
  if (conv && conv.messages.length > 0) {
    // Rebuild the transcript: system message first (incoming one wins over a
    // stored one), then stored history, then the newest user message — but
    // only if an identical user message is not already present (dedup guard
    // against clients that resend the whole conversation).
    const storedMessages = conv.messages.map(m => ({ role: m.role, content: m.content }));
    const systemMsg = fullMessages.find(m => m.role === 'system') || storedMessages.find(m => m.role === 'system');
    fullMessages = systemMsg ? [systemMsg] : [];
    fullMessages.push(...storedMessages.filter(m => m.role !== 'system'));
    const lastNewUserMsg = messages.filter(m => m.role === 'user').pop();
    if (lastNewUserMsg && !fullMessages.some(m => m.role === 'user' && m.content === lastNewUserMsg.content)) {
      fullMessages.push(lastNewUserMsg);
    }
  }

  // Persist only the most recent user turn; earlier turns are already stored.
  const lastUserMsg = fullMessages.filter(m => m.role === 'user').pop();
  if (lastUserMsg) addMessageToConversation(conversationId, 'user', lastUserMsg.content);

  // Flatten the message list into the single-string input the agent expects.
  const input = buildConversationInput(fullMessages);
  const requestBody = buildAgentRequest(model, input);

  try {
    const response = await callYouApi(apiKey, requestBody);
    const data = await response.json();
    const resultText = extractText(data);
    addMessageToConversation(conversationId, 'assistant', resultText);

    return {
      content: [
        { type: 'text', text: resultText },
        // Footer lets the caller thread follow-up requests onto this conversation.
        { type: 'text', text: `\n\n---\nConversation ID: ${conversationId}` }
      ],
    };
  } catch (error) {
    markKeyError(apiKey);
    throw error;
  }
}
|
|
1005
|
+
|
|
1006
|
+
listConversations() {
|
|
1007
|
+
const conversations = [...conversationStore.entries()].map(([id, conv]) => ({
|
|
1008
|
+
id,
|
|
1009
|
+
message_count: conv.messages.length,
|
|
1010
|
+
created_at: new Date(conv.createdAt).toISOString(),
|
|
1011
|
+
updated_at: new Date(conv.updatedAt).toISOString(),
|
|
1012
|
+
preview: conv.messages.slice(-1)[0]?.content?.substring(0, 100) || ''
|
|
1013
|
+
})).sort((a, b) => new Date(b.updated_at) - new Date(a.updated_at));
|
|
1014
|
+
|
|
1015
|
+
return { content: [{ type: 'text', text: JSON.stringify({ total: conversations.length, conversations }, null, 2) }] };
|
|
1016
|
+
}
|
|
1017
|
+
|
|
1018
|
+
getConversationHistory(conversationId) {
|
|
1019
|
+
const conv = getConversation(conversationId);
|
|
1020
|
+
if (!conv) return { content: [{ type: 'text', text: `Conversation not found: ${conversationId}` }] };
|
|
1021
|
+
return {
|
|
1022
|
+
content: [{
|
|
1023
|
+
type: 'text',
|
|
1024
|
+
text: JSON.stringify({
|
|
1025
|
+
id: conv.id,
|
|
1026
|
+
messages: conv.messages.map(m => ({ role: m.role, content: m.content, timestamp: new Date(m.timestamp).toISOString() })),
|
|
1027
|
+
created_at: new Date(conv.createdAt).toISOString(),
|
|
1028
|
+
updated_at: new Date(conv.updatedAt).toISOString()
|
|
1029
|
+
}, null, 2)
|
|
1030
|
+
}],
|
|
1031
|
+
};
|
|
1032
|
+
}
|
|
1033
|
+
|
|
1034
|
+
deleteConversation(conversationId) {
|
|
1035
|
+
if (!conversationStore.has(conversationId)) {
|
|
1036
|
+
return { content: [{ type: 'text', text: `Conversation not found: ${conversationId}` }] };
|
|
1037
|
+
}
|
|
1038
|
+
conversationStore.delete(conversationId);
|
|
1039
|
+
return { content: [{ type: 'text', text: `Deleted conversation: ${conversationId}` }] };
|
|
1040
|
+
}
|
|
1041
|
+
|
|
1042
|
+
getKeyStatus() {
|
|
1043
|
+
const status = {
|
|
1044
|
+
total_keys: API_KEYS.length,
|
|
1045
|
+
key_mode: KEY_MODE,
|
|
1046
|
+
current_key_index: currentKeyIndex,
|
|
1047
|
+
keys: API_KEYS.map((key, index) => ({
|
|
1048
|
+
index,
|
|
1049
|
+
key_preview: `${key.substring(0, 8)}...${key.substring(key.length - 4)}`,
|
|
1050
|
+
usage_count: keyUsageCount.get(key) || 0,
|
|
1051
|
+
error_count: keyErrorCount.get(key) || 0,
|
|
1052
|
+
is_current: index === currentKeyIndex
|
|
1053
|
+
}))
|
|
1054
|
+
};
|
|
1055
|
+
return { content: [{ type: 'text', text: JSON.stringify(status, null, 2) }] };
|
|
1056
|
+
}
|
|
1057
|
+
|
|
1058
|
+
/**
 * Manage the companion OpenAI-compatible HTTP server as a child process.
 * Supports 'start', 'stop', 'restart', and 'status' actions; state lives in
 * the module-level openaiServerProcess / openaiServerStatus / openaiServerPort.
 *
 * @param {object} args
 * @param {'start'|'stop'|'restart'|'status'} args.action - What to do.
 * @param {number} [args.port=3002] - Port for the spawned server.
 * @param {string[]} [args.access_tokens=[]] - Access tokens passed via env;
 *   falls back to OPENAI_SERVER_ACCESS_TOKENS when empty.
 * @param {string} [args.store_type] - Conversation store backend override.
 * @param {string} [args.db_path] - SQLite path override (sqlite store only).
 * @returns {Promise<{content: Array}>} MCP response with a JSON status object.
 */
async controlOpenAIServer(args) {
  const { action, port = 3002, access_tokens = [], store_type, db_path } = args;
  // The server script ships alongside this file in the package.
  const openaiServerPath = join(__dirname, 'openai-server.js');

  // Small helper: wrap any object as a pretty-printed MCP text response.
  const jsonResponse = (obj) => ({ content: [{ type: 'text', text: JSON.stringify(obj, null, 2) }] });

  switch (action) {
    case 'start':
      // Idempotence guard: refuse to double-start.
      if (openaiServerProcess && openaiServerStatus === 'running') {
        return jsonResponse({ success: false, message: `Already running on port ${openaiServerPort}`, status: openaiServerStatus, port: openaiServerPort });
      }

      try {
        // Explicit args win over environment-derived defaults.
        const finalStoreType = store_type || OPENAI_SERVER_STORE_TYPE;
        const finalDbPath = db_path || OPENAI_SERVER_DB_PATH;
        const finalAccessTokens = access_tokens?.length > 0 ? access_tokens.join(',') : OPENAI_SERVER_ACCESS_TOKENS;

        // All configuration is passed to the child via environment variables.
        const env = {
          ...process.env,
          YDC_OPENAI_PORT: port.toString(),
          YDC_API_KEY: API_KEYS[0] || '',
          YDC_API_KEYS: API_KEYS.join(','),
          YDC_OPENAI_ACCESS_TOKENS: finalAccessTokens,
          YDC_CONVERSATION_STORE: finalStoreType,
          ...(finalDbPath && { YDC_CONVERSATION_DB_PATH: finalDbPath })
        };

        // stdout/stderr are piped (stderr is watched below for readiness);
        // stdin is not needed.
        openaiServerProcess = spawn('node', [openaiServerPath], { env, stdio: ['ignore', 'pipe', 'pipe'], detached: false });
        openaiServerPort = port;
        openaiServerStatus = 'starting';

        // Wait for readiness: either the child logs 'running' on stderr, or
        // 2s pass without an early exit/error (optimistic fallback).
        await new Promise((resolve, reject) => {
          const timeout = setTimeout(() => { openaiServerStatus = 'running'; resolve(); }, 2000);
          openaiServerProcess.stderr.on('data', (data) => {
            if (data.toString().includes('running')) { clearTimeout(timeout); openaiServerStatus = 'running'; resolve(); }
          });
          openaiServerProcess.on('error', (err) => { clearTimeout(timeout); openaiServerStatus = 'error'; reject(err); });
          openaiServerProcess.on('exit', (code) => {
            // Only treat a nonzero exit as a startup failure while still starting.
            if (code !== 0 && openaiServerStatus === 'starting') { clearTimeout(timeout); openaiServerStatus = 'stopped'; reject(new Error(`Exit code ${code}`)); }
          });
        });

        return jsonResponse({
          success: true,
          message: `OpenAI server started on port ${port}`,
          status: openaiServerStatus,
          port: openaiServerPort,
          endpoint: `http://localhost:${port}/v1/chat/completions`,
          pid: openaiServerProcess.pid,
          // db_path only applies to the sqlite backend.
          storage: { store_type: finalStoreType, db_path: finalStoreType === 'sqlite' ? (finalDbPath || 'conversations.db') : null },
          api_keys: { passthrough: API_KEYS.length > 0, count: API_KEYS.length }
        });
      } catch (error) {
        openaiServerStatus = 'error';
        return jsonResponse({ success: false, message: `Failed to start: ${error.message}`, status: openaiServerStatus });
      }

    case 'stop':
      if (!openaiServerProcess || openaiServerStatus === 'stopped') {
        return jsonResponse({ success: false, message: 'Server is not running', status: openaiServerStatus });
      }
      try {
        // Graceful shutdown; state is cleared immediately without waiting
        // for the child's actual exit.
        openaiServerProcess.kill('SIGTERM');
        const stoppedPort = openaiServerPort;
        openaiServerProcess = null;
        openaiServerPort = null;
        openaiServerStatus = 'stopped';
        return jsonResponse({ success: true, message: `Stopped (was on port ${stoppedPort})`, status: openaiServerStatus });
      } catch (error) {
        return jsonResponse({ success: false, message: `Failed to stop: ${error.message}`, status: openaiServerStatus });
      }

    case 'restart':
      // Best-effort stop (errors ignored), brief settle delay, then delegate
      // to the 'start' branch with the same arguments.
      if (openaiServerProcess && openaiServerStatus === 'running') {
        try { openaiServerProcess.kill('SIGTERM'); openaiServerProcess = null; openaiServerStatus = 'stopped'; await new Promise(r => setTimeout(r, 1000)); } catch {}
      }
      return await this.controlOpenAIServer({ action: 'start', port, access_tokens, store_type, db_path });

    case 'status':
      return jsonResponse({
        status: openaiServerStatus,
        port: openaiServerPort,
        pid: openaiServerProcess?.pid || null,
        endpoint: openaiServerStatus === 'running' ? `http://localhost:${openaiServerPort}/v1/chat/completions` : null
      });

    default:
      return jsonResponse({ success: false, message: `Unknown action: ${action}` });
  }
}
|
|
1148
|
+
|
|
1149
|
+
async run() {
|
|
1150
|
+
if (!validateApiKeys()) process.exit(1);
|
|
1151
|
+
const transport = new StdioServerTransport();
|
|
1152
|
+
await this.server.connect(transport);
|
|
1153
|
+
console.error(`You.com Agents MCP server v1.5.0 running on stdio`);
|
|
1154
|
+
console.error(`API Keys: ${API_KEYS.length}, Mode: ${KEY_MODE}`);
|
|
1155
|
+
}
|
|
1156
|
+
}
|
|
1157
|
+
|
|
1158
|
+
// Bootstrap: instantiate the server and start it; any startup failure from
// run() is logged to stderr rather than crashing with an unhandled rejection.
const server = new YouAgentsServer();
server.run().catch(console.error);
|
|
1160
|
+
}
|