ydc-agent 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +217 -0
- package/README_JA.md +189 -0
- package/README_ZH_CN.md +189 -0
- package/README_ZH_TW.md +189 -0
- package/index.js +1160 -0
- package/lib/advanced-versions.js +113 -0
- package/lib/anthropic-mapper.js +255 -0
- package/lib/api-client.js +140 -0
- package/lib/auth-middleware.js +44 -0
- package/lib/conversation-store.js +358 -0
- package/lib/openai-mapper.js +215 -0
- package/lib/request-logger.js +132 -0
- package/lib/routes/anthropic-messages.js +269 -0
- package/lib/routes/chat.js +249 -0
- package/lib/routes/conversations.js +94 -0
- package/lib/routes/health.js +31 -0
- package/lib/routes/models.js +111 -0
- package/openai-server.js +99 -0
- package/package.json +70 -0
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Anthropic Messages Route
|
|
3
|
+
* Handles /v1/messages endpoint (Anthropic/Claude API compatible)
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { Router } from 'express';
|
|
7
|
+
import { callYouApi } from '../api-client.js';
|
|
8
|
+
import {
|
|
9
|
+
mapAnthropicToYouParams,
|
|
10
|
+
convertToAnthropicResponse,
|
|
11
|
+
createMessageStartEvent,
|
|
12
|
+
createContentBlockStartEvent,
|
|
13
|
+
createContentBlockDeltaEvent,
|
|
14
|
+
createContentBlockStopEvent,
|
|
15
|
+
createMessageDeltaEvent,
|
|
16
|
+
createMessageStopEvent
|
|
17
|
+
} from '../anthropic-mapper.js';
|
|
18
|
+
import {
|
|
19
|
+
getConversation,
|
|
20
|
+
createConversation,
|
|
21
|
+
addMessageToConversation,
|
|
22
|
+
generateConversationId
|
|
23
|
+
} from '../conversation-store.js';
|
|
24
|
+
import { logRequest, logStreamComplete, logResponse } from '../request-logger.js';
|
|
25
|
+
|
|
26
|
+
// Express router exposing the Anthropic-compatible /v1/messages endpoint.
const router = Router();
|
|
27
|
+
|
|
28
|
+
// Get API key with rotation support
|
|
29
|
+
// Pick one configured You.com API key at random (simple key rotation).
// Reads YDC_API_KEYS (comma-separated list) and falls back to YDC_API_KEY.
// Throws when neither variable yields a usable key.
function getApiKey() {
  const raw = process.env.YDC_API_KEYS || process.env.YDC_API_KEY || '';
  const pool = raw.split(',').filter((entry) => entry.trim());
  if (pool.length === 0) throw new Error('No API key configured');
  const pick = pool[Math.floor(Math.random() * pool.length)];
  return pick.trim();
}
|
|
34
|
+
|
|
35
|
+
// Anthropic Messages endpoint
|
|
36
|
+
/**
 * POST /v1/messages — Anthropic Messages API compatible endpoint.
 *
 * Validates the request, maps it to You.com agent parameters, persists the
 * conversation turns, and replies either as an Anthropic-style SSE stream or
 * as a single JSON message object.
 *
 * Fixes over the original:
 *  - the outer error handler no longer calls res.status(...).json(...) after
 *    streaming has begun (which would throw ERR_HTTP_HEADERS_SENT);
 *  - the latest user message text is extracted once and reused for both
 *    logging and persistence (it was previously derived twice).
 */
router.post('/v1/messages', async (req, res) => {
  try {
    const {
      model = 'claude-3-5-sonnet-20241022',
      messages,
      system,
      max_tokens = 1024,
      temperature = 0.7,
      stream = false,
      metadata
    } = req.body;

    if (!messages || !Array.isArray(messages) || messages.length === 0) {
      return res.status(400).json({
        type: 'error',
        error: {
          type: 'invalid_request_error',
          message: 'messages is required and must be a non-empty array'
        }
      });
    }

    const apiKey = getApiKey();

    // Resume the conversation named in metadata, or start a fresh one.
    const conversationId = metadata?.conversation_id || generateConversationId();
    if (!getConversation(conversationId)) {
      createConversation(conversationId);
    }

    // Map Anthropic-shaped parameters onto the You.com request.
    const youParams = mapAnthropicToYouParams({
      model,
      messages,
      system,
      temperature,
      max_tokens,
      stream
    });

    // Latest user input as plain text; Anthropic content may be a string or
    // an array of content blocks, of which only the text blocks are kept.
    const lastUserMsg = messages.filter(m => m.role === 'user').pop();
    const currentInput = typeof lastUserMsg?.content === 'string'
      ? lastUserMsg.content
      : lastUserMsg?.content?.filter(c => c.type === 'text').map(c => c.text).join('\n') || '';

    // Flatten every message (plus the system prompt) to text for logging.
    const fullMessages = [];
    if (system) {
      fullMessages.push({ role: 'system', content: system });
    }
    messages.forEach(m => {
      const content = typeof m.content === 'string'
        ? m.content
        : m.content?.filter(c => c.type === 'text').map(c => c.text).join('\n') || '';
      fullMessages.push({ role: m.role, content });
    });

    logRequest({
      endpoint: '/v1/messages (Anthropic)',
      agent: youParams.agent,
      model,
      stream,
      conversationId,
      messageCount: fullMessages.length,
      input: currentInput,
      inputMessages: fullMessages
    });

    // Persist the latest user turn (currentInput was computed above; the
    // original re-derived the same value here a second time).
    if (lastUserMsg) {
      addMessageToConversation(conversationId, 'user', currentInput);
    }

    if (stream) {
      // ---- Streaming response (Anthropic SSE event protocol) ----
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      // message_start (carries conversation_id) then content_block_start.
      res.write(createMessageStartEvent(model, conversationId));
      res.write(createContentBlockStartEvent(0));

      try {
        const response = await callYouApi(apiKey, { ...youParams, stream: true });

        let fullContent = '';
        let buffer = '';

        // Web Streams API (ReadableStream) reader over the upstream body.
        const reader = response.body.getReader();
        const decoder = new TextDecoder();

        const processStream = async () => {
          try {
            while (true) {
              const { done, value } = await reader.read();
              if (done) break;

              // Accumulate bytes; only complete lines are processed, the
              // trailing partial line is carried over in `buffer`.
              buffer += decoder.decode(value, { stream: true });
              const lines = buffer.split('\n');
              buffer = lines.pop() || '';

              for (const line of lines) {
                if (!line.startsWith('data: ')) continue;
                const data = line.slice(6);
                if (data === '[DONE]') continue;

                try {
                  const parsed = JSON.parse(data);

                  // Incremental delta events.
                  if (parsed.type === 'response.output_text.delta' &&
                      parsed.response?.type === 'message.answer' &&
                      parsed.response?.delta) {
                    const newText = parsed.response.delta;
                    if (newText) {
                      fullContent += newText;
                      res.write(createContentBlockDeltaEvent(newText, 0));
                    }
                  }

                  // Full-output payloads (non-streaming fallback): forward
                  // only the suffix that has not been emitted yet.
                  if (parsed.output) {
                    for (const item of parsed.output) {
                      if (item.type === 'message.answer' && item.text) {
                        const newText = item.text.slice(fullContent.length);
                        if (newText) {
                          fullContent = item.text;
                          res.write(createContentBlockDeltaEvent(newText, 0));
                        }
                      }
                    }
                  }
                } catch (e) {
                  // Skip lines that are not valid JSON.
                }
              }
            }

            // Persist the assistant turn before closing the stream.
            if (fullContent) {
              addMessageToConversation(conversationId, 'assistant', fullContent);
            }

            logStreamComplete({
              conversationId,
              contentLength: fullContent.length,
              messageCount: fullMessages.length + 1,
              agent: youParams.agent,
              stream: true,
              responsePreview: fullContent,
              inputMessages: [...fullMessages, { role: 'assistant', content: fullContent }]
            });

            // Closing events; the usage figure is a rough chars/4 token estimate.
            res.write(createContentBlockStopEvent(0));
            res.write(createMessageDeltaEvent(Math.floor(fullContent.length / 4)));
            res.write(createMessageStopEvent());
            res.end();
          } catch (error) {
            // Upstream failed mid-stream: surface the error as a delta, then
            // close the SSE conversation cleanly.
            console.error('Stream processing error:', error);
            res.write(createContentBlockDeltaEvent(`Error: ${error.message}`, 0));
            res.write(createContentBlockStopEvent(0));
            res.write(createMessageDeltaEvent(0));
            res.write(createMessageStopEvent());
            res.end();
          }
        };

        // Deliberately not awaited: the handler returns while the stream is
        // pumped; every failure path is handled inside processStream.
        processStream();

      } catch (error) {
        console.error('Streaming error:', error);
        res.write(createContentBlockDeltaEvent(`Error: ${error.message}`, 0));
        res.write(createContentBlockStopEvent(0));
        res.write(createMessageDeltaEvent(0));
        res.write(createMessageStopEvent());
        res.end();
      }

    } else {
      // ---- Non-streaming response ----
      const response = await callYouApi(apiKey, youParams);
      const data = await response.json();

      console.log('📥 You.com response received');

      const anthropicResponse = convertToAnthropicResponse(data, model);

      // Persist the assistant turn and echo the conversation id back.
      const assistantContent = anthropicResponse.content[0]?.text || '';
      if (assistantContent) {
        addMessageToConversation(conversationId, 'assistant', assistantContent);
        anthropicResponse.metadata = { conversation_id: conversationId };
      }

      logStreamComplete({
        conversationId,
        contentLength: assistantContent.length,
        messageCount: fullMessages.length + 1,
        agent: youParams.agent,
        stream: false,
        inputMessages: [...fullMessages, { role: 'assistant', content: assistantContent }]
      });

      res.json(anthropicResponse);
    }

  } catch (error) {
    console.error('Anthropic messages error:', error);
    // After streaming begins the headers are already sent; a JSON error body
    // would throw ERR_HTTP_HEADERS_SENT — terminate the response instead.
    if (res.headersSent) {
      res.end();
      return;
    }
    res.status(500).json({
      type: 'error',
      error: {
        type: 'api_error',
        message: error.message
      }
    });
  }
});

export default router;
|
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Chat Completions Route
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { Router } from 'express';
|
|
6
|
+
import { authenticate } from '../auth-middleware.js';
|
|
7
|
+
import { mapOpenAIToYouParams, convertToOpenAIResponse, createStreamChunk } from '../openai-mapper.js';
|
|
8
|
+
import { callYouApi } from '../api-client.js';
|
|
9
|
+
import {
|
|
10
|
+
getConversation,
|
|
11
|
+
addMessageToConversation,
|
|
12
|
+
generateConversationId
|
|
13
|
+
} from '../conversation-store.js';
|
|
14
|
+
import { logRequest, logStreamComplete } from '../request-logger.js';
|
|
15
|
+
|
|
16
|
+
const router = Router();
// Read once at module load. NOTE(review): unlike anthropic-messages.js, which
// re-reads keys per request, changes to YDC_API_KEY after startup are ignored
// here — confirm this is intended.
const API_KEY = process.env.YDC_API_KEY;
|
|
18
|
+
|
|
19
|
+
/**
 * POST /v1/chat/completions — OpenAI-compatible chat endpoint.
 *
 * Supports server-side conversation persistence: when a conversation_id is
 * supplied, previously stored turns are merged ahead of the newest user
 * message before the request is forwarded to You.com.
 *
 * Fixes over the original:
 *  - the debug log called m.content?.substring(0, 50), which throws when
 *    content is an array of content parts — now guarded by typeof;
 *  - the catch block checks res.headersSent before sending a JSON error, so
 *    a failure after streaming started cannot throw ERR_HTTP_HEADERS_SENT.
 */
router.post('/v1/chat/completions', authenticate, async (req, res) => {
  try {
    // Debug: log a compact view of the raw request (content truncated).
    console.log('📨 Raw request body:', JSON.stringify({
      conversation_id: req.body.conversation_id,
      model: req.body.model,
      messages_count: req.body.messages?.length,
      messages: req.body.messages?.map(m => ({
        role: m.role,
        // content may be an array of parts; substring only applies to strings
        content: typeof m.content === 'string' ? m.content.substring(0, 50) : m.content
      }))
    }, null, 2));

    if (!API_KEY) {
      return res.status(500).json({
        error: {
          message: 'YDC_API_KEY not configured on server',
          type: 'server_error',
          code: 'missing_api_key'
        }
      });
    }

    const { conversation_id, messages } = req.body;
    let conversationId = conversation_id;
    let fullMessages = messages || [];

    if (conversationId) {
      // Merge stored history with the incoming request: system prompt first,
      // then stored turns, then the newest user message (unless it is already
      // present in the stored history).
      const existingConv = getConversation(conversationId);
      if (existingConv && existingConv.messages.length > 0) {
        const storedMessages = existingConv.messages.map(m => ({ role: m.role, content: m.content }));
        const newUserMessages = fullMessages.filter(m => m.role === 'user');
        const systemMsg = fullMessages.find(m => m.role === 'system') || storedMessages.find(m => m.role === 'system');

        fullMessages = systemMsg ? [systemMsg] : [];
        fullMessages.push(...storedMessages.filter(m => m.role !== 'system'));

        const lastNewUserMsg = newUserMessages[newUserMessages.length - 1];
        if (lastNewUserMsg) {
          const alreadyExists = fullMessages.some(m => m.role === 'user' && m.content === lastNewUserMsg.content);
          if (!alreadyExists) {
            fullMessages.push(lastNewUserMsg);
          }
        }
      }
    } else {
      conversationId = generateConversationId();
    }

    // Persist the newest user turn.
    const lastUserMsg = fullMessages.filter(m => m.role === 'user').pop();
    if (lastUserMsg) {
      addMessageToConversation(conversationId, 'user', lastUserMsg.content);
    }

    // Ensure the system prompt is stored exactly once, at the front.
    const systemMsg = fullMessages.find(m => m.role === 'system');
    if (systemMsg) {
      const conv = getConversation(conversationId);
      if (conv && !conv.messages.some(m => m.role === 'system')) {
        conv.messages.unshift({ role: 'system', content: systemMsg.content, timestamp: Date.now() });
      }
    }

    const youParams = mapOpenAIToYouParams({ ...req.body, messages: fullMessages });

    // Current user input for logging.
    const currentUserMsg = fullMessages.filter(m => m.role === 'user').pop();
    const currentInput = currentUserMsg?.content || '';

    logRequest({
      endpoint: '/v1/chat/completions (OpenAI)',
      agent: youParams.agent,
      model: req.body.model,
      stream: req.body.stream || false,
      conversationId,
      messageCount: fullMessages.length,
      input: currentInput,
      inputMessages: fullMessages
    });

    // Advanced agents are allowed much longer: 50 min vs 5 min default.
    const timeoutMs = youParams.timeout || (youParams.agent === 'advanced' ? 3000000 : 300000);

    if (req.body.stream) {
      const response = await callYouApi(API_KEY, { ...youParams, stream: true }, { timeout: timeoutMs });
      await handleStreamingResponse(req, res, response, youParams, conversationId, fullMessages);
    } else {
      const response = await callYouApi(API_KEY, youParams, { timeout: timeoutMs });
      await handleNonStreamingResponse(req, res, response, conversationId, fullMessages);
    }

  } catch (error) {
    console.error('❌ Server error:', error);

    // If streaming already began, a JSON error body would throw
    // ERR_HTTP_HEADERS_SENT — end the response instead.
    if (res.headersSent) {
      res.end();
      return;
    }

    if (error.name === 'AbortError') {
      return res.status(408).json({
        error: {
          message: 'Request timeout - Advanced agent responses may require extended processing time',
          type: 'timeout_error',
          code: 'request_timeout'
        }
      });
    }

    res.status(500).json({
      error: {
        message: error.message,
        type: 'server_error',
        code: 'internal_error'
      }
    });
  }
});
|
|
127
|
+
|
|
128
|
+
/**
 * Streams the You.com SSE response back to the client as OpenAI-style chunks.
 * An inactivity timeout is re-armed on every received delta; on expiry a
 * "[Response timeout]" chunk is flushed and the response is ended.
 *
 * Fixes over the original:
 *  - the timer is now always cleared in `finally` (previously it leaked when
 *    a stream error was thrown, firing later against an ended response);
 *  - after the timeout handler ends the response, the read loop stops and the
 *    finally block no longer writes after end (write-after-end crash).
 */
async function handleStreamingResponse(req, res, response, youParams, conversationId, inputMessages = []) {
  res.setHeader('Content-Type', 'text/event-stream');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  res.setHeader('Access-Control-Allow-Origin', '*');

  if (!response.body) {
    res.write(`data: {"error": "No response body"}\n\n`);
    res.end();
    return;
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let fullContent = '';
  let timedOut = false;
  let streamTimeout = null;
  const model = req.body.model || 'advanced';
  const messageCount = req.body.messages?.length || 0;
  const STREAM_TIMEOUT = youParams.timeout || (youParams.agent === 'advanced' ? 3000000 : 300000);

  // (Re)arm the inactivity timer; on expiry, flush a timeout notice and end.
  const armTimeout = () => {
    if (streamTimeout) clearTimeout(streamTimeout);
    streamTimeout = setTimeout(() => {
      timedOut = true;
      const chunk = createStreamChunk(model, `\n\n[Response timeout]`, 'length');
      res.write(`data: ${JSON.stringify(chunk)}\n\n`);
      res.write('data: [DONE]\n\n');
      res.end();
    }, STREAM_TIMEOUT);
  };

  try {
    armTimeout();

    while (!timedOut) {
      const { done, value } = await reader.read();
      if (done) break;

      // Accumulate bytes; process only complete lines, keep the partial tail.
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop() || '';

      for (const line of lines) {
        if (!line.startsWith('data: ')) continue;
        try {
          const data = JSON.parse(line.slice(6));

          if (data.type === 'response.output_text.delta' &&
              data.response?.type === 'message.answer' &&
              data.response?.delta) {
            armTimeout(); // activity observed — push the deadline out

            fullContent += data.response.delta;
            const chunk = createStreamChunk(model, data.response.delta);
            res.write(`data: ${JSON.stringify(chunk)}\n\n`);
          }
        } catch (e) {
          console.error('❌ Error parsing streaming data:', e);
        }
      }
    }

    // Store assistant response
    if (conversationId && fullContent) {
      addMessageToConversation(conversationId, 'assistant', fullContent);
    }

    // Log completion
    logStreamComplete({
      conversationId,
      contentLength: fullContent.length,
      messageCount: messageCount + 1,
      agent: youParams.agent,
      stream: true,
      responsePreview: fullContent,
      inputMessages: inputMessages
    });

  } catch (streamError) {
    console.error('❌ Streaming error:', streamError);
    if (!res.writableEnded) {
      res.write(`data: {"error": "Streaming error: ${streamError.message}"}\n\n`);
    }
  } finally {
    // Always release the timer, and never write after the timeout handler
    // (or an earlier error path) has already ended the response.
    if (streamTimeout) clearTimeout(streamTimeout);
    if (!res.writableEnded) {
      const finalChunk = createStreamChunk(model, null, 'stop');
      res.write(`data: ${JSON.stringify(finalChunk)}\n\n`);
      res.write('data: [DONE]\n\n');
      res.end();
    }
  }
}
|
|
223
|
+
|
|
224
|
+
/**
 * Converts a non-streaming You.com reply into an OpenAI chat completion,
 * persists the assistant turn, logs the exchange, and sends the JSON payload
 * (annotated with the conversation_id so clients can continue the thread).
 */
async function handleNonStreamingResponse(req, res, response, conversationId, inputMessages = []) {
  const payload = await response.json();
  console.log('📥 You.com response:', JSON.stringify(payload, null, 2));

  const model = req.body.model || 'advanced';
  const openaiResponse = convertToOpenAIResponse(payload, model);

  // Persist the assistant turn when there is content to store.
  const assistantContent = openaiResponse.choices?.[0]?.message?.content;
  if (assistantContent && conversationId) {
    addMessageToConversation(conversationId, 'assistant', assistantContent);
  }

  // Log completion
  logStreamComplete({
    conversationId,
    contentLength: assistantContent?.length || 0,
    messageCount: inputMessages.length + 1,
    agent: model,
    stream: false,
    inputMessages: inputMessages
  });

  openaiResponse.conversation_id = conversationId;
  res.json(openaiResponse);
}

export default router;
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Conversations Route
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { Router } from 'express';
|
|
6
|
+
import { authenticate } from '../auth-middleware.js';
|
|
7
|
+
import {
|
|
8
|
+
getConversation,
|
|
9
|
+
createConversation,
|
|
10
|
+
addMessageToConversation,
|
|
11
|
+
listAllConversations,
|
|
12
|
+
deleteConversation,
|
|
13
|
+
clearAllConversations
|
|
14
|
+
} from '../conversation-store.js';
|
|
15
|
+
|
|
16
|
+
// Express router for conversation CRUD endpoints (/v1/conversations).
const router = Router();
|
|
17
|
+
|
|
18
|
+
// List all conversations
|
|
19
|
+
// List all conversations, newest-first by updated_at.
router.get('/v1/conversations', authenticate, (req, res) => {
  const conversations = listAllConversations();

  // Sort a copy — Array#sort mutates in place, and the array returned by the
  // store may be store-owned state.
  const sorted = [...conversations].sort((a, b) => new Date(b.updated_at) - new Date(a.updated_at));

  res.json({
    object: 'list',
    data: sorted,
    total: conversations.length
  });
});
|
|
28
|
+
|
|
29
|
+
// Get single conversation
|
|
30
|
+
// Fetch one conversation with its full message history; 404 when unknown.
router.get('/v1/conversations/:id', authenticate, (req, res) => {
  const conv = getConversation(req.params.id);
  if (!conv) {
    return res.status(404).json({
      error: {
        message: 'Conversation not found',
        type: 'not_found_error',
        code: 'conversation_not_found'
      }
    });
  }

  // Message timestamps are serialized as ISO-8601 strings.
  const messages = conv.messages.map(({ role, content, timestamp }) => ({
    role,
    content,
    timestamp: new Date(timestamp).toISOString()
  }));

  res.json({
    id: conv.id,
    messages,
    created_at: new Date(conv.createdAt).toISOString(),
    updated_at: new Date(conv.updatedAt).toISOString(),
    metadata: conv.metadata
  });
});
|
|
54
|
+
|
|
55
|
+
// Create new conversation
|
|
56
|
+
// Create a new conversation, optionally seeded with a system message.
router.post('/v1/conversations', authenticate, (req, res) => {
  const { metadata = {}, system_message } = req.body;

  const conv = createConversation(null, metadata);
  if (system_message) addMessageToConversation(conv.id, 'system', system_message);

  const body = {
    id: conv.id,
    created_at: new Date(conv.createdAt).toISOString(),
    metadata: conv.metadata
  };
  res.status(201).json(body);
});
|
|
70
|
+
|
|
71
|
+
// Delete conversation
|
|
72
|
+
// Delete a single conversation by id; 404 when it does not exist.
router.delete('/v1/conversations/:id', authenticate, (req, res) => {
  const { id } = req.params;

  if (!getConversation(id)) {
    res.status(404).json({
      error: {
        message: 'Conversation not found',
        type: 'not_found_error',
        code: 'conversation_not_found'
      }
    });
    return;
  }

  deleteConversation(id);
  res.json({ deleted: true, id });
});
|
|
87
|
+
|
|
88
|
+
// Clear all conversations
|
|
89
|
+
// Wipe every stored conversation; responds with how many were removed.
router.delete('/v1/conversations', authenticate, (req, res) => {
  res.json({ deleted: true, count: clearAllConversations() });
});

export default router;
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Health Check Route
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { Router } from 'express';
|
|
6
|
+
import { getConversationCount, storeConfig } from '../conversation-store.js';
|
|
7
|
+
import { authConfig } from '../auth-middleware.js';
|
|
8
|
+
|
|
9
|
+
const router = Router();
// Used in this file only to report whether a key is configured (never echoed back).
const API_KEY = process.env.YDC_API_KEY;
|
|
11
|
+
|
|
12
|
+
// GET /health — liveness probe plus a snapshot of auth and
// conversation-store configuration. Unauthenticated by design.
router.get('/health', (req, res) => {
  // Report the DB path only when the sqlite store is actually connected.
  const usingSqlite = storeConfig.STORE_TYPE === 'sqlite' && storeConfig.isDbConnected();

  res.json({
    status: 'healthy',
    timestamp: new Date().toISOString(),
    ydc_api_key_configured: !!API_KEY,
    auth: {
      token_auth_enabled: authConfig.REQUIRE_TOKEN_AUTH,
      allowed_tokens_count: authConfig.ACCESS_TOKENS_COUNT
    },
    conversations: {
      store_type: storeConfig.STORE_TYPE,
      db_path: usingSqlite ? storeConfig.DB_PATH : null,
      active: getConversationCount(),
      max: storeConfig.MAX_CONVERSATIONS,
      ttl_hours: storeConfig.CONVERSATION_TTL / (60 * 60 * 1000)
    }
  });
});

export default router;
|