agentic-flow 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,707 @@
1
+ // Anthropic to Requesty Proxy Server
2
+ // Converts Anthropic API format to Requesty format
3
+ import express from 'express';
4
+ import { logger } from '../utils/logger.js';
5
+ import { getMaxTokensForModel } from './provider-instructions.js';
6
+ import { detectModelCapabilities } from '../utils/modelCapabilities.js';
7
+ import { ToolEmulator, executeEmulation } from './tool-emulation.js';
8
/**
 * HTTP proxy that accepts Anthropic Messages API requests and forwards them
 * to Requesty's OpenAI-compatible `/chat/completions` endpoint, translating
 * request/response payloads (including tools and streaming) in both directions.
 */
export class AnthropicToRequestyProxy {
  app;
  requestyApiKey;
  requestyBaseUrl;
  defaultModel;
  capabilities;

  /**
   * @param {object} config
   * @param {string} config.requestyApiKey - Bearer token for the Requesty API.
   * @param {string} [config.requestyBaseUrl] - API root; defaults to the public router.
   * @param {string} [config.defaultModel] - Model used when the request omits one
   *   or requests a Claude model while routing through Requesty.
   * @param {object} [config.capabilities] - Pre-computed model capabilities
   *   (e.g. whether tool emulation is required and which strategy to use).
   */
  constructor(config) {
    this.app = express();
    this.requestyApiKey = config.requestyApiKey;
    this.requestyBaseUrl = config.requestyBaseUrl || 'https://router.requesty.ai/v1';
    this.defaultModel = config.defaultModel || 'deepseek/deepseek-chat';
    this.capabilities = config.capabilities;
    // Debug logging
    if (this.capabilities) {
      logger.info('Proxy initialized with capabilities', {
        model: this.defaultModel,
        requiresEmulation: this.capabilities.requiresEmulation,
        strategy: this.capabilities.emulationStrategy
      });
    }
    this.setupMiddleware();
    this.setupRoutes();
  }

  /** Install JSON body parsing and request logging middleware. */
  setupMiddleware() {
    // Parse JSON bodies (large limit: agent conversations can be sizeable)
    this.app.use(express.json({ limit: '50mb' }));
    // Logging middleware
    this.app.use((req, res, next) => {
      logger.debug('Proxy request', {
        method: req.method,
        path: req.path,
        headers: Object.keys(req.headers)
      });
      next();
    });
  }

  /** Register the health check, the /v1/messages translation route, and a 404 fallback. */
  setupRoutes() {
    // Health check
    this.app.get('/health', (req, res) => {
      res.json({ status: 'ok', service: 'anthropic-to-requesty-proxy' });
    });
    // Anthropic Messages API → Requesty Chat Completions
    this.app.post('/v1/messages', async (req, res) => {
      console.log('šŸ”µ REQUEST RECEIVED AT PROXY - /v1/messages');
      logger.info('šŸ”µ REQUEST RECEIVED AT PROXY - /v1/messages');
      try {
        const anthropicReq = req.body;
        console.log('šŸ”µ Request body parsed successfully');
        // VERBOSE LOGGING: Log incoming Anthropic request.
        // The system prompt can be a string OR an array of content blocks.
        const systemPreview = typeof anthropicReq.system === 'string'
          ? anthropicReq.system.substring(0, 200)
          : Array.isArray(anthropicReq.system)
            ? JSON.stringify(anthropicReq.system).substring(0, 200)
            : undefined;
        logger.info('=== INCOMING ANTHROPIC REQUEST ===', {
          model: anthropicReq.model,
          systemPrompt: systemPreview,
          systemType: typeof anthropicReq.system,
          messageCount: anthropicReq.messages?.length,
          toolCount: anthropicReq.tools?.length || 0,
          toolNames: anthropicReq.tools?.map(t => t.name) || [],
          maxTokens: anthropicReq.max_tokens,
          temperature: anthropicReq.temperature,
          stream: anthropicReq.stream
        });
        // Log first user message for debugging
        if (anthropicReq.messages && anthropicReq.messages.length > 0) {
          const firstMsg = anthropicReq.messages[0];
          logger.info('First user message:', {
            role: firstMsg.role,
            contentPreview: typeof firstMsg.content === 'string'
              ? firstMsg.content.substring(0, 200)
              : JSON.stringify(firstMsg.content).substring(0, 200)
          });
        }
        // Route to appropriate handler based on capabilities.
        // Streaming handlers write the response themselves and return null.
        const result = await this.handleRequest(anthropicReq, res);
        if (result) {
          res.json(result);
        }
      }
      catch (error) {
        logger.error('Proxy error', { error: error.message, stack: error.stack });
        res.status(500).json({
          error: {
            type: 'proxy_error',
            message: error.message
          }
        });
      }
    });
    // Fallback for other Anthropic API endpoints
    this.app.use((req, res) => {
      logger.warn('Unsupported endpoint', { path: req.path, method: req.method });
      res.status(404).json({
        error: {
          type: 'not_found',
          message: `Endpoint ${req.path} not supported by proxy`
        }
      });
    });
  }

  /**
   * Dispatch a request to either the tool-emulation path or the native
   * (OpenAI function-calling) path depending on model capabilities.
   * @returns {Promise<object|null>} Anthropic-format response, or null if the
   *   handler already wrote to `res` (error or streaming).
   */
  async handleRequest(anthropicReq, res) {
    let model = anthropicReq.model || this.defaultModel;
    // If SDK is requesting a Claude model but we're using Requesty with a different default,
    // override to use the CLI-specified model
    if (model.startsWith('claude-') && this.defaultModel && !this.defaultModel.startsWith('claude-')) {
      logger.info(`Overriding SDK Claude model ${model} with CLI-specified ${this.defaultModel}`);
      model = this.defaultModel;
      anthropicReq.model = model;
    }
    const capabilities = this.capabilities || detectModelCapabilities(model);
    // Emulation is only needed when the model can't do native tool calling AND tools were requested
    if (capabilities.requiresEmulation && anthropicReq.tools && anthropicReq.tools.length > 0) {
      logger.info(`Using tool emulation for model: ${model}`);
      return this.handleEmulatedRequest(anthropicReq, capabilities);
    }
    return this.handleNativeRequest(anthropicReq, res);
  }

  /**
   * Forward a request to Requesty using native OpenAI function calling.
   * Handles both streaming (SSE translated on the fly, response written to
   * `res`, returns null) and non-streaming (returns an Anthropic response).
   */
  async handleNativeRequest(anthropicReq, res) {
    // Convert Anthropic format to OpenAI format
    const openaiReq = this.convertAnthropicToOpenAI(anthropicReq);
    // VERBOSE LOGGING: Log converted OpenAI request
    logger.info('=== CONVERTED OPENAI REQUEST ===', {
      anthropicModel: anthropicReq.model,
      openaiModel: openaiReq.model,
      messageCount: openaiReq.messages.length,
      systemPrompt: openaiReq.messages[0]?.content?.substring(0, 300),
      toolCount: openaiReq.tools?.length || 0,
      toolNames: openaiReq.tools?.map(t => t.function.name) || [],
      maxTokens: openaiReq.max_tokens,
      apiKeyPresent: !!this.requestyApiKey,
      apiKeyPrefix: this.requestyApiKey?.substring(0, 10)
    });
    // Forward to Requesty
    const response = await fetch(`${this.requestyBaseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.requestyApiKey}`,
        'Content-Type': 'application/json',
        'HTTP-Referer': 'https://github.com/ruvnet/agentic-flow',
        'X-Title': 'Agentic Flow'
      },
      body: JSON.stringify(openaiReq)
    });
    if (!response.ok) {
      const error = await response.text();
      logger.error('Requesty API error', { status: response.status, error });
      res.status(response.status).json({
        error: {
          type: 'api_error',
          message: error
        }
      });
      return null;
    }
    // VERBOSE LOGGING: Log Requesty response status
    logger.info('=== REQUESTY RESPONSE RECEIVED ===', {
      status: response.status,
      statusText: response.statusText,
      headers: Object.fromEntries(response.headers.entries())
    });
    // Handle streaming vs non-streaming
    if (anthropicReq.stream) {
      logger.info('Handling streaming response...');
      // Stream response
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');
      const reader = response.body?.getReader();
      if (!reader) {
        throw new Error('No response body');
      }
      const decoder = new TextDecoder();
      // Buffer across network chunks: a chunk boundary can fall in the middle
      // of a multi-byte UTF-8 character or an SSE line, so we only hand
      // complete lines to the converter and keep the remainder for later.
      let buffer = '';
      while (true) {
        const { done, value } = await reader.read();
        if (done)
          break;
        // { stream: true } keeps partial multi-byte sequences inside the decoder
        buffer += decoder.decode(value, { stream: true });
        const lastNewline = buffer.lastIndexOf('\n');
        if (lastNewline === -1) {
          continue; // no complete line yet
        }
        const completeLines = buffer.slice(0, lastNewline + 1);
        buffer = buffer.slice(lastNewline + 1);
        res.write(this.convertOpenAIStreamToAnthropic(completeLines));
      }
      // Flush any bytes still held by the decoder and any trailing line
      buffer += decoder.decode();
      if (buffer) {
        res.write(this.convertOpenAIStreamToAnthropic(buffer));
      }
      res.end();
      return null; // Already sent response
    }
    else {
      logger.info('Handling non-streaming response...');
      // Non-streaming response
      const openaiRes = await response.json();
      // VERBOSE LOGGING: Log raw OpenAI response
      logger.info('=== RAW OPENAI RESPONSE ===', {
        id: openaiRes.id,
        model: openaiRes.model,
        choices: openaiRes.choices?.length,
        finishReason: openaiRes.choices?.[0]?.finish_reason,
        hasToolCalls: !!(openaiRes.choices?.[0]?.message?.tool_calls),
        toolCallCount: openaiRes.choices?.[0]?.message?.tool_calls?.length || 0,
        toolCallNames: openaiRes.choices?.[0]?.message?.tool_calls?.map((tc) => tc.function.name) || [],
        contentPreview: openaiRes.choices?.[0]?.message?.content?.substring(0, 300),
        usage: openaiRes.usage
      });
      const anthropicRes = this.convertOpenAIToAnthropic(openaiRes);
      // VERBOSE LOGGING: Log converted Anthropic response
      logger.info('=== CONVERTED ANTHROPIC RESPONSE ===', {
        id: anthropicRes.id,
        model: anthropicRes.model,
        role: anthropicRes.role,
        stopReason: anthropicRes.stop_reason,
        contentBlocks: anthropicRes.content?.length,
        contentTypes: anthropicRes.content?.map((c) => c.type),
        toolUseCount: anthropicRes.content?.filter((c) => c.type === 'tool_use').length,
        textPreview: anthropicRes.content?.find((c) => c.type === 'text')?.text?.substring(0, 200),
        usage: anthropicRes.usage
      });
      return anthropicRes;
    }
  }

  /**
   * Handle a tools request for a model without native function calling by
   * running a prompt-based emulation loop (e.g. ReAct).
   * NOTE: actual tool execution is not implemented yet (Phase 2 stub).
   * Token counts in the returned usage are not tracked (reported as 0).
   */
  async handleEmulatedRequest(anthropicReq, capabilities) {
    const emulator = new ToolEmulator(anthropicReq.tools || [], capabilities.emulationStrategy);
    const lastMessage = anthropicReq.messages[anthropicReq.messages.length - 1];
    const userMessage = typeof lastMessage.content === 'string'
      ? lastMessage.content
      : (lastMessage.content.find(c => c.type === 'text')?.text || '');
    const result = await executeEmulation(emulator, userMessage, async (prompt) => {
      // Call model with emulation prompt.
      // Cap max_tokens at 8192 for OpenAI models via Requesty.
      let maxTokens = anthropicReq.max_tokens;
      if (maxTokens && maxTokens > 8192) {
        maxTokens = 8192;
      }
      const openaiReq = {
        model: anthropicReq.model || this.defaultModel,
        messages: [{ role: 'user', content: prompt }],
        temperature: anthropicReq.temperature,
        max_tokens: maxTokens
      };
      const response = await this.callRequesty(openaiReq);
      return response.choices[0].message.content;
    }, async (toolCall) => {
      logger.warn(`Tool execution not yet implemented: ${toolCall.name}`);
      return { error: 'Tool execution not implemented in Phase 2' };
    }, { maxIterations: 5, verbose: process.env.VERBOSE === 'true' });
    return {
      id: `emulated_${Date.now()}`,
      type: 'message',
      role: 'assistant',
      content: [{ type: 'text', text: result.finalAnswer || 'No response' }],
      model: anthropicReq.model || this.defaultModel,
      stop_reason: 'end_turn',
      usage: { input_tokens: 0, output_tokens: 0 }
    };
  }

  /**
   * Low-level Requesty call with a 60-second abort timeout.
   * @throws {Error} on non-OK responses or timeout.
   */
  async callRequesty(openaiReq) {
    // Add timeout for Requesty API calls (60 seconds)
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), 60000);
    try {
      const response = await fetch(`${this.requestyBaseUrl}/chat/completions`, {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${this.requestyApiKey}`,
          'Content-Type': 'application/json',
          'HTTP-Referer': 'https://github.com/ruvnet/agentic-flow',
          'X-Title': 'Agentic Flow'
        },
        body: JSON.stringify(openaiReq),
        signal: controller.signal
      });
      clearTimeout(timeoutId);
      if (!response.ok) {
        const error = await response.text();
        throw new Error(`Requesty API error: ${error}`);
      }
      return response.json();
    }
    catch (error) {
      clearTimeout(timeoutId);
      if (error.name === 'AbortError') {
        throw new Error('Requesty API request timed out after 60 seconds');
      }
      throw error;
    }
  }

  /**
   * Sanitize JSON Schema to be OpenAI-compatible.
   * Fixes array properties without items, removes unsupported keywords,
   * and recurses into properties / items / allOf / anyOf / oneOf.
   * Never mutates the input schema (works on shallow copies).
   */
  sanitizeJsonSchema(schema, path = 'root') {
    if (!schema || typeof schema !== 'object') {
      return schema;
    }
    // Create a shallow copy to avoid mutations
    const sanitized = { ...schema };
    // Fix array types without items (OpenAI requires `items` on arrays)
    if (sanitized.type === 'array' && !sanitized.items) {
      logger.warn(`Schema sanitization: Adding missing 'items' for array at ${path}`);
      sanitized.items = { type: 'string' };
    }
    // Remove JSON Schema 2020-12 keywords not supported by OpenAI
    const unsupportedKeywords = [
      '$schema', '$id', '$ref', '$defs', 'definitions',
      'if', 'then', 'else', 'dependentSchemas', 'dependentRequired',
      'prefixItems', 'unevaluatedItems', 'unevaluatedProperties',
      'minContains', 'maxContains', 'patternProperties',
      'additionalItems', 'contains'
    ];
    for (const keyword of unsupportedKeywords) {
      if (keyword in sanitized) {
        logger.warn(`Schema sanitization: Removing unsupported keyword '${keyword}' at ${path}`);
        delete sanitized[keyword];
      }
    }
    // Recursively sanitize nested properties
    if (sanitized.properties && typeof sanitized.properties === 'object') {
      sanitized.properties = {};
      for (const [key, value] of Object.entries(schema.properties)) {
        sanitized.properties[key] = this.sanitizeJsonSchema(value, `${path}.properties.${key}`);
      }
    }
    // Recursively sanitize array items
    if (sanitized.items && typeof sanitized.items === 'object') {
      sanitized.items = this.sanitizeJsonSchema(sanitized.items, `${path}.items`);
    }
    // Recursively sanitize allOf, anyOf, oneOf
    for (const combinator of ['allOf', 'anyOf', 'oneOf']) {
      if (Array.isArray(sanitized[combinator])) {
        sanitized[combinator] = sanitized[combinator].map((subschema, index) => this.sanitizeJsonSchema(subschema, `${path}.${combinator}[${index}]`));
      }
    }
    return sanitized;
  }

  /**
   * Convert an Anthropic Messages request into an OpenAI chat/completions
   * request: builds the system message, flattens content blocks to text,
   * resolves the model override, caps max_tokens, and converts tools.
   */
  convertAnthropicToOpenAI(anthropicReq) {
    logger.info('=== STARTING ANTHROPIC TO OPENAI CONVERSION ===');
    const messages = [];
    // Get model-specific tool instructions
    const modelId = anthropicReq.model || this.defaultModel;
    const provider = this.extractProvider(modelId);
    logger.info('Model detection:', {
      requestedModel: anthropicReq.model,
      defaultModel: this.defaultModel,
      finalModelId: modelId,
      extractedProvider: provider
    });
    // CRITICAL: Requesty models use native OpenAI tool calling
    // - If MCP tools are provided, Requesty handles them via function calling
    // - Do NOT inject XML instructions - they cause malformed output
    // - Let Requesty models use tools via OpenAI's tool_calls format
    let systemContent = '';
    // Check if we have MCP tools (function calling)
    const hasMcpTools = anthropicReq.tools && anthropicReq.tools.length > 0;
    logger.info('Tool detection:', {
      hasMcpTools,
      toolCount: anthropicReq.tools?.length || 0,
      toolNames: anthropicReq.tools?.map(t => t.name) || []
    });
    if (hasMcpTools) {
      // MCP tools present - Requesty will handle via function calling
      systemContent = 'You are a helpful AI assistant. When you need to perform actions, use the available tools by calling functions. Always explain what you\'re doing.';
      logger.info('Using MCP tools system prompt (with function calling support)');
    }
    else {
      // No tools - simple response mode
      systemContent = 'You are a helpful AI assistant. Provide clear, well-formatted code and explanations.';
      logger.info('Using simple system prompt (no tools)');
    }
    if (anthropicReq.system) {
      // System can be string OR array of content blocks
      let originalSystem;
      if (typeof anthropicReq.system === 'string') {
        originalSystem = anthropicReq.system;
      }
      else if (Array.isArray(anthropicReq.system)) {
        // Extract text from content blocks
        originalSystem = anthropicReq.system
          .filter(block => block.type === 'text' && block.text)
          .map(block => block.text)
          .join('\n');
      }
      else {
        originalSystem = '';
      }
      logger.info('Appending original system prompt:', {
        systemType: typeof anthropicReq.system,
        isArray: Array.isArray(anthropicReq.system),
        originalSystemLength: originalSystem.length,
        originalSystemPreview: originalSystem.substring(0, 200)
      });
      if (originalSystem) {
        systemContent += '\n\n' + originalSystem;
      }
    }
    messages.push({
      role: 'system',
      content: systemContent
    });
    logger.info('System message created:', {
      systemContentLength: systemContent.length,
      systemContentPreview: systemContent.substring(0, 300)
    });
    // Override model - if request has a Claude model, use defaultModel instead
    const requestedModel = anthropicReq.model || '';
    const shouldOverrideModel = requestedModel.startsWith('claude-') || !requestedModel;
    const finalModel = shouldOverrideModel ? this.defaultModel : requestedModel;
    // Convert Anthropic messages to OpenAI format
    for (const msg of anthropicReq.messages) {
      let content;
      if (typeof msg.content === 'string') {
        content = msg.content;
      }
      else if (Array.isArray(msg.content)) {
        // Extract text from content blocks (non-text blocks are dropped)
        content = msg.content
          .filter(block => block.type === 'text')
          .map(block => block.text)
          .join('\n');
      }
      else {
        content = '';
      }
      messages.push({
        role: msg.role,
        content
      });
    }
    // Get appropriate max_tokens for this model
    let maxTokens = getMaxTokensForModel(finalModel, anthropicReq.max_tokens);
    // Cap at 8192 for OpenAI models via Requesty
    if (maxTokens && maxTokens > 8192) {
      maxTokens = 8192;
    }
    const openaiReq = {
      model: finalModel,
      messages,
      max_tokens: maxTokens,
      temperature: anthropicReq.temperature,
      stream: anthropicReq.stream
    };
    // Convert MCP/Anthropic tools to OpenAI tools format
    if (anthropicReq.tools && anthropicReq.tools.length > 0) {
      logger.info('Converting MCP tools to OpenAI format...', {
        totalTools: anthropicReq.tools.length
      });
      // Requesty has strict limits - only send a subset of tools to avoid timeouts
      // Requesty also rejects empty tools arrays, so we either send tools or omit the parameter
      const MAX_TOOLS_FOR_REQUESTY = 10; // Very conservative limit - Requesty timeouts with more
      const toolsToConvert = anthropicReq.tools.slice(0, MAX_TOOLS_FOR_REQUESTY);
      if (anthropicReq.tools.length > MAX_TOOLS_FOR_REQUESTY) {
        logger.warn(`Limiting tools to ${MAX_TOOLS_FOR_REQUESTY} for Requesty (${anthropicReq.tools.length} available)`);
      }
      // Only set tools if we have at least one (Requesty rejects empty arrays)
      if (toolsToConvert.length > 0) {
        openaiReq.tools = toolsToConvert.map(tool => {
          // Sanitize the input schema to fix array properties without items
          const rawSchema = tool.input_schema || {
            type: 'object',
            properties: {},
            required: []
          };
          const sanitizedSchema = this.sanitizeJsonSchema(rawSchema, `tool.${tool.name}`);
          const openaiTool = {
            type: 'function',
            function: {
              name: tool.name,
              description: tool.description || '',
              parameters: sanitizedSchema
            }
          };
          return openaiTool;
        });
        logger.info('Forwarding MCP tools to Requesty', {
          toolCount: openaiReq.tools.length,
          toolNames: openaiReq.tools.map(t => t.function.name).slice(0, 5)
        });
      }
      else {
        logger.info('No tools to send (omitting tools parameter entirely for Requesty)');
        // Don't set openaiReq.tools at all - Requesty rejects empty arrays
      }
    }
    else {
      logger.info('No MCP tools to convert');
    }
    logger.info('=== CONVERSION COMPLETE ===', {
      messageCount: openaiReq.messages.length,
      hasMcpTools: !!openaiReq.tools,
      toolCount: openaiReq.tools?.length || 0,
      maxTokens: openaiReq.max_tokens,
      model: openaiReq.model
    });
    return openaiReq;
  }

  /**
   * Parse XML-style structured commands (<file_write>, <file_read>,
   * <bash_command>) out of model text, returning Anthropic tool_use blocks
   * and the text with each command replaced by a human-readable marker.
   * @returns {{cleanText: string, toolUses: object[]}}
   */
  parseStructuredCommands(text) {
    const toolUses = [];
    let cleanText = text;
    // Parse file_write commands
    const fileWriteRegex = /<file_write path="([^"]+)">([\s\S]*?)<\/file_write>/g;
    let match;
    while ((match = fileWriteRegex.exec(text)) !== null) {
      toolUses.push({
        type: 'tool_use',
        id: `tool_${Date.now()}_${toolUses.length}`,
        name: 'Write',
        input: {
          file_path: match[1],
          content: match[2].trim()
        }
      });
      // Replacer function: a plain string replacement would misinterpret
      // `$` sequences ($&, $1, ...) occurring in paths or file content.
      cleanText = cleanText.replace(match[0], () => `[File written: ${match[1]}]`);
    }
    // Parse file_read commands
    const fileReadRegex = /<file_read path="([^"]+)"\/>/g;
    while ((match = fileReadRegex.exec(text)) !== null) {
      toolUses.push({
        type: 'tool_use',
        id: `tool_${Date.now()}_${toolUses.length}`,
        name: 'Read',
        input: {
          file_path: match[1]
        }
      });
      cleanText = cleanText.replace(match[0], () => `[Reading file: ${match[1]}]`);
    }
    // Parse bash commands
    const bashRegex = /<bash_command>([\s\S]*?)<\/bash_command>/g;
    while ((match = bashRegex.exec(text)) !== null) {
      const command = match[1].trim();
      toolUses.push({
        type: 'tool_use',
        id: `tool_${Date.now()}_${toolUses.length}`,
        name: 'Bash',
        input: {
          command
        }
      });
      cleanText = cleanText.replace(match[0], () => `[Executing: ${command}]`);
    }
    return { cleanText: cleanText.trim(), toolUses };
  }

  /**
   * Convert a non-streaming OpenAI chat completion into an Anthropic
   * Messages response (tool_calls → tool_use blocks, content → text block).
   * @throws {Error} when the response carries no choices.
   */
  convertOpenAIToAnthropic(openaiRes) {
    const choice = openaiRes.choices?.[0];
    if (!choice) {
      throw new Error('No choices in OpenAI response');
    }
    const message = choice.message || {};
    const rawText = message.content || choice.text || '';
    const toolCalls = message.tool_calls || [];
    logger.info('=== CONVERTING OPENAI TO ANTHROPIC ===', {
      hasMessage: !!message,
      hasContent: !!rawText,
      contentLength: rawText?.length,
      hasToolCalls: toolCalls.length > 0,
      toolCallCount: toolCalls.length,
      finishReason: choice.finish_reason
    });
    // CRITICAL: Use ONLY native OpenAI tool_calls format
    // Do NOT parse XML from text - models output malformed XML
    // Requesty handles tools via OpenAI function calling standard
    const contentBlocks = [];
    // Add tool uses from OpenAI tool_calls (MCP tools via function calling)
    if (toolCalls.length > 0) {
      logger.info('Processing tool calls from OpenAI response...');
      for (const toolCall of toolCalls) {
        try {
          logger.info('Tool call details:', {
            id: toolCall.id,
            name: toolCall.function.name,
            argumentsRaw: toolCall.function.arguments
          });
          contentBlocks.push({
            type: 'tool_use',
            id: toolCall.id,
            name: toolCall.function.name,
            input: JSON.parse(toolCall.function.arguments || '{}')
          });
        }
        catch (error) {
          // Skip malformed tool calls rather than failing the whole response
          logger.error('Failed to parse tool call arguments', {
            toolCall,
            error: error.message
          });
        }
      }
      logger.info('Converted Requesty tool calls to Anthropic format', {
        toolCallCount: toolCalls.length,
        toolNames: toolCalls.map((tc) => tc.function.name)
      });
    }
    // Add text response if present
    if (rawText && rawText.trim()) {
      logger.info('Adding text content block', {
        textLength: rawText.length,
        textPreview: rawText.substring(0, 200)
      });
      contentBlocks.push({
        type: 'text',
        text: rawText
      });
    }
    // If no content blocks, add empty text (Anthropic requires content)
    if (contentBlocks.length === 0) {
      logger.warn('No content blocks found, adding empty text block');
      contentBlocks.push({
        type: 'text',
        text: rawText || ''
      });
    }
    logger.info('Final content blocks:', {
      blockCount: contentBlocks.length,
      blockTypes: contentBlocks.map(b => b.type)
    });
    const result = {
      id: openaiRes.id || `msg_${Date.now()}`,
      type: 'message',
      role: 'assistant',
      model: openaiRes.model,
      content: contentBlocks,
      stop_reason: this.mapFinishReason(choice.finish_reason),
      usage: {
        input_tokens: openaiRes.usage?.prompt_tokens || 0,
        output_tokens: openaiRes.usage?.completion_tokens || 0
      }
    };
    logger.info('Conversion complete, returning Anthropic response');
    return result;
  }

  /**
   * Convert OpenAI SSE lines to Anthropic SSE events.
   * Expects complete lines (the streaming loop buffers partial lines).
   * Unparseable data lines are silently ignored by design.
   */
  convertOpenAIStreamToAnthropic(chunk) {
    // Convert OpenAI SSE format to Anthropic SSE format
    const lines = chunk.split('\n').filter(line => line.trim());
    const anthropicChunks = [];
    for (const line of lines) {
      if (line.startsWith('data: ')) {
        const data = line.slice(6);
        if (data === '[DONE]') {
          anthropicChunks.push('event: message_stop\ndata: {}\n\n');
          continue;
        }
        try {
          const parsed = JSON.parse(data);
          const delta = parsed.choices?.[0]?.delta;
          if (delta?.content) {
            anthropicChunks.push(`event: content_block_delta\ndata: ${JSON.stringify({
              type: 'content_block_delta',
              delta: { type: 'text_delta', text: delta.content }
            })}\n\n`);
          }
        }
        catch (e) {
          // Ignore parse errors
        }
      }
    }
    return anthropicChunks.join('');
  }

  /**
   * Extract provider from model ID (e.g., "openai/gpt-4" -> "openai").
   * Returns '' when the ID has no provider prefix.
   */
  extractProvider(modelId) {
    const parts = modelId.split('/');
    return parts.length > 1 ? parts[0] : '';
  }

  /**
   * Map an OpenAI finish_reason to the Anthropic stop_reason vocabulary.
   * Handles both the current 'tool_calls' and the legacy 'function_call'
   * values so downstream agents see stop_reason 'tool_use' when tools fire.
   */
  mapFinishReason(reason) {
    const mapping = {
      'stop': 'end_turn',
      'length': 'max_tokens',
      'content_filter': 'stop_sequence',
      'tool_calls': 'tool_use',
      'function_call': 'tool_use'
    };
    return mapping[reason || 'stop'] || 'end_turn';
  }

  /** Start the proxy HTTP server on the given port. */
  start(port) {
    this.app.listen(port, () => {
      logger.info('Anthropic to Requesty proxy started', {
        port,
        requestyBaseUrl: this.requestyBaseUrl,
        defaultModel: this.defaultModel
      });
      console.log(`\nāœ… Anthropic Proxy running at http://localhost:${port}`);
      console.log(`   Requesty Base URL: ${this.requestyBaseUrl}`);
      console.log(`   Default Model: ${this.defaultModel}`);
      if (this.capabilities?.requiresEmulation) {
        console.log(`\n   āš™ļø  Tool Emulation: ${this.capabilities.emulationStrategy.toUpperCase()} pattern`);
        console.log(`   šŸ“Š Expected reliability: ${this.capabilities.emulationStrategy === 'react' ? '70-85%' : '50-70%'}`);
      }
      console.log('');
    });
  }
}
693
// CLI entry point: only runs when this file is executed directly
// (not when imported as a module).
if (import.meta.url === `file://${process.argv[1]}`) {
  // Explicit radix: parseInt without one can misparse some inputs
  const port = Number.parseInt(process.env.PORT || '3000', 10);
  const requestyApiKey = process.env.REQUESTY_API_KEY;
  if (!requestyApiKey) {
    console.error('āŒ Error: REQUESTY_API_KEY environment variable required');
    process.exit(1);
  }
  const proxy = new AnthropicToRequestyProxy({
    requestyApiKey,
    requestyBaseUrl: process.env.ANTHROPIC_PROXY_BASE_URL,
    defaultModel: process.env.COMPLETION_MODEL || process.env.REASONING_MODEL
  });
  proxy.start(port);
}