agentic-flow 1.1.5 → 1.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/claudeAgent.js +188 -54
- package/dist/agents/directApiAgent.js +1 -2
- package/dist/agents/sdkAgent.js +151 -0
- package/dist/cli-proxy.js +3 -3
- package/dist/proxy/anthropic-to-gemini.js +345 -0
- package/dist/proxy/anthropic-to-openrouter.js +82 -8
- package/dist/proxy/provider-instructions.js +198 -0
- package/docs/.claude-flow/metrics/agent-metrics.json +1 -0
- package/docs/.claude-flow/metrics/performance.json +9 -0
- package/docs/.claude-flow/metrics/task-metrics.json +10 -0
- package/docs/FINAL_SDK_VALIDATION.md +328 -0
- package/docs/MCP_INTEGRATION_SUCCESS.md +305 -0
- package/docs/OPTIMIZATION_SUMMARY.md +181 -0
- package/docs/PROVIDER_INSTRUCTION_OPTIMIZATION.md +139 -0
- package/docs/SDK_INTEGRATION_COMPLETE.md +336 -0
- package/docs/TOOL_INSTRUCTION_ENHANCEMENT.md +200 -0
- package/docs/TOP20_MODELS_MATRIX.md +80 -0
- package/docs/VALIDATION_COMPLETE.md +178 -0
- package/docs/VALIDATION_SUMMARY.md +224 -0
- package/docs/archived/HOTFIX_1.1.7.md +133 -0
- package/docs/validation/PROXY_VALIDATION.md +239 -0
- package/docs/validation/README_SDK_VALIDATION.md +356 -0
- package/package.json +2 -1
- package/docs/CHANGELOG.md +0 -155
|
@@ -0,0 +1,345 @@
|
|
|
1
|
+
// Anthropic to Gemini Proxy Server
|
|
2
|
+
// Converts Anthropic API format to Google Gemini format
|
|
3
|
+
import express from 'express';
|
|
4
|
+
import { logger } from '../utils/logger.js';
|
|
5
|
+
/**
 * Express proxy that accepts Anthropic Messages API requests and forwards
 * them to Google's Gemini generateContent / streamGenerateContent endpoints,
 * translating request and response payloads in both directions.
 *
 * Gemini is not given native Anthropic-style tools here; instead the proxy
 * injects XML-style command instructions into the prompt and parses those
 * structured commands back out of Gemini's text as Anthropic tool_use blocks.
 */
export class AnthropicToGeminiProxy {
    app;
    geminiApiKey;
    geminiBaseUrl;
    defaultModel;
    /**
     * @param {{geminiApiKey: string, geminiBaseUrl?: string, defaultModel?: string}} config
     */
    constructor(config) {
        this.app = express();
        this.geminiApiKey = config.geminiApiKey;
        this.geminiBaseUrl = config.geminiBaseUrl || 'https://generativelanguage.googleapis.com/v1beta';
        this.defaultModel = config.defaultModel || 'gemini-2.0-flash-exp';
        this.setupMiddleware();
        this.setupRoutes();
    }
    /** Install JSON body parsing and per-request debug logging. */
    setupMiddleware() {
        // Large limit: Anthropic requests can carry big documents / tool results.
        this.app.use(express.json({ limit: '50mb' }));
        // Logging middleware
        this.app.use((req, res, next) => {
            logger.debug('Gemini proxy request', {
                method: req.method,
                path: req.path,
                headers: Object.keys(req.headers)
            });
            next();
        });
    }
    /** Register the health check, the /v1/messages translator, and a 404 fallback. */
    setupRoutes() {
        // Health check
        this.app.get('/health', (req, res) => {
            res.json({ status: 'ok', service: 'anthropic-to-gemini-proxy' });
        });
        // Anthropic Messages API → Gemini generateContent
        this.app.post('/v1/messages', async (req, res) => {
            try {
                const anthropicReq = req.body;
                // Convert Anthropic format to Gemini format
                const geminiReq = this.convertAnthropicToGemini(anthropicReq);
                logger.info('Converting Anthropic request to Gemini', {
                    anthropicModel: anthropicReq.model,
                    geminiModel: this.defaultModel,
                    messageCount: geminiReq.contents.length,
                    stream: anthropicReq.stream,
                    apiKeyPresent: !!this.geminiApiKey,
                    apiKeyPrefix: this.geminiApiKey?.substring(0, 10)
                });
                // Streaming uses a different Gemini endpoint.
                const endpoint = anthropicReq.stream ? 'streamGenerateContent' : 'generateContent';
                const url = `${this.geminiBaseUrl}/models/${this.defaultModel}:${endpoint}?key=${this.geminiApiKey}`;
                // Forward to Gemini
                const response = await fetch(url, {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify(geminiReq)
                });
                if (!response.ok) {
                    const error = await response.text();
                    logger.error('Gemini API error', { status: response.status, error });
                    return res.status(response.status).json({
                        error: {
                            type: 'api_error',
                            message: error
                        }
                    });
                }
                // Handle streaming vs non-streaming
                if (anthropicReq.stream) {
                    res.setHeader('Content-Type', 'text/event-stream');
                    res.setHeader('Cache-Control', 'no-cache');
                    res.setHeader('Connection', 'keep-alive');
                    const reader = response.body?.getReader();
                    if (!reader) {
                        throw new Error('No response body');
                    }
                    const decoder = new TextDecoder();
                    while (true) {
                        const { done, value } = await reader.read();
                        if (done)
                            break;
                        // BUGFIX: { stream: true } keeps multi-byte characters that
                        // straddle a chunk boundary from being mangled.
                        const chunk = decoder.decode(value, { stream: true });
                        res.write(this.convertGeminiStreamToAnthropic(chunk));
                    }
                    // Flush any bytes the decoder buffered from the final chunk.
                    const tail = decoder.decode();
                    if (tail) {
                        res.write(this.convertGeminiStreamToAnthropic(tail));
                    }
                    res.end();
                }
                else {
                    // Non-streaming response
                    const geminiRes = await response.json();
                    const anthropicRes = this.convertGeminiToAnthropic(geminiRes);
                    logger.info('Gemini proxy response sent', {
                        model: this.defaultModel,
                        usage: anthropicRes.usage
                    });
                    res.json(anthropicRes);
                }
            }
            catch (error) {
                logger.error('Gemini proxy error', { error: error.message, stack: error.stack });
                res.status(500).json({
                    error: {
                        type: 'proxy_error',
                        message: error.message
                    }
                });
            }
        });
        // Fallback for other Anthropic API endpoints
        this.app.use((req, res) => {
            logger.warn('Unsupported endpoint', { path: req.path, method: req.method });
            res.status(404).json({
                error: {
                    type: 'not_found',
                    message: `Endpoint ${req.path} not supported by Gemini proxy`
                }
            });
        });
    }
    /**
     * Translate an Anthropic Messages request into a Gemini generateContent body.
     *
     * This request shape has no dedicated system role, so the system prompt
     * (plus the structured-command tool instructions) is prepended to the first
     * user message.
     *
     * @param {object} anthropicReq - Anthropic Messages API request body.
     * @returns {{contents: Array, generationConfig?: object}} Gemini request body.
     */
    convertAnthropicToGemini(anthropicReq) {
        const contents = [];
        let systemPrefix = '';
        if (anthropicReq.system) {
            systemPrefix = `System: ${anthropicReq.system}\n\n`;
        }
        // Gemini gets no native tool definitions here, so instruct it to emit
        // structured XML-like commands that the proxy parses back out.
        const toolInstructions = `
IMPORTANT: You have access to file system operations through structured commands. Use these exact formats:

<file_write path="filename.ext">
content here
</file_write>

<file_read path="filename.ext"/>

<bash_command>
command here
</bash_command>

When you need to create, edit, or read files, use these structured commands in your response.
The system will automatically execute these commands and provide results.

`;
        // Tool instructions always come first; the system prompt (if any) follows.
        systemPrefix = toolInstructions + systemPrefix;
        // BUGFIX: attach the prefix to the first *user* message wherever it
        // appears. Previously the prefix was only attached when the very first
        // message was a user message, silently dropping the system prompt and
        // tool instructions otherwise.
        let prefixApplied = false;
        for (let i = 0; i < anthropicReq.messages.length; i++) {
            const msg = anthropicReq.messages[i];
            let text;
            if (typeof msg.content === 'string') {
                text = msg.content;
            }
            else if (Array.isArray(msg.content)) {
                // Only text blocks survive the conversion.
                text = msg.content
                    .filter(block => block.type === 'text')
                    .map(block => block.text)
                    .join('\n');
            }
            else {
                text = '';
            }
            if (!prefixApplied && msg.role === 'user' && systemPrefix) {
                text = systemPrefix + text;
                prefixApplied = true;
            }
            contents.push({
                role: msg.role === 'assistant' ? 'model' : 'user',
                parts: [{ text }]
            });
        }
        if (!prefixApplied && systemPrefix) {
            // No user message at all: emit the instructions as a leading user turn.
            contents.unshift({ role: 'user', parts: [{ text: systemPrefix }] });
        }
        const geminiReq = {
            contents
        };
        // Map sampling parameters only when the caller supplied them.
        if (anthropicReq.temperature !== undefined || anthropicReq.max_tokens !== undefined) {
            geminiReq.generationConfig = {};
            if (anthropicReq.temperature !== undefined) {
                geminiReq.generationConfig.temperature = anthropicReq.temperature;
            }
            if (anthropicReq.max_tokens !== undefined) {
                geminiReq.generationConfig.maxOutputTokens = anthropicReq.max_tokens;
            }
        }
        return geminiReq;
    }
    /**
     * Extract XML-style commands (<file_write>, <file_read>, <bash_command>)
     * from the model's text and convert each into an Anthropic tool_use block.
     *
     * @param {string} text - Raw model output.
     * @returns {{cleanText: string, toolUses: Array}} Text with commands replaced
     *   by short placeholders, plus the extracted tool_use blocks.
     */
    parseStructuredCommands(text) {
        const toolUses = [];
        let cleanText = text;
        // Parse file_write commands
        const fileWriteRegex = /<file_write path="([^"]+)">([\s\S]*?)<\/file_write>/g;
        let match;
        while ((match = fileWriteRegex.exec(text)) !== null) {
            toolUses.push({
                type: 'tool_use',
                id: `tool_${Date.now()}_${toolUses.length}`,
                name: 'Write',
                input: {
                    file_path: match[1],
                    content: match[2].trim()
                }
            });
            cleanText = cleanText.replace(match[0], `[File written: ${match[1]}]`);
        }
        // Parse file_read commands
        const fileReadRegex = /<file_read path="([^"]+)"\/>/g;
        while ((match = fileReadRegex.exec(text)) !== null) {
            toolUses.push({
                type: 'tool_use',
                id: `tool_${Date.now()}_${toolUses.length}`,
                name: 'Read',
                input: {
                    file_path: match[1]
                }
            });
            cleanText = cleanText.replace(match[0], `[Reading file: ${match[1]}]`);
        }
        // Parse bash commands
        const bashRegex = /<bash_command>([\s\S]*?)<\/bash_command>/g;
        while ((match = bashRegex.exec(text)) !== null) {
            toolUses.push({
                type: 'tool_use',
                id: `tool_${Date.now()}_${toolUses.length}`,
                name: 'Bash',
                input: {
                    command: match[1].trim()
                }
            });
            cleanText = cleanText.replace(match[0], `[Executing: ${match[1].trim()}]`);
        }
        return { cleanText: cleanText.trim(), toolUses };
    }
    /**
     * Translate a non-streaming Gemini response into an Anthropic Messages
     * response, turning structured commands into tool_use content blocks.
     *
     * @throws {Error} When the Gemini response contains no candidates.
     */
    convertGeminiToAnthropic(geminiRes) {
        const candidate = geminiRes.candidates?.[0];
        if (!candidate) {
            throw new Error('No candidates in Gemini response');
        }
        const content = candidate.content;
        const rawText = content?.parts?.map((part) => part.text).join('') || '';
        // Parse structured commands from Gemini's response
        const { cleanText, toolUses } = this.parseStructuredCommands(rawText);
        // Build content array with text and tool uses
        const contentBlocks = [];
        if (cleanText) {
            contentBlocks.push({
                type: 'text',
                text: cleanText
            });
        }
        contentBlocks.push(...toolUses);
        return {
            id: `msg_${Date.now()}`,
            type: 'message',
            role: 'assistant',
            model: this.defaultModel,
            // Fall back to the raw text so the client never receives empty content.
            content: contentBlocks.length > 0 ? contentBlocks : [
                {
                    type: 'text',
                    text: rawText
                }
            ],
            stop_reason: this.mapFinishReason(candidate.finishReason),
            usage: {
                input_tokens: geminiRes.usageMetadata?.promptTokenCount || 0,
                output_tokens: geminiRes.usageMetadata?.candidatesTokenCount || 0
            }
        };
    }
    /**
     * Convert a chunk of Gemini streaming output into Anthropic SSE events.
     * Assumes newline-delimited JSON; lines that fail to parse (e.g. partial
     * JSON split across chunks) are skipped.
     */
    convertGeminiStreamToAnthropic(chunk) {
        const lines = chunk.split('\n').filter(line => line.trim());
        const anthropicChunks = [];
        for (const line of lines) {
            try {
                const parsed = JSON.parse(line);
                const candidate = parsed.candidates?.[0];
                const text = candidate?.content?.parts?.[0]?.text;
                if (text) {
                    anthropicChunks.push(`event: content_block_delta\ndata: ${JSON.stringify({
                        type: 'content_block_delta',
                        delta: { type: 'text_delta', text }
                    })}\n\n`);
                }
                // A finishReason marks the end of the message.
                if (candidate?.finishReason) {
                    anthropicChunks.push('event: message_stop\ndata: {}\n\n');
                }
            }
            catch (e) {
                // Ignore parse errors (likely a JSON object split across chunks).
            }
        }
        return anthropicChunks.join('');
    }
    /**
     * Map a Gemini finishReason onto the closest Anthropic stop_reason.
     * Unknown or missing reasons default to 'end_turn'.
     */
    mapFinishReason(reason) {
        const mapping = {
            'STOP': 'end_turn',
            'MAX_TOKENS': 'max_tokens',
            'SAFETY': 'stop_sequence',
            'RECITATION': 'stop_sequence',
            'OTHER': 'end_turn'
        };
        return mapping[reason || 'STOP'] || 'end_turn';
    }
    /** Start listening on the given port and log the proxy configuration. */
    start(port) {
        this.app.listen(port, () => {
            logger.info('Anthropic to Gemini proxy started', {
                port,
                geminiBaseUrl: this.geminiBaseUrl,
                defaultModel: this.defaultModel
            });
            console.log(`\n✅ Gemini Proxy running at http://localhost:${port}`);
            console.log(`   Gemini Base URL: ${this.geminiBaseUrl}`);
            console.log(`   Default Model: ${this.defaultModel}\n`);
        });
    }
}
|
|
331
|
+
// CLI entry point — run the proxy directly:
//   GOOGLE_GEMINI_API_KEY=... node anthropic-to-gemini.js
if (import.meta.url === `file://${process.argv[1]}`) {
    // BUGFIX: always pass a radix; bare parseInt can misparse prefixed strings.
    const port = Number.parseInt(process.env.PORT || '3001', 10);
    const geminiApiKey = process.env.GOOGLE_GEMINI_API_KEY;
    if (!geminiApiKey) {
        console.error('❌ Error: GOOGLE_GEMINI_API_KEY environment variable required');
        process.exit(1);
    }
    // Fail fast on an unusable PORT value instead of listening on NaN.
    if (Number.isNaN(port)) {
        console.error('❌ Error: PORT must be a number');
        process.exit(1);
    }
    const proxy = new AnthropicToGeminiProxy({
        geminiApiKey,
        geminiBaseUrl: process.env.GEMINI_BASE_URL,
        defaultModel: process.env.COMPLETION_MODEL || process.env.REASONING_MODEL
    });
    proxy.start(port);
}
|
|
@@ -2,6 +2,7 @@
|
|
|
2
2
|
// Converts Anthropic API format to OpenRouter format
|
|
3
3
|
import express from 'express';
|
|
4
4
|
import { logger } from '../utils/logger.js';
|
|
5
|
+
import { getInstructionsForModel, formatInstructions } from './provider-instructions.js';
|
|
5
6
|
export class AnthropicToOpenRouterProxy {
|
|
6
7
|
app;
|
|
7
8
|
openrouterApiKey;
|
|
@@ -42,7 +43,9 @@ export class AnthropicToOpenRouterProxy {
|
|
|
42
43
|
logger.info('Converting Anthropic request to OpenRouter', {
|
|
43
44
|
anthropicModel: anthropicReq.model,
|
|
44
45
|
openaiModel: openaiReq.model,
|
|
45
|
-
messageCount: openaiReq.messages.length
|
|
46
|
+
messageCount: openaiReq.messages.length,
|
|
47
|
+
apiKeyPresent: !!this.openrouterApiKey,
|
|
48
|
+
apiKeyPrefix: this.openrouterApiKey?.substring(0, 10)
|
|
46
49
|
});
|
|
47
50
|
// Forward to OpenRouter
|
|
48
51
|
const response = await fetch(`${this.openrouterBaseUrl}/chat/completions`, {
|
|
@@ -120,13 +123,20 @@ export class AnthropicToOpenRouterProxy {
|
|
|
120
123
|
}
|
|
121
124
|
convertAnthropicToOpenAI(anthropicReq) {
|
|
122
125
|
const messages = [];
|
|
123
|
-
//
|
|
126
|
+
// Get model-specific tool instructions
|
|
127
|
+
const modelId = anthropicReq.model || this.defaultModel;
|
|
128
|
+
const provider = this.extractProvider(modelId);
|
|
129
|
+
const instructions = getInstructionsForModel(modelId, provider);
|
|
130
|
+
const toolInstructions = formatInstructions(instructions);
|
|
131
|
+
// Add system message with optimized tool instructions
|
|
132
|
+
let systemContent = toolInstructions;
|
|
124
133
|
if (anthropicReq.system) {
|
|
125
|
-
|
|
126
|
-
role: 'system',
|
|
127
|
-
content: anthropicReq.system
|
|
128
|
-
});
|
|
134
|
+
systemContent += '\n\n' + anthropicReq.system;
|
|
129
135
|
}
|
|
136
|
+
messages.push({
|
|
137
|
+
role: 'system',
|
|
138
|
+
content: systemContent
|
|
139
|
+
});
|
|
130
140
|
// Override model - if request has a Claude model, use defaultModel instead
|
|
131
141
|
const requestedModel = anthropicReq.model || '';
|
|
132
142
|
const shouldOverrideModel = requestedModel.startsWith('claude-') || !requestedModel;
|
|
@@ -160,20 +170,79 @@ export class AnthropicToOpenRouterProxy {
|
|
|
160
170
|
stream: anthropicReq.stream
|
|
161
171
|
};
|
|
162
172
|
}
|
|
173
|
+
parseStructuredCommands(text) {
|
|
174
|
+
const toolUses = [];
|
|
175
|
+
let cleanText = text;
|
|
176
|
+
// Parse file_write commands
|
|
177
|
+
const fileWriteRegex = /<file_write path="([^"]+)">([\s\S]*?)<\/file_write>/g;
|
|
178
|
+
let match;
|
|
179
|
+
while ((match = fileWriteRegex.exec(text)) !== null) {
|
|
180
|
+
toolUses.push({
|
|
181
|
+
type: 'tool_use',
|
|
182
|
+
id: `tool_${Date.now()}_${toolUses.length}`,
|
|
183
|
+
name: 'Write',
|
|
184
|
+
input: {
|
|
185
|
+
file_path: match[1],
|
|
186
|
+
content: match[2].trim()
|
|
187
|
+
}
|
|
188
|
+
});
|
|
189
|
+
cleanText = cleanText.replace(match[0], `[File written: ${match[1]}]`);
|
|
190
|
+
}
|
|
191
|
+
// Parse file_read commands
|
|
192
|
+
const fileReadRegex = /<file_read path="([^"]+)"\/>/g;
|
|
193
|
+
while ((match = fileReadRegex.exec(text)) !== null) {
|
|
194
|
+
toolUses.push({
|
|
195
|
+
type: 'tool_use',
|
|
196
|
+
id: `tool_${Date.now()}_${toolUses.length}`,
|
|
197
|
+
name: 'Read',
|
|
198
|
+
input: {
|
|
199
|
+
file_path: match[1]
|
|
200
|
+
}
|
|
201
|
+
});
|
|
202
|
+
cleanText = cleanText.replace(match[0], `[Reading file: ${match[1]}]`);
|
|
203
|
+
}
|
|
204
|
+
// Parse bash commands
|
|
205
|
+
const bashRegex = /<bash_command>([\s\S]*?)<\/bash_command>/g;
|
|
206
|
+
while ((match = bashRegex.exec(text)) !== null) {
|
|
207
|
+
toolUses.push({
|
|
208
|
+
type: 'tool_use',
|
|
209
|
+
id: `tool_${Date.now()}_${toolUses.length}`,
|
|
210
|
+
name: 'Bash',
|
|
211
|
+
input: {
|
|
212
|
+
command: match[1].trim()
|
|
213
|
+
}
|
|
214
|
+
});
|
|
215
|
+
cleanText = cleanText.replace(match[0], `[Executing: ${match[1].trim()}]`);
|
|
216
|
+
}
|
|
217
|
+
return { cleanText: cleanText.trim(), toolUses };
|
|
218
|
+
}
|
|
163
219
|
convertOpenAIToAnthropic(openaiRes) {
|
|
164
220
|
const choice = openaiRes.choices?.[0];
|
|
165
221
|
if (!choice) {
|
|
166
222
|
throw new Error('No choices in OpenAI response');
|
|
167
223
|
}
|
|
224
|
+
const rawText = choice.message?.content || choice.text || '';
|
|
225
|
+
// Parse structured commands from model's response
|
|
226
|
+
const { cleanText, toolUses } = this.parseStructuredCommands(rawText);
|
|
227
|
+
// Build content array with text and tool uses
|
|
228
|
+
const contentBlocks = [];
|
|
229
|
+
if (cleanText) {
|
|
230
|
+
contentBlocks.push({
|
|
231
|
+
type: 'text',
|
|
232
|
+
text: cleanText
|
|
233
|
+
});
|
|
234
|
+
}
|
|
235
|
+
// Add tool uses
|
|
236
|
+
contentBlocks.push(...toolUses);
|
|
168
237
|
return {
|
|
169
238
|
id: openaiRes.id || `msg_${Date.now()}`,
|
|
170
239
|
type: 'message',
|
|
171
240
|
role: 'assistant',
|
|
172
241
|
model: openaiRes.model,
|
|
173
|
-
content: [
|
|
242
|
+
content: contentBlocks.length > 0 ? contentBlocks : [
|
|
174
243
|
{
|
|
175
244
|
type: 'text',
|
|
176
|
-
text:
|
|
245
|
+
text: rawText
|
|
177
246
|
}
|
|
178
247
|
],
|
|
179
248
|
stop_reason: this.mapFinishReason(choice.finish_reason),
|
|
@@ -211,6 +280,11 @@ export class AnthropicToOpenRouterProxy {
|
|
|
211
280
|
}
|
|
212
281
|
return anthropicChunks.join('');
|
|
213
282
|
}
|
|
283
|
+
extractProvider(modelId) {
|
|
284
|
+
// Extract provider from model ID (e.g., "openai/gpt-4" -> "openai")
|
|
285
|
+
const parts = modelId.split('/');
|
|
286
|
+
return parts.length > 1 ? parts[0] : '';
|
|
287
|
+
}
|
|
214
288
|
mapFinishReason(reason) {
|
|
215
289
|
const mapping = {
|
|
216
290
|
'stop': 'end_turn',
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
// Provider-specific and model-specific tool instructions
|
|
2
|
+
// Optimized for different LLM families to improve tool calling success rate
|
|
3
|
+
// Base structured command format (works for most models)
// The XML-style command syntax is identical across every non-native
// instruction set, so it is defined once and shared (deduplicated from the
// previous copy-per-object layout; exported values are unchanged).
const XML_COMMANDS = {
    write: '<file_write path="filename.ext">\ncontent here\n</file_write>',
    read: '<file_read path="filename.ext"/>',
    bash: '<bash_command>\ncommand here\n</bash_command>'
};
export const BASE_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
Example: Create a file
<file_write path="hello.js">
function hello() {
  console.log("Hello!");
}
</file_write>
`,
    emphasis: 'IMPORTANT: Use these structured commands in your response. The system will automatically execute them.'
};
// Anthropic models - Native tool calling, minimal instructions needed
export const ANTHROPIC_INSTRUCTIONS = {
    format: 'native',
    commands: {
        write: 'Use Write tool with file_path and content parameters',
        read: 'Use Read tool with file_path parameter',
        bash: 'Use Bash tool with command parameter'
    },
    emphasis: 'You have native access to file system tools. Use them directly.'
};
// OpenAI/GPT models - Prefer function calling style
export const OPENAI_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
When you need to create a file, respond with:
<file_write path="example.txt">
File content here
</file_write>

The system will create the file for you.
`,
    emphasis: 'CRITICAL: You must use these exact XML tag formats. Do not just describe the file - actually use the tags.'
};
// Google/Gemini models - Detailed, explicit instructions
export const GOOGLE_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
Step-by-step file creation:
1. Determine the filename
2. Write the content
3. Use this exact format:

<file_write path="your_file.txt">
Your content here
</file_write>

The file will be automatically created.
`,
    emphasis: 'IMPORTANT: Always use the XML tags. Just writing code blocks will NOT create files. You MUST use <file_write> tags.'
};
// Meta/Llama models - Clear, concise instructions
export const META_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
To create files, use:
<file_write path="file.txt">content</file_write>

To read files, use:
<file_read path="file.txt"/>

To run commands, use:
<bash_command>ls -la</bash_command>
`,
    emphasis: 'Use these tags to perform actual file operations. Code blocks alone will not create files.'
};
// DeepSeek models - Technical, precise instructions
export const DEEPSEEK_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
File system operations use XML-like structured commands:

<file_write path="example.py">
def main():
    print("Hello")
</file_write>

These commands are parsed and executed by the system.
`,
    emphasis: 'Use structured commands for file I/O. Standard code blocks are for display only.'
};
// Mistral models - Direct, action-oriented
export const MISTRAL_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
ACTION REQUIRED: To create actual files, you must use these tags:

<file_write path="file.txt">
content
</file_write>

Do not just show code - use the tags to create real files.
`,
    emphasis: 'CRITICAL: File operations require XML tags. Code blocks alone will not create files on disk.'
};
// X.AI/Grok models - Balanced, clear instructions
export const XAI_INSTRUCTIONS = {
    format: 'xml',
    commands: { ...XML_COMMANDS },
    examples: `
File system commands:
- Create: <file_write path="file.txt">content</file_write>
- Read: <file_read path="file.txt"/>
- Execute: <bash_command>command</bash_command>
`,
    emphasis: 'Use structured commands to interact with the file system.'
};
|
|
146
|
+
// Map provider/model patterns to instruction sets
/**
 * Select the instruction set best suited to a model family.
 *
 * Matching is case-insensitive and checks both the model ID substring and the
 * explicit provider slug; the first matching family wins, falling back to
 * BASE_INSTRUCTIONS for unknown models.
 *
 * @param {string} modelId - Model identifier (e.g. "openai/gpt-4o"). A nullish
 *   value is tolerated and treated as an empty string (BUGFIX: previously a
 *   nullish modelId threw a TypeError on toLowerCase()).
 * @param {string} provider - Provider slug extracted from the model ID.
 * @returns {object} One of the exported *_INSTRUCTIONS objects.
 */
export function getInstructionsForModel(modelId, provider) {
    const normalizedModel = (modelId || '').toLowerCase();
    // Anthropic models - native tool calling
    if (normalizedModel.includes('claude') || provider === 'anthropic') {
        return ANTHROPIC_INSTRUCTIONS;
    }
    // OpenAI models
    if (normalizedModel.includes('gpt') || normalizedModel.includes('openai') || provider === 'openai') {
        return OPENAI_INSTRUCTIONS;
    }
    // Google/Gemini models
    if (normalizedModel.includes('gemini') || normalizedModel.includes('gemma') || provider === 'google') {
        return GOOGLE_INSTRUCTIONS;
    }
    // Meta/Llama models
    if (normalizedModel.includes('llama') || provider === 'meta-llama' || provider === 'meta') {
        return META_INSTRUCTIONS;
    }
    // DeepSeek models
    if (normalizedModel.includes('deepseek') || provider === 'deepseek') {
        return DEEPSEEK_INSTRUCTIONS;
    }
    // Mistral models
    if (normalizedModel.includes('mistral') || provider === 'mistralai') {
        return MISTRAL_INSTRUCTIONS;
    }
    // X.AI/Grok models
    if (normalizedModel.includes('grok') || provider === 'x-ai') {
        return XAI_INSTRUCTIONS;
    }
    // Qwen models
    if (normalizedModel.includes('qwen')) {
        return DEEPSEEK_INSTRUCTIONS; // Similar to DeepSeek
    }
    // Default to base instructions
    return BASE_INSTRUCTIONS;
}
|
|
184
|
+
// Generate formatted instruction string for injection
/**
 * Render an instruction set as the prompt text injected into requests.
 * Native instruction sets get only the emphasis line plus the command hints;
 * XML-style sets additionally get an "Available commands" list and examples.
 *
 * @param {{format: string, commands: object, examples?: string, emphasis: string}} instructions
 * @returns {string} Prompt-ready instruction text.
 */
export function formatInstructions(instructions) {
    const { format, commands, examples, emphasis } = instructions;
    const commandList = [commands.write, commands.read, commands.bash].join('\n');
    if (format === 'native') {
        return `${emphasis}\n\n${commandList}`;
    }
    let rendered = `${emphasis}\n\nAvailable commands:\n${commandList}\n`;
    if (examples) {
        rendered += `\n${examples}`;
    }
    return rendered;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{}
|