@askalf/dario 1.2.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +38 -4
- package/dist/proxy.js +180 -8
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -16,9 +16,9 @@
|
|
|
16
16
|
|
|
17
17
|
<p align="center">
|
|
18
18
|
<a href="#quick-start">Quick Start</a> •
|
|
19
|
-
<a href="#
|
|
20
|
-
<a href="#usage-examples">Examples</a> •
|
|
19
|
+
<a href="#openai-compatibility">OpenAI Compat</a> •
|
|
21
20
|
<a href="#cli-backend">CLI Backend</a> •
|
|
21
|
+
<a href="#usage-examples">Examples</a> •
|
|
22
22
|
<a href="#faq">FAQ</a>
|
|
23
23
|
</p>
|
|
24
24
|
|
|
@@ -157,6 +157,37 @@ Combine with `--cli` for rate-limit-proof Opus:
|
|
|
157
157
|
dario proxy --cli --model=opus
|
|
158
158
|
```
|
|
159
159
|
|
|
160
|
+
## OpenAI Compatibility
|
|
161
|
+
|
|
162
|
+
Dario speaks both Anthropic and OpenAI API formats. Any tool built for OpenAI works with your Claude subscription — Cursor, Continue, LiteLLM, anything.
|
|
163
|
+
|
|
164
|
+
```bash
|
|
165
|
+
# Use with any OpenAI SDK or tool
|
|
166
|
+
export OPENAI_BASE_URL=http://localhost:3456/v1
|
|
167
|
+
export OPENAI_API_KEY=dario
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
```python
|
|
171
|
+
from openai import OpenAI
|
|
172
|
+
|
|
173
|
+
client = OpenAI(base_url="http://localhost:3456/v1", api_key="dario")
|
|
174
|
+
response = client.chat.completions.create(
|
|
175
|
+
model="claude-opus-4-6", # or use "gpt-4" — auto-maps to Opus
|
|
176
|
+
messages=[{"role": "user", "content": "Hello!"}]
|
|
177
|
+
)
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
Model mapping (automatic):
|
|
181
|
+
|
|
182
|
+
| OpenAI model | Maps to |
|
|
183
|
+
|---|---|
|
|
184
|
+
| `gpt-4`, `gpt-4o`, `o1`, `o3` | `claude-opus-4-6` |
|
|
185
|
+
| `o1-mini`, `o3-mini` | `claude-sonnet-4-6` |
|
|
186
|
+
| `gpt-3.5-turbo`, `gpt-4o-mini` | `claude-haiku-4-5` |
|
|
187
|
+
| Any `claude-*` model | Passed through directly |
|
|
188
|
+
|
|
189
|
+
Streaming, system prompts, temperature, and stop sequences all translate automatically.
|
|
190
|
+
|
|
160
191
|
## Usage Examples
|
|
161
192
|
|
|
162
193
|
### curl
|
|
@@ -290,7 +321,8 @@ ANTHROPIC_BASE_URL=http://localhost:3456 ANTHROPIC_API_KEY=dario your-tool-here
|
|
|
290
321
|
|
|
291
322
|
### Direct API Mode
|
|
292
323
|
- All Claude models (Opus 4.6, Sonnet 4.6, Haiku 4.5)
|
|
293
|
-
-
|
|
324
|
+
- **OpenAI-compatible** (`/v1/chat/completions`) — works with any OpenAI SDK or tool
|
|
325
|
+
- Streaming and non-streaming (both Anthropic and OpenAI SSE formats)
|
|
294
326
|
- Tool use / function calling
|
|
295
327
|
- System prompts and multi-turn conversations
|
|
296
328
|
- Prompt caching and extended thinking
|
|
@@ -307,7 +339,9 @@ ANTHROPIC_BASE_URL=http://localhost:3456 ANTHROPIC_API_KEY=dario your-tool-here
|
|
|
307
339
|
|
|
308
340
|
| Path | Description |
|
|
309
341
|
|------|-------------|
|
|
310
|
-
| `POST /v1/messages` | Anthropic Messages API
|
|
342
|
+
| `POST /v1/messages` | Anthropic Messages API |
|
|
343
|
+
| `POST /v1/chat/completions` | OpenAI-compatible Chat API |
|
|
344
|
+
| `GET /v1/models` | Model list (works with both SDKs) |
|
|
311
345
|
| `GET /health` | Proxy health + OAuth status + request count |
|
|
312
346
|
| `GET /status` | Detailed OAuth token status |
|
|
313
347
|
|
package/dist/proxy.js
CHANGED
|
@@ -55,6 +55,128 @@ const MODEL_ALIASES = {
|
|
|
55
55
|
'sonnet': 'claude-sonnet-4-6',
|
|
56
56
|
'haiku': 'claude-haiku-4-5',
|
|
57
57
|
};
|
|
58
|
+
// OpenAI model name → Anthropic model name
const OPENAI_MODEL_MAP = {
  'gpt-4': 'claude-opus-4-6',
  'gpt-4o': 'claude-opus-4-6',
  'gpt-4-turbo': 'claude-opus-4-6',
  'gpt-4o-mini': 'claude-haiku-4-5',
  'gpt-3.5-turbo': 'claude-haiku-4-5',
  'o1': 'claude-opus-4-6',
  'o1-mini': 'claude-sonnet-4-6',
  'o1-preview': 'claude-opus-4-6',
  'o3': 'claude-opus-4-6',
  'o3-mini': 'claude-sonnet-4-6',
};
/**
 * Resolve an OpenAI model name to an Anthropic one.
 *
 * Exact matches win. Otherwise the longest mapped name that prefixes the
 * request (followed by '-') wins, so dated/pinned variants like
 * 'gpt-4o-2024-08-06' or 'gpt-4-0613' still map instead of being forwarded
 * to Anthropic verbatim (which would 404). Unknown names — including any
 * 'claude-*' model — pass through unchanged.
 *
 * @param {string} requestModel - model name from the OpenAI-format request
 * @returns {string} Anthropic model name, or the input if no mapping applies
 */
function resolveOpenaiModel(requestModel) {
  const exact = OPENAI_MODEL_MAP[requestModel];
  if (exact)
    return exact;
  // Longest prefix first so 'gpt-4o-mini-…' beats 'gpt-4o' and 'gpt-4'.
  const hit = Object.keys(OPENAI_MODEL_MAP)
    .sort((a, b) => b.length - a.length)
    .find(prefix => requestModel.startsWith(`${prefix}-`));
  return hit ? OPENAI_MODEL_MAP[hit] : requestModel;
}
/**
 * Translate OpenAI chat completion request → Anthropic Messages request.
 *
 * @param {object} body - parsed OpenAI-format request body
 * @param {string} [modelOverride] - forces the target model (from --model flag)
 * @returns {object} Anthropic Messages request body; the input body is
 *   returned untouched when it carries no messages array
 */
function openaiToAnthropic(body, modelOverride) {
  const messages = body.messages;
  // Guard: non-array (or missing) messages can't be translated — pass through.
  if (!Array.isArray(messages))
    return body;
  // Anthropic takes system prompts as a top-level field, not as messages.
  const systemMessages = messages.filter(m => m.role === 'system');
  const nonSystemMessages = messages.filter(m => m.role !== 'system');
  const requestModel = String(body.model || '');
  const model = modelOverride || resolveOpenaiModel(requestModel);
  const result = {
    model,
    messages: nonSystemMessages.map(m => ({
      // Anthropic only knows 'user'/'assistant'; fold other roles into 'user'.
      role: m.role === 'assistant' ? 'assistant' : 'user',
      content: m.content,
    })),
    // Anthropic requires max_tokens; 8192 matches the proxy's default.
    max_tokens: body.max_tokens ?? body.max_completion_tokens ?? 8192,
  };
  if (systemMessages.length > 0) {
    result.system = systemMessages
      .map(m => typeof m.content === 'string' ? m.content : JSON.stringify(m.content))
      .join('\n');
  }
  if (body.stream)
    result.stream = true;
  if (body.temperature != null)
    result.temperature = body.temperature;
  if (body.top_p != null)
    result.top_p = body.top_p;
  if (body.stop)
    result.stop_sequences = Array.isArray(body.stop) ? body.stop : [body.stop];
  return result;
}
|
|
105
|
+
/**
 * Translate Anthropic Messages response → OpenAI chat completion response.
 *
 * @param {object} body - parsed Anthropic Messages response
 * @returns {object} OpenAI-format chat completion response
 */
function anthropicToOpenai(body) {
  const blocks = Array.isArray(body.content) ? body.content : [];
  // Join ALL text blocks — responses with multiple text segments (e.g. around
  // extended thinking) would otherwise lose everything after the first one.
  const text = blocks
    .filter(c => c.type === 'text')
    .map(c => c.text ?? '')
    .join('');
  const usage = body.usage;
  // Anthropic 'max_tokens' → OpenAI 'length'; everything else maps to 'stop'.
  const finishReason = body.stop_reason === 'max_tokens' ? 'length' : 'stop';
  return {
    id: `chatcmpl-${(body.id || '').replace('msg_', '')}`,
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000),
    model: body.model,
    choices: [{
      index: 0,
      message: { role: 'assistant', content: text },
      finish_reason: finishReason,
    }],
    usage: {
      prompt_tokens: usage?.input_tokens ?? 0,
      completion_tokens: usage?.output_tokens ?? 0,
      total_tokens: (usage?.input_tokens ?? 0) + (usage?.output_tokens ?? 0),
    },
  };
}
|
|
129
|
+
/**
 * Translate one line of an Anthropic SSE stream into an OpenAI SSE payload.
 *
 * @param {string} line - a single line from the upstream SSE stream
 * @returns {string|null} translated "data: ..." string(s), or null when the
 *   line carries nothing the OpenAI stream format needs
 */
function translateStreamChunk(line) {
  if (!line.startsWith('data: '))
    return null;
  const payload = line.slice(6).trim();
  if (payload === '[DONE]')
    return 'data: [DONE]\n\n';
  // Build one OpenAI-style chunk frame around the given delta.
  const frame = (delta, finishReason) => `data: ${JSON.stringify({
    id: 'chatcmpl-dario',
    object: 'chat.completion.chunk',
    created: Math.floor(Date.now() / 1000),
    model: 'claude',
    choices: [{ index: 0, delta, finish_reason: finishReason }],
  })}\n\n`;
  let event;
  try {
    event = JSON.parse(payload);
  }
  catch {
    return null; // unparseable — drop silently
  }
  switch (event.type) {
    case 'content_block_delta': {
      const d = event.delta;
      // Only text deltas carry streamable content.
      return (d?.type === 'text_delta' && d.text) ? frame({ content: d.text }, null) : null;
    }
    case 'message_stop':
      // Final chunk plus the OpenAI stream terminator.
      return frame({}, 'stop') + 'data: [DONE]\n\n';
    default:
      return null;
  }
}
|
|
165
|
+
/**
 * Build an OpenAI-compatible /v1/models response body listing the
 * Claude models the proxy serves.
 *
 * @returns {{object: string, data: object[]}} OpenAI models-list payload
 */
function openaiModelsList() {
  const entry = (id) => ({
    id,
    object: 'model',
    created: 1700000000,
    owned_by: 'anthropic',
  });
  return {
    object: 'list',
    data: ['claude-opus-4-6', 'claude-sonnet-4-6', 'claude-haiku-4-5'].map(entry),
  };
}
|
|
58
180
|
function sanitizeError(err) {
|
|
59
181
|
const msg = err instanceof Error ? err.message : String(err);
|
|
60
182
|
// Never leak tokens in error messages
|
|
@@ -197,20 +319,28 @@ export async function startProxy(opts = {}) {
|
|
|
197
319
|
res.end(JSON.stringify(s));
|
|
198
320
|
return;
|
|
199
321
|
}
|
|
322
|
+
// OpenAI-compatible models list
|
|
323
|
+
if (urlPath === '/v1/models' && req.method === 'GET') {
|
|
324
|
+
requestCount++;
|
|
325
|
+
res.writeHead(200, { 'Content-Type': 'application/json', 'Access-Control-Allow-Origin': CORS_ORIGIN });
|
|
326
|
+
res.end(JSON.stringify(openaiModelsList()));
|
|
327
|
+
return;
|
|
328
|
+
}
|
|
329
|
+
// Detect OpenAI-format requests
|
|
330
|
+
const isOpenAI = urlPath === '/v1/chat/completions';
|
|
200
331
|
// Allowlisted API paths — only these are proxied (prevents SSRF)
|
|
201
332
|
const allowedPaths = {
|
|
202
333
|
'/v1/messages': `${ANTHROPIC_API}/v1/messages`,
|
|
203
|
-
'/v1/models': `${ANTHROPIC_API}/v1/models`,
|
|
204
334
|
'/v1/complete': `${ANTHROPIC_API}/v1/complete`,
|
|
205
335
|
};
|
|
206
|
-
const targetBase = allowedPaths[urlPath];
|
|
336
|
+
const targetBase = isOpenAI ? `${ANTHROPIC_API}/v1/messages` : allowedPaths[urlPath];
|
|
207
337
|
if (!targetBase) {
|
|
208
338
|
res.writeHead(403, { 'Content-Type': 'application/json' });
|
|
209
339
|
res.end(JSON.stringify({ error: 'Forbidden', message: 'Path not allowed' }));
|
|
210
340
|
return;
|
|
211
341
|
}
|
|
212
|
-
// Only allow POST (Messages API) and GET (models)
|
|
213
|
-
if (req.method !== 'POST'
|
|
342
|
+
// Only allow POST (Messages/Chat API) and GET (models)
|
|
343
|
+
if (req.method !== 'POST') {
|
|
214
344
|
res.writeHead(405, { 'Content-Type': 'application/json' });
|
|
215
345
|
res.end(JSON.stringify({ error: 'Method not allowed' }));
|
|
216
346
|
return;
|
|
@@ -243,9 +373,18 @@ export async function startProxy(opts = {}) {
|
|
|
243
373
|
res.end(cliResult.body);
|
|
244
374
|
return;
|
|
245
375
|
}
|
|
246
|
-
//
|
|
376
|
+
// Translate OpenAI → Anthropic format if needed
|
|
247
377
|
let finalBody = body.length > 0 ? body : undefined;
|
|
248
|
-
if (
|
|
378
|
+
if (isOpenAI && body.length > 0) {
|
|
379
|
+
try {
|
|
380
|
+
const parsed = JSON.parse(body.toString());
|
|
381
|
+
const translated = openaiToAnthropic(parsed, modelOverride);
|
|
382
|
+
finalBody = Buffer.from(JSON.stringify(translated));
|
|
383
|
+
}
|
|
384
|
+
catch { /* not JSON, send as-is */ }
|
|
385
|
+
}
|
|
386
|
+
else if (modelOverride && body.length > 0) {
|
|
387
|
+
// Override model in request body if --model flag was set
|
|
249
388
|
try {
|
|
250
389
|
const parsed = JSON.parse(body.toString());
|
|
251
390
|
parsed.model = modelOverride;
|
|
@@ -321,12 +460,33 @@ export async function startProxy(opts = {}) {
|
|
|
321
460
|
if (isStream && upstream.body) {
|
|
322
461
|
// Stream SSE chunks through
|
|
323
462
|
const reader = upstream.body.getReader();
|
|
463
|
+
const decoder = new TextDecoder();
|
|
324
464
|
try {
|
|
465
|
+
let buffer = '';
|
|
325
466
|
while (true) {
|
|
326
467
|
const { done, value } = await reader.read();
|
|
327
468
|
if (done)
|
|
328
469
|
break;
|
|
329
|
-
|
|
470
|
+
if (isOpenAI) {
|
|
471
|
+
// Translate Anthropic SSE → OpenAI SSE
|
|
472
|
+
buffer += decoder.decode(value, { stream: true });
|
|
473
|
+
const lines = buffer.split('\n');
|
|
474
|
+
buffer = lines.pop() ?? '';
|
|
475
|
+
for (const line of lines) {
|
|
476
|
+
const translated = translateStreamChunk(line);
|
|
477
|
+
if (translated)
|
|
478
|
+
res.write(translated);
|
|
479
|
+
}
|
|
480
|
+
}
|
|
481
|
+
else {
|
|
482
|
+
res.write(value);
|
|
483
|
+
}
|
|
484
|
+
}
|
|
485
|
+
// Flush remaining buffer
|
|
486
|
+
if (isOpenAI && buffer.trim()) {
|
|
487
|
+
const translated = translateStreamChunk(buffer);
|
|
488
|
+
if (translated)
|
|
489
|
+
res.write(translated);
|
|
330
490
|
}
|
|
331
491
|
}
|
|
332
492
|
catch (err) {
|
|
@@ -338,7 +498,19 @@ export async function startProxy(opts = {}) {
|
|
|
338
498
|
else {
|
|
339
499
|
// Buffer and forward
|
|
340
500
|
const responseBody = await upstream.text();
|
|
341
|
-
|
|
501
|
+
if (isOpenAI && upstream.status >= 200 && upstream.status < 300) {
|
|
502
|
+
// Translate Anthropic response → OpenAI format
|
|
503
|
+
try {
|
|
504
|
+
const parsed = JSON.parse(responseBody);
|
|
505
|
+
res.end(JSON.stringify(anthropicToOpenai(parsed)));
|
|
506
|
+
}
|
|
507
|
+
catch {
|
|
508
|
+
res.end(responseBody);
|
|
509
|
+
}
|
|
510
|
+
}
|
|
511
|
+
else {
|
|
512
|
+
res.end(responseBody);
|
|
513
|
+
}
|
|
342
514
|
// Quick token estimate for logging
|
|
343
515
|
if (verbose && responseBody) {
|
|
344
516
|
try {
|