claude-connect 0.1.4 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +29 -3
- package/package.json +3 -1
- package/src/data/catalog-store.js +152 -0
- package/src/gateway/messages.js +182 -1
- package/src/gateway/server.js +116 -5
- package/src/index.js +10 -0
- package/src/lib/claude-settings.js +17 -0
- package/src/lib/ollama.js +100 -0
- package/src/wizard.js +165 -15
package/README.md
CHANGED
|
@@ -1,11 +1,11 @@
|
|
|
1
1
|
# Claude Connect
|
|
2
2
|
|
|
3
|
-
> Conecta `Claude Code` con `OpenCode Go`, `Zen`, `Kimi`, `DeepSeek`, `OpenRouter` y `Qwen` desde una interfaz de consola clara, rápida y reversible.
|
|
3
|
+
> Conecta `Claude Code` con `OpenCode Go`, `Zen`, `Kimi`, `DeepSeek`, `Ollama`, `OpenAI`, `OpenRouter` y `Qwen` desde una interfaz de consola clara, rápida y reversible.
|
|
4
4
|
|
|
5
5
|
[](https://www.npmjs.com/package/claude-connect)
|
|
6
6
|
[](https://nodejs.org/)
|
|
7
7
|
[](./LICENSE)
|
|
8
|
-
[](https://www.npmjs.com/package/claude-connect)
|
|
8
|
+
[](https://www.npmjs.com/package/claude-connect)
|
|
9
9
|
|
|
10
10
|
## Why Claude Connect
|
|
11
11
|
|
|
@@ -13,7 +13,7 @@
|
|
|
13
13
|
|
|
14
14
|
### Highlights
|
|
15
15
|
|
|
16
|
-
- `OpenCode Go`, `Zen`, `Kimi`, `DeepSeek`, `OpenRouter` y `Qwen` listos desde el primer arranque
|
|
16
|
+
- `OpenCode Go`, `Zen`, `Kimi`, `DeepSeek`, `Ollama`, `OpenAI`, `OpenRouter` y `Qwen` listos desde el primer arranque
|
|
17
17
|
- soporte para `Token` y `OAuth` cuando el proveedor lo permite
|
|
18
18
|
- API keys compartidas por proveedor para no repetir el mismo token en cada modelo
|
|
19
19
|
- activación reversible sobre la instalación real de `Claude Code`
|
|
@@ -78,6 +78,8 @@ Al activar:
|
|
|
78
78
|
- `Zen` usa conexión directa o gateway según el modelo elegido
|
|
79
79
|
- `Kimi` usa gateway local y reenvia al endpoint Anthropic de `https://api.kimi.com/coding/`
|
|
80
80
|
- `DeepSeek` apunta a `https://api.deepseek.com/anthropic`
|
|
81
|
+
- `Ollama` pide una URL local o remota, valida `/api/tags` y usa el gateway local sobre `.../api/chat`
|
|
82
|
+
- `OpenAI` usa el gateway local sobre `https://api.openai.com/v1/chat/completions`
|
|
81
83
|
- `OpenRouter` usa `openrouter/free` por gateway sobre `https://openrouter.ai/api/v1`
|
|
82
84
|
- `Qwen` apunta al gateway local `http://127.0.0.1:4310/anthropic`
|
|
83
85
|
|
|
@@ -89,6 +91,8 @@ Al activar:
|
|
|
89
91
|
| `Zen` | `Claude*` de Zen + modelos `chat/completions` de Zen | `Token` | Mixta |
|
|
90
92
|
| `Kimi` | `kimi-for-coding` | `Token` | Gateway local |
|
|
91
93
|
| `DeepSeek` | `deepseek-chat`, `deepseek-reasoner` | `Token` | Directa |
|
|
94
|
+
| `Ollama` | modelos descubiertos desde tu servidor | `Servidor Ollama` | Gateway local |
|
|
95
|
+
| `OpenAI` | `gpt-5.4`, `gpt-5.4-mini`, `gpt-5.3-codex`, `gpt-5.2-codex`, `gpt-5.2`, `gpt-5.1-codex-max`, `gpt-5.1-codex-mini` | `Token` | Gateway local |
|
|
92
96
|
| `OpenRouter` | `openrouter/free` | `Token` | Gateway local |
|
|
93
97
|
| `Qwen` | `qwen3-coder-plus` | `OAuth`, `Token` | Gateway local |
|
|
94
98
|
|
|
@@ -103,6 +107,28 @@ Nota sobre `Zen`:
|
|
|
103
107
|
- los modelos de Zen servidos por `chat/completions` van por gateway local
|
|
104
108
|
- esta primera integración no incluye todavía los modelos de Zen expuestos por `responses` ni los de endpoint tipo Google
|
|
105
109
|
|
|
110
|
+
Nota sobre `OpenAI`:
|
|
111
|
+
|
|
112
|
+
- esta integración usa `Chat Completions` por `gateway local`
|
|
113
|
+
- el bridge actual encaja bien con los modelos GPT/Codex listados porque Claude Code sigue hablando Anthropic hacia `claude-connect`
|
|
114
|
+
- la autenticación soportada hoy es `API key`; no se expone `OAuth` para este proveedor
|
|
115
|
+
- `gpt-5.4` quedó validado con una llamada real a través del gateway local
|
|
116
|
+
- referencia oficial:
|
|
117
|
+
- https://platform.openai.com/docs/api-reference/chat/create
|
|
118
|
+
- https://platform.openai.com/docs/api-reference/authentication
|
|
119
|
+
- https://developers.openai.com/api/docs/models
|
|
120
|
+
|
|
121
|
+
Nota sobre `Ollama`:
|
|
122
|
+
|
|
123
|
+
- la URL del servidor se define al crear la conexión
|
|
124
|
+
- sirve tanto para `localhost` como para un VPS o servidor remoto con Ollama expuesto
|
|
125
|
+
- Claude Connect consulta `/api/tags` para listar modelos y validar la conexión antes de guardar
|
|
126
|
+
- luego usa el endpoint nativo `POST /api/chat`, que resultó más compatible para servidores remotos que publican mal `/v1/*`
|
|
127
|
+
- servidores remotos pueden seguir fallando por timeout, auth cloud o respuestas pobres del modelo; la app ya distingue mejor esos casos
|
|
128
|
+
- referencia oficial:
|
|
129
|
+
- https://docs.ollama.com/openai
|
|
130
|
+
- https://docs.ollama.com/api/tags
|
|
131
|
+
|
|
106
132
|
## What It Stores
|
|
107
133
|
|
|
108
134
|
Claude Connect guarda el estado sensible fuera del repo.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "claude-connect",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.6",
|
|
4
4
|
"description": "CLI para configurar Claude Code con proveedores de modelos externos",
|
|
5
5
|
"author": "wmcarlosv",
|
|
6
6
|
"type": "module",
|
|
@@ -36,6 +36,8 @@
|
|
|
36
36
|
"anthropic",
|
|
37
37
|
"deepseek",
|
|
38
38
|
"kimi",
|
|
39
|
+
"ollama",
|
|
40
|
+
"openai",
|
|
39
41
|
"qwen",
|
|
40
42
|
"terminal"
|
|
41
43
|
],
|
|
@@ -521,6 +521,158 @@ const seedProviders = [
|
|
|
521
521
|
}
|
|
522
522
|
]
|
|
523
523
|
},
|
|
524
|
+
{
|
|
525
|
+
id: 'ollama',
|
|
526
|
+
name: 'Ollama',
|
|
527
|
+
vendor: 'Ollama',
|
|
528
|
+
description: 'Servidor Ollama autohospedado. La conexion pide una base URL manual, descubre modelos via /api/tags y luego usa Chat Completions por el gateway local.',
|
|
529
|
+
docsUrl: 'https://docs.ollama.com/openai',
|
|
530
|
+
docsVerifiedAt: '2026-04-02',
|
|
531
|
+
baseUrl: 'http://127.0.0.1:11434',
|
|
532
|
+
defaultModelId: null,
|
|
533
|
+
defaultAuthMethodId: 'server',
|
|
534
|
+
defaultApiKeyEnvVar: 'OLLAMA_API_KEY',
|
|
535
|
+
models: [],
|
|
536
|
+
authMethods: [
|
|
537
|
+
{
|
|
538
|
+
id: 'server',
|
|
539
|
+
name: 'Servidor Ollama',
|
|
540
|
+
description: 'Conexion sin API key propia de Claude Connect. Solo necesitas la URL de tu servidor Ollama local o remoto.',
|
|
541
|
+
credentialKind: 'none',
|
|
542
|
+
sortOrder: 1,
|
|
543
|
+
isDefault: 1
|
|
544
|
+
}
|
|
545
|
+
]
|
|
546
|
+
},
|
|
547
|
+
{
|
|
548
|
+
id: 'openai',
|
|
549
|
+
name: 'OpenAI',
|
|
550
|
+
vendor: 'OpenAI',
|
|
551
|
+
description: 'OpenAI con modelos GPT y Codex orientados a coding. Claude Code se conecta a traves del gateway local para mantener compatibilidad Anthropic, herramientas y vision.',
|
|
552
|
+
docsUrl: 'https://developers.openai.com/api/docs/models',
|
|
553
|
+
docsVerifiedAt: '2026-04-02',
|
|
554
|
+
baseUrl: 'https://api.openai.com/v1',
|
|
555
|
+
defaultModelId: 'gpt-5.4',
|
|
556
|
+
defaultAuthMethodId: 'token',
|
|
557
|
+
defaultApiKeyEnvVar: 'OPENAI_API_KEY',
|
|
558
|
+
models: [
|
|
559
|
+
{
|
|
560
|
+
id: 'gpt-5.4',
|
|
561
|
+
name: 'GPT-5.4',
|
|
562
|
+
category: 'OpenAI Chat Completions',
|
|
563
|
+
contextWindow: '1M',
|
|
564
|
+
summary: 'Modelo frontier actual de OpenAI para trabajo complejo, coding y flujos profesionales.',
|
|
565
|
+
upstreamModelId: 'gpt-5.4',
|
|
566
|
+
transportMode: 'gateway',
|
|
567
|
+
apiStyle: 'openai-chat',
|
|
568
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
569
|
+
apiPath: '/chat/completions',
|
|
570
|
+
authEnvMode: 'auth_token',
|
|
571
|
+
sortOrder: 1,
|
|
572
|
+
isDefault: 1
|
|
573
|
+
},
|
|
574
|
+
{
|
|
575
|
+
id: 'gpt-5.4-mini',
|
|
576
|
+
name: 'GPT-5.4 Mini',
|
|
577
|
+
category: 'OpenAI Chat Completions',
|
|
578
|
+
contextWindow: '400K',
|
|
579
|
+
summary: 'Variante mas rapida y economica de GPT-5.4 para coding, subagentes y alto volumen.',
|
|
580
|
+
upstreamModelId: 'gpt-5.4-mini',
|
|
581
|
+
transportMode: 'gateway',
|
|
582
|
+
apiStyle: 'openai-chat',
|
|
583
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
584
|
+
apiPath: '/chat/completions',
|
|
585
|
+
authEnvMode: 'auth_token',
|
|
586
|
+
sortOrder: 2,
|
|
587
|
+
isDefault: 0
|
|
588
|
+
},
|
|
589
|
+
{
|
|
590
|
+
id: 'gpt-5.3-codex',
|
|
591
|
+
name: 'GPT-5.3 Codex',
|
|
592
|
+
category: 'OpenAI Chat Completions',
|
|
593
|
+
contextWindow: '400K',
|
|
594
|
+
summary: 'Modelo Codex mas capaz de OpenAI para tareas agenticas de programacion.',
|
|
595
|
+
upstreamModelId: 'gpt-5.3-codex',
|
|
596
|
+
transportMode: 'gateway',
|
|
597
|
+
apiStyle: 'openai-chat',
|
|
598
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
599
|
+
apiPath: '/chat/completions',
|
|
600
|
+
authEnvMode: 'auth_token',
|
|
601
|
+
sortOrder: 3,
|
|
602
|
+
isDefault: 0
|
|
603
|
+
},
|
|
604
|
+
{
|
|
605
|
+
id: 'gpt-5.2-codex',
|
|
606
|
+
name: 'GPT-5.2 Codex',
|
|
607
|
+
category: 'OpenAI Chat Completions',
|
|
608
|
+
contextWindow: '400K',
|
|
609
|
+
summary: 'Modelo Codex inteligente para tareas largas de coding y automatizacion.',
|
|
610
|
+
upstreamModelId: 'gpt-5.2-codex',
|
|
611
|
+
transportMode: 'gateway',
|
|
612
|
+
apiStyle: 'openai-chat',
|
|
613
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
614
|
+
apiPath: '/chat/completions',
|
|
615
|
+
authEnvMode: 'auth_token',
|
|
616
|
+
sortOrder: 4,
|
|
617
|
+
isDefault: 0
|
|
618
|
+
},
|
|
619
|
+
{
|
|
620
|
+
id: 'gpt-5.2',
|
|
621
|
+
name: 'GPT-5.2',
|
|
622
|
+
category: 'OpenAI Chat Completions',
|
|
623
|
+
contextWindow: '400K',
|
|
624
|
+
summary: 'Modelo frontier previo de OpenAI para trabajo profesional con razonamiento configurable.',
|
|
625
|
+
upstreamModelId: 'gpt-5.2',
|
|
626
|
+
transportMode: 'gateway',
|
|
627
|
+
apiStyle: 'openai-chat',
|
|
628
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
629
|
+
apiPath: '/chat/completions',
|
|
630
|
+
authEnvMode: 'auth_token',
|
|
631
|
+
sortOrder: 5,
|
|
632
|
+
isDefault: 0
|
|
633
|
+
},
|
|
634
|
+
{
|
|
635
|
+
id: 'gpt-5.1-codex-max',
|
|
636
|
+
name: 'GPT-5.1 Codex Max',
|
|
637
|
+
category: 'OpenAI Chat Completions',
|
|
638
|
+
contextWindow: '400K',
|
|
639
|
+
summary: 'Variante Codex optimizada para tareas de larga duracion y sesiones de coding mas extensas.',
|
|
640
|
+
upstreamModelId: 'gpt-5.1-codex-max',
|
|
641
|
+
transportMode: 'gateway',
|
|
642
|
+
apiStyle: 'openai-chat',
|
|
643
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
644
|
+
apiPath: '/chat/completions',
|
|
645
|
+
authEnvMode: 'auth_token',
|
|
646
|
+
sortOrder: 6,
|
|
647
|
+
isDefault: 0
|
|
648
|
+
},
|
|
649
|
+
{
|
|
650
|
+
id: 'gpt-5.1-codex-mini',
|
|
651
|
+
name: 'GPT-5.1 Codex Mini',
|
|
652
|
+
category: 'OpenAI Chat Completions',
|
|
653
|
+
contextWindow: '400K',
|
|
654
|
+
summary: 'Version mas ligera y economica de la linea Codex 5.1 para iteraciones rapidas.',
|
|
655
|
+
upstreamModelId: 'gpt-5.1-codex-mini',
|
|
656
|
+
transportMode: 'gateway',
|
|
657
|
+
apiStyle: 'openai-chat',
|
|
658
|
+
apiBaseUrl: 'https://api.openai.com/v1',
|
|
659
|
+
apiPath: '/chat/completions',
|
|
660
|
+
authEnvMode: 'auth_token',
|
|
661
|
+
sortOrder: 7,
|
|
662
|
+
isDefault: 0
|
|
663
|
+
}
|
|
664
|
+
],
|
|
665
|
+
authMethods: [
|
|
666
|
+
{
|
|
667
|
+
id: 'token',
|
|
668
|
+
name: 'Token',
|
|
669
|
+
description: 'Conexion por API key de OpenAI.',
|
|
670
|
+
credentialKind: 'env_var',
|
|
671
|
+
sortOrder: 1,
|
|
672
|
+
isDefault: 1
|
|
673
|
+
}
|
|
674
|
+
]
|
|
675
|
+
},
|
|
524
676
|
{
|
|
525
677
|
id: 'openrouter',
|
|
526
678
|
name: 'OpenRouter',
|
package/src/gateway/messages.js
CHANGED
|
@@ -192,6 +192,10 @@ function buildOpenAIContentPartFromAnthropicBlock(block) {
|
|
|
192
192
|
}
|
|
193
193
|
|
|
194
194
|
function safeParseJson(value) {
|
|
195
|
+
if (isObject(value)) {
|
|
196
|
+
return value;
|
|
197
|
+
}
|
|
198
|
+
|
|
195
199
|
if (typeof value !== 'string' || value.length === 0) {
|
|
196
200
|
return {};
|
|
197
201
|
}
|
|
@@ -232,6 +236,10 @@ export function estimateTokenCountFromAnthropicRequest(body) {
|
|
|
232
236
|
return Math.max(1, Math.ceil(totalLength / 4));
|
|
233
237
|
}
|
|
234
238
|
|
|
239
|
+
function usesMaxCompletionTokens(model) {
|
|
240
|
+
return typeof model === 'string' && /^gpt-5(?:[.-]|$)/.test(model);
|
|
241
|
+
}
|
|
242
|
+
|
|
235
243
|
export function buildOpenAIRequestFromAnthropic({ body, model }) {
|
|
236
244
|
const messages = [];
|
|
237
245
|
const systemText = collectText(body.system).trim();
|
|
@@ -323,7 +331,11 @@ export function buildOpenAIRequestFromAnthropic({ body, model }) {
|
|
|
323
331
|
};
|
|
324
332
|
|
|
325
333
|
if (typeof body.max_tokens === 'number') {
|
|
326
|
-
|
|
334
|
+
if (usesMaxCompletionTokens(model)) {
|
|
335
|
+
request.max_completion_tokens = body.max_tokens;
|
|
336
|
+
} else {
|
|
337
|
+
request.max_tokens = body.max_tokens;
|
|
338
|
+
}
|
|
327
339
|
}
|
|
328
340
|
|
|
329
341
|
if (typeof body.temperature === 'number') {
|
|
@@ -368,6 +380,134 @@ export function buildOpenAIRequestFromAnthropic({ body, model }) {
|
|
|
368
380
|
return request;
|
|
369
381
|
}
|
|
370
382
|
|
|
383
|
+
export function buildOllamaRequestFromAnthropic({ body, model }) {
|
|
384
|
+
const messages = [];
|
|
385
|
+
const toolUseIdToName = new Map();
|
|
386
|
+
const systemText = collectText(body.system).trim();
|
|
387
|
+
|
|
388
|
+
if (systemText.length > 0) {
|
|
389
|
+
messages.push({
|
|
390
|
+
role: 'system',
|
|
391
|
+
content: systemText
|
|
392
|
+
});
|
|
393
|
+
}
|
|
394
|
+
|
|
395
|
+
for (const message of Array.isArray(body.messages) ? body.messages : []) {
|
|
396
|
+
const blocks = normalizeBlocks(message?.content);
|
|
397
|
+
|
|
398
|
+
if (message?.role === 'user') {
|
|
399
|
+
let textParts = [];
|
|
400
|
+
let imageParts = [];
|
|
401
|
+
|
|
402
|
+
const flushUserMessage = () => {
|
|
403
|
+
if (textParts.length === 0 && imageParts.length === 0) {
|
|
404
|
+
return;
|
|
405
|
+
}
|
|
406
|
+
|
|
407
|
+
messages.push({
|
|
408
|
+
role: 'user',
|
|
409
|
+
content: textParts.join('\n\n'),
|
|
410
|
+
...(imageParts.length > 0 ? { images: imageParts } : {})
|
|
411
|
+
});
|
|
412
|
+
|
|
413
|
+
textParts = [];
|
|
414
|
+
imageParts = [];
|
|
415
|
+
};
|
|
416
|
+
|
|
417
|
+
for (const block of blocks) {
|
|
418
|
+
if (block?.type === 'tool_result') {
|
|
419
|
+
flushUserMessage();
|
|
420
|
+
messages.push({
|
|
421
|
+
role: 'tool',
|
|
422
|
+
tool_name: toolUseIdToName.get(block.tool_use_id) ?? block.tool_use_id ?? 'tool',
|
|
423
|
+
content: collectText(block.content)
|
|
424
|
+
});
|
|
425
|
+
continue;
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
if (block?.type === 'image' && block?.source?.type === 'base64' && typeof block?.source?.data === 'string') {
|
|
429
|
+
imageParts.push(block.source.data);
|
|
430
|
+
continue;
|
|
431
|
+
}
|
|
432
|
+
|
|
433
|
+
textParts.push(collectText(block?.text ?? block));
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
flushUserMessage();
|
|
437
|
+
continue;
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
if (message?.role === 'assistant') {
|
|
441
|
+
const textParts = [];
|
|
442
|
+
const toolCalls = [];
|
|
443
|
+
|
|
444
|
+
for (const block of blocks) {
|
|
445
|
+
if (block?.type === 'tool_use') {
|
|
446
|
+
toolUseIdToName.set(block.id, block.name);
|
|
447
|
+
toolCalls.push({
|
|
448
|
+
function: {
|
|
449
|
+
name: block.name,
|
|
450
|
+
arguments: block.input ?? {}
|
|
451
|
+
}
|
|
452
|
+
});
|
|
453
|
+
continue;
|
|
454
|
+
}
|
|
455
|
+
|
|
456
|
+
textParts.push(collectText(block?.text ?? block));
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
messages.push({
|
|
460
|
+
role: 'assistant',
|
|
461
|
+
content: textParts.join('\n\n'),
|
|
462
|
+
...(toolCalls.length > 0 ? { tool_calls: toolCalls } : {})
|
|
463
|
+
});
|
|
464
|
+
}
|
|
465
|
+
}
|
|
466
|
+
|
|
467
|
+
const request = {
|
|
468
|
+
model,
|
|
469
|
+
messages,
|
|
470
|
+
stream: false
|
|
471
|
+
};
|
|
472
|
+
|
|
473
|
+
if (typeof body.max_tokens === 'number') {
|
|
474
|
+
request.options = {
|
|
475
|
+
...(isObject(request.options) ? request.options : {}),
|
|
476
|
+
num_predict: body.max_tokens
|
|
477
|
+
};
|
|
478
|
+
}
|
|
479
|
+
|
|
480
|
+
if (typeof body.temperature === 'number') {
|
|
481
|
+
request.options = {
|
|
482
|
+
...(isObject(request.options) ? request.options : {}),
|
|
483
|
+
temperature: body.temperature
|
|
484
|
+
};
|
|
485
|
+
}
|
|
486
|
+
|
|
487
|
+
if (Array.isArray(body.stop_sequences) && body.stop_sequences.length > 0) {
|
|
488
|
+
request.options = {
|
|
489
|
+
...(isObject(request.options) ? request.options : {}),
|
|
490
|
+
stop: body.stop_sequences
|
|
491
|
+
};
|
|
492
|
+
}
|
|
493
|
+
|
|
494
|
+
if (Array.isArray(body.tools) && body.tools.length > 0) {
|
|
495
|
+
request.tools = body.tools.map((tool) => ({
|
|
496
|
+
type: 'function',
|
|
497
|
+
function: {
|
|
498
|
+
name: tool.name,
|
|
499
|
+
description: tool.description ?? '',
|
|
500
|
+
parameters: tool.input_schema ?? {
|
|
501
|
+
type: 'object',
|
|
502
|
+
properties: {}
|
|
503
|
+
}
|
|
504
|
+
}
|
|
505
|
+
}));
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
return request;
|
|
509
|
+
}
|
|
510
|
+
|
|
371
511
|
export function buildAnthropicMessageFromOpenAI({ response, requestedModel }) {
|
|
372
512
|
const choice = response?.choices?.[0] ?? {};
|
|
373
513
|
const assistantMessage = choice?.message ?? {};
|
|
@@ -413,6 +553,47 @@ export function buildAnthropicMessageFromOpenAI({ response, requestedModel }) {
|
|
|
413
553
|
};
|
|
414
554
|
}
|
|
415
555
|
|
|
556
|
+
export function buildAnthropicMessageFromOllama({ response, requestedModel }) {
|
|
557
|
+
const assistantMessage = isObject(response?.message) ? response.message : {};
|
|
558
|
+
const content = [];
|
|
559
|
+
const text = typeof assistantMessage.content === 'string' ? assistantMessage.content : '';
|
|
560
|
+
const toolCalls = Array.isArray(assistantMessage.tool_calls) ? assistantMessage.tool_calls : [];
|
|
561
|
+
|
|
562
|
+
if (text.length > 0) {
|
|
563
|
+
content.push({
|
|
564
|
+
type: 'text',
|
|
565
|
+
text
|
|
566
|
+
});
|
|
567
|
+
}
|
|
568
|
+
|
|
569
|
+
for (const toolCall of toolCalls) {
|
|
570
|
+
content.push({
|
|
571
|
+
type: 'tool_use',
|
|
572
|
+
id: `toolu_${crypto.randomUUID().replace(/-/g, '')}`,
|
|
573
|
+
name: toolCall?.function?.name || 'tool',
|
|
574
|
+
input: safeParseJson(toolCall?.function?.arguments)
|
|
575
|
+
});
|
|
576
|
+
}
|
|
577
|
+
|
|
578
|
+
return {
|
|
579
|
+
id: `msg_${crypto.randomUUID().replace(/-/g, '')}`,
|
|
580
|
+
type: 'message',
|
|
581
|
+
role: 'assistant',
|
|
582
|
+
model: requestedModel || response?.model || 'unknown',
|
|
583
|
+
content,
|
|
584
|
+
stop_reason: toolCalls.length > 0
|
|
585
|
+
? 'tool_use'
|
|
586
|
+
: response?.done_reason === 'length'
|
|
587
|
+
? 'max_tokens'
|
|
588
|
+
: 'end_turn',
|
|
589
|
+
stop_sequence: null,
|
|
590
|
+
usage: {
|
|
591
|
+
input_tokens: Number(response?.prompt_eval_count ?? 0),
|
|
592
|
+
output_tokens: Number(response?.eval_count ?? 0)
|
|
593
|
+
}
|
|
594
|
+
};
|
|
595
|
+
}
|
|
596
|
+
|
|
416
597
|
function writeSseEvent(response, event, payload) {
|
|
417
598
|
response.write(`event: ${event}\n`);
|
|
418
599
|
response.write(`data: ${JSON.stringify(payload)}\n\n`);
|
package/src/gateway/server.js
CHANGED
|
@@ -6,7 +6,9 @@ import process from 'node:process';
|
|
|
6
6
|
import { spawn } from 'node:child_process';
|
|
7
7
|
import { fileURLToPath } from 'node:url';
|
|
8
8
|
import {
|
|
9
|
+
buildAnthropicMessageFromOllama,
|
|
9
10
|
buildAnthropicMessageFromOpenAI,
|
|
11
|
+
buildOllamaRequestFromAnthropic,
|
|
10
12
|
buildOpenAIRequestFromAnthropic,
|
|
11
13
|
estimateTokenCountFromAnthropicRequest,
|
|
12
14
|
normalizeAnthropicRequestForUpstream,
|
|
@@ -33,6 +35,30 @@ function isObject(value) {
|
|
|
33
35
|
return typeof value === 'object' && value !== null && !Array.isArray(value);
|
|
34
36
|
}
|
|
35
37
|
|
|
38
|
+
function describeRequestError(error) {
|
|
39
|
+
if (!(error instanceof Error)) {
|
|
40
|
+
return String(error);
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
const parts = [error.message];
|
|
44
|
+
const cause = error.cause;
|
|
45
|
+
|
|
46
|
+
if (cause && typeof cause === 'object') {
|
|
47
|
+
const code = 'code' in cause && typeof cause.code === 'string' ? cause.code : null;
|
|
48
|
+
const message = 'message' in cause && typeof cause.message === 'string' ? cause.message : null;
|
|
49
|
+
|
|
50
|
+
if (code) {
|
|
51
|
+
parts.push(`code=${code}`);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
if (message && message !== error.message) {
|
|
55
|
+
parts.push(message);
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
return parts.join(' · ');
|
|
60
|
+
}
|
|
61
|
+
|
|
36
62
|
async function terminatePid(pid) {
|
|
37
63
|
if (!isProcessAlive(pid)) {
|
|
38
64
|
return false;
|
|
@@ -136,6 +162,18 @@ function getUpstreamModelId(profile) {
|
|
|
136
162
|
}
|
|
137
163
|
|
|
138
164
|
function resolveGatewayUpstreamConfig(profile) {
|
|
165
|
+
if (profile?.provider?.id === 'ollama') {
|
|
166
|
+
return {
|
|
167
|
+
upstreamBaseUrl: typeof profile?.endpoint?.baseUrl === 'string' && profile.endpoint.baseUrl.length > 0
|
|
168
|
+
? profile.endpoint.baseUrl
|
|
169
|
+
: typeof profile?.model?.apiBaseUrl === 'string' && profile.model.apiBaseUrl.length > 0
|
|
170
|
+
? profile.model.apiBaseUrl
|
|
171
|
+
: 'http://127.0.0.1:11434',
|
|
172
|
+
upstreamApiStyle: 'ollama-chat',
|
|
173
|
+
upstreamApiPath: '/api/chat'
|
|
174
|
+
};
|
|
175
|
+
}
|
|
176
|
+
|
|
139
177
|
return {
|
|
140
178
|
upstreamBaseUrl: typeof profile?.model?.apiBaseUrl === 'string' && profile.model.apiBaseUrl.length > 0
|
|
141
179
|
? profile.model.apiBaseUrl
|
|
@@ -169,6 +207,19 @@ async function resolveGatewayContext() {
|
|
|
169
207
|
const profile = await readProfileFile(switchState.profilePath);
|
|
170
208
|
const authMethod = profile?.auth?.method === 'api_key' ? 'token' : profile?.auth?.method;
|
|
171
209
|
|
|
210
|
+
if (authMethod === 'server' && profile?.provider?.id === 'ollama') {
|
|
211
|
+
const upstream = resolveGatewayUpstreamConfig(profile);
|
|
212
|
+
|
|
213
|
+
return {
|
|
214
|
+
profile,
|
|
215
|
+
authMethod,
|
|
216
|
+
upstreamBaseUrl: upstream.upstreamBaseUrl,
|
|
217
|
+
upstreamApiStyle: upstream.upstreamApiStyle,
|
|
218
|
+
upstreamApiPath: upstream.upstreamApiPath,
|
|
219
|
+
accessToken: 'ollama'
|
|
220
|
+
};
|
|
221
|
+
}
|
|
222
|
+
|
|
172
223
|
if (authMethod === 'token') {
|
|
173
224
|
const envVar = profile?.auth?.envVar;
|
|
174
225
|
let token = typeof envVar === 'string' ? process.env[envVar] : '';
|
|
@@ -236,11 +287,25 @@ async function resolveGatewayContext() {
|
|
|
236
287
|
}
|
|
237
288
|
|
|
238
289
|
async function forwardUpstreamRequest({ targetUrl, headers, payload, context, refreshOnUnauthorized = true }) {
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
290
|
+
let response;
|
|
291
|
+
|
|
292
|
+
try {
|
|
293
|
+
response = await fetch(targetUrl, {
|
|
294
|
+
method: 'POST',
|
|
295
|
+
headers,
|
|
296
|
+
body: JSON.stringify(payload)
|
|
297
|
+
});
|
|
298
|
+
} catch (error) {
|
|
299
|
+
const providerName = context?.profile?.provider?.name ?? context?.profile?.provider?.id ?? 'El proveedor';
|
|
300
|
+
|
|
301
|
+
if (context?.profile?.provider?.id === 'ollama') {
|
|
302
|
+
throw new Error(
|
|
303
|
+
`${providerName} no respondio en ${targetUrl}. Revisa que el servidor remoto este accesible, que el puerto este expuesto y que Ollama escuche en esa URL. Detalle: ${describeRequestError(error)}`
|
|
304
|
+
);
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
throw new Error(`${providerName} no respondio en ${targetUrl}. Detalle: ${describeRequestError(error)}`);
|
|
308
|
+
}
|
|
244
309
|
|
|
245
310
|
const responsePayload = await response.json().catch(() => ({}));
|
|
246
311
|
|
|
@@ -305,6 +370,22 @@ async function forwardChatCompletion({ openAiRequest, context, refreshOnUnauthor
|
|
|
305
370
|
});
|
|
306
371
|
}
|
|
307
372
|
|
|
373
|
+
async function forwardOllamaChat({ ollamaRequest, context, refreshOnUnauthorized = true }) {
|
|
374
|
+
const targetUrl = `${context.upstreamBaseUrl.replace(/\/$/, '')}${context.upstreamApiPath || '/api/chat'}`;
|
|
375
|
+
|
|
376
|
+
return forwardUpstreamRequest({
|
|
377
|
+
targetUrl,
|
|
378
|
+
headers: {
|
|
379
|
+
'content-type': 'application/json',
|
|
380
|
+
accept: 'application/json',
|
|
381
|
+
'user-agent': 'claude-connect-gateway/0.1.0'
|
|
382
|
+
},
|
|
383
|
+
payload: ollamaRequest,
|
|
384
|
+
context,
|
|
385
|
+
refreshOnUnauthorized
|
|
386
|
+
});
|
|
387
|
+
}
|
|
388
|
+
|
|
308
389
|
async function forwardAnthropicMessage({ requestBody, context, refreshOnUnauthorized = true }) {
|
|
309
390
|
const targetUrl = `${context.upstreamBaseUrl.replace(/\/$/, '')}${context.upstreamApiPath || '/v1/messages'}`;
|
|
310
391
|
|
|
@@ -384,6 +465,36 @@ async function handleMessages(request, response) {
|
|
|
384
465
|
return;
|
|
385
466
|
}
|
|
386
467
|
|
|
468
|
+
if (context.upstreamApiStyle === 'ollama-chat') {
|
|
469
|
+
const ollamaRequest = buildOllamaRequestFromAnthropic({
|
|
470
|
+
body,
|
|
471
|
+
model: getUpstreamModelId(context.profile)
|
|
472
|
+
});
|
|
473
|
+
const upstreamResponse = await forwardOllamaChat({
|
|
474
|
+
ollamaRequest,
|
|
475
|
+
context
|
|
476
|
+
});
|
|
477
|
+
const anthropicMessage = buildAnthropicMessageFromOllama({
|
|
478
|
+
response: upstreamResponse,
|
|
479
|
+
requestedModel: getUpstreamModelId(context.profile)
|
|
480
|
+
});
|
|
481
|
+
|
|
482
|
+
if (body.stream === true) {
|
|
483
|
+
response.writeHead(200, {
|
|
484
|
+
'content-type': 'text/event-stream; charset=utf-8',
|
|
485
|
+
'cache-control': 'no-cache, no-transform',
|
|
486
|
+
connection: 'keep-alive',
|
|
487
|
+
'x-accel-buffering': 'no'
|
|
488
|
+
});
|
|
489
|
+
writeAnthropicStreamFromMessage(response, anthropicMessage);
|
|
490
|
+
response.end();
|
|
491
|
+
return;
|
|
492
|
+
}
|
|
493
|
+
|
|
494
|
+
sendJson(response, 200, anthropicMessage);
|
|
495
|
+
return;
|
|
496
|
+
}
|
|
497
|
+
|
|
387
498
|
if (context.upstreamApiStyle !== 'openai-chat') {
|
|
388
499
|
throw new Error(`El gateway todavia no soporta el estilo ${context.upstreamApiStyle} para ${context.profile.provider.name}.`);
|
|
389
500
|
}
|
package/src/index.js
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import packageJson from '../package.json' with { type: 'json' };
|
|
1
2
|
import { getCatalogStore } from './data/catalog-store.js';
|
|
2
3
|
import { gatewayBaseUrl } from './gateway/constants.js';
|
|
3
4
|
import { getGatewayStatus } from './gateway/state.js';
|
|
@@ -18,6 +19,10 @@ function printGatewayStatus(status) {
|
|
|
18
19
|
process.stdout.write(`${lines.join('\n')}\n`);
|
|
19
20
|
}
|
|
20
21
|
|
|
22
|
+
function printVersion() {
|
|
23
|
+
process.stdout.write(`${packageJson.version}\n`);
|
|
24
|
+
}
|
|
25
|
+
|
|
21
26
|
async function runGatewayCommand(args) {
|
|
22
27
|
const action = args[0] ?? 'status';
|
|
23
28
|
|
|
@@ -51,6 +56,11 @@ async function runGatewayCommand(args) {
|
|
|
51
56
|
}
|
|
52
57
|
|
|
53
58
|
export async function run(argv = process.argv.slice(2)) {
|
|
59
|
+
if (argv[0] === '--version' || argv[0] === '-v' || argv[0] === 'version') {
|
|
60
|
+
printVersion();
|
|
61
|
+
return;
|
|
62
|
+
}
|
|
63
|
+
|
|
54
64
|
if (argv[0] === 'gateway') {
|
|
55
65
|
await runGatewayCommand(argv.slice(1));
|
|
56
66
|
return;
|
|
@@ -99,6 +99,10 @@ export async function readSwitchState() {
|
|
|
99
99
|
}
|
|
100
100
|
|
|
101
101
|
async function resolveTokenValueForProfile(profile) {
|
|
102
|
+
if (profile?.provider?.id === 'ollama') {
|
|
103
|
+
return 'ollama';
|
|
104
|
+
}
|
|
105
|
+
|
|
102
106
|
const envVar = profile?.auth?.envVar;
|
|
103
107
|
const envToken = typeof envVar === 'string' ? process.env[envVar] : '';
|
|
104
108
|
|
|
@@ -163,6 +167,16 @@ export async function resolveClaudeTransportForProfile({
|
|
|
163
167
|
};
|
|
164
168
|
}
|
|
165
169
|
|
|
170
|
+
if (authMethod === 'server') {
|
|
171
|
+
return {
|
|
172
|
+
connectionMode: 'gateway',
|
|
173
|
+
connectionBaseUrl: gatewayBaseUrl,
|
|
174
|
+
authToken: 'claude-connect-local',
|
|
175
|
+
authEnvMode: 'auth_token',
|
|
176
|
+
extraEnv: {}
|
|
177
|
+
};
|
|
178
|
+
}
|
|
179
|
+
|
|
166
180
|
return {
|
|
167
181
|
connectionMode: 'gateway',
|
|
168
182
|
connectionBaseUrl: gatewayBaseUrl,
|
|
@@ -212,6 +226,9 @@ export function buildClaudeSettingsForProfile({
|
|
|
212
226
|
if (authMethod === 'token') {
|
|
213
227
|
env.CLAUDE_CONNECT_TOKEN_ENV_VAR = profile.auth.envVar;
|
|
214
228
|
delete env.CLAUDE_CONNECT_TOKEN_FILE;
|
|
229
|
+
} else if (authMethod === 'server') {
|
|
230
|
+
delete env.CLAUDE_CONNECT_TOKEN_ENV_VAR;
|
|
231
|
+
delete env.CLAUDE_CONNECT_TOKEN_FILE;
|
|
215
232
|
} else if (authMethod === 'oauth' && profile.auth.oauth?.tokenFile) {
|
|
216
233
|
env.CLAUDE_CONNECT_TOKEN_FILE = profile.auth.oauth.tokenFile;
|
|
217
234
|
delete env.CLAUDE_CONNECT_TOKEN_ENV_VAR;
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
function describeRequestError(error) {
|
|
2
|
+
if (error && typeof error === 'object') {
|
|
3
|
+
if ('cause' in error && error.cause && typeof error.cause === 'object' && 'message' in error.cause) {
|
|
4
|
+
return String(error.cause.message);
|
|
5
|
+
}
|
|
6
|
+
|
|
7
|
+
if ('message' in error) {
|
|
8
|
+
return String(error.message);
|
|
9
|
+
}
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
return String(error);
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
export function normalizeOllamaBaseUrl(value) {
|
|
16
|
+
const trimmed = typeof value === 'string' ? value.trim() : '';
|
|
17
|
+
|
|
18
|
+
if (trimmed.length === 0) {
|
|
19
|
+
throw new Error('La URL de Ollama no puede quedar vacia.');
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
const withProtocol = /^https?:\/\//i.test(trimmed)
|
|
23
|
+
? trimmed
|
|
24
|
+
: `http://${trimmed}`;
|
|
25
|
+
|
|
26
|
+
let url;
|
|
27
|
+
|
|
28
|
+
try {
|
|
29
|
+
url = new URL(withProtocol);
|
|
30
|
+
} catch (_error) {
|
|
31
|
+
throw new Error('La URL de Ollama no es valida.');
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
if (!url.hostname) {
|
|
35
|
+
throw new Error('La URL de Ollama no es valida.');
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
return url.toString().replace(/\/$/, '');
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
function summarizeOllamaModel(model) {
|
|
42
|
+
const details = model?.details && typeof model.details === 'object' ? model.details : {};
|
|
43
|
+
const segments = [
|
|
44
|
+
typeof details.family === 'string' && details.family.length > 0 ? details.family : null,
|
|
45
|
+
typeof details.parameter_size === 'string' && details.parameter_size.length > 0 ? details.parameter_size : null,
|
|
46
|
+
typeof details.quantization_level === 'string' && details.quantization_level.length > 0 ? details.quantization_level : null
|
|
47
|
+
].filter(Boolean);
|
|
48
|
+
|
|
49
|
+
return segments.length > 0
|
|
50
|
+
? segments.join(' · ')
|
|
51
|
+
: 'Modelo descubierto desde /api/tags';
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
/**
 * Queries an Ollama server's `/api/tags` endpoint and maps the installed
 * models into the catalog entry shape used by the wizard and the gateway.
 *
 * @param {object} options
 * @param {string} options.baseUrl - Server address; normalized via
 *   `normalizeOllamaBaseUrl` (protocol added, trailing slash removed) before use.
 * @param {number} [options.timeoutMs=8000] - Abort the request after this many ms.
 * @returns {Promise<{ baseUrl: string, models: object[] }>} The normalized base
 *   URL plus one catalog entry per discovered model; the first model is flagged
 *   as the default.
 * @throws {Error} When the URL is invalid, the request fails or times out
 *   (`No se pudo consultar …`), or the server answers with a non-2xx status
 *   (`Ollama respondio …`).
 */
export async function fetchOllamaModels({ baseUrl, timeoutMs = 8000 }) {
  const normalizedBaseUrl = normalizeOllamaBaseUrl(baseUrl);
  const controller = new AbortController();
  // Abort with an explicit reason so describeRequestError can surface "timeout".
  const timer = setTimeout(() => controller.abort(new Error('timeout')), timeoutMs);

  try {
    const response = await fetch(`${normalizedBaseUrl}/api/tags`, {
      method: 'GET',
      headers: {
        accept: 'application/json'
      },
      signal: controller.signal
    });

    // Tolerate non-JSON bodies (e.g. HTML error pages) without crashing.
    const payload = await response.json().catch(() => ({}));

    if (!response.ok) {
      const message = payload?.error || payload?.message || `HTTP ${response.status}`;
      const httpError = new Error(`Ollama respondio ${response.status}: ${message}`);
      // Mark the error so the outer catch rethrows it unchanged instead of
      // wrapping it a second time into a redundant "No se pudo consultar…"
      // message (the original code double-wrapped HTTP-status failures).
      httpError.isOllamaStatusError = true;
      throw httpError;
    }

    const rawModels = Array.isArray(payload?.models) ? payload.models : [];

    return {
      baseUrl: normalizedBaseUrl,
      models: rawModels.map((model, index) => {
        // `model` is the canonical identifier; `name` is the display label.
        const upstreamId = model?.model || model?.name || `ollama-model-${index + 1}`;

        return {
          id: upstreamId,
          name: model?.name || model?.model || `Modelo ${index + 1}`,
          category: 'Ollama OpenAI-compatible',
          contextWindow: 'Auto',
          summary: summarizeOllamaModel(model),
          upstreamModelId: upstreamId,
          transportMode: 'gateway',
          apiStyle: 'openai-chat',
          apiBaseUrl: normalizedBaseUrl,
          apiPath: '/v1/chat/completions',
          authEnvMode: 'auth_token',
          sortOrder: index + 1,
          isDefault: index === 0
        };
      })
    };
  } catch (error) {
    if (error?.isOllamaStatusError) {
      throw error;
    }

    throw new Error(`No se pudo consultar ${normalizedBaseUrl}/api/tags: ${describeRequestError(error)}`);
  } finally {
    clearTimeout(timer);
  }
}
|
package/src/wizard.js
CHANGED
|
@@ -22,6 +22,7 @@ import {
|
|
|
22
22
|
readManagedTokenSecret,
|
|
23
23
|
saveManagedProviderTokenSecret
|
|
24
24
|
} from './lib/secrets.js';
|
|
25
|
+
import { fetchOllamaModels, normalizeOllamaBaseUrl } from './lib/ollama.js';
|
|
25
26
|
import {
|
|
26
27
|
assertInteractiveTerminal,
|
|
27
28
|
buildFrame,
|
|
@@ -151,6 +152,13 @@ function buildTokenDetailLines(profile) {
|
|
|
151
152
|
return [`Token file: ${profile.auth.oauth?.tokenFile ?? 'no encontrado'}`];
|
|
152
153
|
}
|
|
153
154
|
|
|
155
|
+
if (profile.auth.method === 'server') {
|
|
156
|
+
return [
|
|
157
|
+
`Base URL: ${profile.endpoint?.baseUrl ?? 'sin definir'}`,
|
|
158
|
+
`Modelo upstream: ${profile.model?.upstreamModelId ?? profile.model?.id ?? 'sin definir'}`
|
|
159
|
+
];
|
|
160
|
+
}
|
|
161
|
+
|
|
154
162
|
if (profile.providerCredentialConfigured) {
|
|
155
163
|
return [
|
|
156
164
|
`Credencial compartida: ${profile.providerSecretRecord?.filePath ?? profile.auth.providerSecretFile ?? 'configurada'}`,
|
|
@@ -185,11 +193,6 @@ function profileActionItems(profile) {
|
|
|
185
193
|
description: 'Borra solo el perfil. La API key compartida del proveedor se conserva.',
|
|
186
194
|
value: 'delete'
|
|
187
195
|
});
|
|
188
|
-
items.push({
|
|
189
|
-
label: 'Volver',
|
|
190
|
-
description: 'Regresa al menu principal.',
|
|
191
|
-
value: 'back'
|
|
192
|
-
});
|
|
193
196
|
|
|
194
197
|
return items;
|
|
195
198
|
}
|
|
@@ -233,7 +236,7 @@ function renderWelcome() {
|
|
|
233
236
|
colorize('4. Guardar perfil y credenciales locales', colors.soft),
|
|
234
237
|
'',
|
|
235
238
|
colorize('Catalogo actual', colors.bold, colors.accentSoft),
|
|
236
|
-
colorize('OpenCode Go, Zen, Kimi, DeepSeek, OpenRouter y Qwen ya vienen almacenados en SQLite.', colors.soft),
|
|
239
|
+
colorize('OpenCode Go, Zen, Kimi, DeepSeek, Ollama, OpenAI, OpenRouter y Qwen ya vienen almacenados en SQLite.', colors.soft),
|
|
237
240
|
'',
|
|
238
241
|
colorize('Seguridad', colors.bold, colors.accentSoft),
|
|
239
242
|
colorize('El token OAuth se guarda localmente y el modo Token puede guardarse una sola vez por proveedor.', colors.soft)
|
|
@@ -246,8 +249,12 @@ function renderWelcome() {
|
|
|
246
249
|
function renderSummary({ profile, filePath }) {
|
|
247
250
|
const authSummary = profile.auth.method === 'oauth'
|
|
248
251
|
? `Auth: oauth con token en ${profile.auth.oauth.tokenFile}`
|
|
249
|
-
:
|
|
250
|
-
|
|
252
|
+
: profile.auth.method === 'server'
|
|
253
|
+
? 'Auth: servidor Ollama sin API key administrada por Claude Connect'
|
|
254
|
+
: `Auth: ${profile.auth.method} con fallback en ${profile.auth.envVar}`;
|
|
255
|
+
const managedSecretSummary = profile.auth.method === 'server'
|
|
256
|
+
? colorize('Esta conexion usa solo la URL y el modelo descubiertos en el servidor Ollama.', colors.soft)
|
|
257
|
+
: profile.auth.method !== 'oauth' && profile.auth.providerSecretFile
|
|
251
258
|
? colorize(`API key compartida del proveedor en: ${profile.auth.providerSecretFile}`, colors.soft)
|
|
252
259
|
: profile.auth.method !== 'oauth' && profile.auth.secretFile
|
|
253
260
|
? colorize(`API key antigua detectada en: ${profile.auth.secretFile}`, colors.soft)
|
|
@@ -279,12 +286,18 @@ function renderSummary({ profile, filePath }) {
|
|
|
279
286
|
colorize(`export OPENAI_MODEL=${profile.model.id}`, colors.soft),
|
|
280
287
|
colorize('El access token y refresh token ya quedaron guardados localmente.', colors.soft)
|
|
281
288
|
]
|
|
282
|
-
:
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
289
|
+
: profile.auth.method === 'server'
|
|
290
|
+
? [
|
|
291
|
+
colorize(`export OPENAI_BASE_URL=${profile.endpoint.baseUrl}`, colors.soft),
|
|
292
|
+
colorize(`export OPENAI_MODEL=${profile.model.id}`, colors.soft),
|
|
293
|
+
colorize('La conexion usa el servidor Ollama descubierto y se valida antes de guardar.', colors.soft)
|
|
294
|
+
]
|
|
295
|
+
: [
|
|
296
|
+
colorize(`Fallback opcional: export ${profile.auth.envVar}=<tu_token>`, colors.soft),
|
|
297
|
+
colorize(`export OPENAI_BASE_URL=${profile.endpoint.baseUrl}`, colors.soft),
|
|
298
|
+
colorize(`export OPENAI_MODEL=${profile.model.id}`, colors.soft),
|
|
299
|
+
colorize('La API key puede guardarse una sola vez por proveedor en Claude Connect.', colors.soft)
|
|
300
|
+
])
|
|
288
301
|
],
|
|
289
302
|
footer: [colorize('Presiona cualquier tecla para volver al menu', colors.dim, colors.muted)]
|
|
290
303
|
})
|
|
@@ -561,7 +574,7 @@ async function deleteSavedProfile(profile) {
|
|
|
561
574
|
lines: [
|
|
562
575
|
colorize(`Perfil: ${profile.profileName}`, colors.soft),
|
|
563
576
|
colorize(`Archivo eliminado: ${profile.filePath}`, colors.soft),
|
|
564
|
-
...(profile.auth?.method
|
|
577
|
+
...(profile.auth?.method === 'token' || profile.auth?.method === 'api_key'
|
|
565
578
|
? [colorize('La API key compartida del proveedor se conserva para otros modelos.', colors.soft)]
|
|
566
579
|
: [])
|
|
567
580
|
],
|
|
@@ -703,6 +716,143 @@ async function createNewConnection(store) {
|
|
|
703
716
|
}
|
|
704
717
|
|
|
705
718
|
const catalog = store.getProviderCatalog(provider.id);
|
|
719
|
+
|
|
720
|
+
if (catalog.id === 'ollama') {
|
|
721
|
+
const ollamaBaseUrlInput = await promptText({
|
|
722
|
+
step: 2,
|
|
723
|
+
totalSteps: 4,
|
|
724
|
+
title: 'URL del servidor Ollama',
|
|
725
|
+
subtitle: 'Puede ser local o remoto, por ejemplo http://127.0.0.1:11434 o https://mi-vps:11434.',
|
|
726
|
+
label: 'Base URL',
|
|
727
|
+
defaultValue: catalog.baseUrl,
|
|
728
|
+
placeholder: catalog.baseUrl,
|
|
729
|
+
allowBack: true
|
|
730
|
+
});
|
|
731
|
+
|
|
732
|
+
if (isExit(ollamaBaseUrlInput)) {
|
|
733
|
+
return ollamaBaseUrlInput;
|
|
734
|
+
}
|
|
735
|
+
|
|
736
|
+
if (isBack(ollamaBaseUrlInput)) {
|
|
737
|
+
continue;
|
|
738
|
+
}
|
|
739
|
+
|
|
740
|
+
let normalizedOllamaBaseUrl;
|
|
741
|
+
|
|
742
|
+
try {
|
|
743
|
+
normalizedOllamaBaseUrl = normalizeOllamaBaseUrl(ollamaBaseUrlInput);
|
|
744
|
+
} catch (error) {
|
|
745
|
+
renderInfoScreen({
|
|
746
|
+
title: 'URL invalida',
|
|
747
|
+
subtitle: 'La direccion del servidor Ollama no se pudo normalizar.',
|
|
748
|
+
lines: [
|
|
749
|
+
colorize(error instanceof Error ? error.message : String(error), colors.warning)
|
|
750
|
+
],
|
|
751
|
+
footer: 'Presiona una tecla para volver'
|
|
752
|
+
});
|
|
753
|
+
|
|
754
|
+
const invalidUrlResult = await waitForAnyKey();
|
|
755
|
+
|
|
756
|
+
if (isExit(invalidUrlResult)) {
|
|
757
|
+
return invalidUrlResult;
|
|
758
|
+
}
|
|
759
|
+
|
|
760
|
+
continue;
|
|
761
|
+
}
|
|
762
|
+
|
|
763
|
+
let discovered;
|
|
764
|
+
|
|
765
|
+
try {
|
|
766
|
+
discovered = await fetchOllamaModels({ baseUrl: normalizedOllamaBaseUrl });
|
|
767
|
+
} catch (error) {
|
|
768
|
+
renderInfoScreen({
|
|
769
|
+
title: 'No se pudo conectar a Ollama',
|
|
770
|
+
subtitle: 'Claude Connect intento consultar /api/tags para descubrir modelos.',
|
|
771
|
+
lines: [
|
|
772
|
+
colorize(`Base URL: ${normalizedOllamaBaseUrl}`, colors.soft),
|
|
773
|
+
colorize(error instanceof Error ? error.message : String(error), colors.warning)
|
|
774
|
+
],
|
|
775
|
+
footer: 'Presiona una tecla para volver'
|
|
776
|
+
});
|
|
777
|
+
|
|
778
|
+
const failedConnectionResult = await waitForAnyKey();
|
|
779
|
+
|
|
780
|
+
if (isExit(failedConnectionResult)) {
|
|
781
|
+
return failedConnectionResult;
|
|
782
|
+
}
|
|
783
|
+
|
|
784
|
+
continue;
|
|
785
|
+
}
|
|
786
|
+
|
|
787
|
+
if (discovered.models.length === 0) {
|
|
788
|
+
renderInfoScreen({
|
|
789
|
+
title: 'Sin modelos en Ollama',
|
|
790
|
+
subtitle: 'La conexion esta viva, pero /api/tags no devolvio modelos disponibles.',
|
|
791
|
+
lines: [
|
|
792
|
+
colorize(`Base URL: ${normalizedOllamaBaseUrl}`, colors.soft),
|
|
793
|
+
colorize('Carga al menos un modelo en ese servidor y vuelve a intentarlo.', colors.soft)
|
|
794
|
+
],
|
|
795
|
+
footer: 'Presiona una tecla para volver'
|
|
796
|
+
});
|
|
797
|
+
|
|
798
|
+
const emptyModelsResult = await waitForAnyKey();
|
|
799
|
+
|
|
800
|
+
if (isExit(emptyModelsResult)) {
|
|
801
|
+
return emptyModelsResult;
|
|
802
|
+
}
|
|
803
|
+
|
|
804
|
+
continue;
|
|
805
|
+
}
|
|
806
|
+
|
|
807
|
+
const discoveredModel = await selectFromList({
|
|
808
|
+
step: 3,
|
|
809
|
+
totalSteps: 4,
|
|
810
|
+
title: 'Selecciona el modelo de Ollama',
|
|
811
|
+
subtitle: `Servidor: ${normalizedOllamaBaseUrl}.`,
|
|
812
|
+
items: modelItems(discovered.models),
|
|
813
|
+
allowBack: true,
|
|
814
|
+
detailBuilder: (selected) => [
|
|
815
|
+
`Modelo: ${selected.value.id}`,
|
|
816
|
+
`Categoria: ${selected.value.category}`,
|
|
817
|
+
`Contexto: ${selected.value.contextWindow}`,
|
|
818
|
+
selected.value.summary
|
|
819
|
+
]
|
|
820
|
+
});
|
|
821
|
+
|
|
822
|
+
if (isExit(discoveredModel)) {
|
|
823
|
+
return discoveredModel;
|
|
824
|
+
}
|
|
825
|
+
|
|
826
|
+
if (isBack(discoveredModel)) {
|
|
827
|
+
continue;
|
|
828
|
+
}
|
|
829
|
+
|
|
830
|
+
const authMethod = catalog.authMethods[0];
|
|
831
|
+
const profileName = slugifyProfileName(`${provider.id}-${discoveredModel.id}-${authMethod.id}`);
|
|
832
|
+
const customProvider = {
|
|
833
|
+
...catalog,
|
|
834
|
+
baseUrl: normalizedOllamaBaseUrl
|
|
835
|
+
};
|
|
836
|
+
const profile = buildProfile({
|
|
837
|
+
provider: customProvider,
|
|
838
|
+
model: {
|
|
839
|
+
...discoveredModel,
|
|
840
|
+
apiBaseUrl: normalizedOllamaBaseUrl,
|
|
841
|
+
apiPath: '/api/chat',
|
|
842
|
+
transportMode: 'gateway',
|
|
843
|
+
apiStyle: 'ollama-chat',
|
|
844
|
+
authEnvMode: 'auth_token'
|
|
845
|
+
},
|
|
846
|
+
authMethod,
|
|
847
|
+
profileName,
|
|
848
|
+
apiKeyEnvVar: catalog.defaultApiKeyEnvVar
|
|
849
|
+
});
|
|
850
|
+
|
|
851
|
+
const filePath = await saveProfile(profile);
|
|
852
|
+
renderSummary({ profile, filePath });
|
|
853
|
+
return await waitForAnyKey();
|
|
854
|
+
}
|
|
855
|
+
|
|
706
856
|
const totalSteps = catalog.models.length > 1 ? 3 : 2;
|
|
707
857
|
let model = catalog.models[0];
|
|
708
858
|
|