@johpaz/hive-cli 1.0.2 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,687 @@
1
import { spawnSync } from "child_process";
import * as crypto from "crypto";
import * as fs from "fs";
import * as path from "path";

import * as p from "@clack/prompts";
import * as yaml from "js-yaml";
5
+
6
// CLI version stamped into generated config files; keep in sync with package.json.
const VERSION = "1.0.4";

// Model used for each provider when the user is not offered a choice.
const DEFAULT_MODELS: Record<string, string> = {
  anthropic: "claude-sonnet-4-5",
  openai: "gpt-4o",
  gemini: "gemini-2.0-flash",
  deepseek: "deepseek-chat",
  kimi: "moonshot-v1-128k",
  openrouter: "anthropic/claude-sonnet-4-5",
  ollama: "llama3.2",
};

// Root API endpoint per provider.
// NOTE(review): generateConfig() writes slightly different baseUrl values
// (with /v1 or /v1beta suffixes) — confirm which table is authoritative.
const PROVIDER_BASE_URLS: Record<string, string> = {
  anthropic: "https://api.anthropic.com",
  openai: "https://api.openai.com",
  gemini: "https://generativelanguage.googleapis.com",
  deepseek: "https://api.deepseek.com",
  kimi: "https://api.moonshot.cn",
  openrouter: "https://openrouter.ai/api",
  ollama: "http://localhost:11434",
};

// Placeholder shown in the API-key prompt; empty string means no key is needed.
const API_KEY_PLACEHOLDERS: Record<string, string> = {
  anthropic: "sk-ant-...",
  openai: "sk-...",
  gemini: "AIza...",
  deepseek: "sk-...",
  kimi: "sk-...",
  openrouter: "sk-or-...",
  ollama: "",
};

// Console URL where the user can create an API key for each provider.
const API_KEY_LINKS: Record<string, string> = {
  anthropic: "https://console.anthropic.com/keys",
  openai: "https://platform.openai.com/api-keys",
  gemini: "https://aistudio.google.com/app/apikey",
  deepseek: "https://platform.deepseek.com/api_keys",
  kimi: "https://platform.moonshot.cn/console/api-keys",
  openrouter: "https://openrouter.ai/keys",
  ollama: "llama3.2" === "" ? "" : "",
};

// Models offered in the interactive model picker, per provider.
// `hint` is rendered next to the label by @clack/prompts.
const AVAILABLE_MODELS: Record<string, Array<{ value: string; label: string; hint?: string }>> = {
  anthropic: [
    { value: "claude-sonnet-4-5", label: "Claude Sonnet 4.5", hint: "Recomendado — rápido y capaz" },
    { value: "claude-opus-4-5", label: "Claude Opus 4.5", hint: "Más potente, más lento" },
    { value: "claude-haiku-4-5", label: "Claude Haiku 4.5", hint: "Más rápido y económico" },
  ],
  openai: [
    { value: "gpt-4o", label: "GPT-4o", hint: "Recomendado" },
    { value: "gpt-4o-mini", label: "GPT-4o mini", hint: "Más económico" },
    { value: "o3-mini", label: "o3-mini", hint: "Razonamiento avanzado" },
  ],
  gemini: [
    { value: "gemini-2.0-flash", label: "Gemini 2.0 Flash", hint: "Recomendado — muy rápido" },
    { value: "gemini-2.0-pro", label: "Gemini 2.0 Pro", hint: "Más potente" },
    { value: "gemini-1.5-flash", label: "Gemini 1.5 Flash", hint: "Estable y probado" },
  ],
  deepseek: [
    { value: "deepseek-chat", label: "DeepSeek-V3", hint: "Recomendado — muy económico" },
    { value: "deepseek-reasoner", label: "DeepSeek-R1", hint: "Razonamiento, más lento" },
  ],
  kimi: [
    { value: "moonshot-v1-128k", label: "Kimi 128k", hint: "Recomendado — contexto largo" },
    { value: "moonshot-v1-32k", label: "Kimi 32k", hint: "Más rápido" },
    { value: "moonshot-v1-8k", label: "Kimi 8k", hint: "Más económico" },
  ],
  openrouter: [
    { value: "anthropic/claude-sonnet-4-5", label: "Claude Sonnet via OpenRouter" },
    { value: "google/gemini-2.0-flash", label: "Gemini 2.0 Flash via OpenRouter" },
    { value: "deepseek/deepseek-chat", label: "DeepSeek-V3 via OpenRouter" },
    { value: "meta-llama/llama-3.3-70b", label: "Llama 3.3 70B via OpenRouter" },
  ],
  ollama: [
    { value: "llama3.2", label: "Llama 3.2 3B", hint: "Rápido, requiere ~2GB RAM" },
    { value: "llama3.1:8b", label: "Llama 3.1 8B", hint: "Equilibrado, requiere ~5GB RAM" },
    { value: "mistral", label: "Mistral 7B", hint: "Alternativa a Llama" },
    { value: "qwen2.5-coder", label: "Qwen 2.5 Coder", hint: "Especializado en código" },
  ],
};
86
+
87
// Answers collected by the onboarding wizard, consumed by generateConfig()
// and generateWorkspace().
interface OnboardConfig {
  agentName: string;    // display name of the agent (also written into SOUL.md)
  provider: string;     // one of the keys of DEFAULT_MODELS ("anthropic", "openai", ...)
  model: string;        // provider-specific model id
  apiKey: string;       // empty for the "ollama" provider (no auth needed)
  channel: string;      // "telegram" | "discord" | "webchat" | "none"
  channelToken: string; // bot token for telegram/discord; empty otherwise
  workspace: string;    // absolute path of the agent's working directory
}
96
+
97
+ function generateToken(): string {
98
+ const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
99
+ let token = "";
100
+ for (let i = 0; i < 32; i++) {
101
+ token += chars.charAt(Math.floor(Math.random() * chars.length));
102
+ }
103
+ return token;
104
+ }
105
+
106
+ async function testLLMConnection(provider: string, apiKey: string, model: string): Promise<boolean> {
107
+ if (provider === "ollama") {
108
+ try {
109
+ const response = await fetch("http://localhost:11434/api/tags");
110
+ return response.ok;
111
+ } catch {
112
+ return false;
113
+ }
114
+ }
115
+
116
+ const testMessages = [{ role: "user" as const, content: "Say 'ok' if you can read this." }];
117
+
118
+ try {
119
+ if (provider === "anthropic") {
120
+ const response = await fetch("https://api.anthropic.com/v1/messages", {
121
+ method: "POST",
122
+ headers: {
123
+ "Content-Type": "application/json",
124
+ "x-api-key": apiKey,
125
+ "anthropic-version": "2023-06-01",
126
+ "anthropic-dangerous-direct-browser-access": "true",
127
+ },
128
+ body: JSON.stringify({
129
+ model: model,
130
+ max_tokens: 10,
131
+ messages: testMessages,
132
+ }),
133
+ });
134
+ return response.ok;
135
+ }
136
+
137
+ if (provider === "openai") {
138
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
139
+ method: "POST",
140
+ headers: {
141
+ "Content-Type": "application/json",
142
+ Authorization: `Bearer ${apiKey}`,
143
+ },
144
+ body: JSON.stringify({
145
+ model: model,
146
+ max_tokens: 10,
147
+ messages: testMessages,
148
+ }),
149
+ });
150
+ return response.ok;
151
+ }
152
+
153
+ if (provider === "gemini") {
154
+ const response = await fetch(
155
+ `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`,
156
+ {
157
+ method: "POST",
158
+ headers: { "Content-Type": "application/json" },
159
+ body: JSON.stringify({
160
+ contents: [{ parts: [{ text: "Say ok" }] }],
161
+ }),
162
+ }
163
+ );
164
+ return response.ok;
165
+ }
166
+
167
+ if (provider === "deepseek") {
168
+ const response = await fetch("https://api.deepseek.com/v1/chat/completions", {
169
+ method: "POST",
170
+ headers: {
171
+ "Content-Type": "application/json",
172
+ Authorization: `Bearer ${apiKey}`,
173
+ },
174
+ body: JSON.stringify({
175
+ model: model,
176
+ max_tokens: 10,
177
+ messages: testMessages,
178
+ }),
179
+ });
180
+ return response.ok;
181
+ }
182
+
183
+ if (provider === "kimi") {
184
+ const response = await fetch("https://api.moonshot.cn/v1/chat/completions", {
185
+ method: "POST",
186
+ headers: {
187
+ "Content-Type": "application/json",
188
+ Authorization: `Bearer ${apiKey}`,
189
+ },
190
+ body: JSON.stringify({
191
+ model: model,
192
+ max_tokens: 10,
193
+ messages: testMessages,
194
+ }),
195
+ });
196
+ return response.ok;
197
+ }
198
+
199
+ if (provider === "openrouter") {
200
+ const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
201
+ method: "POST",
202
+ headers: {
203
+ "Content-Type": "application/json",
204
+ Authorization: `Bearer ${apiKey}`,
205
+ },
206
+ body: JSON.stringify({
207
+ model: model,
208
+ max_tokens: 10,
209
+ messages: testMessages,
210
+ }),
211
+ });
212
+ return response.ok;
213
+ }
214
+
215
+ return false;
216
+ } catch {
217
+ return false;
218
+ }
219
+ }
220
+
221
+ async function generateConfig(config: OnboardConfig): Promise<void> {
222
+ const hiveDir = path.join(process.env.HOME || "", ".hive");
223
+ const configPath = path.join(hiveDir, "hive.yaml");
224
+
225
+ if (!fs.existsSync(hiveDir)) {
226
+ fs.mkdirSync(hiveDir, { recursive: true });
227
+ }
228
+
229
+ const configObj: Record<string, unknown> = {
230
+ name: config.agentName,
231
+ version: VERSION,
232
+ gateway: {
233
+ port: 18790,
234
+ host: "127.0.0.1",
235
+ token: generateToken(),
236
+ },
237
+ model: {
238
+ provider: config.provider,
239
+ name: config.model,
240
+ apiKey: config.provider === "ollama" ? "" : config.apiKey,
241
+ },
242
+ agents: {
243
+ list: [
244
+ {
245
+ id: "main",
246
+ default: true,
247
+ name: config.agentName,
248
+ workspace: config.workspace,
249
+ agentDir: path.join(process.env.HOME || "", ".hive", "agents", "main", "agent"),
250
+ },
251
+ ],
252
+ },
253
+ channels: {},
254
+ skills: {
255
+ watch: true,
256
+ allowBundled: [],
257
+ denyBundled: [],
258
+ },
259
+ sessions: {
260
+ pruneAfterHours: 168,
261
+ pruneInterval: 24,
262
+ },
263
+ logging: {
264
+ level: "info",
265
+ dir: path.join(process.env.HOME || "", ".hive", "logs"),
266
+ redactSensitive: true,
267
+ },
268
+ };
269
+
270
+ if (config.provider === "gemini") {
271
+ (configObj.model as Record<string, unknown>).baseUrl = "https://generativelanguage.googleapis.com/v1beta";
272
+ } else if (config.provider === "deepseek") {
273
+ (configObj.model as Record<string, unknown>).baseUrl = "https://api.deepseek.com/v1";
274
+ } else if (config.provider === "kimi") {
275
+ (configObj.model as Record<string, unknown>).baseUrl = "https://api.moonshot.cn/v1";
276
+ } else if (config.provider === "ollama") {
277
+ (configObj.model as Record<string, unknown>).baseUrl = "http://localhost:11434/api";
278
+ }
279
+
280
+ if (config.channel === "telegram" && config.channelToken) {
281
+ configObj.channels = {
282
+ telegram: {
283
+ accounts: {
284
+ default: {
285
+ botToken: config.channelToken,
286
+ },
287
+ },
288
+ },
289
+ };
290
+ } else if (config.channel === "discord" && config.channelToken) {
291
+ configObj.channels = {
292
+ discord: {
293
+ accounts: {
294
+ default: {
295
+ token: config.channelToken,
296
+ },
297
+ },
298
+ },
299
+ };
300
+ }
301
+
302
+ fs.writeFileSync(configPath, yaml.dump(configObj, { lineWidth: -1 }), "utf-8");
303
+ fs.chmodSync(configPath, 0o600);
304
+ }
305
+
306
+ async function generateWorkspace(workspace: string, agentName: string, ethicsChoice: string): Promise<void> {
307
+ if (!fs.existsSync(workspace)) {
308
+ fs.mkdirSync(workspace, { recursive: true });
309
+ }
310
+
311
+ const soulPath = path.join(workspace, "SOUL.md");
312
+ if (!fs.existsSync(soulPath)) {
313
+ fs.writeFileSync(
314
+ soulPath,
315
+ `# ${agentName} — Soul
316
+
317
+ You are ${agentName}, a personal AI assistant.
318
+
319
+ ## Purpose
320
+
321
+ Help the user with their tasks, answer questions, and provide assistance.
322
+
323
+ ## Personality
324
+
325
+ - Helpful and friendly
326
+ - Concise but thorough
327
+ - Proactive in suggesting solutions
328
+
329
+ ## Capabilities
330
+
331
+ - Execute commands and scripts
332
+ - Read and write files
333
+ - Search the web
334
+ - Manage tasks
335
+ `,
336
+ "utf-8"
337
+ );
338
+ }
339
+
340
+ const userPath = path.join(workspace, "USER.md");
341
+ if (!fs.existsSync(userPath)) {
342
+ fs.writeFileSync(
343
+ userPath,
344
+ `# User Profile
345
+
346
+ ## Preferences
347
+
348
+ - Language: Spanish
349
+ - Timezone: Auto-detect
350
+
351
+ ## Notes
352
+
353
+ Add personal notes about the user here.
354
+ `,
355
+ "utf-8"
356
+ );
357
+ }
358
+
359
+ const ethicsPath = path.join(workspace, "ETHICS.md");
360
+ if (!fs.existsSync(ethicsPath) && ethicsChoice === "default") {
361
+ fs.writeFileSync(
362
+ ethicsPath,
363
+ `# Ethical Guidelines
364
+
365
+ ## Core Principles
366
+
367
+ 1. **Respect Privacy**: Never share or expose sensitive user data
368
+ 2. **Honesty**: Provide accurate information, admit uncertainty
369
+ 3. **Safety**: Avoid harmful actions, warn about risks
370
+ 4. **Autonomy**: Respect user decisions and preferences
371
+
372
+ ## Boundaries
373
+
374
+ - Do not execute commands that could harm the system
375
+ - Do not access files outside the workspace without permission
376
+ - Do not share API keys or credentials
377
+
378
+ ## Uncertainty
379
+
380
+ When uncertain about an action:
381
+ 1. Ask for clarification
382
+ 2. Explain the risks
383
+ 3. Suggest alternatives
384
+ `,
385
+ "utf-8"
386
+ );
387
+ }
388
+ }
389
+
390
+ async function installSystemdService(): Promise<void> {
391
+ const home = process.env.HOME || "";
392
+ const systemdDir = path.join(home, ".config", "systemd", "user");
393
+
394
+ if (!fs.existsSync(systemdDir)) {
395
+ fs.mkdirSync(systemdDir, { recursive: true });
396
+ }
397
+
398
+ const serviceContent = `[Unit]
399
+ Description=Hive Personal AI Gateway
400
+ After=network-online.target
401
+ Wants=network-online.target
402
+
403
+ [Service]
404
+ Type=simple
405
+ ExecStart=${home}/.bun/bin/hive start
406
+ ExecStop=${home}/.bun/bin/hive stop
407
+ Restart=on-failure
408
+ RestartSec=5
409
+ Environment=PATH=${home}/.bun/bin:${home}/.npm-global/bin:/usr/local/bin:/usr/bin:/bin
410
+ WorkingDirectory=${home}
411
+
412
+ [Install]
413
+ WantedBy=default.target
414
+ `;
415
+
416
+ const servicePath = path.join(systemdDir, "hive.service");
417
+ fs.writeFileSync(servicePath, serviceContent, "utf-8");
418
+
419
+ const { spawnSync } = require("child_process");
420
+ spawnSync("systemctl", ["--user", "daemon-reload"], { stdio: "inherit" });
421
+ spawnSync("systemctl", ["--user", "enable", "hive"], { stdio: "inherit" });
422
+ }
423
+
424
/**
 * Interactive first-run wizard for the Hive CLI.
 *
 * Collects, in order: agent name, LLM provider, model, API key (verified with
 * one live request, with a single retry on failure), workspace directory,
 * ethics defaults, an optional chat channel, and whether to install the
 * systemd service — then writes the config and workspace files.
 *
 * Cancelling any prompt exits the process (code 0 on user cancel, code 1 when
 * the connection check ultimately fails). All user-facing text is Spanish.
 */
export async function onboard(): Promise<void> {
  p.intro("🐝 Bienvenido a Hive — Personal AI Gateway");

  // --- Agent name -------------------------------------------------------
  const agentName = await p.text({
    message: "¿Cómo se llama tu agente?",
    placeholder: "Hive",
    defaultValue: "Hive",
  });

  if (p.isCancel(agentName)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- LLM provider -----------------------------------------------------
  const provider = await p.select({
    message: "¿Qué proveedor LLM quieres usar?",
    options: [
      { value: "anthropic", label: "Anthropic (Claude)", hint: "Recomendado" },
      { value: "openai", label: "OpenAI (GPT-4o)" },
      { value: "gemini", label: "Google Gemini", hint: "Gemini 2.0 Flash / Pro" },
      { value: "deepseek", label: "DeepSeek", hint: "DeepSeek-V3 / R1, muy económico" },
      { value: "kimi", label: "Kimi (Moonshot AI)", hint: "kimi-k1.5, contexto largo" },
      { value: "openrouter", label: "OpenRouter", hint: "Acceso a todos los modelos" },
      { value: "ollama", label: "Ollama (local, sin costo)" },
    ],
  });

  if (p.isCancel(provider)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Model ------------------------------------------------------------
  const providerKey = provider as string;
  // Fall back to the provider's default model when it has no picker entries.
  const models = AVAILABLE_MODELS[providerKey] || [{ value: DEFAULT_MODELS[providerKey], label: DEFAULT_MODELS[providerKey] }];

  let model: string | symbol;
  if (models.length > 1) {
    model = await p.select({
      message: `¿Qué modelo de ${providerKey} quieres usar?`,
      options: models,
    });
    if (p.isCancel(model)) {
      p.cancel("Onboarding cancelado.");
      process.exit(0);
    }
  } else {
    // Single option: skip the prompt entirely.
    model = models[0].value;
  }

  // --- API key + live connection check (skipped for local Ollama) -------
  let apiKey = "";
  if (providerKey !== "ollama") {
    const link = API_KEY_LINKS[providerKey];
    if (link) {
      p.note(`Obtén tu API key en:\n${link}`, `API key de ${providerKey}`);
    }

    const keyResult = await p.text({
      message: `API key de ${providerKey}:`,
      placeholder: API_KEY_PLACEHOLDERS[providerKey] || "sk-...",
      // Cheap sanity check only; the real validation is the live request below.
      validate: (v) => (!v || v.length < 10 ? "La key parece muy corta" : undefined),
    });

    if (p.isCancel(keyResult)) {
      p.cancel("Onboarding cancelado.");
      process.exit(0);
    }
    apiKey = keyResult;

    const spinner = p.spinner();
    spinner.start("Verificando conexión con el LLM...");

    const connected = await testLLMConnection(providerKey, apiKey, model as string);

    if (!connected) {
      // First attempt failed: explain likely causes and offer ONE retry.
      spinner.stop(`❌ Error conectando con ${providerKey}`);
      p.note(
        `Verifica que:\n` +
        `1. La API key es correcta\n` +
        `2. Tienes saldo/créditos en tu cuenta\n` +
        `3. Tu conexión a internet funciona`,
        "Error de conexión"
      );
      const retry = await p.confirm({
        message: "¿Quieres introducir la API key de nuevo?",
      });
      if (p.isCancel(retry) || !retry) {
        p.cancel("Onboarding cancelado. Ejecuta 'hive onboard' cuando tengas la key correcta.");
        process.exit(1);
      }
      const retryKey = await p.text({
        message: `API key de ${providerKey}:`,
        placeholder: API_KEY_PLACEHOLDERS[providerKey] || "sk-...",
        validate: (v) => (!v || v.length < 10 ? "La key parece muy corta" : undefined),
      });
      if (p.isCancel(retryKey)) {
        p.cancel("Onboarding cancelado.");
        process.exit(0);
      }
      apiKey = retryKey;

      spinner.start("Verificando conexión con el LLM...");
      const retryConnected = await testLLMConnection(providerKey, apiKey, model as string);
      if (!retryConnected) {
        // Second failure is fatal — no further retries.
        spinner.stop(`❌ Error conectando con ${providerKey}`);
        p.cancel("No se pudo verificar la conexión. Verifica tu API key e intenta de nuevo.");
        process.exit(1);
      }
      spinner.stop(`✅ Conexión con ${providerKey} verificada`);
    } else {
      spinner.stop(`✅ Conexión con ${providerKey} verificada`);
    }
  }

  // --- Workspace directory ----------------------------------------------
  const workspaceDefault = `${process.env.HOME}/.hive/workspace`;
  const workspace = await p.text({
    message: "Directorio de trabajo del agente:",
    placeholder: workspaceDefault,
    defaultValue: workspaceDefault,
  });

  if (p.isCancel(workspace)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Ethics defaults ----------------------------------------------------
  const ethicsChoice = await p.select({
    message: "Lineamientos éticos del agente:",
    options: [
      { value: "default", label: "Usar defaults (recomendado)" },
      { value: "skip", label: "Configurar después" },
    ],
  });

  if (p.isCancel(ethicsChoice)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Optional chat channel ---------------------------------------------
  const configureChannel = await p.confirm({
    message: "¿Quieres configurar un canal ahora? (puedes hacerlo después)",
    initialValue: false,
  });

  if (p.isCancel(configureChannel)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  let channel = "none";
  let channelToken = "";

  if (configureChannel) {
    channel = await p.select({
      message: "¿Qué canal quieres configurar?",
      options: [
        { value: "telegram", label: "Telegram", hint: "Recomendado para empezar" },
        { value: "discord", label: "Discord" },
        { value: "webchat", label: "WebChat (UI web local)" },
        { value: "none", label: "Ninguno por ahora" },
      ],
    }) as string;

    // The cast above is for the `channel` variable's string type; isCancel
    // still detects the cancel symbol at runtime.
    if (p.isCancel(channel)) {
      p.cancel("Onboarding cancelado.");
      process.exit(0);
    }

    if (channel === "telegram") {
      p.note(
        "1. Abre Telegram y busca @BotFather\n" +
        "2. Escribe /newbot y sigue las instrucciones\n" +
        "3. Copia el token que te da BotFather",
        "Cómo obtener el token de Telegram"
      );
      const tokenResult = await p.text({
        message: "Token de Telegram BotFather:",
        placeholder: "123456789:ABCdefGHI...",
      });

      if (p.isCancel(tokenResult)) {
        p.cancel("Onboarding cancelado.");
        process.exit(0);
      }
      channelToken = tokenResult;
    } else if (channel === "discord") {
      p.note(
        "1. Ve a https://discord.com/developers/applications\n" +
        "2. Crea una nueva aplicación\n" +
        "3. Ve a Bot → Reset Token\n" +
        "4. Habilita 'Message Content Intent'",
        "Cómo obtener el token de Discord"
      );
      const tokenResult = await p.text({
        message: "Token del bot de Discord:",
        placeholder: "MTk4NjIyNDgzNDcxO...",
      });

      if (p.isCancel(tokenResult)) {
        p.cancel("Onboarding cancelado.");
        process.exit(0);
      }
      channelToken = tokenResult;
    }
  }

  // --- Optional systemd service -------------------------------------------
  const installService = await p.confirm({
    message: "¿Instalar Hive como servicio del sistema? (arranca automáticamente)",
    initialValue: false,
  });

  if (p.isCancel(installService)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Write config + workspace files --------------------------------------
  const spinner = p.spinner();
  spinner.start("Creando configuración...");

  await generateConfig({
    agentName: agentName as string,
    provider: providerKey,
    model: model as string,
    apiKey,
    channel: channel as string,
    channelToken,
    workspace: workspace as string,
  });

  await generateWorkspace(workspace as string, agentName as string, ethicsChoice as string);

  spinner.stop("Configuración creada ✅");

  if (installService) {
    const serviceSpinner = p.spinner();
    serviceSpinner.start("Instalando servicio systemd...");
    await installSystemdService();
    serviceSpinner.stop("Servicio instalado ✅");
  }

  // --- Summary --------------------------------------------------------------
  // WebChat is always available locally, so it is the display fallback.
  const channelDisplay = channel === "none" || channel === "webchat" ? "WebChat (local)" : channel;

  p.outro(
    `🐝 Hive está listo.\n\n` +
    ` Tu agente: ${agentName}\n` +
    ` Proveedor: ${providerKey} (${model})\n` +
    ` Canal: ${channelDisplay}\n\n` +
    ` Comandos:\n` +
    ` ─────────────────────────────────────\n` +
    ` hive start Arrancar el Gateway\n` +
    ` hive chat Chatear en la terminal\n` +
    ` hive status Ver estado\n` +
    ` hive logs --follow Ver logs en tiempo real\n\n` +
    ` Control UI:\n` +
    ` ─────────────────────────────────────\n` +
    ` http://127.0.0.1:18790/ui\n\n` +
    ` Config guardada en:\n` +
    ` ─────────────────────────────────────\n` +
    ` ~/.hive/hive.yaml (permisos 600)\n\n` +
    ` Para añadir más canales o agentes:\n` +
    ` ─────────────────────────────────────\n` +
    ` hive config edit\n` +
    ` hive agents add <nombre>`
  );
}