@johpaz/hive-cli 1.0.3 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,690 @@
import * as fs from "fs";
import * as path from "path";
import { randomBytes } from "crypto";

import * as p from "@clack/prompts";
import * as yaml from "js-yaml";
5
+
6
// Version stamped into generated hive.yaml — keep in sync with package.json.
const VERSION = "1.0.5";
7
+
8
// Fallback model chosen for each provider when no explicit selection exists.
const DEFAULT_MODELS: Record<string, string> = {
  anthropic: "claude-sonnet-4-6",
  openai: "gpt-5.2",
  gemini: "gemini-2.5-flash",
  deepseek: "deepseek-chat",
  kimi: "kimi-k2.5",
  openrouter: "meta-llama/llama-3.3-70b-instruct",
  ollama: "llama3.3:8b",
};

// Base API endpoint per provider.
// NOTE(review): not referenced anywhere in this file — generateConfig()
// hardcodes its own baseUrl values (some with a /v1 suffix). Confirm whether
// this map is consumed elsewhere before relying on it or removing it.
const PROVIDER_BASE_URLS: Record<string, string> = {
  anthropic: "https://api.anthropic.com",
  openai: "https://api.openai.com",
  gemini: "https://generativelanguage.googleapis.com",
  deepseek: "https://api.deepseek.com",
  kimi: "https://api.moonshot.cn",
  openrouter: "https://openrouter.ai/api",
  ollama: "http://localhost:11434",
};

// Placeholder shown in the API-key prompt for each provider.
// Ollama runs locally and needs no key, hence the empty string.
const API_KEY_PLACEHOLDERS: Record<string, string> = {
  anthropic: "sk-ant-...",
  openai: "sk-...",
  gemini: "AIza...",
  deepseek: "sk-...",
  kimi: "sk-...",
  openrouter: "sk-or-...",
  ollama: "",
};

// Console URL where the user can create an API key for each provider
// (shown as a hint before the key prompt; empty for Ollama).
const API_KEY_LINKS: Record<string, string> = {
  anthropic: "https://console.anthropic.com/keys",
  openai: "https://platform.openai.com/api-keys",
  gemini: "https://aistudio.google.com/app/apikey",
  deepseek: "https://platform.deepseek.com/api_keys",
  kimi: "https://platform.moonshot.cn/console/api-keys",
  openrouter: "https://openrouter.ai/keys",
  ollama: "",
};
47
+
48
// Model choices offered per provider in the onboarding select prompt.
// `label`/`hint` are user-facing strings (Spanish) rendered by @clack/prompts;
// `value` is the exact model id written into hive.yaml.
const AVAILABLE_MODELS: Record<string, Array<{ value: string; label: string; hint?: string }>> = {
  anthropic: [
    { value: "claude-sonnet-4-6", label: "Claude Sonnet 4.6", hint: "Recomendado — mejor equilibrio, 1M contexto" },
    { value: "claude-opus-4-6", label: "Claude Opus 4.6", hint: "Más potente — agentic coding, 1M contexto" },
    { value: "claude-haiku-4-6", label: "Claude Haiku 4.6", hint: "Más rápido y económico" },
  ],
  openai: [
    { value: "gpt-5.2", label: "GPT-5.2", hint: "Recomendado — 400K contexto, latest" },
    { value: "gpt-5.1", label: "GPT-5.1", hint: "Versión anterior estable" },
    { value: "gpt-5.2-codex", label: "GPT-5.2 Codex", hint: "Especializado en código" },
    { value: "o4-mini", label: "o4-mini", hint: "Razonamiento avanzado, económico" },
  ],
  gemini: [
    { value: "gemini-3-flash-preview", label: "Gemini 3 Flash (Preview)", hint: "Frontier-class, muy económico" },
    { value: "gemini-2.5-flash", label: "Gemini 2.5 Flash", hint: "Recomendado — estable, rápido" },
    { value: "gemini-2.5-pro", label: "Gemini 2.5 Pro", hint: "Más potente — razonamiento profundo" },
    { value: "gemini-3.1-pro-preview", label: "Gemini 3.1 Pro (Preview)", hint: "Latest — tareas complejas" },
  ],
  deepseek: [
    { value: "deepseek-chat", label: "DeepSeek-V3", hint: "Recomendado — muy económico, capaz" },
    { value: "deepseek-reasoner", label: "DeepSeek-R1", hint: "Razonamiento profundo" },
    { value: "deepseek-coder", label: "DeepSeek Coder", hint: "Especializado en código" },
  ],
  kimi: [
    { value: "kimi-k2.5", label: "Kimi K2.5", hint: "Recomendado — multimodal, agentic, 1T params" },
    { value: "kimi-k2-thinking", label: "Kimi K2 Thinking", hint: "Largo razonamiento" },
    { value: "kimi-k2-turbo-preview", label: "Kimi K2 Turbo", hint: "Rápido, preview" },
  ],
  openrouter: [
    { value: "meta-llama/llama-3.3-70b-instruct", label: "Llama 3.3 70B", hint: "Gratis — GPT-4 level" },
    { value: "google/gemini-2.0-flash-exp:free", label: "Gemini 2.0 Flash", hint: "Gratis — 1M contexto" },
    { value: "deepseek/deepseek-r1:free", label: "DeepSeek R1", hint: "Gratis — razonamiento fuerte" },
    { value: "anthropic/claude-sonnet-4-6", label: "Claude Sonnet 4.6", hint: "Vía OpenRouter" },
  ],
  ollama: [
    { value: "llama3.3:8b", label: "Llama 3.3 8B", hint: "Recomendado — general, ~5GB RAM" },
    { value: "qwen2.5:7b", label: "Qwen 2.5 7B", hint: "Multilingual, código, ~4.5GB RAM" },
    { value: "mistral:7b", label: "Mistral 7B", hint: "Rápido, ~4GB RAM" },
    { value: "phi4:14b", label: "Phi-4 14B", hint: "Mejor calidad, ~8GB RAM" },
  ],
};
89
+
90
/** Answers collected by onboard(), consumed by generateConfig(). */
interface OnboardConfig {
  agentName: string; // Display name of the agent.
  provider: string; // LLM provider id, e.g. "anthropic" or "ollama".
  model: string; // Model id within the provider.
  apiKey: string; // Provider API key; "" for Ollama.
  channel: string; // Selected channel: "telegram", "discord", "webchat" or "none".
  channelToken: string; // Bot token for the channel; "" when no channel chosen.
  workspace: string; // Agent workspace directory.
}
99
+
100
+ function generateToken(): string {
101
+ const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
102
+ let token = "";
103
+ for (let i = 0; i < 32; i++) {
104
+ token += chars.charAt(Math.floor(Math.random() * chars.length));
105
+ }
106
+ return token;
107
+ }
108
+
109
+ async function testLLMConnection(provider: string, apiKey: string, model: string): Promise<boolean> {
110
+ if (provider === "ollama") {
111
+ try {
112
+ const response = await fetch("http://localhost:11434/api/tags");
113
+ return response.ok;
114
+ } catch {
115
+ return false;
116
+ }
117
+ }
118
+
119
+ const testMessages = [{ role: "user" as const, content: "Say 'ok' if you can read this." }];
120
+
121
+ try {
122
+ if (provider === "anthropic") {
123
+ const response = await fetch("https://api.anthropic.com/v1/messages", {
124
+ method: "POST",
125
+ headers: {
126
+ "Content-Type": "application/json",
127
+ "x-api-key": apiKey,
128
+ "anthropic-version": "2023-06-01",
129
+ "anthropic-dangerous-direct-browser-access": "true",
130
+ },
131
+ body: JSON.stringify({
132
+ model: model,
133
+ max_tokens: 10,
134
+ messages: testMessages,
135
+ }),
136
+ });
137
+ return response.ok;
138
+ }
139
+
140
+ if (provider === "openai") {
141
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
142
+ method: "POST",
143
+ headers: {
144
+ "Content-Type": "application/json",
145
+ Authorization: `Bearer ${apiKey}`,
146
+ },
147
+ body: JSON.stringify({
148
+ model: model,
149
+ max_tokens: 10,
150
+ messages: testMessages,
151
+ }),
152
+ });
153
+ return response.ok;
154
+ }
155
+
156
+ if (provider === "gemini") {
157
+ const response = await fetch(
158
+ `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`,
159
+ {
160
+ method: "POST",
161
+ headers: { "Content-Type": "application/json" },
162
+ body: JSON.stringify({
163
+ contents: [{ parts: [{ text: "Say ok" }] }],
164
+ }),
165
+ }
166
+ );
167
+ return response.ok;
168
+ }
169
+
170
+ if (provider === "deepseek") {
171
+ const response = await fetch("https://api.deepseek.com/v1/chat/completions", {
172
+ method: "POST",
173
+ headers: {
174
+ "Content-Type": "application/json",
175
+ Authorization: `Bearer ${apiKey}`,
176
+ },
177
+ body: JSON.stringify({
178
+ model: model,
179
+ max_tokens: 10,
180
+ messages: testMessages,
181
+ }),
182
+ });
183
+ return response.ok;
184
+ }
185
+
186
+ if (provider === "kimi") {
187
+ const response = await fetch("https://api.moonshot.cn/v1/chat/completions", {
188
+ method: "POST",
189
+ headers: {
190
+ "Content-Type": "application/json",
191
+ Authorization: `Bearer ${apiKey}`,
192
+ },
193
+ body: JSON.stringify({
194
+ model: model,
195
+ max_tokens: 10,
196
+ messages: testMessages,
197
+ }),
198
+ });
199
+ return response.ok;
200
+ }
201
+
202
+ if (provider === "openrouter") {
203
+ const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
204
+ method: "POST",
205
+ headers: {
206
+ "Content-Type": "application/json",
207
+ Authorization: `Bearer ${apiKey}`,
208
+ },
209
+ body: JSON.stringify({
210
+ model: model,
211
+ max_tokens: 10,
212
+ messages: testMessages,
213
+ }),
214
+ });
215
+ return response.ok;
216
+ }
217
+
218
+ return false;
219
+ } catch {
220
+ return false;
221
+ }
222
+ }
223
+
224
+ async function generateConfig(config: OnboardConfig): Promise<void> {
225
+ const hiveDir = path.join(process.env.HOME || "", ".hive");
226
+ const configPath = path.join(hiveDir, "hive.yaml");
227
+
228
+ if (!fs.existsSync(hiveDir)) {
229
+ fs.mkdirSync(hiveDir, { recursive: true });
230
+ }
231
+
232
+ const configObj: Record<string, unknown> = {
233
+ name: config.agentName,
234
+ version: VERSION,
235
+ gateway: {
236
+ port: 18790,
237
+ host: "127.0.0.1",
238
+ token: generateToken(),
239
+ },
240
+ model: {
241
+ provider: config.provider,
242
+ name: config.model,
243
+ apiKey: config.provider === "ollama" ? "" : config.apiKey,
244
+ },
245
+ agents: {
246
+ list: [
247
+ {
248
+ id: "main",
249
+ default: true,
250
+ name: config.agentName,
251
+ workspace: config.workspace,
252
+ agentDir: path.join(process.env.HOME || "", ".hive", "agents", "main", "agent"),
253
+ },
254
+ ],
255
+ },
256
+ channels: {},
257
+ skills: {
258
+ watch: true,
259
+ allowBundled: [],
260
+ denyBundled: [],
261
+ },
262
+ sessions: {
263
+ pruneAfterHours: 168,
264
+ pruneInterval: 24,
265
+ },
266
+ logging: {
267
+ level: "info",
268
+ dir: path.join(process.env.HOME || "", ".hive", "logs"),
269
+ redactSensitive: true,
270
+ },
271
+ };
272
+
273
+ if (config.provider === "gemini") {
274
+ (configObj.model as Record<string, unknown>).baseUrl = "https://generativelanguage.googleapis.com/v1beta";
275
+ } else if (config.provider === "deepseek") {
276
+ (configObj.model as Record<string, unknown>).baseUrl = "https://api.deepseek.com/v1";
277
+ } else if (config.provider === "kimi") {
278
+ (configObj.model as Record<string, unknown>).baseUrl = "https://api.moonshot.cn/v1";
279
+ } else if (config.provider === "ollama") {
280
+ (configObj.model as Record<string, unknown>).baseUrl = "http://localhost:11434/api";
281
+ }
282
+
283
+ if (config.channel === "telegram" && config.channelToken) {
284
+ configObj.channels = {
285
+ telegram: {
286
+ accounts: {
287
+ default: {
288
+ botToken: config.channelToken,
289
+ },
290
+ },
291
+ },
292
+ };
293
+ } else if (config.channel === "discord" && config.channelToken) {
294
+ configObj.channels = {
295
+ discord: {
296
+ accounts: {
297
+ default: {
298
+ token: config.channelToken,
299
+ },
300
+ },
301
+ },
302
+ };
303
+ }
304
+
305
+ fs.writeFileSync(configPath, yaml.dump(configObj, { lineWidth: -1 }), "utf-8");
306
+ fs.chmodSync(configPath, 0o600);
307
+ }
308
+
309
+ async function generateWorkspace(workspace: string, agentName: string, ethicsChoice: string): Promise<void> {
310
+ if (!fs.existsSync(workspace)) {
311
+ fs.mkdirSync(workspace, { recursive: true });
312
+ }
313
+
314
+ const soulPath = path.join(workspace, "SOUL.md");
315
+ if (!fs.existsSync(soulPath)) {
316
+ fs.writeFileSync(
317
+ soulPath,
318
+ `# ${agentName} — Soul
319
+
320
+ You are ${agentName}, a personal AI assistant.
321
+
322
+ ## Purpose
323
+
324
+ Help the user with their tasks, answer questions, and provide assistance.
325
+
326
+ ## Personality
327
+
328
+ - Helpful and friendly
329
+ - Concise but thorough
330
+ - Proactive in suggesting solutions
331
+
332
+ ## Capabilities
333
+
334
+ - Execute commands and scripts
335
+ - Read and write files
336
+ - Search the web
337
+ - Manage tasks
338
+ `,
339
+ "utf-8"
340
+ );
341
+ }
342
+
343
+ const userPath = path.join(workspace, "USER.md");
344
+ if (!fs.existsSync(userPath)) {
345
+ fs.writeFileSync(
346
+ userPath,
347
+ `# User Profile
348
+
349
+ ## Preferences
350
+
351
+ - Language: Spanish
352
+ - Timezone: Auto-detect
353
+
354
+ ## Notes
355
+
356
+ Add personal notes about the user here.
357
+ `,
358
+ "utf-8"
359
+ );
360
+ }
361
+
362
+ const ethicsPath = path.join(workspace, "ETHICS.md");
363
+ if (!fs.existsSync(ethicsPath) && ethicsChoice === "default") {
364
+ fs.writeFileSync(
365
+ ethicsPath,
366
+ `# Ethical Guidelines
367
+
368
+ ## Core Principles
369
+
370
+ 1. **Respect Privacy**: Never share or expose sensitive user data
371
+ 2. **Honesty**: Provide accurate information, admit uncertainty
372
+ 3. **Safety**: Avoid harmful actions, warn about risks
373
+ 4. **Autonomy**: Respect user decisions and preferences
374
+
375
+ ## Boundaries
376
+
377
+ - Do not execute commands that could harm the system
378
+ - Do not access files outside the workspace without permission
379
+ - Do not share API keys or credentials
380
+
381
+ ## Uncertainty
382
+
383
+ When uncertain about an action:
384
+ 1. Ask for clarification
385
+ 2. Explain the risks
386
+ 3. Suggest alternatives
387
+ `,
388
+ "utf-8"
389
+ );
390
+ }
391
+ }
392
+
393
+ async function installSystemdService(): Promise<void> {
394
+ const home = process.env.HOME || "";
395
+ const systemdDir = path.join(home, ".config", "systemd", "user");
396
+
397
+ if (!fs.existsSync(systemdDir)) {
398
+ fs.mkdirSync(systemdDir, { recursive: true });
399
+ }
400
+
401
+ const serviceContent = `[Unit]
402
+ Description=Hive Personal AI Gateway
403
+ After=network-online.target
404
+ Wants=network-online.target
405
+
406
+ [Service]
407
+ Type=simple
408
+ ExecStart=${home}/.bun/bin/hive start
409
+ ExecStop=${home}/.bun/bin/hive stop
410
+ Restart=on-failure
411
+ RestartSec=5
412
+ Environment=PATH=${home}/.bun/bin:${home}/.npm-global/bin:/usr/local/bin:/usr/bin:/bin
413
+ WorkingDirectory=${home}
414
+
415
+ [Install]
416
+ WantedBy=default.target
417
+ `;
418
+
419
+ const servicePath = path.join(systemdDir, "hive.service");
420
+ fs.writeFileSync(servicePath, serviceContent, "utf-8");
421
+
422
+ const { spawnSync } = require("child_process");
423
+ spawnSync("systemctl", ["--user", "daemon-reload"], { stdio: "inherit" });
424
+ spawnSync("systemctl", ["--user", "enable", "hive"], { stdio: "inherit" });
425
+ }
426
+
427
/**
 * Interactive onboarding wizard for the Hive CLI.
 *
 * Walks the user through, in order: agent name, LLM provider and model,
 * API key entry with a live connection test (one manual retry), workspace
 * directory, ethics defaults, an optional chat channel (Telegram / Discord /
 * WebChat) and an optional systemd user service. It then writes
 * ~/.hive/hive.yaml, seeds the workspace files, and prints a summary.
 *
 * Every prompt is followed by a p.isCancel() check: Ctrl-C exits with code 0;
 * a connection test that still fails after the retry exits with code 1.
 * All prompt strings are user-facing Spanish.
 */
export async function onboard(): Promise<void> {
  p.intro("🐝 Bienvenido a Hive — Personal AI Gateway");

  // --- Step 1: agent name ------------------------------------------------
  const agentName = await p.text({
    message: "¿Cómo se llama tu agente?",
    placeholder: "Hive",
    defaultValue: "Hive",
  });

  if (p.isCancel(agentName)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Step 2: LLM provider ----------------------------------------------
  const provider = await p.select({
    message: "¿Qué proveedor LLM quieres usar?",
    options: [
      { value: "anthropic", label: "Anthropic (Claude)", hint: "Recomendado — Claude 4.6, 1M contexto" },
      { value: "openai", label: "OpenAI (GPT-5)", hint: "GPT-5.2, 400K contexto" },
      { value: "gemini", label: "Google Gemini", hint: "Gemini 3 Flash/Pro, 1M contexto" },
      { value: "deepseek", label: "DeepSeek", hint: "V3/R1 — muy económico, 1/100 costo" },
      { value: "kimi", label: "Kimi (Moonshot AI)", hint: "K2.5 — multimodal, agentic, 1T params" },
      { value: "openrouter", label: "OpenRouter", hint: "Gratis: Llama 3.3 70B, Gemini Flash" },
      { value: "ollama", label: "Ollama (local)", hint: "Llama 3.3, Qwen 2.5, sin costo" },
    ],
  });

  if (p.isCancel(provider)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // Safe after the isCancel guard: select resolves to the option's value.
  const providerKey = provider as string;
  const models = AVAILABLE_MODELS[providerKey] || [{ value: DEFAULT_MODELS[providerKey], label: DEFAULT_MODELS[providerKey] }];

  // --- Step 3: model (auto-picked when the provider lists only one) -------
  // `symbol` only covers the transient cancel sentinel, guarded right below.
  let model: string | symbol;
  if (models.length > 1) {
    model = await p.select({
      message: `¿Qué modelo de ${providerKey} quieres usar?`,
      options: models,
    });
    if (p.isCancel(model)) {
      p.cancel("Onboarding cancelado.");
      process.exit(0);
    }
  } else {
    model = models[0].value;
  }

  // --- Step 4: API key + connection test (skipped for local Ollama) ------
  let apiKey = "";
  if (providerKey !== "ollama") {
    const link = API_KEY_LINKS[providerKey];
    if (link) {
      p.note(`Obtén tu API key en:\n${link}`, `API key de ${providerKey}`);
    }

    const keyResult = await p.text({
      message: `API key de ${providerKey}:`,
      placeholder: API_KEY_PLACEHOLDERS[providerKey] || "sk-...",
      validate: (v) => (!v || v.length < 10 ? "La key parece muy corta" : undefined),
    });

    if (p.isCancel(keyResult)) {
      p.cancel("Onboarding cancelado.");
      process.exit(0);
    }
    apiKey = keyResult;

    const spinner = p.spinner();
    spinner.start("Verificando conexión con el LLM...");

    const connected = await testLLMConnection(providerKey, apiKey, model as string);

    if (!connected) {
      // First attempt failed: explain likely causes and offer exactly one
      // manual retry before aborting with exit code 1.
      spinner.stop(`❌ Error conectando con ${providerKey}`);
      p.note(
        `Verifica que:\n` +
        `1. La API key es correcta\n` +
        `2. Tienes saldo/créditos en tu cuenta\n` +
        `3. Tu conexión a internet funciona`,
        "Error de conexión"
      );
      const retry = await p.confirm({
        message: "¿Quieres introducir la API key de nuevo?",
      });
      if (p.isCancel(retry) || !retry) {
        p.cancel("Onboarding cancelado. Ejecuta 'hive onboard' cuando tengas la key correcta.");
        process.exit(1);
      }
      const retryKey = await p.text({
        message: `API key de ${providerKey}:`,
        placeholder: API_KEY_PLACEHOLDERS[providerKey] || "sk-...",
        validate: (v) => (!v || v.length < 10 ? "La key parece muy corta" : undefined),
      });
      if (p.isCancel(retryKey)) {
        p.cancel("Onboarding cancelado.");
        process.exit(0);
      }
      apiKey = retryKey;

      spinner.start("Verificando conexión con el LLM...");
      const retryConnected = await testLLMConnection(providerKey, apiKey, model as string);
      if (!retryConnected) {
        spinner.stop(`❌ Error conectando con ${providerKey}`);
        p.cancel("No se pudo verificar la conexión. Verifica tu API key e intenta de nuevo.");
        process.exit(1);
      }
      spinner.stop(`✅ Conexión con ${providerKey} verificada`);
    } else {
      spinner.stop(`✅ Conexión con ${providerKey} verificada`);
    }
  }

  // --- Step 5: workspace directory ----------------------------------------
  const workspaceDefault = `${process.env.HOME}/.hive/workspace`;
  const workspace = await p.text({
    message: "Directorio de trabajo del agente:",
    placeholder: workspaceDefault,
    defaultValue: workspaceDefault,
  });

  if (p.isCancel(workspace)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Step 6: ethics defaults ("default" seeds ETHICS.md later) ----------
  const ethicsChoice = await p.select({
    message: "Lineamientos éticos del agente:",
    options: [
      { value: "default", label: "Usar defaults (recomendado)" },
      { value: "skip", label: "Configurar después" },
    ],
  });

  if (p.isCancel(ethicsChoice)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Step 7: optional chat channel ---------------------------------------
  const configureChannel = await p.confirm({
    message: "¿Quieres configurar un canal ahora? (puedes hacerlo después)",
    initialValue: false,
  });

  if (p.isCancel(configureChannel)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  let channel = "none";
  let channelToken = "";

  if (configureChannel) {
    channel = await p.select({
      message: "¿Qué canal quieres configurar?",
      options: [
        { value: "telegram", label: "Telegram", hint: "Recomendado para empezar" },
        { value: "discord", label: "Discord" },
        { value: "webchat", label: "WebChat (UI web local)" },
        { value: "none", label: "Ninguno por ahora" },
      ],
    }) as string;

    // The cast above is type-level only; the runtime cancel sentinel is
    // still caught here.
    if (p.isCancel(channel)) {
      p.cancel("Onboarding cancelado.");
      process.exit(0);
    }

    if (channel === "telegram") {
      p.note(
        "1. Abre Telegram y busca @BotFather\n" +
        "2. Escribe /newbot y sigue las instrucciones\n" +
        "3. Copia el token que te da BotFather",
        "Cómo obtener el token de Telegram"
      );
      const tokenResult = await p.text({
        message: "Token de Telegram BotFather:",
        placeholder: "123456789:ABCdefGHI...",
      });

      if (p.isCancel(tokenResult)) {
        p.cancel("Onboarding cancelado.");
        process.exit(0);
      }
      channelToken = tokenResult;
    } else if (channel === "discord") {
      p.note(
        "1. Ve a https://discord.com/developers/applications\n" +
        "2. Crea una nueva aplicación\n" +
        "3. Ve a Bot → Reset Token\n" +
        "4. Habilita 'Message Content Intent'",
        "Cómo obtener el token de Discord"
      );
      const tokenResult = await p.text({
        message: "Token del bot de Discord:",
        placeholder: "MTk4NjIyNDgzNDcxO...",
      });

      if (p.isCancel(tokenResult)) {
        p.cancel("Onboarding cancelado.");
        process.exit(0);
      }
      channelToken = tokenResult;
    }
  }

  // --- Step 8: optional systemd user service --------------------------------
  const installService = await p.confirm({
    message: "¿Instalar Hive como servicio del sistema? (arranca automáticamente)",
    initialValue: false,
  });

  if (p.isCancel(installService)) {
    p.cancel("Onboarding cancelado.");
    process.exit(0);
  }

  // --- Step 9: write config + workspace files -------------------------------
  const spinner = p.spinner();
  spinner.start("Creando configuración...");

  await generateConfig({
    agentName: agentName as string,
    provider: providerKey,
    model: model as string,
    apiKey,
    channel: channel as string,
    channelToken,
    workspace: workspace as string,
  });

  await generateWorkspace(workspace as string, agentName as string, ethicsChoice as string);

  spinner.stop("Configuración creada ✅");

  if (installService) {
    const serviceSpinner = p.spinner();
    serviceSpinner.start("Instalando servicio systemd...");
    await installSystemdService();
    serviceSpinner.stop("Servicio instalado ✅");
  }

  // --- Step 10: summary ------------------------------------------------------
  const channelDisplay = channel === "none" || channel === "webchat" ? "WebChat (local)" : channel;

  p.outro(
    `🐝 Hive está listo.\n\n` +
    ` Tu agente: ${agentName}\n` +
    ` Proveedor: ${providerKey} (${model})\n` +
    ` Canal: ${channelDisplay}\n\n` +
    ` Comandos:\n` +
    ` ─────────────────────────────────────\n` +
    ` hive start Arrancar el Gateway\n` +
    ` hive chat Chatear en la terminal\n` +
    ` hive status Ver estado\n` +
    ` hive logs --follow Ver logs en tiempo real\n\n` +
    ` Control UI:\n` +
    ` ─────────────────────────────────────\n` +
    ` http://127.0.0.1:18790/ui\n\n` +
    ` Config guardada en:\n` +
    ` ─────────────────────────────────────\n` +
    ` ~/.hive/hive.yaml (permisos 600)\n\n` +
    ` Para añadir más canales o agentes:\n` +
    ` ─────────────────────────────────────\n` +
    ` hive config edit\n` +
    ` hive agents add <nombre>`
  );
}