@decido/kernel-bridge 1.0.0 → 4.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,2518 @@
1
+ // src/kernel.ts
2
+ import { io } from "socket.io-client";
3
+
4
// src/ai/services/providers/LLMProvider.ts
// Module-level registry of chat providers, keyed by provider id.
var providers = /* @__PURE__ */ new Map();

/** Add (or replace) a provider in the registry and log the registration. */
function registerProvider(provider) {
  providers.set(provider.id, provider);
  console.log(`\u{1F50C} [LLM] Provider registered: ${provider.name} (${provider.id})`);
}

/** Look up a provider by id; undefined when nothing is registered under it. */
function getProvider(id) {
  return providers.get(id);
}

/** Snapshot of every registered provider, in registration order. */
function getAllProviders() {
  return [...providers.values()];
}
16
+
17
// src/ai/services/providers/OllamaProvider.ts
// Talks to a locally running Ollama daemon over its HTTP API. No API key.
var OLLAMA_BASE_URL = "http://localhost:11434";
var DEFAULT_MODEL = "qwen2:latest";
var OllamaProvider = class {
  id = "ollama";
  name = "Ollama (Local)";
  requiresApiKey = false;
  defaultModel = DEFAULT_MODEL;
  /** Names of locally installed models; empty array when the daemon is unreachable. */
  async listModels() {
    try {
      const response = await fetch(`${OLLAMA_BASE_URL}/api/tags`);
      if (!response.ok) return [];
      const payload = await response.json();
      return (payload.models || []).map((entry) => entry.name);
    } catch {
      return [];
    }
  }
  /** Fast liveness probe (2s budget): "available" | "error" | "unavailable". */
  async checkStatus() {
    try {
      const probe = await fetch(`${OLLAMA_BASE_URL}/api/tags`, {
        signal: AbortSignal.timeout(2e3)
      });
      return probe.ok ? "available" : "error";
    } catch {
      return "unavailable";
    }
  }
  /**
   * Non-streaming chat completion against /api/chat.
   * Returns { text, model, latencyMs, tokensUsed }; tokensUsed is Ollama's
   * eval_count and may be undefined.
   */
  async chat(messages, options) {
    const model = options?.model || this.defaultModel;
    const startedAt = Date.now();
    const outbound = messages.map(({ role, content }) => ({ role, content }));
    const response = await fetch(`${OLLAMA_BASE_URL}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model,
        messages: outbound,
        stream: false,
        options: {
          temperature: options?.temperature ?? 0.7,
          num_predict: options?.maxTokens ?? 512
        }
      }),
      // A caller-supplied signal wins; otherwise cap the request at 30s.
      signal: options?.signal ?? AbortSignal.timeout(3e4)
    });
    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`Ollama API error ${response.status}: ${errorText}`);
    }
    const payload = await response.json();
    return {
      text: payload.message?.content || "(sin respuesta)",
      model,
      latencyMs: Date.now() - startedAt,
      tokensUsed: payload.eval_count
    };
  }
};
80
+
81
// src/ai/services/providers/GeminiProvider.ts
// Google Gemini over the v1beta generateContent REST endpoint.
var GEMINI_API_BASE = "https://generativelanguage.googleapis.com/v1beta";
var DEFAULT_MODEL2 = "gemini-flash-lite-latest";
var AVAILABLE_MODELS = [
  "gemini-flash-lite-latest",
  "gemini-flash-lite-latest-lite",
  "gemini-1.5-pro",
  "gemini-flash-latest"
];
var GeminiProvider = class {
  id = "gemini";
  name = "Google Gemini";
  requiresApiKey = true;
  defaultModel = DEFAULT_MODEL2;
  apiKey = null;
  setApiKey(key) {
    this.apiKey = key;
  }
  /** Static catalogue — the model list is not fetched from the API. */
  async listModels() {
    return AVAILABLE_MODELS;
  }
  /** "unconfigured" without a key; otherwise a 5s probe of the models endpoint. */
  async checkStatus() {
    if (!this.apiKey) return "unconfigured";
    try {
      const probe = await fetch(
        `${GEMINI_API_BASE}/models?key=${this.apiKey}`,
        { signal: AbortSignal.timeout(5e3) }
      );
      return probe.ok ? "available" : "error";
    } catch {
      return "unavailable";
    }
  }
  /**
   * Non-streaming chat. The system message (if any) becomes systemInstruction;
   * the remaining turns are mapped to Gemini's user/model roles.
   * NOTE(review): the API key travels as a URL query parameter and can leak
   * into server logs — the x-goog-api-key header would be safer; confirm
   * before changing transport behavior.
   */
  async chat(messages, options) {
    if (!this.apiKey) {
      throw new Error("Gemini API key not configured. Set it in Settings \u2192 Providers.");
    }
    const model = options?.model || this.defaultModel;
    const startedAt = Date.now();
    const systemMsg = messages.find((m) => m.role === "system");
    const contents = [];
    for (const msg of messages) {
      if (msg.role === "system") continue;
      contents.push({
        role: msg.role === "assistant" ? "model" : "user",
        parts: [{ text: msg.content }]
      });
    }
    const body = {
      contents,
      generationConfig: {
        temperature: options?.temperature ?? 0.7,
        maxOutputTokens: options?.maxTokens ?? 2048
      }
    };
    if (systemMsg) {
      body.systemInstruction = { parts: [{ text: systemMsg.content }] };
    }
    const url = `${GEMINI_API_BASE}/models/${model}:generateContent?key=${this.apiKey}`;
    const response = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(body),
      signal: options?.signal ?? AbortSignal.timeout(6e4)
    });
    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`Gemini API error ${response.status}: ${errorBody}`);
    }
    const data = await response.json();
    const firstCandidate = data.candidates?.[0];
    const text = firstCandidate?.content?.parts?.map((p) => p.text || "").join("") || "(sin respuesta)";
    const usage = data.usageMetadata;
    return {
      text,
      model,
      latencyMs: Date.now() - startedAt,
      tokensUsed: (usage?.promptTokenCount || 0) + (usage?.candidatesTokenCount || 0)
    };
  }
};
162
+
163
// src/ai/services/providers/AnthropicProvider.ts
// Anthropic Messages API client: blocking chat plus SSE streaming.
var ANTHROPIC_API_BASE = "https://api.anthropic.com/v1";
var DEFAULT_MODEL3 = "claude-sonnet-4-20250514";
var API_VERSION = "2023-06-01";
var AVAILABLE_MODELS2 = [
  "claude-sonnet-4-20250514",
  "claude-3-5-sonnet-20241022",
  "claude-3-5-haiku-20241022",
  "claude-3-opus-20240229"
];
var AnthropicProvider = class {
  id = "anthropic";
  name = "Anthropic Claude";
  requiresApiKey = true;
  defaultModel = DEFAULT_MODEL3;
  apiKey = null;
  setApiKey(key) {
    this.apiKey = key;
  }
  /** Static catalogue of supported Claude models. */
  async listModels() {
    return AVAILABLE_MODELS2;
  }
  /** Request headers shared by every Messages API call. */
  _headers() {
    return {
      "x-api-key": this.apiKey,
      "anthropic-version": API_VERSION,
      "content-type": "application/json"
    };
  }
  /**
   * Probe with a 1-token request. A 429 still counts as "available": the key
   * was accepted, we are merely rate limited.
   */
  async checkStatus() {
    if (!this.apiKey) return "unconfigured";
    try {
      const probe = await fetch(`${ANTHROPIC_API_BASE}/messages`, {
        method: "POST",
        headers: this._headers(),
        body: JSON.stringify({
          model: this.defaultModel,
          max_tokens: 1,
          messages: [{ role: "user", content: "ping" }]
        }),
        signal: AbortSignal.timeout(5e3)
      });
      return probe.ok || probe.status === 429 ? "available" : "error";
    } catch {
      return "unavailable";
    }
  }
  /**
   * Assemble a Messages API payload: system message is lifted into `system`,
   * remaining turns are sanitized into strictly alternating roles.
   */
  _buildBody(messages, model, options) {
    const systemMsg = messages.find((m) => m.role === "system");
    const turns = messages.filter((m) => m.role !== "system").map((m) => ({ role: m.role, content: m.content }));
    const body = {
      model,
      max_tokens: options?.maxTokens ?? 2048,
      messages: this._sanitizeMessages(turns)
    };
    if (options?.temperature !== void 0) body.temperature = options.temperature;
    if (systemMsg) body.system = systemMsg.content;
    return body;
  }
  /** Non-streaming chat; returns { text, model, latencyMs, tokensUsed }. */
  async chat(messages, options) {
    if (!this.apiKey) {
      throw new Error("Anthropic API key not configured. Set it in Settings \u2192 Providers.");
    }
    const model = options?.model || this.defaultModel;
    const startedAt = Date.now();
    const response = await fetch(`${ANTHROPIC_API_BASE}/messages`, {
      method: "POST",
      headers: this._headers(),
      body: JSON.stringify(this._buildBody(messages, model, options)),
      signal: options?.signal ?? AbortSignal.timeout(6e4)
    });
    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`Anthropic API error ${response.status}: ${errorBody}`);
    }
    const data = await response.json();
    const text = (data.content || []).filter((block) => block.type === "text").map((block) => block.text).join("") || "(sin respuesta)";
    const tokensUsed = (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0);
    return { text, model, latencyMs: Date.now() - startedAt, tokensUsed };
  }
  /** Stream chat completion — yields text chunks */
  async *chatStream(messages, options) {
    if (!this.apiKey) {
      throw new Error("Anthropic API key not configured.");
    }
    const model = options?.model || this.defaultModel;
    const body = this._buildBody(messages, model, options);
    body.stream = true;
    const response = await fetch(`${ANTHROPIC_API_BASE}/messages`, {
      method: "POST",
      headers: this._headers(),
      body: JSON.stringify(body),
      signal: options?.signal ?? AbortSignal.timeout(12e4)
    });
    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`Anthropic stream error ${response.status}: ${errorBody}`);
    }
    const reader = response.body?.getReader();
    if (!reader) return;
    const decoder = new TextDecoder();
    let pending = "";
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      pending += decoder.decode(value, { stream: true });
      const frames = pending.split("\n");
      // Keep the last (possibly incomplete) line for the next chunk.
      pending = frames.pop() || "";
      for (const frame of frames) {
        if (!frame.startsWith("data: ")) continue;
        const data = frame.slice(6).trim();
        if (data === "[DONE]") return;
        try {
          const event = JSON.parse(data);
          if (event.type === "content_block_delta" && event.delta?.text) {
            yield event.delta.text;
          }
        } catch {
          // Non-JSON or partial frame; skip it.
        }
      }
    }
  }
  /** Ensure alternating user/assistant (Anthropic requires this) */
  _sanitizeMessages(msgs) {
    if (msgs.length === 0) return [{ role: "user", content: "..." }];
    const merged = [];
    for (const msg of msgs) {
      const previous = merged[merged.length - 1];
      if (previous && previous.role === msg.role) {
        // Fold consecutive same-role turns into one message.
        previous.content += "\n" + msg.content;
      } else {
        merged.push({ ...msg });
      }
    }
    if (merged[0]?.role !== "user") {
      merged.unshift({ role: "user", content: "..." });
    }
    return merged;
  }
};
324
+
325
// src/ai/services/providers/OpenAIProvider.ts
// OpenAI Chat Completions client; base URL is overridable for compatible APIs.
var OPENAI_API_BASE = "https://api.openai.com/v1";
var DEFAULT_MODEL4 = "gpt-4o";
var AVAILABLE_MODELS3 = [
  "gpt-4o",
  "gpt-4o-mini",
  "gpt-4-turbo",
  "gpt-4",
  "gpt-3.5-turbo",
  "o3-mini"
];
var OpenAIProvider = class {
  id = "openai";
  name = "OpenAI";
  requiresApiKey = true;
  defaultModel = DEFAULT_MODEL4;
  apiKey = null;
  baseUrl = OPENAI_API_BASE;
  setApiKey(key) {
    this.apiKey = key;
  }
  /** Override base URL for OpenAI-compatible endpoints */
  setBaseUrl(url) {
    this.baseUrl = url.replace(/\/$/, "");
  }
  /**
   * Query /models when a key is present, keeping only gpt- and o-prefixed
   * ids; fall back to the static list on any failure or empty result.
   */
  async listModels() {
    if (!this.apiKey) return AVAILABLE_MODELS3;
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        headers: { Authorization: `Bearer ${this.apiKey}` },
        signal: AbortSignal.timeout(5e3)
      });
      if (!response.ok) return AVAILABLE_MODELS3;
      const data = await response.json();
      const chatModels = [];
      for (const entry of data.data || []) {
        if (entry.id.startsWith("gpt-") || entry.id.startsWith("o")) {
          chatModels.push(entry.id);
        }
      }
      return chatModels.length > 0 ? chatModels : AVAILABLE_MODELS3;
    } catch {
      return AVAILABLE_MODELS3;
    }
  }
  /** "unconfigured" without a key; otherwise a 5s probe of /models. */
  async checkStatus() {
    if (!this.apiKey) return "unconfigured";
    try {
      const response = await fetch(`${this.baseUrl}/models`, {
        headers: { Authorization: `Bearer ${this.apiKey}` },
        signal: AbortSignal.timeout(5e3)
      });
      return response.ok ? "available" : "error";
    } catch {
      return "unavailable";
    }
  }
  /** Shared request payload for chat and chatStream. */
  _buildBody(messages, model, options) {
    return {
      model,
      messages: messages.map((m) => ({ role: m.role, content: m.content })),
      temperature: options?.temperature ?? 0.7,
      max_tokens: options?.maxTokens ?? 2048
    };
  }
  /** Non-streaming chat; returns { text, model, latencyMs, tokensUsed }. */
  async chat(messages, options) {
    if (!this.apiKey) {
      throw new Error("OpenAI API key not configured. Set it in Settings \u2192 Providers.");
    }
    const model = options?.model || this.defaultModel;
    const startedAt = Date.now();
    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify(this._buildBody(messages, model, options)),
      signal: options?.signal ?? AbortSignal.timeout(6e4)
    });
    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`OpenAI API error ${response.status}: ${errorBody}`);
    }
    const data = await response.json();
    const text = data.choices?.[0]?.message?.content || "(sin respuesta)";
    const tokensUsed = (data.usage?.prompt_tokens || 0) + (data.usage?.completion_tokens || 0);
    return { text, model, latencyMs: Date.now() - startedAt, tokensUsed };
  }
  /** Stream chat completion — yields text chunks */
  async *chatStream(messages, options) {
    if (!this.apiKey) {
      throw new Error("OpenAI API key not configured.");
    }
    const model = options?.model || this.defaultModel;
    const body = this._buildBody(messages, model, options);
    body.stream = true;
    const response = await fetch(`${this.baseUrl}/chat/completions`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body),
      signal: options?.signal ?? AbortSignal.timeout(12e4)
    });
    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`OpenAI stream error ${response.status}: ${errorBody}`);
    }
    const reader = response.body?.getReader();
    if (!reader) return;
    const decoder = new TextDecoder();
    let pending = "";
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      pending += decoder.decode(value, { stream: true });
      const frames = pending.split("\n");
      // Keep the last (possibly incomplete) line for the next chunk.
      pending = frames.pop() || "";
      for (const frame of frames) {
        if (!frame.startsWith("data: ")) continue;
        const data = frame.slice(6).trim();
        if (data === "[DONE]") return;
        try {
          const chunk = JSON.parse(data);
          const delta = chunk.choices?.[0]?.delta?.content;
          if (delta) yield delta;
        } catch {
          // Non-JSON or partial frame; skip it.
        }
      }
    }
  }
};
458
+
459
// src/ai/services/TokenWallet.ts
// Tracks LLM token usage and estimated spend per provider/model, persisted to
// localStorage (best-effort). Prices are USD per 1M tokens (input/output).
var PRICING = {
  // Ollama / local — free
  "qwen2:latest": { input: 0, output: 0 },
  "llama3:latest": { input: 0, output: 0 },
  "mistral:latest": { input: 0, output: 0 },
  "codellama:latest": { input: 0, output: 0 },
  // Gemini
  "gemini-flash-lite-latest": { input: 0.1, output: 0.4 },
  "gemini-flash-lite-latest-lite": { input: 0.075, output: 0.3 },
  "gemini-1.5-pro": { input: 1.25, output: 5 },
  "gemini-flash-latest": { input: 0.075, output: 0.3 },
  // Anthropic
  "claude-sonnet-4-20250514": { input: 3, output: 15 },
  "claude-3-5-sonnet-20241022": { input: 3, output: 15 },
  "claude-3-5-haiku-20241022": { input: 0.8, output: 4 },
  "claude-3-opus-20240229": { input: 15, output: 75 },
  // OpenAI
  "gpt-4o": { input: 2.5, output: 10 },
  "gpt-4o-mini": { input: 0.15, output: 0.6 },
  "gpt-4-turbo": { input: 10, output: 30 },
  "gpt-4": { input: 30, output: 60 },
  "gpt-3.5-turbo": { input: 0.5, output: 1.5 },
  "o3-mini": { input: 1.1, output: 4.4 }
};
var DEFAULT_PRICING = { input: 1, output: 3 };
var STORAGE_KEY = "decido-token-wallet";
var MAX_HISTORY = 500;
var TokenWalletImpl = class {
  // Chronological usage entries (oldest first), capped at MAX_HISTORY.
  history = [];
  sessionStart = Date.now();
  listeners = /* @__PURE__ */ new Set();
  constructor() {
    this._loadFromStorage();
  }
  // ── Record Usage ────────────────────────────────────────
  /**
   * Record one LLM call. When only tokensUsed is known, input/output are
   * estimated with a 40/60 split. Persists, trims history to MAX_HISTORY,
   * and notifies subscribers with a fresh summary.
   */
  record(entry) {
    const totalTokens = entry.tokensUsed ?? (entry.inputTokens ?? 0) + (entry.outputTokens ?? 0);
    const inputTokens = entry.inputTokens ?? Math.round(totalTokens * 0.4);
    const outputTokens = entry.outputTokens ?? totalTokens - inputTokens;
    const pricing = this._getPricing(entry.model, entry.provider);
    // Prices are per 1M tokens, hence the 1e6 divisors.
    const estimatedCostUsd = inputTokens / 1e6 * pricing.input + outputTokens / 1e6 * pricing.output;
    const usageEntry = {
      provider: entry.provider,
      model: entry.model,
      inputTokens,
      outputTokens,
      totalTokens,
      estimatedCostUsd,
      timestamp: Date.now()
    };
    this.history.push(usageEntry);
    if (this.history.length > MAX_HISTORY) {
      this.history = this.history.slice(-MAX_HISTORY);
    }
    this._saveToStorage();
    this._emit();
    console.log(
      `\u{1F4B0} [Wallet] ${entry.provider}/${entry.model}: ${totalTokens} tokens \u2248 $${estimatedCostUsd.toFixed(6)}`
    );
  }
  // ── Summary ─────────────────────────────────────────────
  /** Aggregate totals plus a per-provider breakdown of tokens/cost/calls. */
  getSummary() {
    const byProvider = {};
    let totalTokens = 0;
    let totalCostUsd = 0;
    for (const entry of this.history) {
      totalTokens += entry.totalTokens;
      totalCostUsd += entry.estimatedCostUsd;
      if (!byProvider[entry.provider]) {
        byProvider[entry.provider] = { tokens: 0, cost: 0, calls: 0 };
      }
      byProvider[entry.provider].tokens += entry.totalTokens;
      byProvider[entry.provider].cost += entry.estimatedCostUsd;
      byProvider[entry.provider].calls += 1;
    }
    return {
      totalTokens,
      totalCostUsd,
      totalCalls: this.history.length,
      byProvider,
      sessionStart: this.sessionStart
    };
  }
  /** Defensive copy of the full history. */
  getHistory() {
    return [...this.history];
  }
  /**
   * Last `count` entries (oldest first). Returns [] for count <= 0 — the
   * previous slice(-count) form returned the ENTIRE history for count === 0,
   * because slice(-0) === slice(0).
   */
  getRecentHistory(count) {
    if (count <= 0) return [];
    return this.history.slice(-count);
  }
  // ── Lifecycle ───────────────────────────────────────────
  /** Drop all usage data and restart the session clock. */
  clearHistory() {
    this.history = [];
    this.sessionStart = Date.now();
    this._saveToStorage();
    this._emit();
  }
  // ── Subscriptions ───────────────────────────────────────
  /** Subscribe to summary updates; returns an unsubscribe function. */
  subscribe(listener) {
    this.listeners.add(listener);
    return () => {
      this.listeners.delete(listener);
    };
  }
  // ── Pricing Lookup ──────────────────────────────────────
  /** Known model price, else free for local backends, else a default guess. */
  _getPricing(model, provider) {
    if (PRICING[model]) return PRICING[model];
    if (provider === "ollama" || provider === "mlx") {
      return { input: 0, output: 0 };
    }
    return DEFAULT_PRICING;
  }
  // ── Persistence ─────────────────────────────────────────
  // Best-effort: localStorage may be missing (SSR/tests) or full — ignore.
  _saveToStorage() {
    try {
      const data = JSON.stringify({
        history: this.history,
        sessionStart: this.sessionStart
      });
      localStorage.setItem(STORAGE_KEY, data);
    } catch {
    }
  }
  _loadFromStorage() {
    try {
      const raw = localStorage.getItem(STORAGE_KEY);
      if (raw) {
        const data = JSON.parse(raw);
        this.history = data.history || [];
        this.sessionStart = data.sessionStart || Date.now();
      }
    } catch {
    }
  }
  // ── Emit ────────────────────────────────────────────────
  // Notify subscribers; a throwing listener must not break the others.
  _emit() {
    const summary = this.getSummary();
    for (const listener of this.listeners) {
      try {
        listener(summary);
      } catch {
      }
    }
  }
};
var tokenWallet = new TokenWalletImpl();
605
+
606
// src/ai/services/InferenceRouter.ts
// Routes chat requests across the registered providers with ordered fallback.
var DEFAULT_CONFIG = {
  preferredBackend: "auto",
  maxLatencyMs: 3e4,
  enableFallback: true,
  temperature: 0.7,
  maxTokens: 512
};
var _initialized = false;
/** Register the built-in providers exactly once (lazy initialization). */
function ensureProviders() {
  if (_initialized) return;
  _initialized = true;
  registerProvider(new OllamaProvider());
  registerProvider(new GeminiProvider());
  registerProvider(new AnthropicProvider());
  registerProvider(new OpenAIProvider());
  console.log("\u{1F50C} [InferenceRouter] 4 providers initialized (Ollama, Gemini, Anthropic, OpenAI)");
}
var _cachedKeys = null;
/** Resolve Tauri's invoke() when running inside a Tauri shell; null on the web. */
async function getInvoke() {
  try {
    const tauriCore = await import("@tauri-apps/api/core");
    return tauriCore.invoke;
  } catch {
    return null;
  }
}
/** Load provider API keys: Tauri storage first, then localStorage, else {}. */
async function loadProviderKeys() {
  if (_cachedKeys) return _cachedKeys;
  try {
    const invoke = await getInvoke();
    if (invoke) {
      const raw = await invoke("load_provider_keys");
      if (raw) {
        _cachedKeys = JSON.parse(raw);
        return _cachedKeys;
      }
    }
  } catch (err) {
    console.warn("[InferenceRouter] Failed to load provider keys:", err);
  }
  try {
    const fromLocal = localStorage.getItem("macia-provider-keys");
    if (fromLocal) {
      _cachedKeys = JSON.parse(fromLocal);
      return _cachedKeys;
    }
  } catch {
    // localStorage unavailable; fall through to empty keys.
  }
  _cachedKeys = {};
  return _cachedKeys;
}
/** Persist keys via Tauri when possible, otherwise localStorage (best-effort). */
async function saveProviderKeys(keys) {
  _cachedKeys = keys;
  try {
    const invoke = await getInvoke();
    if (invoke) {
      await invoke("save_provider_keys", {
        json: JSON.stringify(keys, null, 2)
      });
      return;
    }
  } catch (err) {
    console.warn("[InferenceRouter] Failed to save provider keys via Tauri:", err);
  }
  try {
    localStorage.setItem("macia-provider-keys", JSON.stringify(keys));
  } catch {
  }
}
/** Register providers and push any stored API keys into them. */
async function initProviders() {
  ensureProviders();
  const keys = await loadProviderKeys();
  for (const id of ["gemini", "anthropic", "openai"]) {
    const key = keys[id];
    if (!key) continue;
    getProvider(id)?.setApiKey?.(key);
    console.log(`\u{1F511} [InferenceRouter] ${id} API key loaded`);
  }
}
/** Set a key on the live provider and persist it. */
async function setProviderApiKey(providerId, apiKey) {
  const target = getProvider(providerId);
  if (target?.setApiKey) {
    target.setApiKey(apiKey);
  }
  const keys = await loadProviderKeys();
  keys[providerId] = apiKey;
  await saveProviderKeys(keys);
}
/** Map of provider id -> checkStatus() result, probed sequentially. */
async function getProviderStatuses() {
  ensureProviders();
  const statuses = {};
  for (const provider of getAllProviders()) {
    statuses[provider.id] = await provider.checkStatus();
  }
  return statuses;
}
/** Keyword-based classification of a (Spanish) prompt into a task category. */
function classifyTask(prompt) {
  const lower = prompt.toLowerCase();
  const containsAny = (patterns) => patterns.some((p) => lower.includes(p));
  const toolPatterns = [
    "ejecuta",
    "revisa",
    "escanea",
    "limpia",
    "optimiza",
    "monitorea",
    "diagnostica",
    "analiza el sistema",
    "procesos",
    "disco",
    "memoria",
    "red",
    "seguridad",
    "git",
    "commit",
    "deploy",
    "backup"
  ];
  if (containsAny(toolPatterns)) return "tool-calling";
  const codePatterns = ["codigo", "funcion", "clase", "error", "debug", "refactor", "typescript", "python", "rust"];
  if (containsAny(codePatterns)) return "code";
  const analysisPatterns = ["analiza", "compara", "benchmark", "rendimiento", "metricas", "estadisticas"];
  if (containsAny(analysisPatterns)) return "analysis";
  const creativePatterns = ["genera", "crea", "escribe", "inventa", "imagina", "dise\xF1a"];
  if (containsAny(creativePatterns)) return "creative";
  return "chat";
}
/**
 * Try providers in preference order; on success record usage in the wallet
 * and return the result tagged with the backend that served it. With
 * fallback disabled, the first unavailable/failing provider throws.
 */
async function routeChat(messages, config = {}) {
  ensureProviders();
  const cfg = { ...DEFAULT_CONFIG, ...config };
  for (const providerId of resolveProviderOrder(cfg.preferredBackend)) {
    const provider = getProvider(providerId);
    if (!provider) continue;
    const status = await provider.checkStatus();
    if (status !== "available") {
      if (!cfg.enableFallback) {
        throw new Error(`Provider ${provider.name} is ${status}`);
      }
      continue;
    }
    try {
      const result = await provider.chat(messages, {
        model: void 0,
        // use provider default
        temperature: cfg.temperature,
        maxTokens: cfg.maxTokens
      });
      tokenWallet.record({
        provider: providerId,
        model: result.model,
        tokensUsed: result.tokensUsed
      });
      return { ...result, backend: providerId };
    } catch (err) {
      console.warn(`[InferenceRouter] ${provider.name} failed:`, err);
      if (!cfg.enableFallback) throw err;
    }
  }
  throw new Error("No hay backends de IA disponibles. Inicia Ollama o configura una API key de Gemini.");
}
/** Fallback order for each preferred backend; "auto" favors local Ollama. */
function resolveProviderOrder(preferred) {
  const orders = {
    ollama: ["ollama", "gemini", "anthropic", "openai"],
    gemini: ["gemini", "ollama", "anthropic", "openai"],
    anthropic: ["anthropic", "gemini", "ollama", "openai"],
    openai: ["openai", "gemini", "ollama", "anthropic"]
  };
  return orders[preferred] ?? ["ollama", "gemini", "anthropic", "openai"];
}
/** Single-prompt convenience wrapper; never throws, returns a stub on failure. */
async function routeInference(prompt, config = {}) {
  const chatMessages = [
    { role: "user", content: prompt }
  ];
  try {
    const result = await routeChat(chatMessages, config);
    return {
      text: result.text,
      backend: result.backend,
      model: result.model,
      latencyMs: result.latencyMs,
      tokensPerSecond: result.tokensUsed ? result.tokensUsed / (result.latencyMs / 1e3) : 0,
      toolsInvoked: []
    };
  } catch {
    return {
      text: "No hay backends disponibles. Inicia Ollama o configura un modelo.",
      backend: "auto",
      model: "none",
      latencyMs: 0,
      tokensPerSecond: 0,
      toolsInvoked: []
    };
  }
}
var inferenceRouter = {
  route: routeInference,
  routeChat,
  classifyTask,
  initProviders,
  getProviderStatuses,
  setProviderApiKey,
  loadProviderKeys,
  getAllProviders,
  getProvider,
  tokenWallet
};
828
+
829
+ // src/kernel.ts
830
+ import { createSafeEvent } from "@decido/sdk";
831
/** Lazily import Tauri's core API (only resolvable inside a Tauri shell). */
async function getTauriCore() {
  return import("@tauri-apps/api/core");
}
/** Lazily import Tauri's event API. */
async function getTauriEvent() {
  return import("@tauri-apps/api/event");
}
// Native transport: delegates commands and events to the Rust core via Tauri IPC.
var TauriTransport = class {
  name = "Anillo 0/1 (Tauri IPC)";
  isConnectedToCloud = false;
  unlistenFns = [];
  connectToSwarm(tenantId, url, token) {
    // The Rust core owns the swarm connection; nothing to do from JS.
    console.log(`[Kernel/Tauri] Conexi\xF3n nativa delegada a Core-Rust para tenant: ${tenantId}`);
  }
  /** Invoke a Rust command, always passing the tenant id alongside args. */
  async execute(tenantId, cmd, args) {
    const { invoke } = await getTauriCore();
    return invoke(cmd, { tenantId, ...args });
  }
  /** Native notification; degrades to a log line when the plugin is absent. */
  async notify(title, body) {
    try {
      const { sendNotification } = await import("@tauri-apps/plugin-notification");
      await sendNotification({ title, body });
    } catch {
      console.log("[Kernel/Tauri] Notification plugin not available.");
    }
  }
  /** Haptic feedback; silently degrades where the plugin is unavailable. */
  async vibrate(pattern) {
    try {
      const { vibrate: nativeVibrate } = await import("@tauri-apps/plugin-haptics");
      await nativeVibrate(pattern);
    } catch {
      console.log("[Kernel/Tauri] Haptics not available on this platform.");
    }
  }
  async getEventHistory(tenantId, limit) {
    return this.execute(tenantId, "get_stream_history", { limit });
  }
  /**
   * Subscribe to orchestrator events from the Rust side. Returns an
   * unsubscribe function; it is a no-op until the async listen resolves.
   */
  listenEvents(onEvent) {
    let unlistenFn = null;
    getTauriEvent().then(({ listen }) => {
      listen("orchestrator-event", (event) => {
        onEvent({ type: "ipc_in", channel: "orchestrator-event", payload: event.payload });
      }).then((fn) => {
        unlistenFn = fn;
        this.unlistenFns.push(fn);
      });
    });
    return () => {
      if (!unlistenFn) return;
      unlistenFn();
      this.unlistenFns = this.unlistenFns.filter((f) => f !== unlistenFn);
    };
  }
};
885
// Web transport: joins the swarm over Socket.IO and mocks native features.
var WebSocketTransport = class {
  name = "Sat\xE9lite Web (WebSocket)";
  isConnectedToCloud = false;
  // One socket per tenant id.
  sockets = {};
  eventListeners = /* @__PURE__ */ new Set();
  connectToSwarm(tenantId, url, token) {
    if (this.sockets[tenantId]) return;
    const socket = io(url, { auth: token ? { token } : void 0 });
    socket.on("connect", () => {
      this.isConnectedToCloud = true;
      console.log(`\u2705 [Kernel/WS] Sat\xE9lite conectado al Enjambre del Tenant: ${tenantId}`);
    });
    socket.on("orchestrator-event", (eventPayload) => {
      this._broadcast("orchestrator-event", tenantId, eventPayload);
    });
    socket.on("rpc_broadcast", (eventPayload) => {
      console.log(`[Kernel] \u{1F310} Broadcast Recibido [${tenantId}]:`, eventPayload);
      this._broadcast("rpc_broadcast", tenantId, eventPayload);
    });
    this.sockets[tenantId] = socket;
  }
  /** Fan an inbound socket event out to every registered listener. */
  _broadcast(channel, tenantId, eventPayload) {
    for (const cb of this.eventListeners) {
      cb({ type: "ipc_in", channel, payload: { tenantId, ...eventPayload } });
    }
  }
  /** RPC over the tenant's socket; logs and returns {} when not connected. */
  async execute(tenantId, cmd, args) {
    const socket = this.sockets[tenantId];
    if (!socket) {
      console.warn(`[Kernel Mock] Executing ${cmd} for tenant ${tenantId}`, args);
      return {};
    }
    return new Promise((resolve, reject) => {
      socket.emit("rpc_call", { cmd, args }, (response) => {
        if (response.error) reject(new Error(response.error));
        else resolve(response.data);
      });
    });
  }
  /** No native notifications on the web; log instead. */
  async notify(title, body) {
    console.log(`[Notification Mock] ${title}: ${body}`);
  }
  /** Browser vibration when available; "heavy" maps to a longer pulse. */
  async vibrate(pattern) {
    if (typeof navigator !== "undefined" && navigator.vibrate) {
      navigator.vibrate(pattern === "heavy" ? 200 : 50);
    }
  }
  async getEventHistory(tenantId, limit) {
    console.warn("[Kernel Mock] Cannot fetch stream history in Web Sandbox.");
    return [];
  }
  /** Register a listener for inbound events; returns an unsubscribe function. */
  listenEvents(onEvent) {
    this.eventListeners.add(onEvent);
    return () => this.eventListeners.delete(onEvent);
  }
};
936
/**
 * DecidoKernel — facade over the active IPC transport.
 *
 * Picks TauriTransport when running inside a Tauri webview (detected via
 * `__TAURI_INTERNALS__` on window) and WebSocketTransport (Socket.IO,
 * per the socket.io-client import) otherwise. On top of the transport it
 * adds: per-command rate limiting, a rolling IPC log (capped at 500
 * entries), contract validation of inbound mesh events via createSafeEvent,
 * and typed convenience wrappers around the generic `execute` RPC.
 */
var DecidoKernel = class {
  // Rolling IPC log, newest entry first (trimmed to 500 in addLog).
  logs = [];
  // Subscribers that receive the full log array after every addLog.
  logListeners = /* @__PURE__ */ new Set();
  // Subscribers to validated inbound mesh events.
  universalListeners = /* @__PURE__ */ new Set();
  // Active transport instance (TauriTransport or WebSocketTransport).
  transport;
  // Per-command sliding-window counters used by checkRateLimit.
  rateLimits = /* @__PURE__ */ new Map();
  /**
   * Sliding-window rate limiter: at most 50 calls per command per 1 s
   * window. Returns false (after a console warning) once the budget for
   * the current window is exhausted; execute() then throws.
   */
  checkRateLimit(cmd) {
    const now = Date.now();
    const limitWin = 1e3;
    const maxCalls = 50;
    let tracker = this.rateLimits.get(cmd);
    // Start a fresh window when none exists or the previous one expired.
    if (!tracker || tracker.resetAt < now) {
      tracker = { calls: 0, resetAt: now + limitWin };
      this.rateLimits.set(cmd, tracker);
    }
    tracker.calls++;
    if (tracker.calls > maxCalls) {
      console.warn(`[Kernel Throttle] \u{1F6A8} Command ${cmd} blocked (Rate limit: >50 calls/sec). Possible plugin infinite loop.`);
      return false;
    }
    return true;
  }
  constructor() {
    // Tauri injects __TAURI_INTERNALS__ into the webview's window object.
    const isTauri = typeof window !== "undefined" && "__TAURI_INTERNALS__" in window;
    this.transport = isTauri ? new TauriTransport() : new WebSocketTransport();
    console.log(`[Kernel] Entorno Detectado. Operando en Modo ${this.transport.name}.`);
    // Mirror every inbound transport event into the log and fan it out to
    // universal listeners (unwrapping one nesting level when present).
    this.transport.listenEvents((event) => {
      if (event.type === "ipc_in") {
        this.addLog({ type: "ipc_in", channel: event.channel, payload: event.payload });
        this.notifyUniversalListeners(event.payload.payload || event.payload);
      }
    });
  }
  // Delegates cloud-connection status to the transport.
  get isConnectedToCloud() {
    return this.transport.isConnectedToCloud;
  }
  // Open (or reuse) a swarm connection for the tenant via the transport.
  connectToSwarm(tenantId, url, token) {
    this.transport.connectToSwarm(tenantId, url, token);
  }
  /**
   * Universal notifier with contract validation. Events that fail
   * createSafeEvent's schema are dropped and replaced by a synthetic
   * "system_alert" so subscribers learn about the violation; valid events
   * are forwarded as parsed data.
   */
  notifyUniversalListeners(rawPayload) {
    const parsed = createSafeEvent(rawPayload);
    if (!parsed.success) {
      console.error("[Kernel] \u{1F6A8} Inbound Mesh Event Failed Contract Validation:", parsed.error.format());
      this.universalListeners.forEach((cb) => cb({
        type: "system_alert",
        source: "kernel-bridge",
        payload: {
          level: "error",
          title: "Contract Violation",
          message: "A received event did not match the strict schema. Ignored."
        }
      }));
      return;
    }
    this.universalListeners.forEach((cb) => cb(parsed.data));
  }
  // Feed a locally produced event through the same validation pipeline.
  injectEvent(payload) {
    this.notifyUniversalListeners(payload);
  }
  /**
   * Prepend a log entry (with generated id + timestamp), trim to 500
   * entries, and notify log subscribers. Falls back to Math.random ids
   * when crypto.randomUUID is unavailable.
   */
  addLog(entry) {
    const log = {
      id: typeof crypto !== "undefined" && crypto.randomUUID ? crypto.randomUUID() : Math.random().toString(36).substring(2, 9),
      timestamp: Date.now(),
      ...entry
    };
    this.logs = [log, ...this.logs].slice(0, 500);
    this.logListeners.forEach((listener) => listener(this.logs));
  }
  // Subscribe to log changes; fires immediately and returns an unsubscriber.
  onLogsChange(callback) {
    this.logListeners.add(callback);
    callback(this.logs);
    return () => this.logListeners.delete(callback);
  }
  /**
   * Run an RPC through the transport with rate limiting and logging.
   * Overloaded call shapes: execute(cmd, args) defaults the tenant to
   * "system"; execute(tenantId, cmd, args) is detected by the second
   * argument being a string.
   * @throws {Error} when the per-command rate limit is exceeded, or
   *   whatever the transport rejects with (after logging it).
   */
  async execute(tenantIdOrCmd, cmdOrArgs, argsOpts) {
    let tenantId = "system";
    let cmd = tenantIdOrCmd;
    let args = cmdOrArgs;
    if (typeof cmdOrArgs === "string") {
      tenantId = tenantIdOrCmd;
      cmd = cmdOrArgs;
      args = argsOpts;
    }
    if (!this.checkRateLimit(cmd)) {
      throw new Error(`[Kernel Throttle] Request dropped for ${cmd} (Rate limit exceeded)`);
    }
    const start = performance.now();
    try {
      const result = await this.transport.execute(tenantId, cmd, args);
      this.addLog({ type: "ipc_out", channel: cmd, payload: { tenantId, ...args }, duration: performance.now() - start });
      return result;
    } catch (error) {
      this.addLog({ type: "error", channel: cmd, payload: { tenantId, args, error }, duration: performance.now() - start });
      throw error;
    }
  }
  // Show an OS notification (transport decides how / whether it is real).
  async notify(title, body) {
    return this.transport.notify(title, body);
  }
  // Haptic feedback; pattern defaults to "medium".
  async vibrate(pattern = "medium") {
    return this.transport.vibrate(pattern);
  }
  // ── Thin RPC wrappers: each forwards to execute() with a fixed command ──
  async captureScreen(tenantId = "system") {
    return this.execute(tenantId, "capture_screen");
  }
  async getMachineId(tenantId = "system") {
    return this.execute(tenantId, "get_machine_id");
  }
  async initiateSingularity(tenantId = "system") {
    return this.execute(tenantId, "initiate_singularity");
  }
  async broadcastMessage(tenantId, message) {
    return this.execute(tenantId, "broadcast_message", { message });
  }
  async fetchMemories(tenantId = "system", query, limit = 20) {
    return this.execute(tenantId, "fetch_memories", { query, limit });
  }
  async storeMemory(tenantId = "system", text, metadata = {}) {
    return this.execute(tenantId, "store_memory", { text, metadata });
  }
  async deleteMemory(tenantId = "system", id) {
    return this.execute(tenantId, "delete_memory", { id });
  }
  /**
   * Vision reasoning against a local llava server (OpenAI-compatible API on
   * 127.0.0.1:8080). When previousError is given, it is appended to the
   * prompt as a retry hint. The response is stripped of Markdown code
   * fences and parsed as JSON; parse/network failures are logged and
   * rethrown so runAutonomousStep can retry.
   */
  async reasonWithMLX(base64Image, prompt, previousError) {
    let finalPrompt = prompt;
    if (previousError) {
      finalPrompt += `

WARNING: Your previous response failed to parse as JSON with the following error: ${previousError}. Fix the JSON syntax and return ONLY a valid JSON object.`;
    }
    try {
      const response = await fetch("http://127.0.0.1:8080/v1/chat/completions", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          model: "llava",
          messages: [{
            role: "user",
            content: [
              { type: "text", text: finalPrompt },
              { type: "image_url", image_url: { url: base64Image } }
            ]
          }],
          max_tokens: 150,
          temperature: 0.1
        })
      });
      const data = await response.json();
      let content = data.choices[0].message.content.trim();
      // Strip ```json ... ``` or ``` ... ``` fences before parsing.
      if (content.startsWith("```json")) content = content.replace(/```json\n?/, "").replace(/```$/, "").trim();
      else if (content.startsWith("```")) content = content.replace(/```\n?/, "").replace(/```$/, "").trim();
      return JSON.parse(content);
    } catch (e) {
      console.error("[MLX Bridge] Reasoning failed:", e);
      throw e;
    }
  }
  // Execute a single UI action (e.g. a mouse click decided by the model).
  async executeAction(tenantId = "system", action) {
    return this.execute(tenantId, "execute_action", { action });
  }
  /**
   * One perceive→reason→act step: capture the screen, ask the vision model
   * for a JSON action (up to 3 attempts, feeding the previous parse error
   * back in), then execute the action. Silently returns if no valid
   * decision is obtained.
   */
  async runAutonomousStep(tenantId = "system", objective) {
    const image = await this.captureScreen(tenantId);
    let decision = null, retries = 0, lastError = null;
    const instruction = `You are an AI OS agent. Based on the screen image, what action should be taken to achieve: "${objective}"? Return ONLY a strict JSON object of the action, no markdown. E.g. {"type": "mouseClick", "x": 100, "y": 200, "button": "left", "count": 1}`;
    while (retries <= 2 && !decision) {
      try {
        decision = await this.reasonWithMLX(image, instruction, lastError || void 0);
      } catch (error) {
        lastError = error.message || String(error);
        retries++;
      }
    }
    if (!decision) return;
    await this.executeAction(tenantId, decision);
  }
  /**
   * Text-only inference via inferenceRouter.
   * NOTE(review): taskType is accepted but not forwarded to the router —
   * only forceProvider influences backend selection; confirm intended.
   */
  async askAI(prompt, taskType = "reasoning", forceProvider) {
    const result = await inferenceRouter.route(prompt, { preferredBackend: forceProvider || "auto" });
    return result.text;
  }
  // Subscribe to validated mesh events; returns an unsubscriber.
  onEvent(callback) {
    this.universalListeners.add(callback);
    return () => this.universalListeners.delete(callback);
  }
  // Fetch persisted event history from the transport (may be unsupported).
  async getEventHistory(tenantId, limit = 50) {
    return this.transport.getEventHistory(tenantId, limit);
  }
};
// Package-level singleton kernel shared by all consumers of this module.
var kernel = new DecidoKernel();
1124
+
1125
+ // src/ai/services/OllamaService.ts
1126
// OllamaService configuration: local Ollama HTTP endpoint and the default
// chat model used when callers do not specify one.
var OLLAMA_BASE_URL2 = "http://localhost:11434";
var DEFAULT_MODEL5 = "qwen2:latest";
1128
/**
 * Assemble the Spanish system prompt for the local agent: static identity
 * and response rules plus a freshly built live-context section.
 * NOTE(review): toolSchemas is hard-coded to "" here, so the
 * "Herramientas disponibles" section is currently empty.
 */
function buildSystemPrompt() {
  const liveContext = buildLiveContext();
  const toolSchemas = "";
  return `- Est\xE1s integrado en DecidoOS, una plataforma empresarial de escritorio
- Corres localmente en la m\xE1quina del usuario \u2014 todo es privado
- Puedes ejecutar herramientas del sistema para ayudar al usuario

${liveContext}

## Herramientas disponibles

${toolSchemas}

## Reglas de respuesta
1. Responde siempre en espa\xF1ol a menos que el usuario hable en otro idioma
2. S\xE9 directo \u2014 no des introducciones largas
3. Si puedes resolver algo con una herramienta, USA LA HERRAMIENTA nativa en vez de solo describirla
4. Mant\xE9n respuestas bajo 300 palabras
5. Si no sabes algo, dilo honestamente
6. NO inventes resultados de herramientas \u2014 espera el resultado real
7. Cuando el usuario pregunte sobre el estado del sistema, USA LOS DATOS del "Estado actual del sistema" que tienes arriba \u2014 esos son datos reales y recientes`;
}
1150
/**
 * Collect a best-effort snapshot of live application state for the system
 * prompt: wall-clock time, watchdog metrics/alerts, app-store context, and
 * agent memory. Each source is read from an optional global and wrapped in
 * an empty try/catch so a missing or broken module never breaks prompt
 * construction. Returns one newline-joined string.
 */
function buildLiveContext() {
  const parts = ["## Estado actual del sistema"];
  const now = (/* @__PURE__ */ new Date()).toLocaleString("es-CO", { hour12: false });
  parts.push(`Hora actual: ${now}`);
  // 1) System watchdog (globalThis.__systemWatchdog): CPU/mem/disk/network
  //    metrics plus up to 5 recent non-dismissed alerts.
  try {
    const watchdogModule = globalThis.__systemWatchdog;
    if (watchdogModule) {
      const snapshot = watchdogModule.getLastSnapshot?.();
      if (snapshot) {
        const metrics = [];
        // Explicit null checks: 0 is a valid metric value and must be kept.
        if (snapshot.cpuPercent !== null) metrics.push(`CPU: ${snapshot.cpuPercent.toFixed(1)}%`);
        if (snapshot.memoryPercent !== null) metrics.push(`Memoria: ${snapshot.memoryPercent.toFixed(1)}%`);
        if (snapshot.diskFreeGB !== null) metrics.push(`Disco libre: ${snapshot.diskFreeGB.toFixed(1)} GB`);
        if (snapshot.connectionCount !== null) metrics.push(`Conexiones de red: ${snapshot.connectionCount}`);
        if (metrics.length > 0) {
          parts.push(`M\xE9tricas del sistema: ${metrics.join(" | ")}`);
        }
      }
      const alerts = watchdogModule.getAlerts?.()?.filter((a) => !a.dismissed).slice(-5) ?? [];
      if (alerts.length > 0) {
        parts.push("Alertas recientes:");
        for (const alert of alerts) {
          const emoji = alert.severity === "critical" ? "\u{1F6A8}" : "\u26A0\uFE0F";
          parts.push(` ${emoji} ${alert.title}`);
        }
      }
    }
  } catch {
  }
  // 2) App store snapshot (globalThis.__appStore): canvas, git, insights.
  try {
    const appStore = globalThis.__appStore;
    const ctx = appStore?.getState?.()?.contextSnapshot;
    if (ctx) {
      if (ctx.canvasNodeCount > 0) parts.push(`Canvas: ${ctx.canvasNodeCount} nodos`);
      if (ctx.gitBranch) parts.push(`Git: rama ${ctx.gitBranch}${ctx.gitModifiedFiles ? ` (${ctx.gitModifiedFiles} archivos modificados)` : ""}`);
      if (ctx.activeInsights > 0) parts.push(`Insights activos: ${ctx.activeInsights}`);
      if (ctx.criticalInsightsSummary?.length > 0) {
        parts.push("Insights cr\xEDticos: " + ctx.criticalInsightsSummary.slice(0, 3).join("; "));
      }
    }
  } catch {
  }
  // 3) Agent long-term memory (globalThis.__agentMemory), appended verbatim.
  try {
    const memoryModule = globalThis.__agentMemory;
    if (memoryModule) {
      const memContext = memoryModule.buildMemoryContext?.();
      if (memContext) {
        parts.push("");
        parts.push(memContext);
      }
    }
  } catch {
  }
  return parts.join("\n");
}
1205
/**
 * Extract structured tool invocations from an LLM chat message.
 * Reads the Ollama/OpenAI-style `message.tool_calls` array and returns
 * `{ name, args }` pairs; entries without a function name are skipped and
 * missing argument objects default to `{}`.
 */
function parseToolCalls(message) {
  const toolCalls = message?.tool_calls;
  if (!toolCalls || !Array.isArray(toolCalls)) return [];
  const parsed = [];
  for (const entry of toolCalls) {
    const fn = entry.function;
    if (!fn?.name) continue;
    parsed.push({ name: fn.name, args: fn.arguments || {} });
  }
  return parsed;
}
1219
/**
 * Remove inline <tool_call>...</tool_call> markup an LLM may emit inside
 * its visible text. Returns the trimmed remainder, or "" for empty/nullish
 * input.
 */
function stripToolCalls(text) {
  if (!text) return "";
  const cleaned = text.replace(/<tool_call>[\s\S]*?<\/tool_call>/g, "");
  return cleaned.trim();
}
1223
// Rolling in-memory chat transcript shared by chat() and chatStream().
var conversationHistory = [];
// Keep only the most recent 20 messages to bound the prompt size.
var MAX_HISTORY2 = 20;
// Append a message and trim the transcript to the last MAX_HISTORY2 entries.
// Note: trimming reassigns the module variable rather than mutating in place.
function addToHistory(msg) {
  conversationHistory.push(msg);
  if (conversationHistory.length > MAX_HISTORY2) {
    conversationHistory = conversationHistory.slice(-MAX_HISTORY2);
  }
}
1231
// Reset the shared transcript (e.g. when the user starts a new chat).
// Reassigns a fresh array; prior references to the old array are untouched.
function clearConversationHistory() {
  conversationHistory = [];
}
1234
/**
 * Probe the local Ollama server with a 2-second timeout.
 * Resolves true only when /api/tags answers with an OK status; any network
 * error or timeout resolves false instead of throwing.
 */
async function isOllamaAvailable() {
  try {
    const probe = await fetch(`${OLLAMA_BASE_URL2}/api/tags`, { signal: AbortSignal.timeout(2e3) });
    return probe.ok;
  } catch {
    return false;
  }
}
1244
/**
 * List the model names installed on the local Ollama server.
 * Best-effort: resolves [] on HTTP failure or any network error.
 */
async function listModels() {
  try {
    const res = await fetch(`${OLLAMA_BASE_URL2}/api/tags`);
    if (!res.ok) return [];
    const body = await res.json();
    const entries = body.models || [];
    return entries.map((entry) => entry.name);
  } catch {
    return [];
  }
}
1254
/**
 * Non-streaming chat turn: record the user message, prepend the live system
 * prompt, and route through routeChat (backend selection happens there).
 * Returns the assistant's text. A TimeoutError from the routing layer is
 * converted into a friendly Spanish message instead of propagating.
 * @param {string} userMessage
 * @param {{temperature?: number}} [options]
 */
async function chat(userMessage, options) {
  const userMsg = { role: "user", content: userMessage };
  addToHistory(userMsg);
  const messages = [
    { role: "system", content: buildSystemPrompt() },
    ...conversationHistory
  ];
  try {
    const result = await routeChat(messages, {
      temperature: options?.temperature ?? 0.7,
      maxTokens: 512
    });
    const assistantContent = result?.text || "";
    // Record the reply so the next turn sees it in conversationHistory.
    addToHistory({ role: "assistant", content: assistantContent });
    if (result) {
      console.log(`\u{1F9E0}[Chat] Response via ${result.backend} (${result.model}) in ${result.latencyMs} ms`);
    }
    return assistantContent;
  } catch (error) {
    // AbortSignal.timeout rejections surface as DOMException "TimeoutError".
    if (error instanceof DOMException && error.name === "TimeoutError") {
      return "\u23F3 La respuesta del modelo tard\xF3 demasiado. Intenta con una pregunta m\xE1s corta.";
    }
    throw error;
  }
}
1279
/**
 * Stream a chat completion from the local Ollama server as an async
 * generator. Yields each content fragment as it arrives; on completion the
 * full assistant reply is appended to the shared conversation history.
 *
 * Fix: the previous implementation split every network chunk on "\n" and
 * JSON.parsed each piece independently, so an NDJSON line split across two
 * chunks failed to parse and its tokens were silently dropped. We now keep
 * the trailing partial line in a buffer across reads and parse only
 * complete lines, flushing the buffer when the stream ends.
 *
 * @param {string} userMessage - user's message; recorded in history.
 * @param {{model?: string, temperature?: number}} [options]
 * @throws {Error} when the HTTP response is not OK or has no body.
 */
async function* chatStream(userMessage, options) {
  const model = options?.model || DEFAULT_MODEL5;
  const userMsg = { role: "user", content: userMessage };
  addToHistory(userMsg);
  const messages = [
    { role: "system", content: buildSystemPrompt() },
    ...conversationHistory
  ];
  const res = await fetch(`${OLLAMA_BASE_URL2}/api/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model,
      messages,
      stream: true,
      options: {
        temperature: options?.temperature ?? 0.7,
        num_predict: 512
      }
    })
  });
  if (!res.ok || !res.body) {
    throw new Error(`Ollama stream error: ${res.status}`);
  }
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let fullContent = "";
  let buffer = "";
  // Parse one complete NDJSON line; returns the content fragment or null.
  // Malformed lines are skipped, matching the previous silent-skip behavior.
  const extractContent = (line) => {
    try {
      const data = JSON.parse(line);
      return data.message?.content || null;
    } catch {
      return null;
    }
  };
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      // The last element may be an incomplete line; keep it for next chunk.
      buffer = lines.pop() ?? "";
      for (const line of lines) {
        if (!line) continue;
        const content = extractContent(line);
        if (content) {
          fullContent += content;
          yield content;
        }
      }
    }
    // Flush any final line left in the buffer when the stream ends without
    // a trailing newline.
    buffer += decoder.decode();
    if (buffer) {
      const content = extractContent(buffer);
      if (content) {
        fullContent += content;
        yield content;
      }
    }
  } finally {
    reader.releaseLock();
  }
  addToHistory({ role: "assistant", content: fullContent });
}
1328
+
1329
+ // src/ai/services/MLXBridge.ts
1330
/**
 * Invoke a shell command through the Tauri backend's `run_shell_command`.
 * `@tauri-apps/api/core` is imported lazily so this bundle can load in a
 * plain browser; outside Tauri the import (or invoke) rejects, and the
 * error is logged and rethrown to the caller.
 * @param {string} cmd - executable name (e.g. "python3")
 * @param {string[]} args - argv passed to the command
 * @returns {Promise<*>} result from the Rust command — presumably captured
 *   stdout as a string (mlxGenerate calls .split on it); confirm in backend.
 */
async function runShellCommand(cmd, args) {
  try {
    const { invoke } = await import("@tauri-apps/api/core");
    const result = await invoke("run_shell_command", {
      command: cmd,
      args
    });
    return result;
  } catch (err) {
    console.error("[MLXBridge] Shell command failed:", err);
    throw err;
  }
}
1343
// Local Ollama HTTP endpoint used by the MLX bridge helpers below.
var OLLAMA_URL = "http://localhost:11434";
1344
/**
 * One-shot, non-streaming chat against the local Ollama HTTP API.
 * Returns the reply text plus rough throughput stats derived from
 * `eval_count`; on any failure resolves a placeholder result instead of
 * throwing.
 */
async function ollamaChat(model, prompt, temperature = 0.7) {
  const startedAt = Date.now();
  const payload = {
    model,
    messages: [{ role: "user", content: prompt }],
    stream: false,
    options: { temperature }
  };
  try {
    const res = await fetch(`${OLLAMA_URL}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(payload)
    });
    const data = await res.json();
    const latencyMs = Date.now() - startedAt;
    return {
      text: data.message?.content ?? "",
      tokensPerSecond: data.eval_count ? data.eval_count / (latencyMs / 1e3) : 0,
      totalTokens: data.eval_count ?? 0,
      latencyMs,
      model
    };
  } catch {
    return { text: "Error: Ollama no disponible", tokensPerSecond: 0, totalTokens: 0, latencyMs: Date.now() - startedAt, model };
  }
}
1370
/**
 * Names of the models installed in the local Ollama daemon.
 * Resolves [] on HTTP or network failure (never throws).
 */
async function ollamaListModels() {
  try {
    const res = await fetch(`${OLLAMA_URL}/api/tags`);
    if (!res.ok) return [];
    const payload = await res.json();
    const installed = payload.models || [];
    return installed.map((entry) => entry.name);
  } catch {
    return [];
  }
}
1380
/**
 * Ask the local Ollama daemon to download a model by name.
 * Fire-and-forget: the HTTP response (a pull-progress stream) is not
 * consumed and non-OK statuses are not checked — only a network-level
 * fetch failure makes the returned promise reject.
 */
async function ollamaPull(model) {
  await fetch(`${OLLAMA_URL}/api/pull`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ name: model })
  });
}
1387
/**
 * Run a one-shot generation through the `mlx_lm.generate` CLI (python3 -m).
 * Token stats are approximated from whitespace-split output; failures are
 * folded into the returned text rather than thrown.
 */
async function mlxGenerate(model, prompt) {
  const startedAt = Date.now();
  const cliArgs = [
    "-m",
    "mlx_lm.generate",
    "--model",
    model,
    "--prompt",
    prompt,
    "--max-tokens",
    "512"
  ];
  try {
    const output = await runShellCommand("python3", cliArgs);
    return {
      text: output,
      tokensPerSecond: 0,
      // parsed from output in production
      totalTokens: output.split(" ").length,
      latencyMs: Date.now() - startedAt,
      model
    };
  } catch (err) {
    return { text: `MLX Error: ${err}`, tokensPerSecond: 0, totalTokens: 0, latencyMs: Date.now() - startedAt, model };
  }
}
1412
/**
 * Aggregate the model catalog: models actually installed in Ollama (marked
 * loaded, path-prefixed "ollama:") followed by a static MLX-community
 * catalog of downloadable models (marked not loaded).
 */
async function listAvailableModels() {
  const models = [];
  const ollamaModels = await ollamaListModels();
  for (const name of ollamaModels) {
    models.push({
      name,
      family: "llm",
      path: `ollama:${name}`,
      loaded: true
    });
  }
  // Hard-coded catalog of HuggingFace MLX model paths offered for download.
  const mlxCatalog = [
    { name: "Qwen2.5-7B-Instruct-4bit", family: "llm", paramCount: "7B", quantization: "4bit", path: "mlx-community/Qwen2.5-7B-Instruct-4bit", loaded: false },
    { name: "Mistral-7B-Instruct-v0.3-4bit", family: "llm", paramCount: "7B", quantization: "4bit", path: "mlx-community/Mistral-7B-Instruct-v0.3-4bit", loaded: false },
    { name: "Llama-3.2-3B-Instruct-4bit", family: "llm", paramCount: "3B", quantization: "4bit", path: "mlx-community/Llama-3.2-3B-Instruct-4bit", loaded: false },
    { name: "Mixtral-8x7B-Instruct-v0.1-4bit", family: "llm", paramCount: "46.7B", quantization: "4bit", path: "mlx-community/Mixtral-8x7B-Instruct-v0.1-4bit", loaded: false },
    { name: "Whisper-large-v3", family: "audio", path: "mlx-community/whisper-large-v3", loaded: false },
    { name: "CLIP-ViT-B-32", family: "vision", path: "openai/clip-vit-base-patch32", loaded: false },
    { name: "FLUX.1-schnell-4bit", family: "image-gen", path: "mlx-community/FLUX.1-schnell-4bit-quantized", loaded: false },
    { name: "Stable-Diffusion-XL", family: "image-gen", path: "mlx-community/sdxl-turbo", loaded: false }
  ];
  models.push(...mlxCatalog);
  return models;
}
1436
/**
 * Dispatch a prompt by model-path prefix: "ollama:<name>" goes to the
 * Ollama HTTP API, anything else to the MLX CLI.
 */
async function runInference(modelPath, prompt, options) {
  const OLLAMA_PREFIX = "ollama:";
  if (!modelPath.startsWith(OLLAMA_PREFIX)) {
    return mlxGenerate(modelPath, prompt);
  }
  const model = modelPath.slice(OLLAMA_PREFIX.length);
  return ollamaChat(model, prompt, options?.temperature);
}
1443
/** Run the same prompt against two models in parallel for A/B comparison. */
async function compareModels(modelA, modelB, prompt) {
  const runs = [modelA, modelB].map((m) => runInference(m, prompt));
  const [a, b] = await Promise.all(runs);
  return { a, b };
}
1450
/**
 * Download a model: "ollama:"-prefixed identifiers go through the Ollama
 * pull API, anything else through `huggingface-cli download`.
 */
async function pullModel(model) {
  if (!model.startsWith("ollama:")) {
    await runShellCommand("huggingface-cli", ["download", model]);
    return;
  }
  await ollamaPull(model.replace("ollama:", ""));
}
1457
/**
 * Micro-benchmark a model over three fixed prompts, run sequentially.
 * Aggregates generated tokens and latency; memoryMb is reported as 0
 * because no system metrics are available here.
 */
async function benchmarkModel(modelPath) {
  const testPrompts = [
    "Explain quantum computing in one paragraph.",
    "Write a Python function to sort a list.",
    "What is the capital of Colombia?"
  ];
  let totalTokens = 0;
  let totalMs = 0;
  for (const prompt of testPrompts) {
    const run = await runInference(modelPath, prompt);
    totalTokens += run.totalTokens;
    totalMs += run.latencyMs;
  }
  return {
    model: modelPath,
    promptTokens: testPrompts.join(" ").split(" ").length,
    generatedTokens: totalTokens,
    tokensPerSecond: totalTokens / (totalMs / 1e3),
    latencyMs: totalMs / testPrompts.length,
    memoryMb: 0
    // would need system metrics
  };
}
1480
/**
 * Simulated LoRA fine-tuning loop: every 500 ms emits synthetic progress
 * (exponentially decaying loss plus noise) until stopped or all steps
 * (epochs * 100) have elapsed. Returns a handle with stop() and
 * onProgress(cb); only the most recently registered callback is invoked.
 */
function startLoRATraining(config) {
  let cancelled = false;
  let onProgressCb = null;
  const epochs = config.epochs ?? 10;
  const totalSteps = epochs * 100;
  const startedAt = Date.now();
  let step = 0;
  const timer = setInterval(() => {
    if (cancelled || step >= totalSteps) {
      clearInterval(timer);
      return;
    }
    step += 1;
    onProgressCb?.({
      epoch: Math.floor(step / 100) + 1,
      totalEpochs: epochs,
      step,
      totalSteps,
      loss: 2.5 * Math.exp(-step / 200) + 0.3 + Math.random() * 0.1,
      learningRate: config.learningRate ?? 1e-5,
      tokensPerSecond: 150 + Math.random() * 50,
      elapsedMs: Date.now() - startedAt
    });
  }, 500);
  return {
    stop: () => {
      cancelled = true;
      clearInterval(timer);
    },
    onProgress: (cb) => {
      onProgressCb = cb;
    }
  };
}
1514
+
1515
+ // src/ai/services/EmbeddingService.ts
1516
// Embedding configuration: local Ollama endpoint, the embedding model used
// by ollamaEmbed, and the dimensionality of the TF-IDF hashing fallback.
var OLLAMA_URL2 = "http://localhost:11434";
var EMBED_MODEL = "nomic-embed-text";
var TFIDF_DIMS = 256;
1519
/**
 * Embed text via Ollama's /api/embed endpoint (nomic-embed-text).
 * On any failure (network, non-OK status) it logs a warning and falls back
 * to the local TF-IDF hash embedding, so callers always get a vector.
 */
async function ollamaEmbed(text) {
  const startedAt = Date.now();
  try {
    const res = await fetch(`${OLLAMA_URL2}/api/embed`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: EMBED_MODEL, input: text })
    });
    if (!res.ok) throw new Error(`Ollama embed failed: ${res.status}`);
    const data = await res.json();
    // Newer API returns `embeddings: [[...]]`, older returns `embedding`.
    const vector = data.embeddings?.[0] || data.embedding || [];
    return {
      vector,
      dimensions: vector.length,
      model: EMBED_MODEL,
      latencyMs: Date.now() - startedAt
    };
  } catch (err) {
    console.warn("[EmbeddingService] Ollama not available, falling back to TF-IDF", err);
    return tfidfEmbed(text);
  }
}
1541
/**
 * Deterministic local fallback embedding: lowercase, strip punctuation,
 * keep tokens longer than 2 chars, hash each token into one of TFIDF_DIMS
 * buckets (djb2-style hash), weight by 1/tokenCount, then L2-normalize.
 */
function tfidfEmbed(text) {
  const startedAt = Date.now();
  const tokens = text
    .toLowerCase()
    .replace(/[^\w\s]/g, "")
    .split(/\s+/)
    .filter((t) => t.length > 2);
  const vector = new Array(TFIDF_DIMS).fill(0);
  for (const token of tokens) {
    let hash = 0;
    for (let i = 0; i < token.length; i++) {
      hash = (hash << 5) - hash + token.charCodeAt(i) | 0;
    }
    vector[Math.abs(hash) % TFIDF_DIMS] += 1 / tokens.length;
  }
  const norm = Math.sqrt(vector.reduce((acc, v) => acc + v * v, 0));
  if (norm > 0) {
    for (let i = 0; i < vector.length; i++) vector[i] /= norm;
  }
  return {
    vector,
    dimensions: TFIDF_DIMS,
    model: "tfidf-fallback",
    latencyMs: Date.now() - startedAt
  };
}
1564
// Module-level switch between the Ollama and TF-IDF embedding backends.
var preferredBackend = "ollama";
1565
/**
 * Embed one text using the preferred backend: "ollama" (which itself falls
 * back to TF-IDF on failure) or pure local TF-IDF.
 */
async function embed(text) {
  return preferredBackend === "ollama" ? ollamaEmbed(text) : tfidfEmbed(text);
}
1571
/** Embed many texts concurrently; results resolve in input order. */
async function embedBatch(texts) {
  const pending = texts.map((text) => embed(text));
  return Promise.all(pending);
}
1574
// Select which backend embed() uses ("ollama" routes through ollamaEmbed;
// any other value uses the TF-IDF fallback directly).
function setBackend(backend) {
  preferredBackend = backend;
}
1577
/**
 * Cosine similarity of two equal-length numeric vectors, in [-1, 1].
 * Returns 0 when the lengths differ or either vector has zero norm.
 */
function cosineSimilarity(a, b) {
  if (a.length !== b.length) return 0;
  let dot = 0;
  let sqA = 0;
  let sqB = 0;
  a.forEach((value, i) => {
    dot += value * b[i];
    sqA += value * value;
    sqB += b[i] * b[i];
  });
  const denom = Math.sqrt(sqA) * Math.sqrt(sqB);
  return denom === 0 ? 0 : dot / denom;
}
1588
+ var embeddingService = { embed, embedBatch, setBackend, cosineSimilarity };
1589
+
1590
+ // src/ai/services/LocalAgentResponder.ts
1591
// Fast-path intent matchers for the offline local responder.
// Spanish/English greetings, optionally followed by an agent name.
var GREETING_PATTERNS = [
  /^(hola|hey|hi|hello|buenas|qué tal|que tal|saludos)\s*(decido|decidoos|agente|asistente)?/i,
  /^(buenos?\s*(días|tardes|noches))/i,
  /^(órale|oye|ey)\s*(decido)?/i
];
// Requests for help or a capability listing.
var HELP_PATTERNS = [
  /^(ayuda|help|qué puedes hacer|que puedes hacer|comandos|tools|herramientas)/i,
  /^(qué|que)\s*(sabes|haces|puedes)/i
];
// System-status queries ("estado", "cómo estás", ...).
var SYSTEM_PATTERNS = [
  /^(estado|status|sistema|system)\s*(del\s*sistema)?/i,
  /^(cómo|como)\s*(estás|estas|está|va)/i
];
1604
/**
 * Spanish salutation for the current local hour:
 * before 12 → "Buenos días", 12-17 → "Buenas tardes", else "Buenas noches".
 */
function getTimeGreeting() {
  const hour = new Date().getHours();
  if (hour >= 18) return "Buenas noches";
  return hour < 12 ? "Buenos d\xEDas" : "Buenas tardes";
}
1610
/**
 * Markdown welcome banner shown for greeting intents: time-of-day greeting
 * plus a static status report and a pointer to the "ayuda" command.
 */
function getWelcomeMessage() {
  const greeting = getTimeGreeting();
  return `${greeting}, operador. Soy **DecidoOS Agent** \u{1F9E0}

Estoy en l\xEDnea y operativo. Aqu\xED est\xE1 mi reporte de estado:

\u25B8 **Modo**: Local-first (sin dependencia de Cortex)
\u25B8 **Estado**: \u2705 ONLINE

Puedo auditar tu seguridad, escanear tu flota de procesos y m\xE1s.

Escribe **"ayuda"** para ver mis comandos principales.`;
}
1623
/**
 * Static Markdown capability listing shown for help intents; the commands
 * named here map to the regex branches in processLocalMessage.
 */
function getHelpMessage() {
  return `\u{1F6E0}\uFE0F **Capacidades de DecidoOS Agent**

\u2022 "escanea puertos" \u2014 Auditar puertos abiertos
\u2022 "escanea red" \u2014 Monitorear conexiones de red
\u2022 "escanea flota" \u2014 Ver procesos Agent / Node / Python
\u2022 "lista tareas" \u2014 Ver tareas / playbooks
\u2022 "estado" \u2014 Estado del sistema

**Voice Mode:**
Presiona el bot\xF3n verde \u{1F4DE} para activar conversaci\xF3n por voz con Cortex.`;
}
1635
/**
 * Build a Markdown system-status report. CPU/RAM come from the Tauri
 * commands get_cpu_usage / get_memory_usage via a lazy import; in a plain
 * browser that import fails and both fields degrade to "(requiere Tauri)".
 */
async function getSystemStatus() {
  let cpuInfo = "N/A";
  let memInfo = "N/A";
  try {
    const { invoke } = await import("@tauri-apps/api/core");
    const cpu = await invoke("get_cpu_usage");
    const mem = await invoke("get_memory_usage");
    cpuInfo = `${cpu.usage?.toFixed(1)}%`;
    // mem.used / mem.total appear to be bytes (divided to GB) — TODO confirm
    // against the Rust command's return shape.
    memInfo = `${(mem.used / 1024 / 1024 / 1024).toFixed(1)} GB / ${(mem.total / 1024 / 1024 / 1024).toFixed(1)} GB`;
  } catch {
    cpuInfo = "(requiere Tauri)";
    memInfo = "(requiere Tauri)";
  }
  return `\u{1F4CA} **Estado del Sistema DecidoOS**

\u25B8 **CPU**: ${cpuInfo}
\u25B8 **RAM**: ${memInfo}
\u25B8 **Cortex**: Local mode
\u25B8 **Uptime Session**: ${getSessionUptime()}

Todo operativo. \xBFNecesitas algo m\xE1s?`;
}
1657
/**
 * Format elapsed session time (from performance.now(), i.e. time since
 * page/process start) as "Xm" under an hour, otherwise "Xh Ym".
 */
function getSessionUptime() {
  const totalMinutes = Math.floor(performance.now() / 6e4);
  const hours = Math.floor(totalMinutes / 60);
  const remainder = totalMinutes % 60;
  return hours > 0 ? `${hours}h ${remainder}m` : `${totalMinutes}m`;
}
1664
/**
 * Offline intent router: maps a user utterance to a canned response and/or
 * tool invocations without any LLM. Checked in order: greetings, help,
 * system status, then regex-matched commands (exec, port scan, network
 * scan, fleet scan, task list, forensic scan, vulnerability scan); anything
 * else gets a fallback message pointing at Cortex.
 * @param {string} input - raw user text
 * @returns {Promise<{text: string, toolCalls?: Array<{name: string, args: object}>}>}
 */
async function processLocalMessage(input) {
  const trimmed = input.trim();
  if (GREETING_PATTERNS.some((p) => p.test(trimmed))) {
    return { text: getWelcomeMessage() };
  }
  if (HELP_PATTERNS.some((p) => p.test(trimmed))) {
    return { text: getHelpMessage() };
  }
  if (SYSTEM_PATTERNS.some((p) => p.test(trimmed))) {
    const status = await getSystemStatus();
    return { text: status };
  }
  // "ejecuta <cmd>" → arbitrary shell command through the tactical tool.
  const execMatch = trimmed.match(/^(ejecuta|run|exec|corre)\s+(.+)/i);
  if (execMatch) {
    const command = execMatch[2];
    return {
      text: `\u26A1 Ejecutando: \`${command}\`...`,
      toolCalls: [{ name: "tactical.execute_command", args: { command } }]
    };
  }
  // "escanea puertos" → port audit (loopback excluded).
  const scanMatch = trimmed.match(/^(escanea|scan|audita|audit)\s*(puertos|ports)/i);
  if (scanMatch) {
    return {
      text: "\u{1F50D} Escaneando puertos del sistema...",
      toolCalls: [{ name: "security.audit_ports", args: { includeLoopback: false } }]
    };
  }
  // "escanea red" → network connection monitor.
  const networkMatch = trimmed.match(/^(escanea|scan|monitorea|monitor)\s*(red|network|conexiones|connections)/i);
  if (networkMatch) {
    return {
      text: "\u{1F310} Analizando conexiones de red...",
      toolCalls: [{ name: "security.network_monitor", args: {} }]
    };
  }
  // "escanea flota" → agent/process fleet scan.
  const fleetMatch = trimmed.match(/^(escanea|scan)\s*(flota|fleet|procesos|processes)/i);
  if (fleetMatch) {
    return {
      text: "\u{1F6F8} Escaneando flotilla de agentes...",
      toolCalls: [{ name: "tactical.scan_fleet", args: {} }]
    };
  }
  // "lista tareas" → task/playbook listing.
  const listMatch = trimmed.match(/^(lista|list|muestra|show)\s*(tareas|tasks|playbooks)/i);
  if (listMatch) {
    return {
      text: "\u{1F4CB} Listando tareas...",
      toolCalls: [{ name: "tactical.list_tasks", args: { status: "all" } }]
    };
  }
  // "analiza [proceso] [pid]" → forensic scan; pid is optional.
  const forensicMatch = trimmed.match(/^(analiza|analyze|forense|forensic)\s*(proceso|process)?\s*(\d+)?/i);
  if (forensicMatch) {
    const pid = forensicMatch[3] ? parseInt(forensicMatch[3]) : void 0;
    return {
      text: pid ? `\u{1F52C} Analizando proceso PID ${pid}...` : "\u{1F52C} Escaneando procesos sospechosos...",
      toolCalls: [{ name: "security.forensic_scan", args: { pid, deep: true } }]
    };
  }
  // "vulnerabilidades" / "audit npm" → dependency vulnerability scan.
  const vulnMatch = trimmed.match(/^(vulnerabilidades|vulnerabilities|vuln|audit\s*npm)/i);
  if (vulnMatch) {
    return {
      text: "\u{1F6E1}\uFE0F Escaneando vulnerabilidades en dependencias...",
      toolCalls: [{ name: "security.scan_vulnerabilities", args: {} }]
    };
  }
  // Fallback: echo (truncated to 100 chars) and point the user at Cortex.
  return {
    text: `\u{1F916} Entendido: "${trimmed.slice(0, 100)}"

No tengo una respuesta local para esto. Para respuestas inteligentes con IA, conecta a **Cortex** (mindframe-cortex).

Mientras tanto, prueba:
\u2022 "ayuda" \u2014 ver mis capacidades
\u2022 "ejecuta ls" \u2014 ejecutar un comando
\u2022 "escanea puertos" \u2014 auditar seguridad
\u2022 "estado" \u2014 ver estado del sistema`
  };
}
1739
+
1740
+ // src/ai/services/PeerMesh.ts
1741
+ import { v4 as uuid } from "uuid";
1742
// Peer-mesh tuning: default WebSocket port, heartbeat cadence (10 s),
// peer staleness timeout (30 s), and the maximum simultaneous connections.
var DEFAULT_PORT = 9876;
var HEARTBEAT_INTERVAL = 1e4;
var PEER_TIMEOUT = 3e4;
var MAX_PEERS = 8;
1746
+ var PeerMeshImpl = class {
1747
+ myId = uuid();
1748
+ myName = this._getHostname();
1749
+ peers = /* @__PURE__ */ new Map();
1750
+ connections = /* @__PURE__ */ new Map();
1751
+ isHostingFlag = false;
1752
+ isConnectedFlag = false;
1753
+ heartbeatTimer = null;
1754
+ cleanupTimer = null;
1755
+ // Event listeners
1756
+ peersListeners = /* @__PURE__ */ new Set();
1757
+ hostingListeners = /* @__PURE__ */ new Set();
1758
+ connectionListeners = /* @__PURE__ */ new Set();
1759
+ resultListeners = /* @__PURE__ */ new Set();
1760
+ // ── Host Mode ─────────────────────────────────────────
1761
+ /**
1762
+ * Start hosting a peer group.
1763
+ * Note: In browser WebSocket server is not available. This method
1764
+ * is a placeholder for Tauri (Rust-side server). In browser mode,
1765
+ * we simulate by acting as a "relay" through a shared connection.
1766
+ */
1767
+ startHost(port = DEFAULT_PORT) {
1768
+ if (this.isHostingFlag) return;
1769
+ this.isHostingFlag = true;
1770
+ this._emitHosting(true);
1771
+ this._startHeartbeat();
1772
+ console.log(`\u{1F310} [PeerMesh] Hosting on port ${port} (peer: ${this.myId.slice(0, 8)})`);
1773
+ console.log(`\u{1F310} [PeerMesh] \u26A0\uFE0F Browser-mode: WebSocket server requires Tauri backend.`);
1774
+ console.log(`\u{1F310} [PeerMesh] Use connectToPeer() from another instance to connect.`);
1775
+ }
1776
+ stopHost() {
1777
+ if (!this.isHostingFlag) return;
1778
+ this.isHostingFlag = false;
1779
+ this._stopHeartbeat();
1780
+ this._broadcast({ type: "leave", peerId: this.myId, peerName: this.myName });
1781
+ this._disconnectAll();
1782
+ this._emitHosting(false);
1783
+ console.log("\u{1F310} [PeerMesh] Stopped hosting");
1784
+ }
1785
+ // ── Client Mode ───────────────────────────────────────
1786
+ connectToPeer(address) {
1787
+ if (this.connections.size >= MAX_PEERS) {
1788
+ console.warn(`\u{1F310} [PeerMesh] Max peers (${MAX_PEERS}) reached`);
1789
+ return;
1790
+ }
1791
+ const wsUrl = address.startsWith("ws") ? address : `ws://${address}`;
1792
+ console.log(`\u{1F310} [PeerMesh] Connecting to ${wsUrl}...`);
1793
+ try {
1794
+ const ws = new WebSocket(wsUrl);
1795
+ ws.onopen = () => {
1796
+ console.log(`\u{1F310} [PeerMesh] \u2705 Connected to ${address}`);
1797
+ this.connections.set(address, ws);
1798
+ this.isConnectedFlag = true;
1799
+ this._emitConnection(true);
1800
+ this._send(ws, {
1801
+ type: "join",
1802
+ peerId: this.myId,
1803
+ peerName: this.myName
1804
+ });
1805
+ this._startHeartbeat();
1806
+ };
1807
+ ws.onmessage = (event) => {
1808
+ try {
1809
+ const msg = JSON.parse(event.data);
1810
+ this._handleMessage(msg, address);
1811
+ } catch {
1812
+ }
1813
+ };
1814
+ ws.onclose = () => {
1815
+ console.log(`\u{1F310} [PeerMesh] Disconnected from ${address}`);
1816
+ this.connections.delete(address);
1817
+ for (const [id, peer] of this.peers) {
1818
+ if (peer.address === address) {
1819
+ this.peers.delete(id);
1820
+ }
1821
+ }
1822
+ this._emitPeers();
1823
+ if (this.connections.size === 0) {
1824
+ this.isConnectedFlag = false;
1825
+ this._emitConnection(false);
1826
+ this._stopHeartbeat();
1827
+ }
1828
+ };
1829
+ ws.onerror = (err) => {
1830
+ console.error(`\u{1F310} [PeerMesh] Connection error to ${address}:`, err);
1831
+ };
1832
+ } catch (err) {
1833
+ console.error(`\u{1F310} [PeerMesh] Failed to connect:`, err);
1834
+ }
1835
+ }
1836
+ disconnect() {
1837
+ this._broadcast({ type: "leave", peerId: this.myId, peerName: this.myName });
1838
+ this._disconnectAll();
1839
+ }
1840
+ // ── Share Results ─────────────────────────────────────
1841
+ shareResult(result) {
1842
+ const fullResult = {
1843
+ ...result,
1844
+ peerId: this.myId,
1845
+ timestamp: Date.now()
1846
+ };
1847
+ this._broadcast({
1848
+ type: "share-result",
1849
+ peerId: this.myId,
1850
+ peerName: this.myName,
1851
+ payload: fullResult
1852
+ });
1853
+ console.log(`\u{1F310} [PeerMesh] Shared result to ${this.connections.size} peers`);
1854
+ }
1855
+ // ── Event Subscriptions ───────────────────────────────
1856
+ onPeersChanged(cb) {
1857
+ this.peersListeners.add(cb);
1858
+ return () => {
1859
+ this.peersListeners.delete(cb);
1860
+ };
1861
+ }
1862
+ onHostingChanged(cb) {
1863
+ this.hostingListeners.add(cb);
1864
+ return () => {
1865
+ this.hostingListeners.delete(cb);
1866
+ };
1867
+ }
1868
+ onConnectionChanged(cb) {
1869
+ this.connectionListeners.add(cb);
1870
+ return () => {
1871
+ this.connectionListeners.delete(cb);
1872
+ };
1873
+ }
1874
+ onSharedResult(cb) {
1875
+ this.resultListeners.add(cb);
1876
+ return () => {
1877
+ this.resultListeners.delete(cb);
1878
+ };
1879
+ }
1880
+ // ── Getters ───────────────────────────────────────────
1881
+ getPeers() {
1882
+ return Array.from(this.peers.values());
1883
+ }
1884
+ getMyId() {
1885
+ return this.myId;
1886
+ }
1887
+ getMyName() {
1888
+ return this.myName;
1889
+ }
1890
+ // ── Message Handling ──────────────────────────────────
1891
+ _handleMessage(msg, fromAddress) {
1892
+ switch (msg.type) {
1893
+ case "join":
1894
+ this.peers.set(msg.peerId, {
1895
+ id: msg.peerId,
1896
+ name: msg.peerName,
1897
+ address: fromAddress,
1898
+ joinedAt: Date.now(),
1899
+ lastSeen: Date.now()
1900
+ });
1901
+ this._emitPeers();
1902
+ console.log(`\u{1F310} [PeerMesh] Peer joined: ${msg.peerName} (${msg.peerId.slice(0, 8)})`);
1903
+ break;
1904
+ case "leave":
1905
+ this.peers.delete(msg.peerId);
1906
+ this._emitPeers();
1907
+ console.log(`\u{1F310} [PeerMesh] Peer left: ${msg.peerName}`);
1908
+ break;
1909
+ case "heartbeat":
1910
+ if (this.peers.has(msg.peerId)) {
1911
+ this.peers.get(msg.peerId).lastSeen = Date.now();
1912
+ }
1913
+ break;
1914
+ case "share-result":
1915
+ if (msg.payload) {
1916
+ const result = msg.payload;
1917
+ for (const cb of this.resultListeners) {
1918
+ try {
1919
+ cb(result);
1920
+ } catch {
1921
+ }
1922
+ }
1923
+ }
1924
+ break;
1925
+ case "peers-update":
1926
+ if (Array.isArray(msg.payload)) {
1927
+ for (const p of msg.payload) {
1928
+ if (p.id !== this.myId && !this.peers.has(p.id)) {
1929
+ this.peers.set(p.id, p);
1930
+ }
1931
+ }
1932
+ this._emitPeers();
1933
+ }
1934
+ break;
1935
+ }
1936
+ }
1937
+ // ── Internal Helpers ──────────────────────────────────
1938
+ _send(ws, msg) {
1939
+ if (ws.readyState === WebSocket.OPEN) {
1940
+ ws.send(JSON.stringify(msg));
1941
+ }
1942
+ }
1943
+ _broadcast(msg) {
1944
+ for (const ws of this.connections.values()) {
1945
+ this._send(ws, msg);
1946
+ }
1947
+ }
1948
+ _disconnectAll() {
1949
+ for (const ws of this.connections.values()) {
1950
+ try {
1951
+ ws.close();
1952
+ } catch {
1953
+ }
1954
+ }
1955
+ this.connections.clear();
1956
+ this.peers.clear();
1957
+ this.isConnectedFlag = false;
1958
+ this._stopHeartbeat();
1959
+ this._emitPeers();
1960
+ this._emitConnection(false);
1961
+ }
1962
+ _startHeartbeat() {
1963
+ if (this.heartbeatTimer) return;
1964
+ this.heartbeatTimer = setInterval(() => {
1965
+ this._broadcast({
1966
+ type: "heartbeat",
1967
+ peerId: this.myId,
1968
+ peerName: this.myName
1969
+ });
1970
+ }, HEARTBEAT_INTERVAL);
1971
+ this.cleanupTimer = setInterval(() => {
1972
+ const now = Date.now();
1973
+ let changed = false;
1974
+ for (const [id, peer] of this.peers) {
1975
+ if (now - peer.lastSeen > PEER_TIMEOUT) {
1976
+ this.peers.delete(id);
1977
+ changed = true;
1978
+ console.log(`\u{1F310} [PeerMesh] Peer timed out: ${peer.name}`);
1979
+ }
1980
+ }
1981
+ if (changed) this._emitPeers();
1982
+ }, PEER_TIMEOUT);
1983
+ }
1984
+ _stopHeartbeat() {
1985
+ if (this.heartbeatTimer) {
1986
+ clearInterval(this.heartbeatTimer);
1987
+ this.heartbeatTimer = null;
1988
+ }
1989
+ if (this.cleanupTimer) {
1990
+ clearInterval(this.cleanupTimer);
1991
+ this.cleanupTimer = null;
1992
+ }
1993
+ }
1994
+ // ── Event Emitters ────────────────────────────────────
1995
+ _emitPeers() {
1996
+ const list = this.getPeers();
1997
+ for (const cb of this.peersListeners) {
1998
+ try {
1999
+ cb(list);
2000
+ } catch {
2001
+ }
2002
+ }
2003
+ }
2004
+ _emitHosting(hosting) {
2005
+ for (const cb of this.hostingListeners) {
2006
+ try {
2007
+ cb(hosting);
2008
+ } catch {
2009
+ }
2010
+ }
2011
+ }
2012
+ _emitConnection(connected) {
2013
+ for (const cb of this.connectionListeners) {
2014
+ try {
2015
+ cb(connected);
2016
+ } catch {
2017
+ }
2018
+ }
2019
+ }
2020
+ // ── Hostname ──────────────────────────────────────────
2021
+ _getHostname() {
2022
+ try {
2023
+ return `user-${Math.random().toString(36).slice(2, 6)}`;
2024
+ } catch {
2025
+ return "anonymous";
2026
+ }
2027
+ }
2028
+ };
2029
+ // Module-level singleton; consumed by usePeerMesh / PeerNetworkPanel and re-exported from the bundle.
+ var peerMesh = new PeerMeshImpl();
2030
+
2031
+ // src/ai/hooks/useTokenWallet.ts
2032
+ import { useState, useEffect, useCallback } from "react";
2033
/**
 * React hook exposing the token wallet's running totals. Subscribes to
 * wallet updates for the lifetime of the component and returns stable
 * callbacks for clearing and reading recent history.
 */
function useTokenWallet() {
  const [summary, setSummary] = useState(tokenWallet.getSummary());
  // subscribe() returns its own unsubscribe, which doubles as the effect cleanup.
  useEffect(() => tokenWallet.subscribe(setSummary), []);
  const clearHistory = useCallback(() => tokenWallet.clearHistory(), []);
  const getRecentHistory = useCallback((count = 20) => tokenWallet.getRecentHistory(count), []);
  return {
    summary,
    clearHistory,
    getRecentHistory,
    totalTokens: summary.totalTokens,
    totalCost: summary.totalCostUsd,
    totalCalls: summary.totalCalls,
    byProvider: summary.byProvider
  };
}
2055
+
2056
+ // src/ai/hooks/usePeerMesh.ts
2057
+ import { useState as useState2, useEffect as useEffect2, useCallback as useCallback2, useRef } from "react";
2058
/**
 * React hook binding a component to the shared peerMesh singleton.
 * Mirrors the peer list, hosting/connection flags and the most recent
 * 50 shared results into component state, and exposes stable callbacks
 * for controlling the mesh.
 *
 * Fix: removed the dead `resultsRef` — a useRef that was written on
 * every render but never read anywhere.
 */
function usePeerMesh() {
  const [peers, setPeers] = useState2([]);
  const [isHosting, setIsHosting] = useState2(false);
  const [isConnected, setIsConnected] = useState2(false);
  const [sharedResults, setSharedResults] = useState2([]);
  useEffect2(() => {
    const unsubPeers = peerMesh.onPeersChanged((newPeers) => {
      setPeers([...newPeers]);
    });
    const unsubHosting = peerMesh.onHostingChanged((hosting) => {
      setIsHosting(hosting);
    });
    const unsubConnected = peerMesh.onConnectionChanged((connected) => {
      setIsConnected(connected);
    });
    // Keep only the newest 50 results (49 old + 1 incoming).
    const unsubResult = peerMesh.onSharedResult((result) => {
      setSharedResults((prev) => [...prev.slice(-49), result]);
    });
    return () => {
      unsubPeers();
      unsubHosting();
      unsubConnected();
      unsubResult();
    };
  }, []);
  const startHost = useCallback2((port) => {
    peerMesh.startHost(port);
  }, []);
  const stopHost = useCallback2(() => {
    peerMesh.stopHost();
  }, []);
  const connectToPeer = useCallback2((address) => {
    peerMesh.connectToPeer(address);
  }, []);
  const disconnect = useCallback2(() => {
    peerMesh.disconnect();
  }, []);
  const shareResult = useCallback2((result) => {
    peerMesh.shareResult(result);
  }, []);
  return {
    peers,
    isHosting,
    isConnected,
    sharedResults,
    peerCount: peers.length,
    startHost,
    stopHost,
    connectToPeer,
    disconnect,
    shareResult
  };
}
2113
+
2114
+ // src/ai/components/TokenWalletPanel.tsx
2115
+ import { useState as useState3 } from "react";
2116
+ import { jsx, jsxs } from "react/jsx-runtime";
2117
// Tailwind text-color class per provider id (fallback handled at use sites).
var PROVIDER_COLORS = {
  ollama: "text-accent-green",
  gemini: "text-accent-blue",
  anthropic: "text-accent-purple",
  openai: "text-accent-cyan",
  mlx: "text-accent-amber"
};
// Row background tint per provider id.
var PROVIDER_BG = {
  ollama: "bg-emerald-500/10",
  gemini: "bg-blue-500/10",
  anthropic: "bg-purple-500/10",
  openai: "bg-cyan-500/10",
  mlx: "bg-amber-500/10"
};
// Emoji badge per provider id.
var PROVIDER_ICONS = {
  ollama: "\u{1F999}",
  gemini: "\u2728",
  anthropic: "\u{1F9E0}",
  openai: "\u26A1",
  mlx: "\u{1F34E}"
};
2138
/**
 * Render a USD amount for display: "FREE" for zero; more decimal
 * places the smaller the amount (6 under a cent, 4 under a dollar,
 * 2 otherwise).
 */
function formatCost(usd) {
  if (usd === 0) return "FREE";
  const digits = usd < 0.01 ? 6 : usd < 1 ? 4 : 2;
  return `$${usd.toFixed(digits)}`;
}
2144
/** Compact token count: "2.5M" / "1.5K" above a million / thousand, raw below. */
function formatTokens(n) {
  for (const [limit, suffix] of [[1e6, "M"], [1e3, "K"]]) {
    if (n >= limit) return `${(n / limit).toFixed(1)}${suffix}`;
  }
  return String(n);
}
2149
/** Human-readable elapsed time since epoch-ms timestamp `ts` ("just now", "5m ago", "3h ago", "2d ago"). */
function timeAgo(ts) {
  const elapsed = Date.now() - ts;
  if (elapsed < 6e4) return "just now";
  // Largest bucket first: days, hours, minutes.
  for (const [ms, label] of [[864e5, "d"], [36e5, "h"], [6e4, "m"]]) {
    if (elapsed >= ms) return `${Math.floor(elapsed / ms)}${label} ago`;
  }
  return "just now";
}
2156
/**
 * Dashboard card summarizing AI token usage: grand totals, a
 * per-provider breakdown sorted by descending cost, and an optional
 * list of recent calls.
 *
 * Fixes: (1) render the recent-call list from a copy
 * ([...history].reverse()) — the previous history.reverse() mutated the
 * array returned by getRecentHistory during render, which is unsafe if
 * the wallet hands back its internal array (TODO confirm whether
 * getRecentHistory copies); (2) dropped the unused `summary` binding
 * from the hook destructuring.
 */
function TokenWalletPanel() {
  const { totalTokens, totalCost, totalCalls, byProvider, clearHistory, getRecentHistory } = useTokenWallet();
  const [showHistory, setShowHistory] = useState3(false);
  const providers2 = Object.entries(byProvider).sort((a, b) => b[1].cost - a[1].cost);
  // Only fetched while the list is expanded; recomputed each render.
  const history = showHistory ? getRecentHistory(15) : [];
  return jsxs("div", { className: "flex flex-col gap-3 p-4 bg-surface-secondary rounded-2xl border border-border-subtle", children: [
    // Header: title + clear button
    jsxs("div", { className: "flex items-center justify-between", children: [
      jsxs("div", { className: "flex items-center gap-2", children: [
        jsx("span", { className: "text-lg", children: "\u{1F4B0}" }),
        jsx("h3", { className: "text-sm font-semibold text-text-primary", children: "Token Wallet" })
      ] }),
      jsx(
        "button",
        {
          onClick: clearHistory,
          className: "text-[10px] px-2 py-0.5 rounded-md bg-surface-glass text-text-muted\n hover:bg-surface-tertiary hover:text-text-secondary transition-colors",
          children: "Clear"
        }
      )
    ] }),
    // Totals grid
    jsxs("div", { className: "grid grid-cols-3 gap-2", children: [
      jsx(StatCard, { label: "Tokens", value: formatTokens(totalTokens), accent: "text-accent-cyan" }),
      jsx(StatCard, { label: "Cost", value: formatCost(totalCost), accent: totalCost === 0 ? "text-accent-green" : "text-accent-amber" }),
      jsx(StatCard, { label: "Calls", value: String(totalCalls), accent: "text-accent-blue" })
    ] }),
    // Per-provider breakdown
    providers2.length > 0 && jsxs("div", { className: "flex flex-col gap-1.5 mt-1", children: [
      jsx("span", { className: "text-[10px] font-medium text-text-muted uppercase tracking-wider", children: "By Provider" }),
      providers2.map(([id, data]) => jsxs(
        "div",
        {
          className: `flex items-center justify-between px-3 py-2 rounded-xl ${PROVIDER_BG[id] || "bg-surface-glass"} transition-colors`,
          children: [
            jsxs("div", { className: "flex items-center gap-2", children: [
              jsx("span", { className: "text-sm", children: PROVIDER_ICONS[id] || "\u{1F916}" }),
              jsx("span", { className: `text-xs font-medium ${PROVIDER_COLORS[id] || "text-text-primary"}`, children: id.charAt(0).toUpperCase() + id.slice(1) })
            ] }),
            jsxs("div", { className: "flex items-center gap-3 text-[11px]", children: [
              jsxs("span", { className: "text-text-muted", children: [
                data.calls,
                " calls"
              ] }),
              jsx("span", { className: "text-text-secondary font-mono", children: formatTokens(data.tokens) }),
              jsx("span", { className: "font-semibold text-text-primary font-mono", children: formatCost(data.cost) })
            ] })
          ]
        },
        id
      ))
    ] }),
    // Empty state
    providers2.length === 0 && jsxs("div", { className: "flex flex-col items-center justify-center py-6 text-text-muted gap-1", children: [
      jsx("span", { className: "text-2xl opacity-50", children: "\u{1F4CA}" }),
      jsx("span", { className: "text-xs", children: "No AI calls recorded yet" })
    ] }),
    // History toggle
    jsx(
      "button",
      {
        onClick: () => setShowHistory(!showHistory),
        className: "text-[10px] text-text-muted hover:text-text-secondary transition-colors self-center",
        children: showHistory ? "\u25B2 Hide recent" : "\u25BC Show recent calls"
      }
    ),
    // Recent calls, newest first — reverse a copy, never the source array.
    showHistory && history.length > 0 && jsx("div", { className: "flex flex-col gap-1 max-h-48 overflow-y-auto", children: [...history].reverse().map((entry, i) => jsx(HistoryRow, { entry }, `${entry.timestamp}-${i}`)) })
  ] });
}
2220
/** Small labeled metric tile used in the wallet summary grid. */
function StatCard({ label, value, accent }) {
  const labelNode = jsx("span", { className: "text-[9px] text-text-muted uppercase tracking-wider", children: label });
  const valueNode = jsx("span", { className: `text-base font-bold font-mono ${accent}`, children: value });
  return jsxs("div", { className: "flex flex-col items-center p-2 rounded-xl bg-surface-tertiary/50", children: [labelNode, valueNode] });
}
2226
/** One row in the recent-calls list: provider icon, model tail, tokens, cost, age. */
function HistoryRow({ entry }) {
  const icon = PROVIDER_ICONS[entry.provider] || "\u{1F916}";
  // Last path segment of the model id, capped at 18 chars.
  const modelLabel = entry.model.split("/").pop()?.slice(0, 18);
  return jsxs("div", { className: "flex items-center justify-between px-2 py-1.5 rounded-lg bg-surface-glass text-[10px]", children: [
    jsxs("div", { className: "flex items-center gap-1.5", children: [
      jsx("span", { children: icon }),
      jsx("span", { className: "text-text-secondary font-mono", children: modelLabel })
    ] }),
    jsxs("div", { className: "flex items-center gap-2", children: [
      jsx("span", { className: "text-text-muted", children: formatTokens(entry.totalTokens) }),
      jsx("span", { className: "text-text-primary font-mono", children: formatCost(entry.estimatedCostUsd) }),
      jsx("span", { className: "text-text-muted", children: timeAgo(entry.timestamp) })
    ] })
  ] });
}
2239
+
2240
+ // src/ai/components/PeerNetworkPanel.tsx
2241
+ import { useState as useState4, useCallback as useCallback3 } from "react";
2242
+ import { jsx as jsx2, jsxs as jsxs2 } from "react/jsx-runtime";
2243
/**
 * Elapsed-time label for peer activity timestamps.
 * Fix: added the day bucket so multi-day gaps read "3d ago" instead of
 * e.g. "72h ago" — now consistent with the wallet panel's timeAgo.
 * Behavior for anything under 24h is unchanged.
 */
function timeAgo2(ts) {
  const diff = Date.now() - ts;
  if (diff < 6e4) return "just now";
  if (diff < 36e5) return `${Math.floor(diff / 6e4)}m ago`;
  if (diff < 864e5) return `${Math.floor(diff / 36e5)}h ago`;
  return `${Math.floor(diff / 864e5)}d ago`;
}
2249
/**
 * Panel for hosting/joining a peer group and viewing connected peers
 * plus results shared over the mesh. All mesh state comes from the
 * usePeerMesh hook; this component only renders and forwards actions.
 */
function PeerNetworkPanel() {
  const {
    peers,
    isHosting,
    isConnected,
    sharedResults,
    peerCount,
    startHost,
    stopHost,
    connectToPeer,
    disconnect
  } = usePeerMesh();
  const [peerAddress, setPeerAddress] = useState4("");
  const [hostPort, setHostPort] = useState4("9876");
  // Connect to the typed address, then clear the field.
  const handleConnect = useCallback3(() => {
    if (!peerAddress.trim()) return;
    connectToPeer(peerAddress.trim());
    setPeerAddress("");
  }, [peerAddress, connectToPeer]);
  const handleKeyDown = useCallback3((e) => {
    if (e.key === "Enter") handleConnect();
  }, [handleConnect]);
  // Newest 8 shared results, most recent first (slice copies before reversing).
  const recentShared = sharedResults.slice(-8).reverse();
  return jsxs2("div", { className: "flex flex-col gap-3 p-4 bg-surface-secondary rounded-2xl border border-border-subtle", children: [
    // Header with live peer count and status pill
    jsxs2("div", { className: "flex items-center justify-between", children: [
      jsxs2("div", { className: "flex items-center gap-2", children: [
        jsx2("span", { className: "text-lg", children: "\u{1F310}" }),
        jsx2("h3", { className: "text-sm font-semibold text-text-primary", children: "Peer Network" }),
        peerCount > 0 && jsx2("span", { className: "px-1.5 py-0.5 text-[10px] font-bold rounded-full bg-emerald-500/20 text-accent-green", children: peerCount })
      ] }),
      jsx2(StatusBadge, { isHosting, isConnected })
    ] }),
    // Host / join controls
    jsxs2("div", { className: "flex flex-col gap-2 p-3 rounded-xl bg-surface-tertiary/50", children: [
      jsxs2("div", { className: "flex items-center justify-between", children: [
        jsxs2("div", { className: "flex flex-col", children: [
          jsx2("span", { className: "text-[11px] font-medium text-text-secondary", children: "Host a Group" }),
          jsx2("span", { className: "text-[9px] text-text-muted", children: "Others can connect to you" })
        ] }),
        jsxs2("div", { className: "flex items-center gap-2", children: [
          jsx2(
            "input",
            {
              className: "w-16 px-2 py-1 text-[11px] font-mono text-text-primary bg-surface-primary\n border border-border-subtle rounded-lg text-center\n focus:border-border-strong focus:outline-none transition-colors",
              value: hostPort,
              onChange: (e) => setHostPort(e.target.value),
              placeholder: "Port",
              disabled: isHosting
            }
          ),
          jsx2(
            "button",
            {
              onClick: () => isHosting ? stopHost() : startHost(Number(hostPort)),
              className: `px-3 py-1 text-[11px] font-medium rounded-lg transition-all
                ${isHosting ? "bg-red-500/15 text-accent-red hover:bg-red-500/25" : "bg-emerald-500/15 text-accent-green hover:bg-emerald-500/25"}`,
              children: isHosting ? "\u23F9 Stop" : "\u25B6 Start"
            }
          )
        ] })
      ] }),
      jsx2("div", { className: "h-px bg-border-subtle" }),
      jsxs2("div", { className: "flex flex-col gap-1.5", children: [
        jsx2("span", { className: "text-[11px] font-medium text-text-secondary", children: "Join a Group" }),
        jsxs2("div", { className: "flex gap-2", children: [
          jsx2(
            "input",
            {
              className: "flex-1 px-3 py-1.5 text-[11px] font-mono text-text-primary bg-surface-primary\n border border-border-subtle rounded-lg placeholder:text-text-muted\n focus:border-border-strong focus:outline-none transition-colors",
              value: peerAddress,
              onChange: (e) => setPeerAddress(e.target.value),
              onKeyDown: handleKeyDown,
              placeholder: "192.168.1.10:9876",
              disabled: isConnected
            }
          ),
          isConnected ? jsx2(
            "button",
            {
              onClick: disconnect,
              className: "px-3 py-1.5 text-[11px] font-medium rounded-lg bg-red-500/15 text-accent-red\n hover:bg-red-500/25 transition-all",
              children: "Disconnect"
            }
          ) : jsx2(
            "button",
            {
              onClick: handleConnect,
              className: "px-3 py-1.5 text-[11px] font-medium rounded-lg bg-cyan-500/15 text-accent-cyan\n hover:bg-cyan-500/25 transition-all",
              children: "Connect"
            }
          )
        ] })
      ] })
    ] }),
    // Connected peer list
    peers.length > 0 && jsxs2("div", { className: "flex flex-col gap-1.5", children: [
      jsx2("span", { className: "text-[10px] font-medium text-text-muted uppercase tracking-wider", children: "Connected Peers" }),
      peers.map((peer) => jsx2(PeerRow, { peer }, peer.id))
    ] }),
    // Empty state
    !isHosting && !isConnected && peers.length === 0 && jsxs2("div", { className: "flex flex-col items-center justify-center py-5 text-text-muted gap-1.5", children: [
      jsx2("span", { className: "text-2xl opacity-40", children: "\u{1F517}" }),
      jsxs2("span", { className: "text-xs text-center", children: [
        "Host a group or connect to a peer",
        jsx2("br", {}),
        "to share AI results"
      ] })
    ] }),
    // Shared results feed
    sharedResults.length > 0 && jsxs2("div", { className: "flex flex-col gap-1.5 mt-1", children: [
      jsxs2("span", { className: "text-[10px] font-medium text-text-muted uppercase tracking-wider", children: [
        "Shared Results (",
        sharedResults.length,
        ")"
      ] }),
      jsx2("div", { className: "flex flex-col gap-1 max-h-40 overflow-y-auto", children: recentShared.map((r, i) => jsxs2(
        "div",
        {
          className: "px-3 py-2 rounded-xl bg-surface-glass text-[10px]",
          children: [
            jsxs2("div", { className: "flex items-center justify-between mb-1", children: [
              jsx2("span", { className: "font-medium text-accent-purple", children: r.peerId.slice(0, 8) }),
              jsx2("span", { className: "text-text-muted font-mono", children: r.model })
            ] }),
            jsxs2("p", { className: "text-text-secondary line-clamp-2", children: [
              r.response.slice(0, 120),
              "..."
            ] })
          ]
        },
        `${r.timestamp}-${i}`
      )) })
    ] })
  ] });
}
2379
/** Status pill for the panel header. Priority: Hosting > Connected > Offline. */
function StatusBadge({ isHosting, isConnected }) {
  // Shared pill shape: wrapper class, dot class, label text.
  const pill = (wrap, dot, label) => jsxs2("span", { className: wrap, children: [
    jsx2("span", { className: dot }),
    label
  ] });
  if (isHosting) {
    return pill(
      "flex items-center gap-1.5 px-2 py-0.5 rounded-full bg-emerald-500/15 text-[10px] font-medium text-accent-green",
      "w-1.5 h-1.5 rounded-full bg-emerald-400 animate-pulse",
      "Hosting"
    );
  }
  if (isConnected) {
    return pill(
      "flex items-center gap-1.5 px-2 py-0.5 rounded-full bg-cyan-500/15 text-[10px] font-medium text-accent-cyan",
      "w-1.5 h-1.5 rounded-full bg-cyan-400 animate-pulse",
      "Connected"
    );
  }
  return pill(
    "flex items-center gap-1.5 px-2 py-0.5 rounded-full bg-surface-glass text-[10px] text-text-muted",
    "w-1.5 h-1.5 rounded-full bg-surface-tertiary",
    "Offline"
  );
}
2397
/** One connected-peer row: presence dot, name, address and last-seen age. */
function PeerRow({ peer }) {
  const identity = jsxs2("div", { className: "flex items-center gap-2", children: [
    jsx2("span", { className: "w-2 h-2 rounded-full bg-emerald-400" }),
    jsx2("span", { className: "text-xs font-medium text-text-primary", children: peer.name })
  ] });
  const meta = jsxs2("div", { className: "flex items-center gap-2 text-[10px] text-text-muted", children: [
    jsx2("span", { className: "font-mono", children: peer.address }),
    jsx2("span", { children: "\xB7" }),
    jsx2("span", { children: timeAgo2(peer.lastSeen) })
  ] });
  return jsxs2("div", { className: "flex items-center justify-between px-3 py-2 rounded-xl bg-surface-glass transition-colors hover:bg-surface-tertiary/50", children: [identity, meta] });
}
2410
+
2411
+ // src/crypto.ts
2412
+ async function decryptEvent(base64Payload, base64Key) {
2413
+ try {
2414
+ const keyBuffer = Uint8Array.from(atob(base64Key), (c) => c.charCodeAt(0));
2415
+ const payloadBuffer = Uint8Array.from(atob(base64Payload), (c) => c.charCodeAt(0));
2416
+ if (payloadBuffer.byteLength < 12) {
2417
+ throw new Error("Payload too short to contain a valid nonce");
2418
+ }
2419
+ const nonce = payloadBuffer.slice(0, 12);
2420
+ const ciphertext = payloadBuffer.slice(12);
2421
+ const cryptoKey = await crypto.subtle.importKey(
2422
+ "raw",
2423
+ keyBuffer,
2424
+ { name: "AES-GCM" },
2425
+ false,
2426
+ ["decrypt"]
2427
+ );
2428
+ const decryptedBuffer = await crypto.subtle.decrypt(
2429
+ { name: "AES-GCM", iv: nonce },
2430
+ cryptoKey,
2431
+ ciphertext
2432
+ );
2433
+ const decoder = new TextDecoder();
2434
+ const jsonString = decoder.decode(decryptedBuffer);
2435
+ return JSON.parse(jsonString);
2436
+ } catch (e) {
2437
+ console.error("[Crypto Bridge] Failed to decrypt secure event:", e);
2438
+ throw e;
2439
+ }
2440
+ }
2441
+
2442
+ // src/rehydration.ts
2443
/**
 * Replays persisted stream history through the kernel so state survives
 * a reload. String entries are treated as encrypted payloads and
 * decrypted with the swarm key; entries that fail to decrypt are
 * skipped. (User-facing log strings are intentionally in Spanish.)
 */
var StateRehydrationManager = {
  async rehydrate(swarmKey) {
    console.log("[Rehydration] \u{1F504} Iniciando recuperaci\xF3n de estado...");
    try {
      const history = await kernel.execute("get_stream_history", { limit: 100 });
      if (!Array.isArray(history)) {
        console.warn("[Rehydration] El historial devuelto no es un array v\xE1lido.");
        return;
      }
      // Reverse so events replay in the opposite order of the returned
      // history (assumes the kernel returns newest-first — TODO confirm).
      const events = history.reverse();
      for (const rawEvent of events) {
        let event = rawEvent;
        if (typeof rawEvent === "string") {
          try {
            event = await decryptEvent(rawEvent, swarmKey);
          } catch {
            console.warn("[Rehydration] No se pudo descifrar un evento, saltando...");
            continue;
          }
        }
        this.replayEvent(event);
      }
      console.log(`[Rehydration] \u2705 Memoria restaurada: ${events.length} eventos procesados.`);
    } catch (error) {
      console.error("[Rehydration] Fallo cr\xEDtico en la rehidrataci\xF3n:", error);
    }
  },
  // Re-inject one historical event, flagged so consumers can tell it apart
  // from live traffic.
  replayEvent(event) {
    try {
      kernel.injectEvent({ ...event, is_rehydration: true });
    } catch (e) {
      console.warn("[Rehydration] Error inyectando evento de historial", e);
    }
  }
};
2478
+ export {
2479
+ AnthropicProvider,
2480
+ GeminiProvider,
2481
+ OllamaProvider,
2482
+ OpenAIProvider,
2483
+ PeerNetworkPanel,
2484
+ StateRehydrationManager,
2485
+ TokenWalletPanel,
2486
+ chat,
2487
+ chatStream,
2488
+ clearConversationHistory,
2489
+ decryptEvent,
2490
+ embeddingService,
2491
+ getAllProviders,
2492
+ getProviderStatuses,
2493
+ inferenceRouter,
2494
+ initProviders,
2495
+ isOllamaAvailable,
2496
+ kernel,
2497
+ loadProviderKeys,
2498
+ benchmarkModel as mlxBenchmarkModel,
2499
+ compareModels as mlxCompareModels,
2500
+ listAvailableModels as mlxListModels,
2501
+ pullModel as mlxPullModel,
2502
+ runInference as mlxRunInference,
2503
+ chat as ollamaChat,
2504
+ chatStream as ollamaChatStream,
2505
+ listModels as ollamaListModels,
2506
+ parseToolCalls,
2507
+ peerMesh,
2508
+ processLocalMessage,
2509
+ routeChat,
2510
+ routeInference,
2511
+ saveProviderKeys,
2512
+ setProviderApiKey,
2513
+ startLoRATraining,
2514
+ stripToolCalls,
2515
+ tokenWallet,
2516
+ usePeerMesh,
2517
+ useTokenWallet
2518
+ };