@decido/kernel-bridge 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,119 @@
1
+ /**
2
+ * EmbeddingService — Local text embeddings for RAG pipeline
3
+ *
4
+ * Generates vector embeddings from text using:
5
+ * 1. Ollama embeddings API (nomic-embed-text)
6
+ * 2. Simple TF-IDF fallback for offline use
7
+ *
8
+ * Used by VectorStore for semantic similarity search.
9
+ */
10
+
11
// ─── Types ───────────────────────────────────────────────────

/** Result of embedding a single piece of text. */
export interface EmbeddingResult {
  // The embedding vector (L2-normalized by the TF-IDF fallback path).
  vector: number[];
  // Length of `vector`.
  dimensions: number;
  // Identifier of the model/backend that produced the vector.
  model: string;
  // Wall-clock time spent generating the embedding, in milliseconds.
  latencyMs: number;
}

/** Available embedding backends: the Ollama HTTP API or the local hash fallback. */
export type EmbeddingBackend = 'ollama' | 'tfidf';

// ─── Constants ───────────────────────────────────────────────

// Base URL of the local Ollama server.
const OLLAMA_URL = 'http://localhost:11434';
// Embedding model requested from Ollama.
const EMBED_MODEL = 'nomic-embed-text';
// Dimensionality of the hash-based fallback vectors.
const TFIDF_DIMS = 256;
27
+
28
+ // ─── Ollama Embeddings ───────────────────────────────────────
29
+
30
+ async function ollamaEmbed(text: string): Promise<EmbeddingResult> {
31
+ const start = Date.now();
32
+ try {
33
+ const res = await fetch(`${OLLAMA_URL}/api/embed`, {
34
+ method: 'POST',
35
+ headers: { 'Content-Type': 'application/json' },
36
+ body: JSON.stringify({ model: EMBED_MODEL, input: text }),
37
+ });
38
+ if (!res.ok) throw new Error(`Ollama embed failed: ${res.status}`);
39
+ const data = await res.json();
40
+ const vector = data.embeddings?.[0] || data.embedding || [];
41
+ return {
42
+ vector,
43
+ dimensions: vector.length,
44
+ model: EMBED_MODEL,
45
+ latencyMs: Date.now() - start,
46
+ };
47
+ } catch (err) {
48
+ console.warn('[EmbeddingService] Ollama not available, falling back to TF-IDF', err);
49
+ return tfidfEmbed(text);
50
+ }
51
+ }
52
+
53
+ // ─── TF-IDF Fallback ─────────────────────────────────────────
54
+
55
+ /** Simple hash-based embedding for offline use */
56
+ function tfidfEmbed(text: string): EmbeddingResult {
57
+ const start = Date.now();
58
+ const tokens = text.toLowerCase()
59
+ .replace(/[^\w\s]/g, '')
60
+ .split(/\s+/)
61
+ .filter(t => t.length > 2);
62
+
63
+ const vector = new Array(TFIDF_DIMS).fill(0);
64
+
65
+ for (const token of tokens) {
66
+ let hash = 0;
67
+ for (let i = 0; i < token.length; i++) {
68
+ hash = ((hash << 5) - hash + token.charCodeAt(i)) | 0;
69
+ }
70
+ const idx = Math.abs(hash) % TFIDF_DIMS;
71
+ vector[idx] += 1.0 / tokens.length;
72
+ }
73
+
74
+ // L2 normalize
75
+ const norm = Math.sqrt(vector.reduce((s: number, v: number) => s + v * v, 0));
76
+ if (norm > 0) {
77
+ for (let i = 0; i < vector.length; i++) vector[i] /= norm;
78
+ }
79
+
80
+ return {
81
+ vector,
82
+ dimensions: TFIDF_DIMS,
83
+ model: 'tfidf-fallback',
84
+ latencyMs: Date.now() - start,
85
+ };
86
+ }
87
+
88
+ // ─── Public API ──────────────────────────────────────────────
89
+
90
+ let preferredBackend: EmbeddingBackend = 'ollama';
91
+
92
+ export async function embed(text: string): Promise<EmbeddingResult> {
93
+ if (preferredBackend === 'ollama') {
94
+ return ollamaEmbed(text);
95
+ }
96
+ return tfidfEmbed(text);
97
+ }
98
+
99
+ export async function embedBatch(texts: string[]): Promise<EmbeddingResult[]> {
100
+ return Promise.all(texts.map(t => embed(t)));
101
+ }
102
+
103
+ export function setBackend(backend: EmbeddingBackend): void {
104
+ preferredBackend = backend;
105
+ }
106
+
107
+ export function cosineSimilarity(a: number[], b: number[]): number {
108
+ if (a.length !== b.length) return 0;
109
+ let dot = 0, normA = 0, normB = 0;
110
+ for (let i = 0; i < a.length; i++) {
111
+ dot += a[i] * b[i];
112
+ normA += a[i] * a[i];
113
+ normB += b[i] * b[i];
114
+ }
115
+ const denom = Math.sqrt(normA) * Math.sqrt(normB);
116
+ return denom === 0 ? 0 : dot / denom;
117
+ }
118
+
119
// Single-object facade for consumers that prefer one service import.
export const embeddingService = { embed, embedBatch, setBackend, cosineSimilarity };
@@ -0,0 +1,347 @@
1
+ /**
2
+ * InferenceRouter — Multi-backend AI inference routing
3
+ *
4
+ * Routes prompts to the optimal backend based on:
5
+ * - Provider availability (Ollama local → Gemini API → fallback)
6
+ * - Active provider setting from user config
7
+ * - Automatic fallback chain
8
+ *
9
+ * Providers:
10
+ * 1. Ollama (local, fast, tool-calling via Qwen2)
11
+ * 2. Gemini (Google API, powerful, requires API key)
12
+ * 3. MLX (local, Apple Silicon optimized)
13
+ */
14
+
15
+ import {
16
+ registerProvider,
17
+ getProvider,
18
+ getAllProviders,
19
+ type LLMProvider,
20
+ type ChatMessage,
21
+ type ChatResult,
22
+ type ProviderStatus,
23
+ } from './providers/LLMProvider';
24
+ import { OllamaProvider } from './providers/OllamaProvider';
25
+ import { GeminiProvider } from './providers/GeminiProvider';
26
+ import { AnthropicProvider } from './providers/AnthropicProvider';
27
+ import { OpenAIProvider } from './providers/OpenAIProvider';
28
+ import { tokenWallet } from './TokenWallet';
29
+
30
+ // Re-export for consumers
31
+ export { getAllProviders, type LLMProvider, type ProviderStatus };
32
+
33
// ─── Types ───────────────────────────────────────────────────

/** Identifier of a concrete provider, or 'auto' for the default fallback chain. */
export type InferenceBackend = 'ollama' | 'gemini' | 'anthropic' | 'openai' | 'mlx' | 'auto';

/** Coarse task categories produced by `classifyTask`. */
export type TaskType = 'tool-calling' | 'chat' | 'code' | 'analysis' | 'creative';

/** Per-request routing options; see DEFAULT_CONFIG for the defaults. */
export interface RouterConfig {
  // Backend to try first ('auto' = ollama → gemini → anthropic → openai).
  preferredBackend: InferenceBackend;
  // Latency budget in ms. NOTE(review): not currently enforced by routeChat — confirm intent.
  maxLatencyMs: number;
  // When true, unavailable/failing providers are skipped instead of throwing.
  enableFallback: boolean;
  // Sampling temperature forwarded to the provider.
  temperature: number;
  // Max tokens forwarded to the provider.
  maxTokens: number;
}

/** Response shape of the legacy `routeInference` API. */
export interface RoutedResponse {
  text: string;
  // Backend that produced the answer ('auto' when every provider failed).
  backend: InferenceBackend;
  model: string;
  latencyMs: number;
  // 0 when token usage or latency is unknown.
  tokensPerSecond: number;
  // Always empty in the current implementation; reserved for tool-call reporting.
  toolsInvoked: string[];
}

// ─── Default Config ──────────────────────────────────────────

const DEFAULT_CONFIG: RouterConfig = {
  preferredBackend: 'auto',
  maxLatencyMs: 30000,
  enableFallback: true,
  temperature: 0.7,
  maxTokens: 512,
};
65
+
66
+ // ─── Provider Initialization ─────────────────────────────────
67
+
68
+ let _initialized = false;
69
+
70
+ function ensureProviders(): void {
71
+ if (_initialized) return;
72
+ _initialized = true;
73
+
74
+ registerProvider(new OllamaProvider());
75
+ registerProvider(new GeminiProvider());
76
+ registerProvider(new AnthropicProvider());
77
+ registerProvider(new OpenAIProvider());
78
+
79
+ console.log('🔌 [InferenceRouter] 4 providers initialized (Ollama, Gemini, Anthropic, OpenAI)');
80
+ }
81
+
82
// ─── API Key Management ─────────────────────────────────────

/** Map of provider id → API key, persisted via Tauri or localStorage. */
interface ProviderKeys {
  gemini?: string;
  openai?: string;
  anthropic?: string;
  // Any additional provider ids set through setProviderApiKey.
  [key: string]: string | undefined;
}

// In-memory cache; null until loadProviderKeys has populated it.
let _cachedKeys: ProviderKeys | null = null;
92
+
93
+ async function getInvoke(): Promise<((cmd: string, args?: Record<string, unknown>) => Promise<unknown>) | null> {
94
+ try {
95
+ const tauri = await import('@tauri-apps/api/core');
96
+ return tauri.invoke;
97
+ } catch {
98
+ return null;
99
+ }
100
+ }
101
+
102
+ export async function loadProviderKeys(): Promise<ProviderKeys> {
103
+ if (_cachedKeys) return _cachedKeys;
104
+
105
+ try {
106
+ const invoke = await getInvoke();
107
+ if (invoke) {
108
+ const raw = await invoke('load_provider_keys') as string | null;
109
+ if (raw) {
110
+ _cachedKeys = JSON.parse(raw);
111
+ return _cachedKeys!;
112
+ }
113
+ }
114
+ } catch (e) {
115
+ console.warn('[InferenceRouter] Failed to load provider keys:', e);
116
+ }
117
+
118
+ // Fallback to localStorage
119
+ try {
120
+ const stored = localStorage.getItem('macia-provider-keys');
121
+ if (stored) {
122
+ _cachedKeys = JSON.parse(stored);
123
+ return _cachedKeys!;
124
+ }
125
+ } catch { /* ignore */ }
126
+
127
+ _cachedKeys = {};
128
+ return _cachedKeys;
129
+ }
130
+
131
+ export async function saveProviderKeys(keys: ProviderKeys): Promise<void> {
132
+ _cachedKeys = keys;
133
+
134
+ try {
135
+ const invoke = await getInvoke();
136
+ if (invoke) {
137
+ await invoke('save_provider_keys', {
138
+ json: JSON.stringify(keys, null, 2),
139
+ });
140
+ return;
141
+ }
142
+ } catch (e) {
143
+ console.warn('[InferenceRouter] Failed to save provider keys via Tauri:', e);
144
+ }
145
+
146
+ // Fallback to localStorage
147
+ try {
148
+ localStorage.setItem('macia-provider-keys', JSON.stringify(keys));
149
+ } catch { /* ignore */ }
150
+ }
151
+
152
+ /**
153
+ * Initialize providers and load saved API keys.
154
+ * Call once during app bootstrap.
155
+ */
156
+ export async function initProviders(): Promise<void> {
157
+ ensureProviders();
158
+
159
+ const keys = await loadProviderKeys();
160
+
161
+ // Apply saved keys to all providers
162
+ const providerKeyMap: Record<string, string | undefined> = {
163
+ gemini: keys.gemini,
164
+ anthropic: keys.anthropic,
165
+ openai: keys.openai,
166
+ };
167
+
168
+ for (const [id, key] of Object.entries(providerKeyMap)) {
169
+ if (key) {
170
+ const provider = getProvider(id);
171
+ provider?.setApiKey?.(key);
172
+ console.log(`🔑 [InferenceRouter] ${id} API key loaded`);
173
+ }
174
+ }
175
+ }
176
+
177
+ /**
178
+ * Set/update an API key for a provider and persist it.
179
+ */
180
+ export async function setProviderApiKey(providerId: string, apiKey: string): Promise<void> {
181
+ const provider = getProvider(providerId);
182
+ if (provider?.setApiKey) {
183
+ provider.setApiKey(apiKey);
184
+ }
185
+
186
+ const keys = await loadProviderKeys();
187
+ keys[providerId] = apiKey;
188
+ await saveProviderKeys(keys);
189
+ }
190
+
191
+ // ─── Provider Status ─────────────────────────────────────────
192
+
193
+ export async function getProviderStatuses(): Promise<Record<string, ProviderStatus>> {
194
+ ensureProviders();
195
+ const statuses: Record<string, ProviderStatus> = {};
196
+ for (const provider of getAllProviders()) {
197
+ statuses[provider.id] = await provider.checkStatus();
198
+ }
199
+ return statuses;
200
+ }
201
+
202
+ // ─── Task Classifier ─────────────────────────────────────────
203
+
204
+ function classifyTask(prompt: string): TaskType {
205
+ const lower = prompt.toLowerCase();
206
+
207
+ const toolPatterns = [
208
+ 'ejecuta', 'revisa', 'escanea', 'limpia', 'optimiza',
209
+ 'monitorea', 'diagnostica', 'analiza el sistema',
210
+ 'procesos', 'disco', 'memoria', 'red', 'seguridad',
211
+ 'git', 'commit', 'deploy', 'backup',
212
+ ];
213
+ if (toolPatterns.some(p => lower.includes(p))) return 'tool-calling';
214
+
215
+ const codePatterns = ['codigo', 'funcion', 'clase', 'error', 'debug', 'refactor', 'typescript', 'python', 'rust'];
216
+ if (codePatterns.some(p => lower.includes(p))) return 'code';
217
+
218
+ const analysisPatterns = ['analiza', 'compara', 'benchmark', 'rendimiento', 'metricas', 'estadisticas'];
219
+ if (analysisPatterns.some(p => lower.includes(p))) return 'analysis';
220
+
221
+ const creativePatterns = ['genera', 'crea', 'escribe', 'inventa', 'imagina', 'diseña'];
222
+ if (creativePatterns.some(p => lower.includes(p))) return 'creative';
223
+
224
+ return 'chat';
225
+ }
226
+
227
+ // ─── Main Router ─────────────────────────────────────────────
228
+
229
+ /**
230
+ * Route a chat request to the best available provider.
231
+ *
232
+ * This is the main entry point used by OllamaService and AgentToolLoop.
233
+ */
234
+ export async function routeChat(
235
+ messages: ChatMessage[],
236
+ config: Partial<RouterConfig> = {},
237
+ ): Promise<ChatResult & { backend: string }> {
238
+ ensureProviders();
239
+ const cfg = { ...DEFAULT_CONFIG, ...config };
240
+
241
+ // Determine which provider to try
242
+ const providerOrder = resolveProviderOrder(cfg.preferredBackend);
243
+
244
+ for (const providerId of providerOrder) {
245
+ const provider = getProvider(providerId);
246
+ if (!provider) continue;
247
+
248
+ const status = await provider.checkStatus();
249
+ if (status !== 'available') {
250
+ if (!cfg.enableFallback) {
251
+ throw new Error(`Provider ${provider.name} is ${status}`);
252
+ }
253
+ continue;
254
+ }
255
+
256
+ try {
257
+ const result = await provider.chat(messages, {
258
+ model: undefined, // use provider default
259
+ temperature: cfg.temperature,
260
+ maxTokens: cfg.maxTokens,
261
+ });
262
+
263
+ // 💰 Record usage in Token Wallet
264
+ tokenWallet.record({
265
+ provider: providerId,
266
+ model: result.model,
267
+ tokensUsed: result.tokensUsed,
268
+ });
269
+
270
+ return {
271
+ ...result,
272
+ backend: providerId,
273
+ };
274
+ } catch (err) {
275
+ console.warn(`[InferenceRouter] ${provider.name} failed:`, err);
276
+ if (!cfg.enableFallback) throw err;
277
+ // Try next provider
278
+ }
279
+ }
280
+
281
+ throw new Error('No hay backends de IA disponibles. Inicia Ollama o configura una API key de Gemini.');
282
+ }
283
+
284
+ function resolveProviderOrder(preferred: InferenceBackend): string[] {
285
+ switch (preferred) {
286
+ case 'ollama': return ['ollama', 'gemini', 'anthropic', 'openai'];
287
+ case 'gemini': return ['gemini', 'ollama', 'anthropic', 'openai'];
288
+ case 'anthropic': return ['anthropic', 'gemini', 'ollama', 'openai'];
289
+ case 'openai': return ['openai', 'gemini', 'ollama', 'anthropic'];
290
+ case 'auto':
291
+ default:
292
+ return ['ollama', 'gemini', 'anthropic', 'openai'];
293
+ }
294
+ }
295
+
296
+ // ─── Legacy API Compatibility ────────────────────────────────
297
+
298
+ /**
299
+ * Legacy `routeInference` function — routes a prompt string.
300
+ * Used by AgentLoop, AgentPlanner, RAGPipeline.
301
+ */
302
+ export async function routeInference(
303
+ prompt: string,
304
+ config: Partial<RouterConfig> = {},
305
+ ): Promise<RoutedResponse> {
306
+ const messages: ChatMessage[] = [
307
+ { role: 'user', content: prompt },
308
+ ];
309
+
310
+ try {
311
+ const result = await routeChat(messages, config);
312
+ return {
313
+ text: result.text,
314
+ backend: result.backend as InferenceBackend,
315
+ model: result.model,
316
+ latencyMs: result.latencyMs,
317
+ tokensPerSecond: result.tokensUsed
318
+ ? result.tokensUsed / (result.latencyMs / 1000)
319
+ : 0,
320
+ toolsInvoked: [],
321
+ };
322
+ } catch {
323
+ return {
324
+ text: 'No hay backends disponibles. Inicia Ollama o configura un modelo.',
325
+ backend: 'auto',
326
+ model: 'none',
327
+ latencyMs: 0,
328
+ tokensPerSecond: 0,
329
+ toolsInvoked: [],
330
+ };
331
+ }
332
+ }
333
+
334
// ─── Convenience Export ──────────────────────────────────────

// Single-object facade over the router API for consumers that prefer one import.
export const inferenceRouter = {
  route: routeInference,
  routeChat,
  classifyTask,
  initProviders,
  getProviderStatuses,
  setProviderApiKey,
  loadProviderKeys,
  getAllProviders,
  getProvider,
  tokenWallet,
};
@@ -0,0 +1,199 @@
1
+ /**
2
+ * LocalAgentResponder — Local-first AI responses for DecidoOS
3
+ *
4
+ * Handles user input locally when Cortex is not connected.
5
+ * Provides greeting responses, tool execution, system info queries,
6
+ * and basic conversational ability without requiring a remote AI.
7
+ */
8
+
9
+
10
// ─── Types ──────────────────────────────────────────────────

/** A locally generated reply, optionally with tool calls for the caller to run. */
interface LocalResponse {
  text: string;
  toolCalls?: Array<{ name: string; args: Record<string, unknown> }>;
}

// ─── Greeting Patterns ──────────────────────────────────────

// Spanish/English greetings, optionally addressed to the agent by name.
const GREETING_PATTERNS = [
  /^(hola|hey|hi|hello|buenas|qué tal|que tal|saludos)\s*(decido|decidoos|agente|asistente)?/i,
  /^(buenos?\s*(días|tardes|noches))/i,
  /^(órale|oye|ey)\s*(decido)?/i,
];

// Requests for the capability/command list.
const HELP_PATTERNS = [
  /^(ayuda|help|qué puedes hacer|que puedes hacer|comandos|tools|herramientas)/i,
  /^(qué|que)\s*(sabes|haces|puedes)/i,
];

// System status queries ("estado", "cómo estás", ...).
const SYSTEM_PATTERNS = [
  /^(estado|status|sistema|system)\s*(del\s*sistema)?/i,
  /^(cómo|como)\s*(estás|estas|está|va)/i,
];
34
+
35
+ // ─── Response Templates ─────────────────────────────────────
36
+
37
+ function getTimeGreeting(): string {
38
+ const hour = new Date().getHours();
39
+ if (hour < 12) return 'Buenos días';
40
+ if (hour < 18) return 'Buenas tardes';
41
+ return 'Buenas noches';
42
+ }
43
+
44
/** Build the Markdown greeting/status banner returned for salutations. */
function getWelcomeMessage(): string {
  const greeting = getTimeGreeting();

  return `${greeting}, operador. Soy **DecidoOS Agent** 🧠

Estoy en línea y operativo. Aquí está mi reporte de estado:

▸ **Modo**: Local-first (sin dependencia de Cortex)
▸ **Estado**: ✅ ONLINE

Puedo auditar tu seguridad, escanear tu flota de procesos y más.

Escribe **"ayuda"** para ver mis comandos principales.`;
}
58
+
59
/** Static capabilities/help text (Markdown), shown for help queries. */
function getHelpMessage(): string {
  return `🛠️ **Capacidades de DecidoOS Agent**

• "escanea puertos" — Auditar puertos abiertos
• "escanea red" — Monitorear conexiones de red
• "escanea flota" — Ver procesos Agent / Node / Python
• "lista tareas" — Ver tareas / playbooks
• "estado" — Estado del sistema

**Voice Mode:**
Presiona el botón verde 📞 para activar conversación por voz con Cortex.`;
}
71
+
72
+ async function getSystemStatus(): Promise<string> {
73
+ let cpuInfo = 'N/A';
74
+ let memInfo = 'N/A';
75
+
76
+ try {
77
+ const { invoke } = await import('@tauri-apps/api/core');
78
+ const cpu = await invoke<{ usage: number }>('get_cpu_usage');
79
+ const mem = await invoke<{ used: number; total: number }>('get_memory_usage');
80
+ cpuInfo = `${cpu.usage?.toFixed(1)}%`;
81
+ memInfo = `${(mem.used / 1024 / 1024 / 1024).toFixed(1)} GB / ${(mem.total / 1024 / 1024 / 1024).toFixed(1)} GB`;
82
+ } catch {
83
+ cpuInfo = '(requiere Tauri)';
84
+ memInfo = '(requiere Tauri)';
85
+ }
86
+
87
+ return `📊 **Estado del Sistema DecidoOS**
88
+
89
+ ▸ **CPU**: ${cpuInfo}
90
+ ▸ **RAM**: ${memInfo}
91
+ ▸ **Cortex**: Local mode
92
+ ▸ **Uptime Session**: ${getSessionUptime()}
93
+
94
+ Todo operativo. ¿Necesitas algo más?`;
95
+ }
96
+
97
+ function getSessionUptime(): string {
98
+ const uptime = performance.now();
99
+ const minutes = Math.floor(uptime / 60000);
100
+ const hours = Math.floor(minutes / 60);
101
+ if (hours > 0) return `${hours}h ${minutes % 60}m`;
102
+ return `${minutes}m`;
103
+ }
104
+
105
+ // ─── Main Responder ─────────────────────────────────────────
106
+
107
+ export async function processLocalMessage(input: string): Promise<LocalResponse> {
108
+ const trimmed = input.trim();
109
+
110
+ // 1. Greeting
111
+ if (GREETING_PATTERNS.some(p => p.test(trimmed))) {
112
+ return { text: getWelcomeMessage() };
113
+ }
114
+
115
+ // 2. Help
116
+ if (HELP_PATTERNS.some(p => p.test(trimmed))) {
117
+ return { text: getHelpMessage() };
118
+ }
119
+
120
+ // 3. System status
121
+ if (SYSTEM_PATTERNS.some(p => p.test(trimmed))) {
122
+ const status = await getSystemStatus();
123
+ return { text: status };
124
+ }
125
+
126
+ // 4. Tool execution patterns
127
+ const execMatch = trimmed.match(/^(ejecuta|run|exec|corre)\s+(.+)/i);
128
+ if (execMatch) {
129
+ const command = execMatch[2];
130
+ return {
131
+ text: `⚡ Ejecutando: \`${command}\`...`,
132
+ toolCalls: [{ name: 'tactical.execute_command', args: { command } }],
133
+ };
134
+ }
135
+
136
+ const scanMatch = trimmed.match(/^(escanea|scan|audita|audit)\s*(puertos|ports)/i);
137
+ if (scanMatch) {
138
+ return {
139
+ text: '🔍 Escaneando puertos del sistema...',
140
+ toolCalls: [{ name: 'security.audit_ports', args: { includeLoopback: false } }],
141
+ };
142
+ }
143
+
144
+ const networkMatch = trimmed.match(/^(escanea|scan|monitorea|monitor)\s*(red|network|conexiones|connections)/i);
145
+ if (networkMatch) {
146
+ return {
147
+ text: '🌐 Analizando conexiones de red...',
148
+ toolCalls: [{ name: 'security.network_monitor', args: {} }],
149
+ };
150
+ }
151
+
152
+ const fleetMatch = trimmed.match(/^(escanea|scan)\s*(flota|fleet|procesos|processes)/i);
153
+ if (fleetMatch) {
154
+ return {
155
+ text: '🛸 Escaneando flotilla de agentes...',
156
+ toolCalls: [{ name: 'tactical.scan_fleet', args: {} }],
157
+ };
158
+ }
159
+
160
+ const listMatch = trimmed.match(/^(lista|list|muestra|show)\s*(tareas|tasks|playbooks)/i);
161
+ if (listMatch) {
162
+ return {
163
+ text: '📋 Listando tareas...',
164
+ toolCalls: [{ name: 'tactical.list_tasks', args: { status: 'all' } }],
165
+ };
166
+ }
167
+
168
+ // 5. Forensic scan
169
+ const forensicMatch = trimmed.match(/^(analiza|analyze|forense|forensic)\s*(proceso|process)?\s*(\d+)?/i);
170
+ if (forensicMatch) {
171
+ const pid = forensicMatch[3] ? parseInt(forensicMatch[3]) : undefined;
172
+ return {
173
+ text: pid ? `🔬 Analizando proceso PID ${pid}...` : '🔬 Escaneando procesos sospechosos...',
174
+ toolCalls: [{ name: 'security.forensic_scan', args: { pid, deep: true } }],
175
+ };
176
+ }
177
+
178
+ // 6. Vulnerability scan
179
+ const vulnMatch = trimmed.match(/^(vulnerabilidades|vulnerabilities|vuln|audit\s*npm)/i);
180
+ if (vulnMatch) {
181
+ return {
182
+ text: '🛡️ Escaneando vulnerabilidades en dependencias...',
183
+ toolCalls: [{ name: 'security.scan_vulnerabilities', args: {} }],
184
+ };
185
+ }
186
+
187
+ // 7. Default - unrecognized
188
+ return {
189
+ text: `🤖 Entendido: "${trimmed.slice(0, 100)}"
190
+
191
+ No tengo una respuesta local para esto. Para respuestas inteligentes con IA, conecta a **Cortex** (mindframe-cortex).
192
+
193
+ Mientras tanto, prueba:
194
+ • "ayuda" — ver mis capacidades
195
+ • "ejecuta ls" — ejecutar un comando
196
+ • "escanea puertos" — auditar seguridad
197
+ • "estado" — ver estado del sistema`,
198
+ };
199
+ }