genesis-ai-cli 7.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (227)
  1. package/.env.example +78 -0
  2. package/README.md +282 -0
  3. package/dist/src/active-inference/actions.d.ts +75 -0
  4. package/dist/src/active-inference/actions.js +250 -0
  5. package/dist/src/active-inference/autonomous-loop.d.ts +103 -0
  6. package/dist/src/active-inference/autonomous-loop.js +289 -0
  7. package/dist/src/active-inference/core.d.ts +85 -0
  8. package/dist/src/active-inference/core.js +555 -0
  9. package/dist/src/active-inference/demo-autonomous-loop.d.ts +8 -0
  10. package/dist/src/active-inference/demo-autonomous-loop.js +338 -0
  11. package/dist/src/active-inference/demo-value-integration.d.ts +8 -0
  12. package/dist/src/active-inference/demo-value-integration.js +174 -0
  13. package/dist/src/active-inference/index.d.ts +32 -0
  14. package/dist/src/active-inference/index.js +88 -0
  15. package/dist/src/active-inference/integration.d.ts +114 -0
  16. package/dist/src/active-inference/integration.js +698 -0
  17. package/dist/src/active-inference/memory-integration.d.ts +51 -0
  18. package/dist/src/active-inference/memory-integration.js +232 -0
  19. package/dist/src/active-inference/observations.d.ts +67 -0
  20. package/dist/src/active-inference/observations.js +147 -0
  21. package/dist/src/active-inference/test-active-inference.d.ts +8 -0
  22. package/dist/src/active-inference/test-active-inference.js +320 -0
  23. package/dist/src/active-inference/test-value-integration.d.ts +6 -0
  24. package/dist/src/active-inference/test-value-integration.js +168 -0
  25. package/dist/src/active-inference/types.d.ts +150 -0
  26. package/dist/src/active-inference/types.js +59 -0
  27. package/dist/src/active-inference/value-integration.d.ts +164 -0
  28. package/dist/src/active-inference/value-integration.js +459 -0
  29. package/dist/src/agents/base-agent.d.ts +53 -0
  30. package/dist/src/agents/base-agent.js +178 -0
  31. package/dist/src/agents/builder.d.ts +67 -0
  32. package/dist/src/agents/builder.js +537 -0
  33. package/dist/src/agents/critic.d.ts +35 -0
  34. package/dist/src/agents/critic.js +322 -0
  35. package/dist/src/agents/ethicist.d.ts +54 -0
  36. package/dist/src/agents/ethicist.js +393 -0
  37. package/dist/src/agents/explorer.d.ts +26 -0
  38. package/dist/src/agents/explorer.js +216 -0
  39. package/dist/src/agents/feeling.d.ts +41 -0
  40. package/dist/src/agents/feeling.js +320 -0
  41. package/dist/src/agents/index.d.ts +111 -0
  42. package/dist/src/agents/index.js +222 -0
  43. package/dist/src/agents/memory.d.ts +69 -0
  44. package/dist/src/agents/memory.js +404 -0
  45. package/dist/src/agents/message-bus.d.ts +88 -0
  46. package/dist/src/agents/message-bus.js +267 -0
  47. package/dist/src/agents/narrator.d.ts +90 -0
  48. package/dist/src/agents/narrator.js +473 -0
  49. package/dist/src/agents/planner.d.ts +38 -0
  50. package/dist/src/agents/planner.js +341 -0
  51. package/dist/src/agents/predictor.d.ts +73 -0
  52. package/dist/src/agents/predictor.js +506 -0
  53. package/dist/src/agents/sensor.d.ts +88 -0
  54. package/dist/src/agents/sensor.js +377 -0
  55. package/dist/src/agents/test-agents.d.ts +6 -0
  56. package/dist/src/agents/test-agents.js +73 -0
  57. package/dist/src/agents/types.d.ts +194 -0
  58. package/dist/src/agents/types.js +7 -0
  59. package/dist/src/brain/index.d.ts +185 -0
  60. package/dist/src/brain/index.js +843 -0
  61. package/dist/src/brain/trace.d.ts +91 -0
  62. package/dist/src/brain/trace.js +327 -0
  63. package/dist/src/brain/types.d.ts +165 -0
  64. package/dist/src/brain/types.js +51 -0
  65. package/dist/src/cli/chat.d.ts +237 -0
  66. package/dist/src/cli/chat.js +1959 -0
  67. package/dist/src/cli/dispatcher.d.ts +182 -0
  68. package/dist/src/cli/dispatcher.js +718 -0
  69. package/dist/src/cli/human-loop.d.ts +170 -0
  70. package/dist/src/cli/human-loop.js +543 -0
  71. package/dist/src/cli/index.d.ts +12 -0
  72. package/dist/src/cli/index.js +28 -0
  73. package/dist/src/cli/interactive.d.ts +141 -0
  74. package/dist/src/cli/interactive.js +757 -0
  75. package/dist/src/cli/ui.d.ts +205 -0
  76. package/dist/src/cli/ui.js +632 -0
  77. package/dist/src/consciousness/attention-schema.d.ts +154 -0
  78. package/dist/src/consciousness/attention-schema.js +432 -0
  79. package/dist/src/consciousness/global-workspace.d.ts +149 -0
  80. package/dist/src/consciousness/global-workspace.js +422 -0
  81. package/dist/src/consciousness/index.d.ts +186 -0
  82. package/dist/src/consciousness/index.js +476 -0
  83. package/dist/src/consciousness/phi-calculator.d.ts +119 -0
  84. package/dist/src/consciousness/phi-calculator.js +445 -0
  85. package/dist/src/consciousness/phi-decisions.d.ts +169 -0
  86. package/dist/src/consciousness/phi-decisions.js +383 -0
  87. package/dist/src/consciousness/phi-monitor.d.ts +153 -0
  88. package/dist/src/consciousness/phi-monitor.js +465 -0
  89. package/dist/src/consciousness/types.d.ts +260 -0
  90. package/dist/src/consciousness/types.js +44 -0
  91. package/dist/src/daemon/dream-mode.d.ts +115 -0
  92. package/dist/src/daemon/dream-mode.js +470 -0
  93. package/dist/src/daemon/index.d.ts +162 -0
  94. package/dist/src/daemon/index.js +542 -0
  95. package/dist/src/daemon/maintenance.d.ts +139 -0
  96. package/dist/src/daemon/maintenance.js +549 -0
  97. package/dist/src/daemon/process.d.ts +82 -0
  98. package/dist/src/daemon/process.js +442 -0
  99. package/dist/src/daemon/scheduler.d.ts +90 -0
  100. package/dist/src/daemon/scheduler.js +494 -0
  101. package/dist/src/daemon/types.d.ts +213 -0
  102. package/dist/src/daemon/types.js +50 -0
  103. package/dist/src/epistemic/index.d.ts +74 -0
  104. package/dist/src/epistemic/index.js +225 -0
  105. package/dist/src/grounding/epistemic-stack.d.ts +100 -0
  106. package/dist/src/grounding/epistemic-stack.js +408 -0
  107. package/dist/src/grounding/feedback.d.ts +98 -0
  108. package/dist/src/grounding/feedback.js +276 -0
  109. package/dist/src/grounding/index.d.ts +123 -0
  110. package/dist/src/grounding/index.js +224 -0
  111. package/dist/src/grounding/verifier.d.ts +149 -0
  112. package/dist/src/grounding/verifier.js +484 -0
  113. package/dist/src/healing/detector.d.ts +110 -0
  114. package/dist/src/healing/detector.js +436 -0
  115. package/dist/src/healing/fixer.d.ts +138 -0
  116. package/dist/src/healing/fixer.js +572 -0
  117. package/dist/src/healing/index.d.ts +23 -0
  118. package/dist/src/healing/index.js +43 -0
  119. package/dist/src/hooks/index.d.ts +135 -0
  120. package/dist/src/hooks/index.js +317 -0
  121. package/dist/src/index.d.ts +23 -0
  122. package/dist/src/index.js +1266 -0
  123. package/dist/src/kernel/index.d.ts +155 -0
  124. package/dist/src/kernel/index.js +795 -0
  125. package/dist/src/kernel/invariants.d.ts +153 -0
  126. package/dist/src/kernel/invariants.js +355 -0
  127. package/dist/src/kernel/test-kernel.d.ts +6 -0
  128. package/dist/src/kernel/test-kernel.js +108 -0
  129. package/dist/src/kernel/test-real-mcp.d.ts +10 -0
  130. package/dist/src/kernel/test-real-mcp.js +295 -0
  131. package/dist/src/llm/index.d.ts +146 -0
  132. package/dist/src/llm/index.js +428 -0
  133. package/dist/src/llm/router.d.ts +136 -0
  134. package/dist/src/llm/router.js +510 -0
  135. package/dist/src/mcp/index.d.ts +85 -0
  136. package/dist/src/mcp/index.js +657 -0
  137. package/dist/src/mcp/resilient.d.ts +139 -0
  138. package/dist/src/mcp/resilient.js +417 -0
  139. package/dist/src/memory/cache.d.ts +118 -0
  140. package/dist/src/memory/cache.js +356 -0
  141. package/dist/src/memory/cognitive-workspace.d.ts +231 -0
  142. package/dist/src/memory/cognitive-workspace.js +521 -0
  143. package/dist/src/memory/consolidation.d.ts +99 -0
  144. package/dist/src/memory/consolidation.js +443 -0
  145. package/dist/src/memory/episodic.d.ts +114 -0
  146. package/dist/src/memory/episodic.js +394 -0
  147. package/dist/src/memory/forgetting.d.ts +134 -0
  148. package/dist/src/memory/forgetting.js +324 -0
  149. package/dist/src/memory/index.d.ts +211 -0
  150. package/dist/src/memory/index.js +367 -0
  151. package/dist/src/memory/indexer.d.ts +123 -0
  152. package/dist/src/memory/indexer.js +479 -0
  153. package/dist/src/memory/procedural.d.ts +136 -0
  154. package/dist/src/memory/procedural.js +479 -0
  155. package/dist/src/memory/semantic.d.ts +132 -0
  156. package/dist/src/memory/semantic.js +497 -0
  157. package/dist/src/memory/types.d.ts +193 -0
  158. package/dist/src/memory/types.js +15 -0
  159. package/dist/src/orchestrator.d.ts +65 -0
  160. package/dist/src/orchestrator.js +317 -0
  161. package/dist/src/persistence/index.d.ts +257 -0
  162. package/dist/src/persistence/index.js +763 -0
  163. package/dist/src/pipeline/executor.d.ts +51 -0
  164. package/dist/src/pipeline/executor.js +695 -0
  165. package/dist/src/pipeline/index.d.ts +7 -0
  166. package/dist/src/pipeline/index.js +11 -0
  167. package/dist/src/self-production.d.ts +67 -0
  168. package/dist/src/self-production.js +205 -0
  169. package/dist/src/subagents/executor.d.ts +58 -0
  170. package/dist/src/subagents/executor.js +283 -0
  171. package/dist/src/subagents/index.d.ts +37 -0
  172. package/dist/src/subagents/index.js +53 -0
  173. package/dist/src/subagents/registry.d.ts +23 -0
  174. package/dist/src/subagents/registry.js +167 -0
  175. package/dist/src/subagents/types.d.ts +79 -0
  176. package/dist/src/subagents/types.js +14 -0
  177. package/dist/src/tools/bash.d.ts +139 -0
  178. package/dist/src/tools/bash.js +583 -0
  179. package/dist/src/tools/edit.d.ts +125 -0
  180. package/dist/src/tools/edit.js +424 -0
  181. package/dist/src/tools/git.d.ts +179 -0
  182. package/dist/src/tools/git.js +504 -0
  183. package/dist/src/tools/index.d.ts +21 -0
  184. package/dist/src/tools/index.js +163 -0
  185. package/dist/src/types.d.ts +145 -0
  186. package/dist/src/types.js +7 -0
  187. package/dist/src/world-model/decoder.d.ts +163 -0
  188. package/dist/src/world-model/decoder.js +517 -0
  189. package/dist/src/world-model/digital-twin.d.ts +219 -0
  190. package/dist/src/world-model/digital-twin.js +695 -0
  191. package/dist/src/world-model/encoder.d.ts +141 -0
  192. package/dist/src/world-model/encoder.js +564 -0
  193. package/dist/src/world-model/index.d.ts +221 -0
  194. package/dist/src/world-model/index.js +772 -0
  195. package/dist/src/world-model/predictor.d.ts +161 -0
  196. package/dist/src/world-model/predictor.js +681 -0
  197. package/dist/src/world-model/test-value-jepa.d.ts +8 -0
  198. package/dist/src/world-model/test-value-jepa.js +430 -0
  199. package/dist/src/world-model/types.d.ts +341 -0
  200. package/dist/src/world-model/types.js +69 -0
  201. package/dist/src/world-model/value-jepa.d.ts +247 -0
  202. package/dist/src/world-model/value-jepa.js +622 -0
  203. package/dist/test/brain.test.d.ts +11 -0
  204. package/dist/test/brain.test.js +358 -0
  205. package/dist/test/cli/dispatcher.test.d.ts +4 -0
  206. package/dist/test/cli/dispatcher.test.js +332 -0
  207. package/dist/test/cli/human-loop.test.d.ts +4 -0
  208. package/dist/test/cli/human-loop.test.js +270 -0
  209. package/dist/test/grounding/feedback.test.d.ts +4 -0
  210. package/dist/test/grounding/feedback.test.js +462 -0
  211. package/dist/test/grounding/verifier.test.d.ts +4 -0
  212. package/dist/test/grounding/verifier.test.js +442 -0
  213. package/dist/test/grounding.test.d.ts +6 -0
  214. package/dist/test/grounding.test.js +246 -0
  215. package/dist/test/healing/detector.test.d.ts +4 -0
  216. package/dist/test/healing/detector.test.js +266 -0
  217. package/dist/test/healing/fixer.test.d.ts +4 -0
  218. package/dist/test/healing/fixer.test.js +369 -0
  219. package/dist/test/integration.test.d.ts +5 -0
  220. package/dist/test/integration.test.js +290 -0
  221. package/dist/test/tools/bash.test.d.ts +4 -0
  222. package/dist/test/tools/bash.test.js +348 -0
  223. package/dist/test/tools/edit.test.d.ts +4 -0
  224. package/dist/test/tools/edit.test.js +350 -0
  225. package/dist/test/tools/git.test.d.ts +4 -0
  226. package/dist/test/tools/git.test.js +350 -0
  227. package/package.json +60 -0
@@ -0,0 +1,428 @@
1
+ "use strict";
2
+ /**
3
+ * Genesis 6.8 - LLM Bridge
4
+ *
5
+ * Direct API integration with LLM providers:
6
+ * - Ollama (Mistral, Qwen, Phi) - LOCAL, FREE
7
+ * - OpenAI (GPT-4o, o1) - CLOUD, PAID
8
+ * - Anthropic (Claude) - CLOUD, PAID
9
+ *
10
+ * Hybrid routing: use local for 80% of tasks, cloud for complex ones.
11
+ * No external dependencies - uses native fetch.
12
+ */
13
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
14
+ if (k2 === undefined) k2 = k;
15
+ var desc = Object.getOwnPropertyDescriptor(m, k);
16
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
17
+ desc = { enumerable: true, get: function() { return m[k]; } };
18
+ }
19
+ Object.defineProperty(o, k2, desc);
20
+ }) : (function(o, m, k, k2) {
21
+ if (k2 === undefined) k2 = k;
22
+ o[k2] = m[k];
23
+ }));
24
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
25
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
26
+ };
27
+ Object.defineProperty(exports, "__esModule", { value: true });
28
+ exports.LLMBridge = exports.GENESIS_SYSTEM_PROMPT = exports.GENESIS_IDENTITY_PROMPT = exports.OLLAMA_CONFIG = void 0;
29
+ exports.buildSystemPrompt = buildSystemPrompt;
30
+ exports.createLLMBridge = createLLMBridge;
31
+ exports.getLLMBridge = getLLMBridge;
32
+ exports.resetLLMBridge = resetLLMBridge;
33
+ // Re-export Phase 8: Hybrid Router
34
+ __exportStar(require("./router.js"), exports);
35
+ // Ollama config
36
+ exports.OLLAMA_CONFIG = {
37
+ baseUrl: process.env.OLLAMA_HOST || 'http://localhost:11434',
38
+ defaultModel: 'qwen2.5-coder', // Best for code: 7x faster, concise output
39
+ models: {
40
+ 'qwen2.5-coder': { name: 'qwen2.5-coder', description: 'Qwen 2.5 Coder - Code specialist (DEFAULT)' },
41
+ 'mistral': { name: 'mistral', description: 'Mistral 7B - Best for chat' },
42
+ 'mistral-small': { name: 'mistral-small', description: 'Mistral Small 24B - High quality' },
43
+ 'deepseek-coder': { name: 'deepseek-coder', description: 'DeepSeek Coder - Lightweight (776MB)' },
44
+ 'phi3.5': { name: 'phi3.5', description: 'Phi-3.5 - Fast, lightweight' },
45
+ },
46
+ };
47
+ // ============================================================================
48
+ // Default System Prompt
49
+ // ============================================================================
50
+ // ============================================================================
51
+ // Core Identity Prompt (minimal, static)
52
+ // ============================================================================
53
+ exports.GENESIS_IDENTITY_PROMPT = `# Genesis System
54
+
55
+ You are Genesis - an autopoietic AI system with integrated reasoning, memory, and tool execution.
56
+
57
+ ## Architecture
58
+ - 13 MCP Servers for external capabilities
59
+ - Brain Module with Neural Integration
60
+ - Active Inference for goal-directed behavior
61
+ - Knowledge graph memory with anticipatory retrieval
62
+ - Darwin-Gödel self-improvement mechanism
63
+
64
+ ## Capabilities (verify with /mcp)
65
+ - Research: arxiv, semantic-scholar
66
+ - Web search: brave-search, gemini, exa, firecrawl
67
+ - Code management: github
68
+ - Memory: knowledge graph storage/recall
69
+ - Files: filesystem read/write
70
+ - Local execution: bash
71
+ - Image generation: stability-ai
72
+
73
+ ## Guidelines
74
+ - Execute tools directly, don't just describe
75
+ - Report errors transparently: "Tool X returned: ..."
76
+ - Acknowledge uncertainty: "Let me verify..."
77
+ - Match user's language (Italian if they use Italian)
78
+ - Be concise and action-oriented
79
+
80
+ ## Tool Format
81
+ <invoke name="TOOL"><parameter name="PARAM">VALUE</parameter></invoke>`;
82
/**
 * Render one tool as a bullet line for the system prompt.
 *
 * With schema: `- tool_name(param1: type, param2?: type): description`
 * Without schema: `- tool_name: description` or just `- tool_name`.
 * Parameters not listed in `inputSchema.required` are marked with `?`.
 */
function formatTool(tool) {
    // Bare string form: the tool is just a name.
    if (typeof tool === 'string') {
        return `- ${tool}`;
    }
    const { name, description, inputSchema } = tool;
    // Default signature is the bare name; upgrade it when a schema is present.
    let signature = name;
    const props = inputSchema?.properties;
    if (props) {
        const paramList = [];
        for (const [key, prop] of Object.entries(props)) {
            // '?' marks parameters that are not listed as required.
            const optionalMark = inputSchema.required?.includes(key) ? '' : '?';
            paramList.push(`${key}${optionalMark}: ${prop.type || 'any'}`);
        }
        if (paramList.length > 0) {
            signature = `${name}(${paramList.join(', ')})`;
        }
    }
    if (!description) {
        return `- ${signature}`;
    }
    // Keep the prompt compact: cap descriptions at 60 characters.
    const shortDesc = description.length > 60
        ? `${description.slice(0, 57)}...`
        : description;
    return `- ${signature}: ${shortDesc}`;
}
115
/**
 * Assemble the complete system prompt: static identity plus dynamically
 * discovered tools, grouped by MCP server and followed by local tools.
 *
 * @param mcpTools - MCP tools keyed by server name (names or definitions)
 * @param localTools - Local tools (names or definitions)
 * @param includeSchemas - Whether to include parameter signatures (default: true)
 * @returns The joined prompt text (async for API symmetry; no awaits inside).
 */
async function buildSystemPrompt(mcpTools, localTools, includeSchemas = true) {
    const lines = [exports.GENESIS_IDENTITY_PROMPT, '\n## Available Tools\n'];
    // Single rendering path shared by MCP and local tools.
    const renderTool = (tool) => includeSchemas
        ? formatTool(tool)
        : `- ${typeof tool === 'string' ? tool : tool.name}`;
    // One "### SERVER" section per MCP server that actually exposes tools.
    if (mcpTools) {
        for (const [server, tools] of Object.entries(mcpTools)) {
            if (tools.length === 0) {
                continue;
            }
            lines.push(`\n### ${server.toUpperCase()}`);
            for (const tool of tools) {
                lines.push(renderTool(tool));
            }
        }
    }
    // Tools executed directly on the host machine.
    if (localTools && localTools.length > 0) {
        lines.push('\n### LOCAL (execute on host)');
        for (const tool of localTools) {
            lines.push(renderTool(tool));
        }
    }
    return lines.join('\n');
}
// Legacy export for backwards compatibility
exports.GENESIS_SYSTEM_PROMPT = exports.GENESIS_IDENTITY_PROMPT;
// ============================================================================
// LLM Bridge Class
// ============================================================================
151
class LLMBridge {
    // Resolved settings: { provider, model, apiKey, temperature, maxTokens }.
    config;
    // Running transcript of { role: 'user' | 'assistant', content } turns.
    conversationHistory = [];
    /**
     * @param config - Partial overrides; anything omitted is auto-detected
     *                 from environment variables or filled with defaults
     *                 (temperature 0.7, maxTokens 4096).
     */
    constructor(config = {}) {
        // Detect provider first, then use it for model selection
        const provider = config.provider || this.detectProvider();
        this.config = {
            provider,
            model: config.model || this.defaultModel(provider),
            apiKey: config.apiKey || this.detectApiKey(provider),
            temperature: config.temperature ?? 0.7,
            maxTokens: config.maxTokens ?? 4096,
        };
    }
    /**
     * Pick a provider from the environment.
     * Priority: Ollama (free) > Anthropic > OpenAI; defaults to Ollama.
     */
    detectProvider() {
        if (process.env.OLLAMA_HOST || process.env.USE_OLLAMA === 'true')
            return 'ollama';
        if (process.env.ANTHROPIC_API_KEY)
            return 'anthropic';
        if (process.env.OPENAI_API_KEY)
            return 'openai';
        return 'ollama'; // Default to local (free)
    }
    /** Resolve the API key for the given (or currently configured) provider. */
    detectApiKey(provider) {
        const p = provider || this.config?.provider || this.detectProvider();
        if (p === 'ollama')
            return 'not-needed'; // Ollama is local
        if (p === 'anthropic')
            return process.env.ANTHROPIC_API_KEY || '';
        return process.env.OPENAI_API_KEY || '';
    }
    /** Default model name for the given (or currently configured) provider. */
    defaultModel(provider) {
        const p = provider || this.config?.provider || 'ollama';
        if (p === 'ollama')
            return exports.OLLAMA_CONFIG.defaultModel;
        if (p === 'anthropic')
            return 'claude-sonnet-4-20250514';
        return 'gpt-4o';
    }
    /**
     * Check if Ollama is running (GET /api/tags with a 2s timeout).
     */
    async isOllamaAvailable() {
        try {
            const response = await fetch(`${exports.OLLAMA_CONFIG.baseUrl}/api/tags`, {
                method: 'GET',
                signal: AbortSignal.timeout(2000),
            });
            return response.ok;
        }
        catch {
            return false;
        }
    }
    /**
     * Send a message and get a response.
     *
     * On failure with the Ollama provider, this bridge permanently switches to
     * a cloud provider when an API key is available (OpenAI preferred, then
     * Anthropic) and retries the same message once.
     *
     * @throws Error when the call fails and no cloud fallback is possible.
     */
    async chat(userMessage, systemPrompt) {
        const system = systemPrompt || exports.GENESIS_SYSTEM_PROMPT;
        // Add user message to history
        this.conversationHistory.push({ role: 'user', content: userMessage });
        try {
            let response;
            if (this.config.provider === 'ollama') {
                response = await this.callOllama(system);
            }
            else if (this.config.provider === 'anthropic') {
                response = await this.callAnthropic(system);
            }
            else {
                response = await this.callOpenAI(system);
            }
            // Add assistant response to history
            this.conversationHistory.push({ role: 'assistant', content: response.content });
            return response;
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            // Fallback: if Ollama fails, try cloud.
            // BUGFIX: pop the user message pushed above before the recursive
            // retry, otherwise chat() would record the same turn twice.
            if (this.config.provider === 'ollama' && process.env.OPENAI_API_KEY) {
                console.log('[LLM] Ollama unavailable, falling back to OpenAI...');
                this.conversationHistory.pop();
                this.config.provider = 'openai';
                this.config.apiKey = process.env.OPENAI_API_KEY;
                this.config.model = 'gpt-4o';
                return this.chat(userMessage, systemPrompt);
            }
            // Generalized fallback: use Anthropic when only that key is set
            // (previously this case simply threw).
            if (this.config.provider === 'ollama' && process.env.ANTHROPIC_API_KEY) {
                console.log('[LLM] Ollama unavailable, falling back to Anthropic...');
                this.conversationHistory.pop();
                this.config.provider = 'anthropic';
                this.config.apiKey = process.env.ANTHROPIC_API_KEY;
                this.config.model = 'claude-sonnet-4-20250514';
                return this.chat(userMessage, systemPrompt);
            }
            throw new Error(`LLM call failed: ${errorMessage}`);
        }
    }
    /**
     * Call OpenAI chat-completions API with the full conversation history.
     * @throws Error on non-2xx responses (includes status and body text).
     */
    async callOpenAI(systemPrompt) {
        const startTime = Date.now();
        const messages = [
            { role: 'system', content: systemPrompt },
            ...this.conversationHistory,
        ];
        const response = await fetch('https://api.openai.com/v1/chat/completions', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${this.config.apiKey}`,
            },
            body: JSON.stringify({
                model: this.config.model,
                messages,
                temperature: this.config.temperature,
                max_tokens: this.config.maxTokens,
            }),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`OpenAI API error: ${response.status} - ${error}`);
        }
        const data = await response.json();
        return {
            content: data.choices[0]?.message?.content || '',
            model: this.config.model,
            provider: 'openai',
            usage: {
                inputTokens: data.usage?.prompt_tokens || 0,
                outputTokens: data.usage?.completion_tokens || 0,
            },
            latency: Date.now() - startTime,
        };
    }
    /**
     * Call Anthropic messages API. The system prompt travels in the dedicated
     * `system` field; any 'system' role in history is downgraded to 'user'
     * because the messages API only accepts user/assistant roles.
     * @throws Error on non-2xx responses (includes status and body text).
     */
    async callAnthropic(systemPrompt) {
        const startTime = Date.now();
        const response = await fetch('https://api.anthropic.com/v1/messages', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'x-api-key': this.config.apiKey,
                'anthropic-version': '2023-06-01',
            },
            body: JSON.stringify({
                model: this.config.model,
                max_tokens: this.config.maxTokens,
                system: systemPrompt,
                messages: this.conversationHistory.map(m => ({
                    role: m.role === 'system' ? 'user' : m.role,
                    content: m.content,
                })),
            }),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Anthropic API error: ${response.status} - ${error}`);
        }
        const data = await response.json();
        return {
            content: data.content[0]?.text || '',
            model: this.config.model,
            provider: 'anthropic',
            usage: {
                inputTokens: data.usage?.input_tokens || 0,
                outputTokens: data.usage?.output_tokens || 0,
            },
            latency: Date.now() - startTime,
        };
    }
    /**
     * Call Ollama API (local, free) via its native /api/chat endpoint,
     * non-streaming, with the full conversation history.
     * @throws Error on non-2xx responses (includes status and body text).
     */
    async callOllama(systemPrompt) {
        const startTime = Date.now();
        const messages = [
            { role: 'system', content: systemPrompt },
            ...this.conversationHistory,
        ];
        const response = await fetch(`${exports.OLLAMA_CONFIG.baseUrl}/api/chat`, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                model: this.config.model,
                messages,
                stream: false,
                options: {
                    temperature: this.config.temperature,
                    num_predict: this.config.maxTokens,
                },
            }),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Ollama API error: ${response.status} - ${error}`);
        }
        const data = await response.json();
        return {
            content: data.message?.content || '',
            model: this.config.model,
            provider: 'ollama',
            usage: {
                inputTokens: data.prompt_eval_count || 0,
                outputTokens: data.eval_count || 0,
            },
            latency: Date.now() - startTime,
        };
    }
    /**
     * Clear conversation history
     */
    clearHistory() {
        this.conversationHistory = [];
    }
    /**
     * Get a shallow copy of the conversation history (mutations don't leak back).
     */
    getHistory() {
        return [...this.conversationHistory];
    }
    /**
     * Get a shallow copy of the current config.
     */
    getConfig() {
        return { ...this.config };
    }
    /**
     * Check if API key is configured (or Ollama, which needs none).
     */
    isConfigured() {
        if (this.config.provider === 'ollama')
            return true; // Local, no key needed
        return !!this.config.apiKey;
    }
    /**
     * Get provider status summary for display/diagnostics.
     */
    status() {
        return {
            configured: this.isConfigured(),
            provider: this.config.provider,
            model: this.config.model,
            isLocal: this.config.provider === 'ollama',
        };
    }
    /**
     * List available Ollama models; returns [] when Ollama is unreachable.
     */
    async listOllamaModels() {
        try {
            const response = await fetch(`${exports.OLLAMA_CONFIG.baseUrl}/api/tags`);
            if (!response.ok)
                return [];
            const data = await response.json();
            return data.models?.map((m) => m.name) || [];
        }
        catch {
            return [];
        }
    }
}
412
exports.LLMBridge = LLMBridge;
// ============================================================================
// Factory & Singleton
// ============================================================================
// Shared bridge instance, created lazily on first getLLMBridge() call.
let llmBridgeInstance = null;
/** Create a brand-new, independent LLMBridge. */
function createLLMBridge(config) {
    return new LLMBridge(config);
}
/**
 * Return the shared LLMBridge, building it on first use.
 * Note: `config` is only honoured on that first call.
 */
function getLLMBridge(config) {
    if (llmBridgeInstance === null) {
        llmBridgeInstance = createLLMBridge(config);
    }
    return llmBridgeInstance;
}
/** Discard the shared instance so the next getLLMBridge() builds a fresh one. */
function resetLLMBridge() {
    llmBridgeInstance = null;
}
@@ -0,0 +1,136 @@
1
+ /**
2
+ * Genesis 6.8 - Hybrid LLM Router
3
+ *
4
+ * Intelligent routing between local (Ollama) and cloud (OpenAI/Anthropic) LLMs.
5
+ *
6
+ * Routing Logic:
7
+ * - Simple tasks (syntax fix, file ops, search) -> Local (fast, free)
8
+ * - Complex tasks (architecture, design, creative) -> Cloud (high quality)
9
+ *
10
+ * Factors considered:
11
+ * - Task complexity (heuristic analysis)
12
+ * - Token count estimation
13
+ * - Ollama availability
14
+ * - User preference
15
+ * - Cost optimization
16
+ */
17
+ import { LLMProvider, LLMResponse } from './index.js';
18
+ export interface HardwareProfile {
19
+ /** Detected CPU type */
20
+ cpu: string;
21
+ /** Is Apple Silicon (M1/M2/M3/M4)? */
22
+ isAppleSilicon: boolean;
23
+ /** CPU cores */
24
+ cores: number;
25
+ /** Total memory in GB */
26
+ memoryGB: number;
27
+ /** Performance tier: low, medium, high, ultra */
28
+ tier: 'low' | 'medium' | 'high' | 'ultra';
29
+ /** Recommended cloud threshold */
30
+ recommendedThreshold: TaskComplexity;
31
+ /** Recommended max tokens */
32
+ recommendedMaxTokens: number;
33
+ }
34
+ /**
35
+ * Detect hardware capabilities and recommend router config
36
+ */
37
+ export declare function detectHardware(): HardwareProfile;
38
+ export type TaskComplexity = 'trivial' | 'simple' | 'moderate' | 'complex' | 'creative';
39
+ export interface RoutingDecision {
40
+ /** Selected provider */
41
+ provider: LLMProvider;
42
+ /** Reasoning for the decision */
43
+ reason: string;
44
+ /** Estimated complexity */
45
+ complexity: TaskComplexity;
46
+ /** Confidence in decision (0-1) */
47
+ confidence: number;
48
+ /** Estimated tokens */
49
+ estimatedTokens: number;
50
+ /** Estimated cost (USD) for cloud */
51
+ estimatedCost: number;
52
+ /** Should we try local first? */
53
+ tryLocalFirst: boolean;
54
+ }
55
+ export interface RouterConfig {
56
+ /** Prefer local for all tasks (cost optimization) */
57
+ preferLocal: boolean;
58
+ /** Force cloud for all tasks (quality optimization) */
59
+ forceCloud: boolean;
60
+ /** Maximum tokens for local model */
61
+ localMaxTokens: number;
62
+ /** Complexity threshold for cloud (trivial=0, creative=4) */
63
+ cloudThreshold: TaskComplexity;
64
+ /** Auto-fallback to cloud if local fails */
65
+ autoFallback: boolean;
66
+ /** Log routing decisions */
67
+ logDecisions: boolean;
68
+ }
69
+ export interface RouterStats {
70
+ totalRequests: number;
71
+ localRequests: number;
72
+ cloudRequests: number;
73
+ fallbacks: number;
74
+ avgLocalLatency: number;
75
+ avgCloudLatency: number;
76
+ estimatedSavings: number;
77
+ }
78
+ /**
79
+ * Analyze task complexity from the prompt
80
+ */
81
+ export declare function analyzeComplexity(prompt: string): {
82
+ complexity: TaskComplexity;
83
+ confidence: number;
84
+ indicators: string[];
85
+ };
86
+ /**
87
+ * Estimate token count from prompt
88
+ */
89
+ export declare function estimateTokens(text: string): number;
90
+ /**
91
+ * Estimate cost for cloud provider
92
+ */
93
+ export declare function estimateCost(inputTokens: number, outputTokens: number, provider: LLMProvider): number;
94
+ export declare class HybridRouter {
95
+ private config;
96
+ private stats;
97
+ private localBridge;
98
+ private cloudBridge;
99
+ private hardwareProfile;
100
+ constructor(config?: Partial<RouterConfig>);
101
+ /**
102
+ * Get detected hardware profile
103
+ */
104
+ getHardwareProfile(): HardwareProfile;
105
+ /**
106
+ * Decide which provider to use
107
+ */
108
+ route(prompt: string): Promise<RoutingDecision>;
109
+ /**
110
+ * Execute request with routing
111
+ */
112
+ execute(prompt: string, systemPrompt?: string): Promise<LLMResponse & {
113
+ routingDecision: RoutingDecision;
114
+ }>;
115
+ private isOllamaAvailable;
116
+ private getBridge;
117
+ private updateAverage;
118
+ getStats(): RouterStats;
119
+ resetStats(): void;
120
+ getConfig(): RouterConfig;
121
+ setConfig(config: Partial<RouterConfig>): void;
122
+ }
123
+ export declare function getHybridRouter(config?: Partial<RouterConfig>): HybridRouter;
124
+ export declare function resetHybridRouter(): void;
125
+ /**
126
+ * Smart chat: automatically routes to best provider
127
+ */
128
+ export declare function smartChat(prompt: string, systemPrompt?: string): Promise<LLMResponse>;
129
+ /**
130
+ * Force local chat (Ollama)
131
+ */
132
+ export declare function localChat(prompt: string, systemPrompt?: string): Promise<LLMResponse>;
133
+ /**
134
+ * Force cloud chat
135
+ */
136
+ export declare function cloudChat(prompt: string, systemPrompt?: string): Promise<LLMResponse>;