@build-astron-co/nimbus 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (313)
  1. package/LICENSE +21 -0
  2. package/README.md +628 -0
  3. package/bin/nimbus +38 -0
  4. package/package.json +80 -0
  5. package/src/__tests__/app.test.ts +76 -0
  6. package/src/__tests__/audit.test.ts +877 -0
  7. package/src/__tests__/circuit-breaker.test.ts +116 -0
  8. package/src/__tests__/cli-run.test.ts +115 -0
  9. package/src/__tests__/context-manager.test.ts +502 -0
  10. package/src/__tests__/context.test.ts +242 -0
  11. package/src/__tests__/enterprise.test.ts +401 -0
  12. package/src/__tests__/generator.test.ts +433 -0
  13. package/src/__tests__/hooks.test.ts +582 -0
  14. package/src/__tests__/init.test.ts +436 -0
  15. package/src/__tests__/intent-parser.test.ts +229 -0
  16. package/src/__tests__/llm-router.test.ts +209 -0
  17. package/src/__tests__/lsp.test.ts +293 -0
  18. package/src/__tests__/modes.test.ts +336 -0
  19. package/src/__tests__/permissions.test.ts +338 -0
  20. package/src/__tests__/serve.test.ts +275 -0
  21. package/src/__tests__/sessions.test.ts +227 -0
  22. package/src/__tests__/sharing.test.ts +288 -0
  23. package/src/__tests__/snapshots.test.ts +581 -0
  24. package/src/__tests__/state-db.test.ts +334 -0
  25. package/src/__tests__/stream-with-tools.test.ts +732 -0
  26. package/src/__tests__/subagents.test.ts +176 -0
  27. package/src/__tests__/system-prompt.test.ts +169 -0
  28. package/src/__tests__/tool-converter.test.ts +256 -0
  29. package/src/__tests__/tool-schemas.test.ts +397 -0
  30. package/src/__tests__/tools.test.ts +143 -0
  31. package/src/__tests__/version.test.ts +49 -0
  32. package/src/agent/compaction-agent.ts +227 -0
  33. package/src/agent/context-manager.ts +435 -0
  34. package/src/agent/context.ts +427 -0
  35. package/src/agent/deploy-preview.ts +426 -0
  36. package/src/agent/index.ts +68 -0
  37. package/src/agent/loop.ts +717 -0
  38. package/src/agent/modes.ts +429 -0
  39. package/src/agent/permissions.ts +466 -0
  40. package/src/agent/subagents/base.ts +116 -0
  41. package/src/agent/subagents/cost.ts +51 -0
  42. package/src/agent/subagents/explore.ts +42 -0
  43. package/src/agent/subagents/general.ts +54 -0
  44. package/src/agent/subagents/index.ts +102 -0
  45. package/src/agent/subagents/infra.ts +59 -0
  46. package/src/agent/subagents/security.ts +69 -0
  47. package/src/agent/system-prompt.ts +436 -0
  48. package/src/app.ts +122 -0
  49. package/src/audit/activity-log.ts +290 -0
  50. package/src/audit/compliance-checker.ts +540 -0
  51. package/src/audit/cost-tracker.ts +318 -0
  52. package/src/audit/index.ts +23 -0
  53. package/src/audit/security-scanner.ts +596 -0
  54. package/src/auth/guard.ts +75 -0
  55. package/src/auth/index.ts +56 -0
  56. package/src/auth/oauth.ts +455 -0
  57. package/src/auth/providers.ts +470 -0
  58. package/src/auth/sso.ts +113 -0
  59. package/src/auth/store.ts +505 -0
  60. package/src/auth/types.ts +187 -0
  61. package/src/build.ts +141 -0
  62. package/src/cli/index.ts +16 -0
  63. package/src/cli/init.ts +854 -0
  64. package/src/cli/openapi-spec.ts +356 -0
  65. package/src/cli/run.ts +237 -0
  66. package/src/cli/serve-auth.ts +80 -0
  67. package/src/cli/serve.ts +462 -0
  68. package/src/cli/web.ts +67 -0
  69. package/src/cli.ts +1417 -0
  70. package/src/clients/core-engine-client.ts +227 -0
  71. package/src/clients/enterprise-client.ts +334 -0
  72. package/src/clients/generator-client.ts +351 -0
  73. package/src/clients/git-client.ts +627 -0
  74. package/src/clients/github-client.ts +410 -0
  75. package/src/clients/helm-client.ts +504 -0
  76. package/src/clients/index.ts +80 -0
  77. package/src/clients/k8s-client.ts +497 -0
  78. package/src/clients/llm-client.ts +161 -0
  79. package/src/clients/rest-client.ts +130 -0
  80. package/src/clients/service-discovery.ts +33 -0
  81. package/src/clients/terraform-client.ts +482 -0
  82. package/src/clients/tools-client.ts +1843 -0
  83. package/src/clients/ws-client.ts +115 -0
  84. package/src/commands/analyze/index.ts +352 -0
  85. package/src/commands/apply/helm.ts +473 -0
  86. package/src/commands/apply/index.ts +213 -0
  87. package/src/commands/apply/k8s.ts +454 -0
  88. package/src/commands/apply/terraform.ts +582 -0
  89. package/src/commands/ask.ts +167 -0
  90. package/src/commands/audit/index.ts +238 -0
  91. package/src/commands/auth-cloud.ts +294 -0
  92. package/src/commands/auth-list.ts +134 -0
  93. package/src/commands/auth-profile.ts +121 -0
  94. package/src/commands/auth-status.ts +141 -0
  95. package/src/commands/aws/ec2.ts +501 -0
  96. package/src/commands/aws/iam.ts +397 -0
  97. package/src/commands/aws/index.ts +133 -0
  98. package/src/commands/aws/lambda.ts +396 -0
  99. package/src/commands/aws/rds.ts +439 -0
  100. package/src/commands/aws/s3.ts +439 -0
  101. package/src/commands/aws/vpc.ts +393 -0
  102. package/src/commands/aws-discover.ts +649 -0
  103. package/src/commands/aws-terraform.ts +805 -0
  104. package/src/commands/azure/aks.ts +376 -0
  105. package/src/commands/azure/functions.ts +253 -0
  106. package/src/commands/azure/index.ts +116 -0
  107. package/src/commands/azure/storage.ts +478 -0
  108. package/src/commands/azure/vm.ts +355 -0
  109. package/src/commands/billing/index.ts +256 -0
  110. package/src/commands/chat.ts +314 -0
  111. package/src/commands/config.ts +346 -0
  112. package/src/commands/cost/cloud-cost-estimator.ts +266 -0
  113. package/src/commands/cost/estimator.ts +79 -0
  114. package/src/commands/cost/index.ts +594 -0
  115. package/src/commands/cost/parsers/terraform.ts +273 -0
  116. package/src/commands/cost/parsers/types.ts +25 -0
  117. package/src/commands/cost/pricing/aws.ts +544 -0
  118. package/src/commands/cost/pricing/azure.ts +499 -0
  119. package/src/commands/cost/pricing/gcp.ts +396 -0
  120. package/src/commands/cost/pricing/index.ts +40 -0
  121. package/src/commands/demo.ts +250 -0
  122. package/src/commands/doctor.ts +794 -0
  123. package/src/commands/drift/index.ts +439 -0
  124. package/src/commands/explain.ts +277 -0
  125. package/src/commands/feedback.ts +389 -0
  126. package/src/commands/fix.ts +324 -0
  127. package/src/commands/fs/index.ts +402 -0
  128. package/src/commands/gcp/compute.ts +325 -0
  129. package/src/commands/gcp/functions.ts +271 -0
  130. package/src/commands/gcp/gke.ts +438 -0
  131. package/src/commands/gcp/iam.ts +344 -0
  132. package/src/commands/gcp/index.ts +129 -0
  133. package/src/commands/gcp/storage.ts +284 -0
  134. package/src/commands/generate-helm.ts +1249 -0
  135. package/src/commands/generate-k8s.ts +1560 -0
  136. package/src/commands/generate-terraform.ts +1460 -0
  137. package/src/commands/gh/index.ts +863 -0
  138. package/src/commands/git/index.ts +1343 -0
  139. package/src/commands/helm/index.ts +1126 -0
  140. package/src/commands/help.ts +539 -0
  141. package/src/commands/history.ts +142 -0
  142. package/src/commands/import.ts +868 -0
  143. package/src/commands/index.ts +367 -0
  144. package/src/commands/init.ts +1046 -0
  145. package/src/commands/k8s/index.ts +1137 -0
  146. package/src/commands/login.ts +631 -0
  147. package/src/commands/logout.ts +83 -0
  148. package/src/commands/onboarding.ts +228 -0
  149. package/src/commands/plan/display.ts +279 -0
  150. package/src/commands/plan/index.ts +599 -0
  151. package/src/commands/preview.ts +452 -0
  152. package/src/commands/questionnaire.ts +1270 -0
  153. package/src/commands/resume.ts +55 -0
  154. package/src/commands/team/index.ts +346 -0
  155. package/src/commands/template.ts +232 -0
  156. package/src/commands/tf/index.ts +1034 -0
  157. package/src/commands/upgrade.ts +550 -0
  158. package/src/commands/usage/index.ts +134 -0
  159. package/src/commands/version.ts +170 -0
  160. package/src/compat/index.ts +2 -0
  161. package/src/compat/runtime.ts +12 -0
  162. package/src/compat/sqlite.ts +107 -0
  163. package/src/config/index.ts +17 -0
  164. package/src/config/manager.ts +530 -0
  165. package/src/config/safety-policy.ts +358 -0
  166. package/src/config/schema.ts +125 -0
  167. package/src/config/types.ts +527 -0
  168. package/src/context/context-db.ts +199 -0
  169. package/src/demo/index.ts +349 -0
  170. package/src/demo/scenarios/full-journey.ts +229 -0
  171. package/src/demo/scenarios/getting-started.ts +127 -0
  172. package/src/demo/scenarios/helm-release.ts +341 -0
  173. package/src/demo/scenarios/k8s-deployment.ts +194 -0
  174. package/src/demo/scenarios/terraform-vpc.ts +170 -0
  175. package/src/demo/types.ts +92 -0
  176. package/src/engine/cost-estimator.ts +438 -0
  177. package/src/engine/diagram-generator.ts +256 -0
  178. package/src/engine/drift-detector.ts +902 -0
  179. package/src/engine/executor.ts +1035 -0
  180. package/src/engine/index.ts +76 -0
  181. package/src/engine/orchestrator.ts +636 -0
  182. package/src/engine/planner.ts +720 -0
  183. package/src/engine/safety.ts +743 -0
  184. package/src/engine/verifier.ts +770 -0
  185. package/src/enterprise/audit.ts +348 -0
  186. package/src/enterprise/auth.ts +270 -0
  187. package/src/enterprise/billing.ts +822 -0
  188. package/src/enterprise/index.ts +17 -0
  189. package/src/enterprise/teams.ts +443 -0
  190. package/src/generator/best-practices.ts +1608 -0
  191. package/src/generator/helm.ts +630 -0
  192. package/src/generator/index.ts +37 -0
  193. package/src/generator/intent-parser.ts +514 -0
  194. package/src/generator/kubernetes.ts +976 -0
  195. package/src/generator/terraform.ts +1867 -0
  196. package/src/history/index.ts +8 -0
  197. package/src/history/manager.ts +322 -0
  198. package/src/history/types.ts +34 -0
  199. package/src/hooks/config.ts +432 -0
  200. package/src/hooks/engine.ts +391 -0
  201. package/src/hooks/index.ts +4 -0
  202. package/src/llm/auth-bridge.ts +198 -0
  203. package/src/llm/circuit-breaker.ts +140 -0
  204. package/src/llm/config-loader.ts +201 -0
  205. package/src/llm/cost-calculator.ts +171 -0
  206. package/src/llm/index.ts +8 -0
  207. package/src/llm/model-aliases.ts +115 -0
  208. package/src/llm/provider-registry.ts +63 -0
  209. package/src/llm/providers/anthropic.ts +433 -0
  210. package/src/llm/providers/bedrock.ts +477 -0
  211. package/src/llm/providers/google.ts +405 -0
  212. package/src/llm/providers/ollama.ts +767 -0
  213. package/src/llm/providers/openai-compatible.ts +340 -0
  214. package/src/llm/providers/openai.ts +328 -0
  215. package/src/llm/providers/openrouter.ts +338 -0
  216. package/src/llm/router.ts +1035 -0
  217. package/src/llm/types.ts +232 -0
  218. package/src/lsp/client.ts +298 -0
  219. package/src/lsp/languages.ts +116 -0
  220. package/src/lsp/manager.ts +278 -0
  221. package/src/mcp/client.ts +402 -0
  222. package/src/mcp/index.ts +5 -0
  223. package/src/mcp/manager.ts +133 -0
  224. package/src/nimbus.ts +214 -0
  225. package/src/plugins/index.ts +27 -0
  226. package/src/plugins/loader.ts +334 -0
  227. package/src/plugins/manager.ts +376 -0
  228. package/src/plugins/types.ts +284 -0
  229. package/src/scanners/cicd-scanner.ts +258 -0
  230. package/src/scanners/cloud-scanner.ts +466 -0
  231. package/src/scanners/framework-scanner.ts +469 -0
  232. package/src/scanners/iac-scanner.ts +388 -0
  233. package/src/scanners/index.ts +539 -0
  234. package/src/scanners/language-scanner.ts +276 -0
  235. package/src/scanners/package-manager-scanner.ts +277 -0
  236. package/src/scanners/types.ts +172 -0
  237. package/src/sessions/manager.ts +365 -0
  238. package/src/sessions/types.ts +44 -0
  239. package/src/sharing/sync.ts +296 -0
  240. package/src/sharing/viewer.ts +97 -0
  241. package/src/snapshots/index.ts +2 -0
  242. package/src/snapshots/manager.ts +530 -0
  243. package/src/state/artifacts.ts +147 -0
  244. package/src/state/audit.ts +137 -0
  245. package/src/state/billing.ts +240 -0
  246. package/src/state/checkpoints.ts +117 -0
  247. package/src/state/config.ts +67 -0
  248. package/src/state/conversations.ts +14 -0
  249. package/src/state/credentials.ts +154 -0
  250. package/src/state/db.ts +58 -0
  251. package/src/state/index.ts +26 -0
  252. package/src/state/messages.ts +115 -0
  253. package/src/state/projects.ts +123 -0
  254. package/src/state/schema.ts +236 -0
  255. package/src/state/sessions.ts +147 -0
  256. package/src/state/teams.ts +200 -0
  257. package/src/telemetry.ts +108 -0
  258. package/src/tools/aws-ops.ts +952 -0
  259. package/src/tools/azure-ops.ts +579 -0
  260. package/src/tools/file-ops.ts +593 -0
  261. package/src/tools/gcp-ops.ts +625 -0
  262. package/src/tools/git-ops.ts +773 -0
  263. package/src/tools/github-ops.ts +799 -0
  264. package/src/tools/helm-ops.ts +943 -0
  265. package/src/tools/index.ts +17 -0
  266. package/src/tools/k8s-ops.ts +819 -0
  267. package/src/tools/schemas/converter.ts +184 -0
  268. package/src/tools/schemas/devops.ts +612 -0
  269. package/src/tools/schemas/index.ts +73 -0
  270. package/src/tools/schemas/standard.ts +1144 -0
  271. package/src/tools/schemas/types.ts +705 -0
  272. package/src/tools/terraform-ops.ts +862 -0
  273. package/src/types/ambient.d.ts +193 -0
  274. package/src/types/config.ts +83 -0
  275. package/src/types/drift.ts +116 -0
  276. package/src/types/enterprise.ts +335 -0
  277. package/src/types/index.ts +20 -0
  278. package/src/types/plan.ts +44 -0
  279. package/src/types/request.ts +65 -0
  280. package/src/types/response.ts +54 -0
  281. package/src/types/service.ts +51 -0
  282. package/src/ui/App.tsx +997 -0
  283. package/src/ui/DeployPreview.tsx +169 -0
  284. package/src/ui/Header.tsx +68 -0
  285. package/src/ui/InputBox.tsx +350 -0
  286. package/src/ui/MessageList.tsx +585 -0
  287. package/src/ui/PermissionPrompt.tsx +151 -0
  288. package/src/ui/StatusBar.tsx +158 -0
  289. package/src/ui/ToolCallDisplay.tsx +409 -0
  290. package/src/ui/chat-ui.ts +853 -0
  291. package/src/ui/index.ts +33 -0
  292. package/src/ui/ink/index.ts +711 -0
  293. package/src/ui/streaming.ts +176 -0
  294. package/src/ui/types.ts +57 -0
  295. package/src/utils/analytics.ts +72 -0
  296. package/src/utils/cost-warning.ts +27 -0
  297. package/src/utils/env.ts +46 -0
  298. package/src/utils/errors.ts +69 -0
  299. package/src/utils/event-bus.ts +38 -0
  300. package/src/utils/index.ts +24 -0
  301. package/src/utils/logger.ts +171 -0
  302. package/src/utils/rate-limiter.ts +121 -0
  303. package/src/utils/service-auth.ts +49 -0
  304. package/src/utils/validation.ts +53 -0
  305. package/src/version.ts +4 -0
  306. package/src/watcher/index.ts +163 -0
  307. package/src/wizard/approval.ts +383 -0
  308. package/src/wizard/index.ts +25 -0
  309. package/src/wizard/prompts.ts +338 -0
  310. package/src/wizard/types.ts +171 -0
  311. package/src/wizard/ui.ts +556 -0
  312. package/src/wizard/wizard.ts +304 -0
  313. package/tsconfig.json +24 -0
@@ -0,0 +1,1035 @@
1
+ /**
2
+ * LLM Router
3
+ * Routes requests to the appropriate provider based on model, cost optimization, and fallback logic.
4
+ *
5
+ * Refactored for the embedded Nimbus architecture. Key changes from the microservice version:
6
+ * - Imports providers from local ./providers/ directory
7
+ * - Integrates model alias resolution via resolveModelAlias
8
+ * - Integrates auto-detection via detectProvider
9
+ * - Supports OpenAI-compatible and Bedrock providers via env vars
10
+ * - persistUsage writes to the embedded SQLite usage table (fire-and-forget)
11
+ */
12
+
13
+ import { logger } from '../utils';
14
+ import {
15
+ getTextContent,
16
+ type LLMProvider,
17
+ type CompletionRequest,
18
+ type LLMResponse,
19
+ type StreamChunk,
20
+ type ToolCompletionRequest,
21
+ } from './types';
22
+ import { AnthropicProvider } from './providers/anthropic';
23
+ import { OpenAIProvider } from './providers/openai';
24
+ import { GoogleProvider } from './providers/google';
25
+ import { OllamaProvider } from './providers/ollama';
26
+ import { OpenRouterProvider } from './providers/openrouter';
27
+ import { OpenAICompatibleProvider } from './providers/openai-compatible';
28
+ import { BedrockProvider } from './providers/bedrock';
29
+ import { calculateCost, type CostResult } from './cost-calculator';
30
+ import { resolveModelAlias, stripProviderPrefix } from './model-aliases';
31
+ import { detectProvider } from './provider-registry';
32
+ import { ProviderCircuitBreaker } from './circuit-breaker';
33
+
34
export interface RouterConfig {
  /** Provider name used when a request does not pick one (e.g. 'anthropic'). */
  defaultProvider: string;
  /** Model id used when a request does not specify one. */
  defaultModel: string;
  /** Task-type based model selection intended to reduce spend. */
  costOptimization: {
    enabled: boolean;
    /** Task types that should be routed to `cheapModel`. */
    cheapModelFor: string[];
    /** Task types that should be routed to `expensiveModel`. */
    expensiveModelFor: string[];
    cheapModel: string;
    expensiveModel: string;
  };
  /** Automatic retry against alternate providers when the primary fails. */
  fallback: {
    enabled: boolean;
    /** Ordered list of provider names to try as fallbacks. */
    providers: string[];
  };
  /** Optional per-request token cap (applied by enforceTokenBudget). */
  tokenBudget?: {
    maxTokensPerRequest?: number;
  };
}
52
+
53
/**
 * Summary describing a single provider. Not consumed within this file;
 * presumably used by status/listing commands — verify against callers.
 */
export interface ProviderInfo {
  /** Provider name as registered in the router. */
  name: string;
  /** Whether the provider is currently usable. */
  available: boolean;
  /** Model ids offered by this provider. */
  models: string[];
}
58
+
59
/**
 * Metadata emitted by the streaming fallback to indicate which provider
 * is actually serving the response. The WebSocket handler inspects this
 * (via LLMRouter.lastStreamFallbackMeta) to notify clients of provider
 * switches mid-session.
 */
export interface StreamFallbackMeta {
  /** The provider that is actively streaming. */
  activeProvider: string;
  /** If a fallback occurred, the provider that originally failed. */
  failedProvider?: string;
  /** True when this stream is being served by a fallback provider. */
  isFallback: boolean;
}
72
+
73
+ export class LLMRouter {
74
  // Initialized providers keyed by provider name (populated by initializeProviders).
  private providers: Map<string, LLMProvider>;
  // Fully-resolved configuration (constructor merges overrides + env + defaults).
  private config: RouterConfig;
  // Per-provider failure tracking; open circuits are skipped during fallback.
  private circuitBreaker = new ProviderCircuitBreaker();

  /**
   * Populated during streaming with fallback so callers (e.g. WebSocket)
   * can inspect which provider ended up serving the stream. Reset on
   * every call to routeStream / executeStreamWithFallback.
   */
  lastStreamFallbackMeta: StreamFallbackMeta | null = null;
84
+
85
+ constructor(config?: Partial<RouterConfig>) {
86
+ this.providers = new Map();
87
+ this.config = {
88
+ defaultProvider: config?.defaultProvider || process.env.DEFAULT_PROVIDER || 'anthropic',
89
+ defaultModel: config?.defaultModel || process.env.DEFAULT_MODEL || 'claude-sonnet-4-20250514',
90
+ costOptimization: {
91
+ enabled:
92
+ config?.costOptimization?.enabled ?? process.env.ENABLE_COST_OPTIMIZATION === 'true',
93
+ cheapModelFor: config?.costOptimization?.cheapModelFor || [
94
+ 'simple_queries',
95
+ 'summarization',
96
+ 'classification',
97
+ 'explanations',
98
+ ],
99
+ expensiveModelFor: config?.costOptimization?.expensiveModelFor || [
100
+ 'code_generation',
101
+ 'complex_reasoning',
102
+ 'planning',
103
+ ],
104
+ cheapModel:
105
+ config?.costOptimization?.cheapModel ||
106
+ process.env.CHEAP_MODEL ||
107
+ 'claude-haiku-4-20250514',
108
+ expensiveModel:
109
+ config?.costOptimization?.expensiveModel ||
110
+ process.env.EXPENSIVE_MODEL ||
111
+ 'claude-opus-4-20250514',
112
+ },
113
+ fallback: {
114
+ enabled: config?.fallback?.enabled ?? process.env.DISABLE_FALLBACK !== 'true',
115
+ providers:
116
+ config?.fallback?.providers ||
117
+ (process.env.FALLBACK_PROVIDERS?.split(',') ?? [
118
+ 'anthropic',
119
+ 'openai',
120
+ 'openrouter',
121
+ 'google',
122
+ ]),
123
+ },
124
+ };
125
+
126
+ this.initializeProviders();
127
+ }
128
+
129
+ /**
130
+ * Initialize all available providers based on API keys, auth.json, and environment variables.
131
+ *
132
+ * Resolution order per provider:
133
+ * 1. auth.json (~/.nimbus/auth.json) via the auth-bridge
134
+ * 2. Environment variables (ANTHROPIC_API_KEY, etc.)
135
+ */
136
+ private initializeProviders(): void {
137
+ // Lazy-import the auth-bridge to avoid circular deps at module level
138
+ let isConfigured: (name: string) => boolean;
139
+ let getApiKey: (name: string) => string | undefined;
140
+ try {
141
+ // eslint-disable-next-line @typescript-eslint/no-var-requires
142
+ const bridge = require('./auth-bridge');
143
+ isConfigured = bridge.isProviderConfigured;
144
+ getApiKey = bridge.getProviderApiKey;
145
+ } catch (err) {
146
+ // Auth-bridge unavailable (e.g., test environment) — fall back to env-only
147
+ logger.warn(
148
+ 'Auth-bridge unavailable, using environment variables only:',
149
+ err instanceof Error ? err.message : String(err)
150
+ );
151
+ isConfigured = () => false;
152
+ getApiKey = () => undefined;
153
+ }
154
+
155
+ // Anthropic
156
+ if (process.env.ANTHROPIC_API_KEY || isConfigured('anthropic')) {
157
+ this.providers.set('anthropic', new AnthropicProvider());
158
+ logger.info('Initialized Anthropic provider');
159
+ }
160
+
161
+ // OpenAI
162
+ if (process.env.OPENAI_API_KEY || isConfigured('openai')) {
163
+ this.providers.set('openai', new OpenAIProvider());
164
+ logger.info('Initialized OpenAI provider');
165
+ }
166
+
167
+ // Google
168
+ if (process.env.GOOGLE_API_KEY || isConfigured('google')) {
169
+ this.providers.set('google', new GoogleProvider());
170
+ logger.info('Initialized Google provider');
171
+ }
172
+
173
+ // OpenRouter
174
+ if (process.env.OPENROUTER_API_KEY || isConfigured('openrouter')) {
175
+ this.providers.set('openrouter', new OpenRouterProvider());
176
+ logger.info('Initialized OpenRouter provider');
177
+ }
178
+
179
+ // Ollama (only if explicitly configured via auth.json or env var)
180
+ if (process.env.OLLAMA_BASE_URL || isConfigured('ollama')) {
181
+ this.providers.set('ollama', new OllamaProvider());
182
+ logger.info('Initialized Ollama provider');
183
+ }
184
+
185
+ // AWS Bedrock (uses IAM credentials from environment / instance profile)
186
+ if (
187
+ process.env.AWS_BEDROCK_ENABLED === 'true' ||
188
+ process.env.AWS_REGION ||
189
+ isConfigured('bedrock') ||
190
+ (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY)
191
+ ) {
192
+ this.providers.set('bedrock', new BedrockProvider());
193
+ logger.info('Initialized AWS Bedrock provider');
194
+ }
195
+
196
+ // Groq (OpenAI-compatible)
197
+ const groqKey = process.env.GROQ_API_KEY || getApiKey('groq');
198
+ if (groqKey) {
199
+ this.providers.set(
200
+ 'groq',
201
+ new OpenAICompatibleProvider({
202
+ name: 'groq',
203
+ apiKey: groqKey,
204
+ baseURL: 'https://api.groq.com/openai/v1',
205
+ defaultModel: 'llama-3.1-70b-versatile',
206
+ })
207
+ );
208
+ logger.info('Initialized Groq provider (OpenAI-compatible)');
209
+ }
210
+
211
+ // Together AI (OpenAI-compatible)
212
+ const togetherKey = process.env.TOGETHER_API_KEY || getApiKey('together');
213
+ if (togetherKey) {
214
+ this.providers.set(
215
+ 'together',
216
+ new OpenAICompatibleProvider({
217
+ name: 'together',
218
+ apiKey: togetherKey,
219
+ baseURL: 'https://api.together.xyz/v1',
220
+ defaultModel: 'meta-llama/Llama-3.1-70B-Instruct-Turbo',
221
+ })
222
+ );
223
+ logger.info('Initialized Together AI provider (OpenAI-compatible)');
224
+ }
225
+
226
+ // DeepSeek (OpenAI-compatible)
227
+ const deepseekKey = process.env.DEEPSEEK_API_KEY || getApiKey('deepseek');
228
+ if (deepseekKey) {
229
+ this.providers.set(
230
+ 'deepseek',
231
+ new OpenAICompatibleProvider({
232
+ name: 'deepseek',
233
+ apiKey: deepseekKey,
234
+ baseURL: 'https://api.deepseek.com/v1',
235
+ defaultModel: 'deepseek-chat',
236
+ })
237
+ );
238
+ logger.info('Initialized DeepSeek provider (OpenAI-compatible)');
239
+ }
240
+
241
+ // Fireworks AI (OpenAI-compatible)
242
+ const fireworksKey = process.env.FIREWORKS_API_KEY || getApiKey('fireworks');
243
+ if (fireworksKey) {
244
+ this.providers.set(
245
+ 'fireworks',
246
+ new OpenAICompatibleProvider({
247
+ name: 'fireworks',
248
+ apiKey: fireworksKey,
249
+ baseURL: 'https://api.fireworks.ai/inference/v1',
250
+ defaultModel: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
251
+ })
252
+ );
253
+ logger.info('Initialized Fireworks AI provider (OpenAI-compatible)');
254
+ }
255
+
256
+ // Perplexity (OpenAI-compatible)
257
+ const perplexityKey = process.env.PERPLEXITY_API_KEY || getApiKey('perplexity');
258
+ if (perplexityKey) {
259
+ this.providers.set(
260
+ 'perplexity',
261
+ new OpenAICompatibleProvider({
262
+ name: 'perplexity',
263
+ apiKey: perplexityKey,
264
+ baseURL: 'https://api.perplexity.ai',
265
+ defaultModel: 'llama-3.1-sonar-large-128k-online',
266
+ })
267
+ );
268
+ logger.info('Initialized Perplexity provider (OpenAI-compatible)');
269
+ }
270
+ }
271
+
272
+ /**
273
+ * Get the names of all initialized providers.
274
+ */
275
+ getAvailableProviders(): string[] {
276
+ return [...this.providers.keys()];
277
+ }
278
+
279
+ /**
280
+ * Get the names of providers whose circuit breakers are currently OPEN
281
+ * (i.e. temporarily disabled due to consecutive failures).
282
+ */
283
+ getDisabledProviders(): string[] {
284
+ return this.circuitBreaker.getOpenCircuits();
285
+ }
286
+
287
+ /**
288
+ * Register a custom provider
289
+ */
290
+ registerProvider(provider: LLMProvider): void {
291
+ this.providers.set(provider.name, provider);
292
+ logger.info(`Registered custom provider: ${provider.name}`);
293
+ }
294
+
295
+ /**
296
+ * Route a completion request to the appropriate provider
297
+ */
298
+ async route(request: CompletionRequest, taskType?: string): Promise<LLMResponse> {
299
+ // Resolve model alias before routing
300
+ if (request.model) {
301
+ request.model = resolveModelAlias(request.model);
302
+ }
303
+
304
+ const provider = this.selectProvider(request, taskType);
305
+
306
+ // Strip provider prefix after routing (APIs expect model ID without prefix)
307
+ if (request.model) {
308
+ request.model = stripProviderPrefix(request.model);
309
+ }
310
+
311
+ // Enforce token budget
312
+ this.enforceTokenBudget(request);
313
+
314
+ if (!provider) {
315
+ throw new Error(
316
+ 'No LLM provider available. Run `nimbus login` to configure a provider, or set an API key via environment variable (e.g. ANTHROPIC_API_KEY).'
317
+ );
318
+ }
319
+
320
+ let response: LLMResponse;
321
+ if (this.config.fallback.enabled) {
322
+ response = await this.executeWithFallback(provider, request);
323
+ } else {
324
+ response = await provider.complete(request);
325
+ }
326
+
327
+ // Attach per-request cost calculation
328
+ const cost = this.computeCost(provider.name, response);
329
+ response.cost = cost;
330
+
331
+ // Persist usage (fire-and-forget)
332
+ if (response.usage) {
333
+ this.persistUsage(response.usage, response.model, provider.name, cost);
334
+ }
335
+
336
+ return response;
337
+ }
338
+
339
  /**
   * Route a streaming completion request.
   * Yields chunks from the selected provider (with fallback when enabled)
   * and, after the stream completes, persists token usage and cost
   * (fire-and-forget, same as route()). If the provider reports no usage,
   * tokens are estimated from character counts (~4 chars/token heuristic).
   */
  async *routeStream(request: CompletionRequest, taskType?: string): AsyncIterable<StreamChunk> {
    // Resolve model alias before routing
    if (request.model) {
      request.model = resolveModelAlias(request.model);
    }

    // Capture `this` and config references before yield points.
    // TypeScript strict mode narrows `this` to `never` after yield in
    // async generators, so all post-yield access goes through locals.
    const self = this as LLMRouter;
    const defaultModel = self.config.defaultModel;

    const provider = self.selectProvider(request, taskType);

    // Strip provider prefix after routing (APIs expect model ID without prefix)
    if (request.model) {
      request.model = stripProviderPrefix(request.model);
    }

    // Enforce token budget
    self.enforceTokenBudget(request);

    if (!provider) {
      throw new Error(
        'No LLM provider available. Run `nimbus login` to configure a provider, or set an API key via environment variable (e.g. ANTHROPIC_API_KEY).'
      );
    }

    // Reset fallback metadata so a stale value from a previous stream is
    // never reported for this one.
    self.lastStreamFallbackMeta = null;

    const stream = self.config.fallback.enabled
      ? self.executeStreamWithFallback(provider, request)
      : provider.stream(request);

    // Accumulate content (for token estimation) and the last usage report
    // while forwarding every chunk to the caller unchanged.
    let totalContent = '';
    let lastUsage: StreamChunk['usage'] | undefined;

    for await (const chunk of stream) {
      if (chunk.content) {
        totalContent += chunk.content;
      }
      if (chunk.usage) {
        lastUsage = chunk.usage;
      }
      yield chunk;
    }

    // Determine which provider actually served the stream.
    // Use type assertion because TS control-flow analysis incorrectly
    // narrows lastStreamFallbackMeta to `null` -- it was mutated by
    // executeStreamWithFallback during iteration above.
    const fallbackMeta = self.lastStreamFallbackMeta as StreamFallbackMeta | null;
    const activeProviderName = fallbackMeta?.activeProvider ?? provider.name;

    // Track cost after stream completes
    if (lastUsage) {
      const model = request.model || defaultModel;
      const cost = calculateCost(
        activeProviderName,
        model,
        lastUsage.promptTokens,
        lastUsage.completionTokens
      );
      self.persistUsage(lastUsage, model, activeProviderName, cost);
    } else {
      // Estimate tokens from content length if no usage data
      // (rough heuristic: ~4 characters per token).
      const estimatedOutputTokens = Math.ceil(totalContent.length / 4);
      const estimatedInputTokens = request.messages.reduce(
        (sum, m) => sum + Math.ceil(getTextContent(m.content).length / 4),
        0
      );
      const model = request.model || defaultModel;
      const cost = calculateCost(
        activeProviderName,
        model,
        estimatedInputTokens,
        estimatedOutputTokens
      );
      self.persistUsage(
        {
          promptTokens: estimatedInputTokens,
          completionTokens: estimatedOutputTokens,
          totalTokens: estimatedInputTokens + estimatedOutputTokens,
        },
        model,
        activeProviderName,
        cost
      );
    }
  }
435
+
436
/**
 * Route a streaming tool completion request.
 * Text chunks are yielded incrementally; tool calls arrive on the final
 * chunk. Falls back to non-streaming completeWithTools when the selected
 * provider doesn't support streamWithTools.
 *
 * NOTE: when fallback is enabled, each provider's stream is fully buffered
 * before any chunk is yielded (so a mid-stream failure can fall back
 * cleanly) — callers do not see incremental output on that path. When
 * fallback is disabled, chunks are yielded as they arrive.
 *
 * Side effects: mutates request.model (alias resolution + prefix strip)
 * and request.maxTokens (budget clamp); persists token usage after a
 * successful stream.
 *
 * @throws when no provider is registered at all.
 */
async *routeStreamWithTools(
  request: ToolCompletionRequest,
  taskType?: string
): AsyncIterable<StreamChunk> {
  // Resolve model alias before routing
  if (request.model) {
    request.model = resolveModelAlias(request.model);
  }

  // Capture `this` for use across yield points (the cast is a no-op;
  // NOTE(review): presumably kept for tooling/narrowing reasons — confirm)
  const self = this as LLMRouter;
  const defaultModel = self.config.defaultModel;
  const provider = self.selectProvider(request, taskType);

  // Strip provider prefix after routing
  if (request.model) {
    request.model = stripProviderPrefix(request.model);
  }

  self.enforceTokenBudget(request);

  if (!provider) {
    throw new Error(
      'No LLM provider available. Run `nimbus login` to configure a provider, or set an API key via environment variable (e.g. ANTHROPIC_API_KEY).'
    );
  }

  // Use native streaming-with-tools if providers support it
  if (provider.streamWithTools && self.config.fallback.enabled) {
    // Try primary provider first, then fallbacks
    const fallbackProviders = self.config.fallback.providers
      .map(name => self.providers.get(name))
      .filter(Boolean) as LLMProvider[];
    const allProviders = [provider, ...fallbackProviders.filter(p => p !== provider)];

    for (const p of allProviders) {
      // Skip providers without native streaming or with an open circuit.
      if (!p.streamWithTools || !self.circuitBreaker.isAvailable(p.name)) {
        continue;
      }
      try {
        let lastUsage: StreamChunk['usage'] | undefined;
        // Buffer the whole stream; only yield after it completes so a
        // failed provider's partial output is never mixed into the result.
        const bufferedChunks: StreamChunk[] = [];
        for await (const chunk of p.streamWithTools(request)) {
          bufferedChunks.push(chunk);
          if (chunk.usage) {
            lastUsage = chunk.usage;
          }
        }
        self.circuitBreaker.recordSuccess(p.name);
        for (const chunk of bufferedChunks) {
          yield chunk;
        }
        // Persist usage/cost only when the provider reported token counts.
        if (lastUsage) {
          const model = request.model || defaultModel;
          const cost = calculateCost(
            p.name,
            model,
            lastUsage.promptTokens,
            lastUsage.completionTokens
          );
          self.persistUsage(lastUsage, model, p.name, cost);
        }
        return;
      } catch (error) {
        self.circuitBreaker.recordFailure(p.name);
        logger.warn(`Provider ${p.name} failed for streamWithTools, trying fallback...`, {
          error,
        });
        continue;
      }
    }
    // If all providers with streamWithTools failed, fall through to non-streaming fallback below
  } else if (provider.streamWithTools) {
    // Fallback disabled — use provider directly; chunks stream through
    // to the caller immediately.
    let lastUsage: StreamChunk['usage'] | undefined;
    for await (const chunk of provider.streamWithTools(request)) {
      if (chunk.usage) {
        lastUsage = chunk.usage;
      }
      yield chunk;
    }
    if (lastUsage) {
      const model = request.model || defaultModel;
      const cost = calculateCost(
        provider.name,
        model,
        lastUsage.promptTokens,
        lastUsage.completionTokens
      );
      self.persistUsage(lastUsage, model, provider.name, cost);
    }
    return;
  }

  // Fallback: non-streaming completeWithTools, yield result as a single chunk
  const response = await provider.completeWithTools(request);
  const cost = self.computeCost(provider.name, response);
  response.cost = cost;
  if (response.usage) {
    self.persistUsage(response.usage, response.model, provider.name, cost);
  }

  // Emit the full text first, then a terminal chunk carrying tool calls
  // and usage, mimicking the shape of a real stream.
  if (response.content) {
    yield { content: response.content, done: false };
  }
  yield {
    done: true,
    toolCalls: response.toolCalls,
    usage: response.usage,
  };
}
552
+
553
+ /**
554
+ * Route a tool completion request
555
+ */
556
+ async routeWithTools(request: ToolCompletionRequest, taskType?: string): Promise<LLMResponse> {
557
+ // Resolve model alias before routing
558
+ if (request.model) {
559
+ request.model = resolveModelAlias(request.model);
560
+ }
561
+
562
+ const provider = this.selectProvider(request, taskType);
563
+
564
+ // Strip provider prefix after routing (APIs expect model ID without prefix)
565
+ if (request.model) {
566
+ request.model = stripProviderPrefix(request.model);
567
+ }
568
+
569
+ // Enforce token budget
570
+ this.enforceTokenBudget(request);
571
+
572
+ if (!provider) {
573
+ throw new Error(
574
+ 'No LLM provider available. Run `nimbus login` to configure a provider, or set an API key via environment variable (e.g. ANTHROPIC_API_KEY).'
575
+ );
576
+ }
577
+
578
+ let response: LLMResponse;
579
+ if (this.config.fallback.enabled) {
580
+ response = await this.executeToolsWithFallback(provider, request);
581
+ } else {
582
+ response = await provider.completeWithTools(request);
583
+ }
584
+
585
+ // Attach per-request cost calculation
586
+ const cost = this.computeCost(provider.name, response);
587
+ response.cost = cost;
588
+
589
+ // Persist usage (fire-and-forget)
590
+ if (response.usage) {
591
+ this.persistUsage(response.usage, response.model, provider.name, cost);
592
+ }
593
+
594
+ return response;
595
+ }
596
+
597
+ /**
598
+ * Get list of available models across all providers
599
+ */
600
+ async getAvailableModels(): Promise<Record<string, string[]>> {
601
+ const models: Record<string, string[]> = {};
602
+
603
+ const entries = Array.from(this.providers.entries());
604
+ const results = await Promise.allSettled(
605
+ entries.map(async ([name, provider]) => {
606
+ const providerModels = await provider.listModels();
607
+ return { name, models: providerModels };
608
+ })
609
+ );
610
+
611
+ for (const result of results) {
612
+ if (result.status === 'fulfilled') {
613
+ models[result.value.name] = result.value.models;
614
+ }
615
+ }
616
+
617
+ return models;
618
+ }
619
+
620
+ /**
621
+ * Get provider information including availability and models.
622
+ * Each registered provider is queried for its model list. If the query
623
+ * succeeds the provider is marked available; otherwise it is marked
624
+ * unavailable with an empty model list.
625
+ */
626
+ async getProviders(): Promise<ProviderInfo[]> {
627
+ const entries = Array.from(this.providers.entries());
628
+ const results = await Promise.allSettled(
629
+ entries.map(async ([name, provider]) => {
630
+ const models = await provider.listModels();
631
+ return { name, available: true, models };
632
+ })
633
+ );
634
+
635
+ const providers: ProviderInfo[] = [];
636
+ for (let i = 0; i < results.length; i++) {
637
+ const result = results[i];
638
+ if (result.status === 'fulfilled') {
639
+ providers.push(result.value);
640
+ } else {
641
+ providers.push({ name: entries[i][0], available: false, models: [] });
642
+ }
643
+ }
644
+
645
+ return providers;
646
+ }
647
+
648
+ /**
649
+ * Select the appropriate provider based on request and task type
650
+ */
651
+ private selectProvider(request: CompletionRequest, taskType?: string): LLMProvider | null {
652
+ // If model explicitly specified, use its provider
653
+ if (request.model) {
654
+ const providerName = this.getProviderForModel(request.model);
655
+ const provider = this.providers.get(providerName);
656
+ if (provider) {
657
+ logger.info(`Selected ${providerName} provider for model ${request.model}`);
658
+ return provider;
659
+ }
660
+ }
661
+
662
+ // Cost optimization
663
+ if (this.config.costOptimization.enabled && taskType) {
664
+ if (this.config.costOptimization.cheapModelFor.includes(taskType)) {
665
+ const cheapModel = this.config.costOptimization.cheapModel;
666
+ const provider = this.getProviderForModel(cheapModel)
667
+ ? this.providers.get(this.getProviderForModel(cheapModel)) || this.getCheapProvider()
668
+ : this.getCheapProvider();
669
+ if (provider) {
670
+ if (!request.model) {
671
+ request.model = cheapModel;
672
+ }
673
+ logger.info(
674
+ `Selected cheap provider ${provider.name} with model ${request.model} for task type: ${taskType}`
675
+ );
676
+ return provider;
677
+ }
678
+ }
679
+ if (this.config.costOptimization.expensiveModelFor.includes(taskType)) {
680
+ const expensiveModel = this.config.costOptimization.expensiveModel;
681
+ const provider = this.getProviderForModel(expensiveModel)
682
+ ? this.providers.get(this.getProviderForModel(expensiveModel)) ||
683
+ this.getExpensiveProvider()
684
+ : this.getExpensiveProvider();
685
+ if (provider) {
686
+ if (!request.model) {
687
+ request.model = expensiveModel;
688
+ }
689
+ logger.info(
690
+ `Selected expensive provider ${provider.name} with model ${request.model} for task type: ${taskType}`
691
+ );
692
+ return provider;
693
+ }
694
+ }
695
+ }
696
+
697
+ // Default provider
698
+ const defaultProvider = this.providers.get(this.config.defaultProvider);
699
+ if (defaultProvider) {
700
+ logger.info(`Using default provider: ${this.config.defaultProvider}`);
701
+ return defaultProvider;
702
+ }
703
+
704
+ // Fallback to any available provider
705
+ const firstAvailable = Array.from(this.providers.values())[0];
706
+ if (firstAvailable) {
707
+ logger.warn(`No default provider, using first available: ${firstAvailable.name}`);
708
+ return firstAvailable;
709
+ }
710
+
711
+ return null;
712
+ }
713
+
714
+ /**
715
+ * Check whether an error is a rate-limit (429) or server error (5xx)
716
+ * that should be retried with backoff before falling through.
717
+ */
718
+ private static isRetryableError(error: unknown): boolean {
719
+ if (error && typeof error === 'object') {
720
+ const errObj = error as Record<string, unknown>;
721
+ const status =
722
+ (typeof errObj.status === 'number' ? errObj.status : undefined) ??
723
+ (typeof errObj.statusCode === 'number' ? errObj.statusCode : undefined);
724
+ if (status !== undefined && (status === 429 || (status >= 500 && status < 600))) {
725
+ return true;
726
+ }
727
+ const msg = typeof errObj.message === 'string' ? errObj.message : '';
728
+ if (/rate.?limit|429|too many requests|overloaded|503/i.test(msg)) {
729
+ return true;
730
+ }
731
+ }
732
+ return false;
733
+ }
734
+
735
+ /**
736
+ * Execute an async function with retry + exponential backoff for rate limits.
737
+ * Retries up to `maxRetries` times with delays of 1s, 2s, 4s, ...
738
+ */
739
+ private async withRetry<T>(fn: () => Promise<T>, maxRetries = 3): Promise<T> {
740
+ let lastError: unknown;
741
+ for (let attempt = 0; attempt <= maxRetries; attempt++) {
742
+ try {
743
+ return await fn();
744
+ } catch (error) {
745
+ lastError = error;
746
+ if (attempt < maxRetries && LLMRouter.isRetryableError(error)) {
747
+ const delay = Math.min(1000 * Math.pow(2, attempt), 8000);
748
+ const jitter = Math.random() * 500;
749
+ logger.info(
750
+ `Rate limited — retrying in ${Math.round(delay + jitter)}ms (attempt ${attempt + 1}/${maxRetries})`
751
+ );
752
+ await new Promise(resolve => setTimeout(resolve, delay + jitter));
753
+ continue;
754
+ }
755
+ throw error;
756
+ }
757
+ }
758
+ throw lastError;
759
+ }
760
+
761
+ /**
762
+ * Execute request with fallback logic
763
+ */
764
+ private async executeWithFallback(
765
+ primaryProvider: LLMProvider,
766
+ request: CompletionRequest
767
+ ): Promise<LLMResponse> {
768
+ const fallbackProviders = this.config.fallback.providers
769
+ .map(name => this.providers.get(name))
770
+ .filter(Boolean) as LLMProvider[];
771
+
772
+ const allProviders = [primaryProvider, ...fallbackProviders.filter(p => p !== primaryProvider)];
773
+
774
+ for (const provider of allProviders) {
775
+ if (!this.circuitBreaker.isAvailable(provider.name)) {
776
+ logger.info(`Skipping ${provider.name} (circuit open)`);
777
+ continue;
778
+ }
779
+ try {
780
+ logger.info(`Attempting request with ${provider.name}`);
781
+ const result = await this.withRetry(() => provider.complete(request));
782
+ this.circuitBreaker.recordSuccess(provider.name);
783
+ return result;
784
+ } catch (error) {
785
+ this.circuitBreaker.recordFailure(provider.name);
786
+ logger.warn(`Provider ${provider.name} failed, trying fallback...`, { error });
787
+ continue;
788
+ }
789
+ }
790
+
791
+ throw new Error(
792
+ 'All LLM providers failed. Check your API keys and network connection, or try a different provider.'
793
+ );
794
+ }
795
+
796
+ /**
797
+ * Execute tool request with fallback logic
798
+ */
799
+ private async executeToolsWithFallback(
800
+ primaryProvider: LLMProvider,
801
+ request: ToolCompletionRequest
802
+ ): Promise<LLMResponse> {
803
+ const fallbackProviders = this.config.fallback.providers
804
+ .map(name => this.providers.get(name))
805
+ .filter(Boolean) as LLMProvider[];
806
+
807
+ const allProviders = [primaryProvider, ...fallbackProviders.filter(p => p !== primaryProvider)];
808
+
809
+ for (const provider of allProviders) {
810
+ if (!this.circuitBreaker.isAvailable(provider.name)) {
811
+ logger.info(`Skipping ${provider.name} for tool request (circuit open)`);
812
+ continue;
813
+ }
814
+ try {
815
+ logger.info(`Attempting tool request with ${provider.name}`);
816
+ const result = await this.withRetry(() => provider.completeWithTools(request));
817
+ this.circuitBreaker.recordSuccess(provider.name);
818
+ return result;
819
+ } catch (error) {
820
+ this.circuitBreaker.recordFailure(provider.name);
821
+ logger.warn(`Provider ${provider.name} failed for tool request, trying fallback...`, {
822
+ error,
823
+ });
824
+ continue;
825
+ }
826
+ }
827
+
828
+ throw new Error(
829
+ 'All LLM providers failed for tool request. Check your API keys and network connection, or try a different provider.'
830
+ );
831
+ }
832
+
833
/**
 * Execute streaming request with fallback logic.
 *
 * Handles two failure modes:
 * 1. Provider fails before producing any chunks (e.g. auth error, rate limit) --
 *    immediately falls through to the next provider.
 * 2. Provider fails mid-stream (partial chunks already buffered) -- discards
 *    the partial output and starts fresh with the next provider.
 *
 * Chunks are buffered internally per-provider attempt. Only once a provider
 * completes its full stream successfully are the buffered chunks yielded to
 * the caller. This prevents the caller from receiving a garbled mix of
 * partial responses from multiple providers.
 *
 * Side effect: sets this.lastStreamFallbackMeta describing which provider
 * served the response and whether a fallback occurred.
 */
private async *executeStreamWithFallback(
  primaryProvider: LLMProvider,
  request: CompletionRequest
): AsyncIterable<StreamChunk> {
  // Capture `this` for use across yield points
  const self = this as LLMRouter;

  const fallbackProviders = self.config.fallback.providers
    .map(name => self.providers.get(name))
    .filter(Boolean) as LLMProvider[];

  const allProviders = [primaryProvider, ...fallbackProviders.filter(p => p !== primaryProvider)];

  // Name of the most recent provider that failed (only the last failure
  // is recorded when several providers fail in sequence).
  let failedProvider: string | undefined;

  for (const provider of allProviders) {
    if (!self.circuitBreaker.isAvailable(provider.name)) {
      logger.info(`Skipping ${provider.name} for stream (circuit open)`);
      continue;
    }

    const bufferedChunks: StreamChunk[] = [];
    let streamCompleted = false;

    try {
      logger.info(`Attempting stream with ${provider.name}`);

      for await (const chunk of provider.stream(request)) {
        bufferedChunks.push(chunk);

        if (chunk.done) {
          streamCompleted = true;
        }
      }

      // If we got here the stream completed without throwing.
      // Even if there was no explicit done=true chunk we treat
      // exhausting the iterator as success.
      // (This assignment makes the chunk.done tracking above redundant on
      // the success path; it only mattered if a throw followed done=true.)
      streamCompleted = true;
      self.circuitBreaker.recordSuccess(provider.name);
    } catch (error) {
      self.circuitBreaker.recordFailure(provider.name);
      const partialChunkCount = bufferedChunks.length;
      logger.warn(
        `Provider ${provider.name} failed for stream after ${partialChunkCount} chunk(s), trying fallback...`,
        { error }
      );
      failedProvider = provider.name;
      // Discard buffered chunks from the failed provider and try next
      continue;
    }

    if (streamCompleted) {
      // Record which provider served the response
      self.lastStreamFallbackMeta = {
        activeProvider: provider.name,
        failedProvider,
        isFallback: !!failedProvider,
      };

      if (failedProvider) {
        logger.info(
          `Stream fallback: ${failedProvider} -> ${provider.name} (${bufferedChunks.length} chunks)`
        );
      }

      // Yield all buffered chunks to the caller
      for (const chunk of bufferedChunks) {
        yield chunk;
      }
      return;
    }
  }

  throw new Error(
    'All LLM providers failed for streaming request. Check your API keys and network connection, or try a different provider.'
  );
}
925
+
926
+ /**
927
+ * Compute cost for a response using the cost calculator
928
+ */
929
+ private computeCost(providerName: string, response: LLMResponse): CostResult {
930
+ return calculateCost(
931
+ providerName,
932
+ response.model,
933
+ response.usage.promptTokens,
934
+ response.usage.completionTokens
935
+ );
936
+ }
937
+
938
+ /**
939
+ * Get provider name for a specific model.
940
+ * Uses the detectProvider utility for auto-detection.
941
+ */
942
+ private getProviderForModel(model: string): string {
943
+ const detected = detectProvider(model);
944
+
945
+ // If the detected provider is registered, use it
946
+ if (this.providers.has(detected)) {
947
+ return detected;
948
+ }
949
+
950
+ // For models with "/" prefix that could be OpenRouter
951
+ if (model.includes('/') && this.providers.has('openrouter')) {
952
+ return 'openrouter';
953
+ }
954
+
955
+ return this.config.defaultProvider;
956
+ }
957
+
958
+ /**
959
+ * Get the cheapest available provider
960
+ */
961
+ private getCheapProvider(): LLMProvider | null {
962
+ // Prefer Ollama (free) > Haiku > GPT-4o-mini
963
+ return (
964
+ this.providers.get('ollama') ||
965
+ this.providers.get('anthropic') || // Will use Haiku in practice
966
+ this.providers.get('openai') || // Will use gpt-4o-mini in practice
967
+ null
968
+ );
969
+ }
970
+
971
+ /**
972
+ * Persist token usage to the embedded SQLite state layer (fire-and-forget).
973
+ *
974
+ * Inserts a row into the `usage` table with token counts, cost, and metadata.
975
+ * Failures are logged but never propagated -- persistence is non-critical and
976
+ * must not break the LLM request path.
977
+ */
978
+ persistUsage(
979
+ usage: { promptTokens: number; completionTokens: number; totalTokens: number },
980
+ model?: string,
981
+ provider?: string,
982
+ cost?: CostResult
983
+ ): void {
984
+ try {
985
+ // Lazy import to avoid circular dependency between llm/ and state/
986
+ import('../state/db')
987
+ .then(({ getDb }) => {
988
+ try {
989
+ const db = getDb();
990
+ const id = crypto.randomUUID();
991
+ const metadata = JSON.stringify({
992
+ model: model ?? null,
993
+ provider: provider ?? null,
994
+ prompt_tokens: usage.promptTokens,
995
+ completion_tokens: usage.completionTokens,
996
+ });
997
+
998
+ db.run(
999
+ `INSERT INTO usage (id, type, quantity, unit, cost_usd, metadata)
1000
+ VALUES (?, ?, ?, ?, ?, ?)`,
1001
+ [id, 'llm_call', usage.totalTokens, 'tokens', cost?.costUSD ?? 0, metadata]
1002
+ );
1003
+ } catch (err) {
1004
+ logger.debug('Failed to persist LLM usage to SQLite', { error: err });
1005
+ }
1006
+ })
1007
+ .catch(err => {
1008
+ logger.debug('Failed to import state/db for usage persistence', { error: err });
1009
+ });
1010
+ } catch (err) {
1011
+ logger.debug('Unexpected error in persistUsage', { error: err });
1012
+ }
1013
+ }
1014
+
1015
+ /**
1016
+ * Get the most capable (expensive) provider
1017
+ */
1018
+ private getExpensiveProvider(): LLMProvider | null {
1019
+ // Prefer Claude Opus/Sonnet > GPT-4o > Gemini Pro
1020
+ return (
1021
+ this.providers.get('anthropic') ||
1022
+ this.providers.get('openai') ||
1023
+ this.providers.get('google') ||
1024
+ null
1025
+ );
1026
+ }
1027
+
1028
+ /**
1029
+ * Enforce token budget on a request
1030
+ */
1031
+ private enforceTokenBudget(request: CompletionRequest): void {
1032
+ const maxTokens = this.config.tokenBudget?.maxTokensPerRequest || 32768;
1033
+ request.maxTokens = Math.min(request.maxTokens || 4096, maxTokens);
1034
+ }
1035
+ }