@timmeck/brain-core 2.36.12 → 2.36.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. package/command-center.html +955 -0
  2. package/dist/cross-brain/__tests__/borg-sync-engine.test.d.ts +1 -0
  3. package/dist/cross-brain/__tests__/borg-sync-engine.test.js +240 -0
  4. package/dist/cross-brain/__tests__/borg-sync-engine.test.js.map +1 -0
  5. package/dist/cross-brain/borg-sync-engine.d.ts +62 -0
  6. package/dist/cross-brain/borg-sync-engine.js +215 -0
  7. package/dist/cross-brain/borg-sync-engine.js.map +1 -0
  8. package/dist/cross-brain/borg-types.d.ts +37 -0
  9. package/dist/cross-brain/borg-types.js +9 -0
  10. package/dist/cross-brain/borg-types.js.map +1 -0
  11. package/dist/dashboard/__tests__/command-center-server.test.d.ts +1 -0
  12. package/dist/dashboard/__tests__/command-center-server.test.js +298 -0
  13. package/dist/dashboard/__tests__/command-center-server.test.js.map +1 -0
  14. package/dist/dashboard/command-center-server.d.ts +38 -0
  15. package/dist/dashboard/command-center-server.js +289 -0
  16. package/dist/dashboard/command-center-server.js.map +1 -0
  17. package/dist/embeddings/engine.js +2 -1
  18. package/dist/embeddings/engine.js.map +1 -1
  19. package/dist/index.d.ts +20 -1
  20. package/dist/index.js +15 -0
  21. package/dist/index.js.map +1 -1
  22. package/dist/llm/__tests__/anthropic-provider.test.d.ts +1 -0
  23. package/dist/llm/__tests__/anthropic-provider.test.js +121 -0
  24. package/dist/llm/__tests__/anthropic-provider.test.js.map +1 -0
  25. package/dist/llm/__tests__/llm-service.test.js +181 -40
  26. package/dist/llm/__tests__/llm-service.test.js.map +1 -1
  27. package/dist/llm/__tests__/ollama-embedding.test.d.ts +1 -0
  28. package/dist/llm/__tests__/ollama-embedding.test.js +128 -0
  29. package/dist/llm/__tests__/ollama-embedding.test.js.map +1 -0
  30. package/dist/llm/__tests__/ollama-provider.test.d.ts +1 -0
  31. package/dist/llm/__tests__/ollama-provider.test.js +213 -0
  32. package/dist/llm/__tests__/ollama-provider.test.js.map +1 -0
  33. package/dist/llm/__tests__/provider.test.d.ts +1 -0
  34. package/dist/llm/__tests__/provider.test.js +126 -0
  35. package/dist/llm/__tests__/provider.test.js.map +1 -0
  36. package/dist/llm/anthropic-provider.d.ts +41 -0
  37. package/dist/llm/anthropic-provider.js +86 -0
  38. package/dist/llm/anthropic-provider.js.map +1 -0
  39. package/dist/llm/index.d.ts +9 -1
  40. package/dist/llm/index.js +4 -0
  41. package/dist/llm/index.js.map +1 -1
  42. package/dist/llm/llm-service.d.ts +55 -7
  43. package/dist/llm/llm-service.js +184 -82
  44. package/dist/llm/llm-service.js.map +1 -1
  45. package/dist/llm/ollama-embedding.d.ts +46 -0
  46. package/dist/llm/ollama-embedding.js +93 -0
  47. package/dist/llm/ollama-embedding.js.map +1 -0
  48. package/dist/llm/ollama-provider.d.ts +80 -0
  49. package/dist/llm/ollama-provider.js +178 -0
  50. package/dist/llm/ollama-provider.js.map +1 -0
  51. package/dist/llm/provider.d.ts +120 -0
  52. package/dist/llm/provider.js +104 -0
  53. package/dist/llm/provider.js.map +1 -0
  54. package/dist/missions/mission-engine.d.ts +4 -0
  55. package/dist/missions/mission-engine.js +30 -8
  56. package/dist/missions/mission-engine.js.map +1 -1
  57. package/dist/notifications/__tests__/notification-service.test.d.ts +1 -0
  58. package/dist/notifications/__tests__/notification-service.test.js +176 -0
  59. package/dist/notifications/__tests__/notification-service.test.js.map +1 -0
  60. package/dist/notifications/discord-provider.d.ts +30 -0
  61. package/dist/notifications/discord-provider.js +89 -0
  62. package/dist/notifications/discord-provider.js.map +1 -0
  63. package/dist/notifications/email-provider.d.ts +41 -0
  64. package/dist/notifications/email-provider.js +101 -0
  65. package/dist/notifications/email-provider.js.map +1 -0
  66. package/dist/notifications/index.d.ts +8 -0
  67. package/dist/notifications/index.js +5 -0
  68. package/dist/notifications/index.js.map +1 -0
  69. package/dist/notifications/notification-provider.d.ts +75 -0
  70. package/dist/notifications/notification-provider.js +47 -0
  71. package/dist/notifications/notification-provider.js.map +1 -0
  72. package/dist/notifications/notification-service.d.ts +85 -0
  73. package/dist/notifications/notification-service.js +184 -0
  74. package/dist/notifications/notification-service.js.map +1 -0
  75. package/dist/notifications/telegram-provider.d.ts +30 -0
  76. package/dist/notifications/telegram-provider.js +78 -0
  77. package/dist/notifications/telegram-provider.js.map +1 -0
  78. package/dist/plugin/__tests__/plugin-registry.test.d.ts +1 -0
  79. package/dist/plugin/__tests__/plugin-registry.test.js +166 -0
  80. package/dist/plugin/__tests__/plugin-registry.test.js.map +1 -0
  81. package/dist/plugin/plugin-registry.d.ts +38 -0
  82. package/dist/plugin/plugin-registry.js +185 -0
  83. package/dist/plugin/plugin-registry.js.map +1 -0
  84. package/dist/plugin/types.d.ts +59 -0
  85. package/dist/plugin/types.js +2 -0
  86. package/dist/plugin/types.js.map +1 -0
  87. package/dist/research/adapters/__tests__/web-adapters.test.d.ts +1 -0
  88. package/dist/research/adapters/__tests__/web-adapters.test.js +106 -0
  89. package/dist/research/adapters/__tests__/web-adapters.test.js.map +1 -0
  90. package/dist/research/adapters/firecrawl-adapter.d.ts +57 -0
  91. package/dist/research/adapters/firecrawl-adapter.js +137 -0
  92. package/dist/research/adapters/firecrawl-adapter.js.map +1 -0
  93. package/dist/research/adapters/index.d.ts +3 -0
  94. package/dist/research/adapters/index.js +2 -0
  95. package/dist/research/adapters/index.js.map +1 -1
  96. package/dist/research/adapters/playwright-adapter.d.ts +54 -0
  97. package/dist/research/adapters/playwright-adapter.js +130 -0
  98. package/dist/research/adapters/playwright-adapter.js.map +1 -0
  99. package/dist/research/research-orchestrator.d.ts +3 -0
  100. package/dist/research/research-orchestrator.js +19 -1
  101. package/dist/research/research-orchestrator.js.map +1 -1
  102. package/dist/techradar/__tests__/techradar-engine.test.d.ts +1 -0
  103. package/dist/techradar/__tests__/techradar-engine.test.js +246 -0
  104. package/dist/techradar/__tests__/techradar-engine.test.js.map +1 -0
  105. package/dist/techradar/daily-digest.d.ts +18 -0
  106. package/dist/techradar/daily-digest.js +100 -0
  107. package/dist/techradar/daily-digest.js.map +1 -0
  108. package/dist/techradar/index.d.ts +5 -0
  109. package/dist/techradar/index.js +5 -0
  110. package/dist/techradar/index.js.map +1 -0
  111. package/dist/techradar/relevance-scorer.d.ts +29 -0
  112. package/dist/techradar/relevance-scorer.js +139 -0
  113. package/dist/techradar/relevance-scorer.js.map +1 -0
  114. package/dist/techradar/repo-watcher.d.ts +24 -0
  115. package/dist/techradar/repo-watcher.js +87 -0
  116. package/dist/techradar/repo-watcher.js.map +1 -0
  117. package/dist/techradar/techradar-engine.d.ts +69 -0
  118. package/dist/techradar/techradar-engine.js +382 -0
  119. package/dist/techradar/techradar-engine.js.map +1 -0
  120. package/dist/techradar/types.d.ts +87 -0
  121. package/dist/techradar/types.js +5 -0
  122. package/dist/techradar/types.js.map +1 -0
  123. package/dist/watchdog/__tests__/watchdog-service.test.d.ts +1 -0
  124. package/dist/watchdog/__tests__/watchdog-service.test.js +113 -0
  125. package/dist/watchdog/__tests__/watchdog-service.test.js.map +1 -0
  126. package/dist/watchdog/watchdog-service.d.ts +60 -0
  127. package/dist/watchdog/watchdog-service.js +275 -0
  128. package/dist/watchdog/watchdog-service.js.map +1 -0
  129. package/dist/watchdog/windows-service.d.ts +39 -0
  130. package/dist/watchdog/windows-service.js +179 -0
  131. package/dist/watchdog/windows-service.js.map +1 -0
  132. package/package.json +20 -2
package/dist/llm/anthropic-provider.d.ts ADDED
@@ -0,0 +1,41 @@
+ /**
+ * Anthropic Claude Provider — Cloud LLM
+ *
+ * Default provider for the Brain ecosystem.
+ * Uses the Anthropic Messages API directly via fetch.
+ *
+ * Setup:
+ * In .env: ANTHROPIC_API_KEY=sk-ant-...
+ * Or: new AnthropicProvider({ apiKey: '...' })
+ */
+ import type { LLMProvider, LLMMessage, LLMCallOptions, LLMProviderResponse } from './provider.js';
+ export interface AnthropicProviderConfig {
+ /** API key. Falls back to ANTHROPIC_API_KEY env var. */
+ apiKey?: string;
+ /** Model to use. Default: claude-sonnet-4-20250514 */
+ model?: string;
+ /** Max tokens per request. Default: 2048 */
+ maxTokens?: number;
+ /** API base URL. Default: https://api.anthropic.com */
+ baseUrl?: string;
+ }
+ export declare class AnthropicProvider implements LLMProvider {
+ readonly name = "anthropic";
+ readonly costTier: "expensive";
+ readonly capabilities: {
+ chat: boolean;
+ generate: boolean;
+ embed: boolean;
+ reasoning: boolean;
+ };
+ private readonly apiKey;
+ private readonly model;
+ private readonly maxTokens;
+ private readonly baseUrl;
+ private readonly log;
+ constructor(config?: AnthropicProviderConfig);
+ isAvailable(): Promise<boolean>;
+ chat(messages: LLMMessage[], options?: LLMCallOptions): Promise<LLMProviderResponse>;
+ generate(prompt: string, options?: LLMCallOptions): Promise<string>;
+ embed(_text: string): Promise<number[]>;
+ }
package/dist/llm/anthropic-provider.js ADDED
@@ -0,0 +1,86 @@
+ /**
+ * Anthropic Claude Provider — Cloud LLM
+ *
+ * Default provider for the Brain ecosystem.
+ * Uses the Anthropic Messages API directly via fetch.
+ *
+ * Setup:
+ * In .env: ANTHROPIC_API_KEY=sk-ant-...
+ * Or: new AnthropicProvider({ apiKey: '...' })
+ */
+ import { getLogger } from '../utils/logger.js';
+ export class AnthropicProvider {
+ name = 'anthropic';
+ costTier = 'expensive';
+ capabilities = {
+ chat: true,
+ generate: true,
+ embed: false,
+ reasoning: false,
+ };
+ apiKey;
+ model;
+ maxTokens;
+ baseUrl;
+ log = getLogger();
+ constructor(config = {}) {
+ this.apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY ?? null;
+ this.model = config.model ?? 'claude-sonnet-4-20250514';
+ this.maxTokens = config.maxTokens ?? 2048;
+ this.baseUrl = config.baseUrl ?? 'https://api.anthropic.com';
+ }
+ async isAvailable() {
+ return this.apiKey !== null && this.apiKey.length > 0;
+ }
+ async chat(messages, options) {
+ if (!this.apiKey) {
+ throw new Error('AnthropicProvider: No API key configured');
+ }
+ // Separate system message from conversation
+ const systemMessages = messages.filter(m => m.role === 'system');
+ const conversationMessages = messages.filter(m => m.role !== 'system');
+ const systemPrompt = systemMessages.map(m => m.content).join('\n') || undefined;
+ const start = Date.now();
+ const response = await fetch(`${this.baseUrl}/v1/messages`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'x-api-key': this.apiKey,
+ 'anthropic-version': '2023-06-01',
+ },
+ body: JSON.stringify({
+ model: this.model,
+ max_tokens: options?.maxTokens ?? this.maxTokens,
+ ...(options?.temperature !== undefined ? { temperature: options.temperature } : {}),
+ ...(systemPrompt ? { system: systemPrompt } : {}),
+ messages: conversationMessages.map(m => ({ role: m.role, content: m.content })),
+ }),
+ });
+ const durationMs = Date.now() - start;
+ if (!response.ok) {
+ const errText = await response.text();
+ throw new Error(`Anthropic API error (${response.status}): ${errText.substring(0, 200)}`);
+ }
+ const data = await response.json();
+ const text = data.content
+ ?.filter(c => c.type === 'text')
+ .map(c => c.text ?? '')
+ .join('\n') ?? '';
+ return {
+ text,
+ inputTokens: data.usage?.input_tokens ?? 0,
+ outputTokens: data.usage?.output_tokens ?? 0,
+ model: this.model,
+ durationMs,
+ };
+ }
+ async generate(prompt, options) {
+ const result = await this.chat([{ role: 'user', content: prompt }], options);
+ return result.text;
+ }
+ async embed(_text) {
+ // Anthropic doesn't provide embeddings
+ return [];
+ }
+ }
+ //# sourceMappingURL=anthropic-provider.js.map
package/dist/llm/anthropic-provider.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"anthropic-provider.js","sourceRoot":"","sources":["../../src/llm/anthropic-provider.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAEH,OAAO,EAAE,SAAS,EAAE,MAAM,oBAAoB,CAAC;AAc/C,MAAM,OAAO,iBAAiB;IACnB,IAAI,GAAG,WAAW,CAAC;IACnB,QAAQ,GAAG,WAAoB,CAAC;IAChC,YAAY,GAAG;QACtB,IAAI,EAAE,IAAI;QACV,QAAQ,EAAE,IAAI;QACd,KAAK,EAAE,KAAK;QACZ,SAAS,EAAE,KAAK;KACjB,CAAC;IAEe,MAAM,CAAgB;IACtB,KAAK,CAAS;IACd,SAAS,CAAS;IAClB,OAAO,CAAS;IAChB,GAAG,GAAG,SAAS,EAAE,CAAC;IAEnC,YAAY,SAAkC,EAAE;QAC9C,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,IAAI,OAAO,CAAC,GAAG,CAAC,iBAAiB,IAAI,IAAI,CAAC;QACrE,IAAI,CAAC,KAAK,GAAG,MAAM,CAAC,KAAK,IAAI,0BAA0B,CAAC;QACxD,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC,SAAS,IAAI,IAAI,CAAC;QAC1C,IAAI,CAAC,OAAO,GAAG,MAAM,CAAC,OAAO,IAAI,2BAA2B,CAAC;IAC/D,CAAC;IAED,KAAK,CAAC,WAAW;QACf,OAAO,IAAI,CAAC,MAAM,KAAK,IAAI,IAAI,IAAI,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC;IACxD,CAAC;IAED,KAAK,CAAC,IAAI,CAAC,QAAsB,EAAE,OAAwB;QACzD,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAC;QAC9D,CAAC;QAED,4CAA4C;QAC5C,MAAM,cAAc,GAAG,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,QAAQ,CAAC,CAAC;QACjE,MAAM,oBAAoB,GAAG,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,QAAQ,CAAC,CAAC;QACvE,MAAM,YAAY,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,SAAS,CAAC;QAEhF,MAAM,KAAK,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QAEzB,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,GAAG,IAAI,CAAC,OAAO,cAAc,EAAE;YAC1D,MAAM,EAAE,MAAM;YACd,OAAO,EAAE;gBACP,cAAc,EAAE,kBAAkB;gBAClC,WAAW,EAAE,IAAI,CAAC,MAAM;gBACxB,mBAAmB,EAAE,YAAY;aAClC;YACD,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC;gBACnB,KAAK,EAAE,IAAI,CAAC,KAAK;gBACjB,UAAU,EAAE,OAAO,EAAE,SAAS,IAAI,IAAI,CAAC,SAAS;gBAChD,GAAG,CAAC,OAAO,EAAE,WAAW,KAAK,SAAS,CAAC,CAAC,CAAC,EAAE,WAAW,EAAE,OAAO,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;gBACnF,GAAG,CAAC,YAAY,CAAC,CAAC,CAAC,EAAE,MAAM,EAAE,YAAY,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;gBACjD,QAAQ,EAAE,oBAAoB,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,EAAE,CAAC,CAAC,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;aAChF,CAAC;SACH,CAAC,CAAC;QAEH,MAAM,UAAU,GAAG,IAAI,CAAC,GAAG,EAAE,GAAG,KAAK,CAAC;QAEtC,IAAI,CAAC,QAAQ,CAAC,EAAE,EAAE,CAAC;YACjB,MAAM,OAAO,GAAG,MAAM,QAAQ,CAAC,IAAI,EAAE,CAAC;YACtC,MAAM,IAAI,KAAK,CAAC,wBAAwB,QAAQ,CAAC,MAAM,MAAM,OAAO,CAAC,SAAS,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,CAAC,CAAC;QAC5F,CAAC;QAED,MAAM,IAAI,GAAG,MAAM,QAAQ,CAAC,IAAI,EAG/B,CAAC;QAEF,MAAM,IAAI,GAAG,IAAI,CAAC,OAAO;YACvB,EAAE,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC;aAC/B,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,IAAI,IAAI,EAAE,CAAC;aACtB,IAAI,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC;QAEpB,OAAO;YACL,IAAI;YACJ,WAAW,EAAE,IAAI,CAAC,KAAK,EAAE,YAAY,IAAI,CAAC;YAC1C,YAAY,EAAE,IAAI,CAAC,KAAK,EAAE,aAAa,IAAI,CAAC;YAC5C,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,UAAU;SACX,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,QAAQ,CAAC,MAAc,EAAE,OAAwB;QACrD,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,IAAI,CAC5B,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,EACnC,OAAO,CACR,CAAC;QACF,OAAO,MAAM,CAAC,IAAI,CAAC;IACrB,CAAC;IAED,KAAK,CAAC,KAAK,CAAC,KAAa;QACvB,uCAAuC;QACvC,OAAO,EAAE,CAAC;IACZ,CAAC;CACF"}
package/dist/llm/index.d.ts CHANGED
@@ -1,2 +1,10 @@
  export { LLMService, runLLMServiceMigration } from './llm-service.js';
- export type { LLMServiceConfig, LLMResponse, LLMUsageStats, PromptTemplate, } from './llm-service.js';
+ export type { LLMServiceConfig, LLMResponse, LLMUsageStats, PromptTemplate, ProviderInfo, } from './llm-service.js';
+ export type { LLMProvider, LLMMessage, LLMCallOptions, LLMProviderResponse, RoutingTier, } from './provider.js';
+ export { TaskRouter } from './provider.js';
+ export { AnthropicProvider } from './anthropic-provider.js';
+ export type { AnthropicProviderConfig } from './anthropic-provider.js';
+ export { OllamaProvider } from './ollama-provider.js';
+ export type { OllamaProviderConfig, OllamaStatus, OllamaModelInfo, OllamaRunningModel } from './ollama-provider.js';
+ export { OllamaEmbeddingProvider } from './ollama-embedding.js';
+ export type { OllamaEmbeddingConfig } from './ollama-embedding.js';
package/dist/llm/index.js CHANGED
@@ -1,2 +1,6 @@
  export { LLMService, runLLMServiceMigration } from './llm-service.js';
+ export { TaskRouter } from './provider.js';
+ export { AnthropicProvider } from './anthropic-provider.js';
+ export { OllamaProvider } from './ollama-provider.js';
+ export { OllamaEmbeddingProvider } from './ollama-embedding.js';
  //# sourceMappingURL=index.js.map
package/dist/llm/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/llm/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,sBAAsB,EAAE,MAAM,kBAAkB,CAAC"}
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/llm/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,sBAAsB,EAAE,MAAM,kBAAkB,CAAC;AAQtE,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,iBAAiB,EAAE,MAAM,yBAAyB,CAAC;AAG5D,OAAO,EAAE,cAAc,EAAE,MAAM,sBAAsB,CAAC;AAGtD,OAAO,EAAE,uBAAuB,EAAE,MAAM,uBAAuB,CAAC"}
package/dist/llm/llm-service.d.ts CHANGED
@@ -1,4 +1,6 @@
  import type Database from 'better-sqlite3';
+ import type { LLMProvider } from './provider.js';
+ import { TaskRouter } from './provider.js';
  export interface LLMServiceConfig {
  /** Anthropic API key. Falls back to ANTHROPIC_API_KEY env var. */
  apiKey?: string;
@@ -6,16 +8,18 @@ export interface LLMServiceConfig {
  model?: string;
  /** Max tokens per request. Default: 2048 */
  maxTokens?: number;
- /** Max API calls per hour. Default: 30 */
+ /** Max API calls per hour (for paid providers). Default: 30 */
  maxCallsPerHour?: number;
- /** Max tokens per hour budget. Default: 100_000 */
+ /** Max tokens per hour budget (for paid providers). Default: 100_000 */
  tokenBudgetPerHour?: number;
- /** Max tokens per day budget. Default: 500_000 */
+ /** Max tokens per day budget (for paid providers). Default: 500_000 */
  tokenBudgetPerDay?: number;
  /** Cache TTL in ms. Default: 3_600_000 (1 hour) */
  cacheTtlMs?: number;
  /** Max cache entries. Default: 500 */
  maxCacheEntries?: number;
+ /** Prefer local providers for simple tasks. Default: true */
+ preferLocal?: boolean;
  }
  export interface LLMResponse {
  text: string;
@@ -25,6 +29,8 @@ export interface LLMResponse {
  cached: boolean;
  model: string;
  durationMs: number;
+ /** Which provider handled this request */
+ provider: string;
  }
  export interface LLMUsageStats {
  totalCalls: number;
@@ -42,12 +48,24 @@ export interface LLMUsageStats {
  errors: number;
  lastCallAt: number | null;
  model: string;
+ /** Active providers and their status */
+ providers: ProviderInfo[];
+ }
+ export interface ProviderInfo {
+ name: string;
+ available: boolean;
+ costTier: 'free' | 'cheap' | 'expensive';
+ capabilities: {
+ chat: boolean;
+ generate: boolean;
+ embed: boolean;
+ reasoning: boolean;
+ };
  }
  export type PromptTemplate = 'explain' | 'ask' | 'synthesize_debate' | 'creative_hypothesis' | 'research_question' | 'summarize' | 'analyze_contradiction' | 'custom';
  export declare function runLLMServiceMigration(db: Database.Database): void;
  export declare class LLMService {
  private db;
- private readonly apiKey;
  private readonly model;
  private readonly maxTokens;
  private readonly maxCallsPerHour;
@@ -56,23 +74,49 @@ export declare class LLMService {
  private readonly cacheTtlMs;
  private readonly maxCacheEntries;
  private readonly log;
+ private readonly router;
+ private providers;
  private cache;
  private callHistory;
  private stats;
  private stmtInsertUsage;
  constructor(db: Database.Database, config?: LLMServiceConfig);
- /** Check if LLM is available (API key set). */
+ /**
+ * Register an additional LLM provider.
+ *
+ * Example:
+ * ```typescript
+ * import { OllamaProvider } from '@timmeck/brain-core';
+ * llmService.registerProvider(new OllamaProvider());
+ * ```
+ */
+ registerProvider(provider: LLMProvider): void;
+ /** Remove a provider by name. */
+ removeProvider(name: string): void;
+ /** Get all registered providers. */
+ getProviders(): LLMProvider[];
+ /** Get the task router (for MCP tools / debugging). */
+ getRouter(): TaskRouter;
+ /** Check if any provider with chat capability is available. */
  isAvailable(): boolean;
  /**
- * Main entry point: call Claude with a template + context.
- * Returns null if budget exhausted or API key not set (caller should fallback to heuristic).
+ * Main entry point: call an LLM with a template + context.
+ * Returns null if budget exhausted or no provider available (caller should fallback to heuristic).
+ *
+ * The TaskRouter selects the best provider based on template complexity:
+ * - Simple tasks (summarize) → local provider first (free)
+ * - Complex tasks (debate, hypothesis) → cloud provider (quality)
+ * - Fallback chain: if preferred provider fails → try next
  */
  call(template: PromptTemplate, userMessage: string, options?: {
  maxTokens?: number;
  temperature?: number;
+ provider?: string;
  }): Promise<LLMResponse | null>;
  /** Get usage statistics. */
  getStats(): LLMUsageStats;
+ /** Get provider info with async availability checks. */
+ getProviderStatus(): Promise<ProviderInfo[]>;
  /** Get usage history from DB (for dashboard). */
  getUsageHistory(hours?: number): Array<{
  hour: string;
@@ -87,6 +131,10 @@ export declare class LLMService {
  tokens: number;
  avg_tokens: number;
  }>;
+ /** Gracefully shutdown all providers. */
+ shutdown(): Promise<void>;
+ /** Get sorted provider chain for a template, with availability pre-checked. */
+ private getProviderChain;
  private checkRateLimit;
  private checkTokenBudget;
  private getCacheKey;
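
The `call()` docblock above spells out the routing contract; from the caller's side it looks roughly like the sketch below. `llm`, `longText`, and the heuristic fallback are placeholders, while the template names and the new `provider` option come straight from the declarations.

```typescript
// Simple template: with preferLocal, the router should try a free local
// provider first and fall back down the chain if it fails.
const summary = await llm.call('summarize', longText);
if (summary === null) {
  // Budget exhausted or no provider available: fall back to a heuristic,
  // exactly as the docblock instructs.
}

// Force a specific provider, bypassing routing; returns null if unavailable.
const idea = await llm.call('creative_hypothesis', 'Combine X and Y', {
  provider: 'anthropic',
});

// Async, per-provider availability, e.g. for dashboards and MCP tools.
const status = await llm.getProviderStatus();
// e.g. [{ name: 'anthropic', available: true, costTier: 'expensive', capabilities: { ... } }]
```
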
package/dist/llm/llm-service.js CHANGED
@@ -1,5 +1,7 @@
  import { createHash } from 'node:crypto';
  import { getLogger } from '../utils/logger.js';
+ import { TaskRouter } from './provider.js';
+ import { AnthropicProvider } from './anthropic-provider.js';
  // ── Migration ───────────────────────────────────────────
  export function runLLMServiceMigration(db) {
  db.exec(`
@@ -18,6 +20,13 @@ export function runLLMServiceMigration(db) {
  CREATE INDEX IF NOT EXISTS idx_llm_usage_created ON llm_usage(created_at);
  CREATE INDEX IF NOT EXISTS idx_llm_usage_template ON llm_usage(template);
  `);
+ // Add provider column if not exists (migration for existing DBs)
+ try {
+ db.exec(`ALTER TABLE llm_usage ADD COLUMN provider TEXT NOT NULL DEFAULT 'anthropic'`);
+ }
+ catch {
+ // Column already exists — ignore
+ }
  }
  // ── Prompt Templates ────────────────────────────────────
  const SYSTEM_PROMPTS = {
@@ -65,7 +74,6 @@ Follow the user's instructions precisely.`,
  // ── Service ─────────────────────────────────────────────
  export class LLMService {
  db;
- apiKey;
  model;
  maxTokens;
  maxCallsPerHour;
@@ -74,6 +82,9 @@ export class LLMService {
  cacheTtlMs;
  maxCacheEntries;
  log = getLogger();
+ router;
+ // Providers
+ providers = [];
  // In-memory state
  cache = new Map();
  callHistory = [];
@@ -91,7 +102,6 @@ export class LLMService {
  stmtInsertUsage = null;
  constructor(db, config = {}) {
  this.db = db;
- this.apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY ?? null;
  this.model = config.model ?? 'claude-sonnet-4-20250514';
  this.maxTokens = config.maxTokens ?? 2048;
  this.maxCallsPerHour = config.maxCallsPerHour ?? 30;
@@ -99,34 +109,72 @@ export class LLMService {
  this.tokenBudgetPerDay = config.tokenBudgetPerDay ?? 500_000;
  this.cacheTtlMs = config.cacheTtlMs ?? 3_600_000;
  this.maxCacheEntries = config.maxCacheEntries ?? 500;
+ this.router = new TaskRouter(config.preferLocal ?? true);
  runLLMServiceMigration(db);
- this.stmtInsertUsage = db.prepare('INSERT INTO llm_usage (prompt_hash, template, model, input_tokens, output_tokens, total_tokens, duration_ms, cached) VALUES (?, ?, ?, ?, ?, ?, ?, ?)');
- this.log.debug(`[LLMService] Initialized (model=${this.model}, apiKey=${this.apiKey ? 'set' : 'NOT SET'}, budget=${this.tokenBudgetPerHour}/h)`);
+ this.stmtInsertUsage = db.prepare('INSERT INTO llm_usage (prompt_hash, template, model, input_tokens, output_tokens, total_tokens, duration_ms, cached, provider) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)');
+ // Auto-register Anthropic provider from config
+ const anthropicConfig = {
+ apiKey: config.apiKey,
+ model: config.model,
+ maxTokens: config.maxTokens,
+ };
+ const anthropic = new AnthropicProvider(anthropicConfig);
+ this.providers.push(anthropic);
+ this.log.debug(`[LLMService] Initialized (model=${this.model}, providers=[anthropic], preferLocal=${config.preferLocal ?? true})`);
+ }
+ /**
+ * Register an additional LLM provider.
+ *
+ * Example:
+ * ```typescript
+ * import { OllamaProvider } from '@timmeck/brain-core';
+ * llmService.registerProvider(new OllamaProvider());
+ * ```
+ */
+ registerProvider(provider) {
+ // Don't register duplicates
+ if (this.providers.some(p => p.name === provider.name)) {
+ this.log.debug(`[LLMService] Provider '${provider.name}' already registered, skipping`);
+ return;
+ }
+ this.providers.push(provider);
+ this.log.debug(`[LLMService] Registered provider: ${provider.name} (${provider.costTier})`);
+ }
+ /** Remove a provider by name. */
+ removeProvider(name) {
+ this.providers = this.providers.filter(p => p.name !== name);
+ }
+ /** Get all registered providers. */
+ getProviders() {
+ return [...this.providers];
  }
- /** Check if LLM is available (API key set). */
+ /** Get the task router (for MCP tools / debugging). */
+ getRouter() {
+ return this.router;
+ }
+ /** Check if any provider with chat capability is available. */
  isAvailable() {
- return this.apiKey !== null && this.apiKey.length > 0;
+ // Synchronous check: at least one provider could potentially be available
+ // For Anthropic: API key set. For Ollama: we assume yes (async check done at call time).
+ return this.providers.some(p => {
+ if (p.name === 'anthropic') {
+ // Quick sync check for anthropic
+ return p.capabilities.chat;
+ }
+ return p.capabilities.chat;
+ });
  }
  /**
- * Main entry point: call Claude with a template + context.
- * Returns null if budget exhausted or API key not set (caller should fallback to heuristic).
+ * Main entry point: call an LLM with a template + context.
+ * Returns null if budget exhausted or no provider available (caller should fallback to heuristic).
+ *
+ * The TaskRouter selects the best provider based on template complexity:
+ * - Simple tasks (summarize) → local provider first (free)
+ * - Complex tasks (debate, hypothesis) → cloud provider (quality)
+ * - Fallback chain: if preferred provider fails → try next
  */
  async call(template, userMessage, options) {
- if (!this.isAvailable())
- return null;
- // Check rate limit
- if (!this.checkRateLimit()) {
- this.stats.rateLimitHits++;
- this.log.debug('[LLMService] Rate limit reached, falling back to heuristic');
- return null;
- }
- // Check token budget
- if (!this.checkTokenBudget()) {
- this.stats.rateLimitHits++;
- this.log.debug('[LLMService] Token budget exhausted, falling back to heuristic');
- return null;
- }
- // Check cache
+ // Check cache first (provider-agnostic)
  const cacheKey = this.getCacheKey(template, userMessage);
  const cached = this.getFromCache(cacheKey);
  if (cached) {
@@ -135,67 +183,71 @@ export class LLMService {
  return { ...cached, cached: true };
  }
  this.stats.cacheMisses++;
- // Make API call
+ // Build provider chain (priority order)
+ const providerChain = await this.getProviderChain(template, options?.provider);
+ if (providerChain.length === 0) {
+ this.log.debug('[LLMService] No available providers');
+ return null;
+ }
+ // Check rate limit + budget (only for paid providers)
  const systemPrompt = SYSTEM_PROMPTS[template];
- const start = Date.now();
- try {
- const response = await fetch('https://api.anthropic.com/v1/messages', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'x-api-key': this.apiKey,
- 'anthropic-version': '2023-06-01',
- },
- body: JSON.stringify({
- model: this.model,
- max_tokens: options?.maxTokens ?? this.maxTokens,
- ...(options?.temperature !== undefined ? { temperature: options.temperature } : {}),
- system: systemPrompt,
- messages: [{ role: 'user', content: userMessage }],
- }),
- });
- const durationMs = Date.now() - start;
- if (!response.ok) {
- const errText = await response.text();
+ const messages = [
+ { role: 'system', content: systemPrompt },
+ { role: 'user', content: userMessage },
+ ];
+ // Try each provider in order
+ for (const provider of providerChain) {
+ // Rate limit only applies to paid providers
+ if (provider.costTier !== 'free') {
+ if (!this.checkRateLimit()) {
+ this.stats.rateLimitHits++;
+ this.log.debug(`[LLMService] Rate limit reached for ${provider.name}, trying next`);
+ continue;
+ }
+ if (!this.checkTokenBudget()) {
+ this.stats.rateLimitHits++;
+ this.log.debug(`[LLMService] Token budget exhausted for ${provider.name}, trying next`);
+ continue;
+ }
+ }
+ try {
+ const providerResponse = await provider.chat(messages, {
+ maxTokens: options?.maxTokens ?? this.maxTokens,
+ temperature: options?.temperature,
+ });
+ const totalTokens = providerResponse.inputTokens + providerResponse.outputTokens;
+ const result = {
+ text: providerResponse.text,
+ tokensUsed: totalTokens,
+ inputTokens: providerResponse.inputTokens,
+ outputTokens: providerResponse.outputTokens,
+ cached: false,
+ model: providerResponse.model,
+ durationMs: providerResponse.durationMs,
+ provider: provider.name,
+ };
+ // Update stats
+ this.stats.totalCalls++;
+ this.stats.totalTokens += totalTokens;
+ this.stats.totalLatencyMs += providerResponse.durationMs;
+ this.stats.lastCallAt = Date.now();
+ this.callHistory.push({ timestamp: Date.now(), tokens: totalTokens, provider: provider.name });
+ // Cache the response
+ this.setCache(cacheKey, result);
+ // Record to DB
+ this.recordUsage(cacheKey, template, result, false);
+ this.log.debug(`[LLMService] ${template} via ${provider.name}: ${totalTokens} tokens, ${providerResponse.durationMs}ms`);
+ return result;
+ }
+ catch (err) {
  this.stats.errors++;
- this.log.warn(`[LLMService] API error (${response.status}): ${errText.substring(0, 200)}`);
- return null;
+ this.log.warn(`[LLMService] Provider '${provider.name}' failed: ${err.message}, trying next`);
+ continue;
  }
- const data = await response.json();
- const text = data.content
- ?.filter(c => c.type === 'text')
- .map(c => c.text ?? '')
- .join('\n') ?? '';
- const inputTokens = data.usage?.input_tokens ?? 0;
- const outputTokens = data.usage?.output_tokens ?? 0;
- const totalTokens = inputTokens + outputTokens;
- const result = {
- text,
- tokensUsed: totalTokens,
- inputTokens,
- outputTokens,
- cached: false,
- model: this.model,
- durationMs,
- };
- // Update stats
- this.stats.totalCalls++;
- this.stats.totalTokens += totalTokens;
- this.stats.totalLatencyMs += durationMs;
- this.stats.lastCallAt = Date.now();
- this.callHistory.push({ timestamp: Date.now(), tokens: totalTokens });
- // Cache the response
- this.setCache(cacheKey, result);
- // Record to DB
- this.recordUsage(cacheKey, template, result, false);
- this.log.debug(`[LLMService] ${template}: ${totalTokens} tokens, ${durationMs}ms`);
- return result;
- }
- catch (err) {
- this.stats.errors++;
- this.log.warn(`[LLMService] Call failed: ${err.message}`);
- return null;
  }
+ // All providers failed
+ this.log.warn('[LLMService] All providers failed, returning null');
+ return null;
  }
  /** Get usage statistics. */
  getStats() {
@@ -206,6 +258,13 @@ export class LLMService {
  const callsThisHour = this.callHistory.filter(c => c.timestamp > oneHourAgo).length;
  const tokensThisHour = this.callHistory.filter(c => c.timestamp > oneHourAgo).reduce((s, c) => s + c.tokens, 0);
  const tokensToday = this.callHistory.filter(c => c.timestamp > oneDayAgo).reduce((s, c) => s + c.tokens, 0);
+ // Build provider info (sync — use cached availability)
+ const providers = this.providers.map(p => ({
+ name: p.name,
+ available: p.name === 'anthropic' ? (p.capabilities.chat) : true, // best effort sync
+ costTier: p.costTier,
+ capabilities: { ...p.capabilities },
+ }));
  return {
  totalCalls: this.stats.totalCalls,
  totalTokens: this.stats.totalTokens,
@@ -224,8 +283,18 @@
  errors: this.stats.errors,
  lastCallAt: this.stats.lastCallAt,
  model: this.model,
+ providers,
  };
  }
+ /** Get provider info with async availability checks. */
+ async getProviderStatus() {
+ return Promise.all(this.providers.map(async (p) => ({
+ name: p.name,
+ available: await p.isAvailable(),
+ costTier: p.costTier,
+ capabilities: { ...p.capabilities },
+ })));
+ }
  /** Get usage history from DB (for dashboard). */
  getUsageHistory(hours = 24) {
  try {
@@ -262,10 +331,43 @@
  return [];
  }
  }
+ /** Gracefully shutdown all providers. */
+ async shutdown() {
+ for (const provider of this.providers) {
+ if (provider.shutdown) {
+ try {
+ await provider.shutdown();
+ }
+ catch {
+ // Best effort
+ }
+ }
+ }
+ }
  // ── Private Helpers ────────────────────────────────────
+ /** Get sorted provider chain for a template, with availability pre-checked. */
+ async getProviderChain(template, forcedProvider) {
+ if (forcedProvider) {
+ const p = this.providers.find(p => p.name === forcedProvider);
+ if (p && await p.isAvailable())
+ return [p];
+ return [];
+ }
+ // Get routing preference
+ const sorted = this.router.route(template, this.providers);
+ // Filter to available providers
+ const available = [];
+ for (const p of sorted) {
+ if (await p.isAvailable()) {
+ available.push(p);
+ }
+ }
+ return available;
+ }
  checkRateLimit() {
  const oneHourAgo = Date.now() - 3_600_000;
  this.pruneCallHistory();
+ // Only count paid provider calls for rate limiting
  const recentCalls = this.callHistory.filter(c => c.timestamp > oneHourAgo).length;
  return recentCalls < this.maxCallsPerHour;
  }
@@ -282,7 +384,7 @@
  return true;
  }
  getCacheKey(template, userMessage) {
- const input = `${template}:${this.model}:${userMessage}`;
+ const input = `${template}:${userMessage}`;
  return createHash('sha256').update(input).digest('hex');
  }
  getFromCache(key) {
@@ -313,7 +415,7 @@
  }
  recordUsage(hash, template, response, cached) {
  try {
- this.stmtInsertUsage?.run(hash, template, response.model, response.inputTokens, response.outputTokens, response.tokensUsed, response.durationMs, cached ? 1 : 0, response.provider ?? 'anthropic');
+ this.stmtInsertUsage?.run(hash, template, response.model, response.inputTokens, response.outputTokens, response.tokensUsed, response.durationMs, cached ? 1 : 0, response.provider ?? 'anthropic');
  }
  catch {
  // Best effort — don't crash on DB error
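
One behavioral subtlety in the hunk above: `getCacheKey` dropped the model from the key, so the cache is now provider-agnostic, and the same template plus message hits one entry no matter which provider would have answered. A minimal sketch mirroring the new keying:

```typescript
import { createHash } from 'node:crypto';

// Provider-agnostic cache key, as in the diff: the model is intentionally
// excluded so local and cloud responses share cache entries.
function cacheKey(template: string, userMessage: string): string {
  return createHash('sha256').update(`${template}:${userMessage}`).digest('hex');
}

// Same key whether Ollama or Claude would handle the call:
cacheKey('summarize', 'Hello');
```

The trade-off is that a cached local-model answer can now be served where a cloud answer would previously have been recomputed; the new `LLMResponse.provider` field at least makes that visible to callers.
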