web-agent-bridge 3.2.0 → 3.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (256)
  1. package/LICENSE +84 -72
  2. package/README.ar.md +1304 -1152
  3. package/README.md +298 -1635
  4. package/bin/agent-runner.js +474 -474
  5. package/bin/cli.js +237 -138
  6. package/bin/wab-init.js +223 -0
  7. package/bin/wab.js +80 -80
  8. package/examples/azure-dns-wab.js +83 -0
  9. package/examples/bidi-agent.js +119 -119
  10. package/examples/cloudflare-wab-dns.js +121 -0
  11. package/examples/cpanel-wab-dns.js +114 -0
  12. package/examples/cross-site-agent.js +91 -91
  13. package/examples/dns-discovery-agent.js +166 -0
  14. package/examples/gcp-dns-wab.js +76 -0
  15. package/examples/governance-agent.js +169 -0
  16. package/examples/mcp-agent.js +94 -94
  17. package/examples/next-app-router/README.md +44 -44
  18. package/examples/plesk-wab-dns.js +103 -0
  19. package/examples/puppeteer-agent.js +108 -108
  20. package/examples/route53-wab-dns.js +144 -0
  21. package/examples/saas-dashboard/README.md +55 -55
  22. package/examples/safe-mode-agent.js +96 -0
  23. package/examples/shopify-hydrogen/README.md +74 -74
  24. package/examples/vision-agent.js +171 -171
  25. package/examples/wab-sign.js +74 -0
  26. package/examples/wab-verify.js +60 -0
  27. package/examples/wordpress-elementor/README.md +77 -77
  28. package/package.json +19 -6
  29. package/public/.well-known/agent-tools.json +180 -180
  30. package/public/.well-known/ai-assets.json +59 -59
  31. package/public/.well-known/security.txt +8 -0
  32. package/public/.well-known/wab.json +28 -0
  33. package/public/activate.html +368 -0
  34. package/public/adoption-metrics.html +188 -0
  35. package/public/agent-workspace.html +349 -349
  36. package/public/ai.html +198 -198
  37. package/public/api.html +413 -412
  38. package/public/azure-dns-integration.html +289 -0
  39. package/public/browser.html +486 -486
  40. package/public/cloudflare-integration.html +380 -0
  41. package/public/commander-dashboard.html +243 -243
  42. package/public/cookies.html +210 -210
  43. package/public/cpanel-integration.html +398 -0
  44. package/public/css/agent-workspace.css +1713 -1713
  45. package/public/css/premium.css +317 -317
  46. package/public/css/styles.css +1263 -1235
  47. package/public/dashboard.html +707 -706
  48. package/public/dns.html +436 -0
  49. package/public/docs.html +588 -587
  50. package/public/feed.xml +89 -89
  51. package/public/gcp-dns-integration.html +318 -0
  52. package/public/growth.html +465 -463
  53. package/public/index.html +1266 -982
  54. package/public/integrations.html +556 -0
  55. package/public/js/activate.js +145 -0
  56. package/public/js/agent-workspace.js +1740 -1740
  57. package/public/js/auth-nav.js +65 -31
  58. package/public/js/auth-redirect.js +12 -12
  59. package/public/js/cookie-consent.js +56 -56
  60. package/public/js/dns.js +438 -0
  61. package/public/js/wab-demo-page.js +721 -721
  62. package/public/js/ws-client.js +74 -74
  63. package/public/llms-full.txt +360 -360
  64. package/public/llms.txt +125 -125
  65. package/public/login.html +85 -85
  66. package/public/mesh-dashboard.html +328 -328
  67. package/public/openapi.json +669 -580
  68. package/public/phone-shield.html +281 -0
  69. package/public/plesk-integration.html +375 -0
  70. package/public/premium-dashboard.html +2489 -2489
  71. package/public/premium.html +793 -793
  72. package/public/privacy.html +297 -297
  73. package/public/provider-onboarding.html +172 -0
  74. package/public/provider-sandbox.html +134 -0
  75. package/public/providers.html +359 -0
  76. package/public/register.html +105 -105
  77. package/public/registrar-integrations.html +141 -0
  78. package/public/robots.txt +99 -87
  79. package/public/route53-integration.html +531 -0
  80. package/public/script/wab-consent.d.ts +36 -36
  81. package/public/script/wab-consent.js +104 -104
  82. package/public/script/wab-schema.js +131 -131
  83. package/public/script/wab.d.ts +108 -108
  84. package/public/script/wab.min.js +580 -580
  85. package/public/security.txt +8 -0
  86. package/public/shieldqr.html +231 -0
  87. package/public/sitemap.xml +6 -0
  88. package/public/terms.html +256 -256
  89. package/public/wab-trust.html +200 -0
  90. package/public/wab-vs-protocols.html +210 -0
  91. package/public/whitepaper.html +449 -0
  92. package/script/ai-agent-bridge.js +1754 -1754
  93. package/sdk/README.md +99 -99
  94. package/sdk/agent-mesh.js +449 -449
  95. package/sdk/auto-discovery.js +288 -0
  96. package/sdk/commander.js +262 -262
  97. package/sdk/governance.js +262 -0
  98. package/sdk/index.d.ts +464 -464
  99. package/sdk/index.js +25 -1
  100. package/sdk/multi-agent.js +318 -318
  101. package/sdk/package.json +2 -2
  102. package/sdk/safe-mode.js +221 -0
  103. package/sdk/safety-shield.js +219 -0
  104. package/sdk/schema-discovery.js +83 -83
  105. package/server/adapters/index.js +520 -520
  106. package/server/config/plans.js +367 -367
  107. package/server/config/secrets.js +102 -102
  108. package/server/control-plane/index.js +301 -301
  109. package/server/data-plane/index.js +354 -354
  110. package/server/index.js +670 -427
  111. package/server/llm/index.js +404 -404
  112. package/server/middleware/adminAuth.js +35 -35
  113. package/server/middleware/auth.js +50 -50
  114. package/server/middleware/featureGate.js +88 -88
  115. package/server/middleware/rateLimits.js +100 -100
  116. package/server/middleware/sensitiveAction.js +157 -0
  117. package/server/migrations/001_add_analytics_indexes.sql +7 -7
  118. package/server/migrations/002_premium_features.sql +418 -418
  119. package/server/migrations/003_ads_integer_cents.sql +33 -33
  120. package/server/migrations/004_agent_os.sql +158 -158
  121. package/server/migrations/005_marketplace_metering.sql +126 -126
  122. package/server/migrations/007_governance.sql +106 -0
  123. package/server/migrations/008_plans.sql +144 -0
  124. package/server/migrations/009_shieldqr.sql +30 -0
  125. package/server/migrations/010_extended_trust.sql +33 -0
  126. package/server/models/adapters/index.js +33 -33
  127. package/server/models/adapters/mysql.js +183 -183
  128. package/server/models/adapters/postgresql.js +172 -172
  129. package/server/models/adapters/sqlite.js +7 -7
  130. package/server/models/db.js +740 -681
  131. package/server/observability/failure-analysis.js +337 -337
  132. package/server/observability/index.js +394 -394
  133. package/server/protocol/capabilities.js +223 -223
  134. package/server/protocol/index.js +243 -243
  135. package/server/protocol/schema.js +584 -584
  136. package/server/registry/certification.js +271 -271
  137. package/server/registry/index.js +326 -326
  138. package/server/routes/admin-plans.js +76 -0
  139. package/server/routes/admin-premium.js +673 -671
  140. package/server/routes/admin-shieldqr.js +90 -0
  141. package/server/routes/admin-trust-monitor.js +83 -0
  142. package/server/routes/admin.js +549 -261
  143. package/server/routes/ads.js +130 -130
  144. package/server/routes/agent-workspace.js +540 -540
  145. package/server/routes/api.js +150 -150
  146. package/server/routes/auth.js +71 -71
  147. package/server/routes/billing.js +57 -45
  148. package/server/routes/commander.js +316 -316
  149. package/server/routes/demo-showcase.js +332 -332
  150. package/server/routes/demo-store.js +154 -0
  151. package/server/routes/discovery.js +2348 -417
  152. package/server/routes/gateway.js +173 -157
  153. package/server/routes/governance.js +208 -0
  154. package/server/routes/license.js +251 -240
  155. package/server/routes/mesh.js +469 -469
  156. package/server/routes/noscript.js +543 -543
  157. package/server/routes/plans.js +33 -0
  158. package/server/routes/premium-v2.js +686 -686
  159. package/server/routes/premium.js +724 -724
  160. package/server/routes/providers.js +650 -0
  161. package/server/routes/runtime.js +2148 -2147
  162. package/server/routes/shieldqr.js +88 -0
  163. package/server/routes/sovereign.js +465 -385
  164. package/server/routes/universal.js +200 -185
  165. package/server/routes/wab-api.js +850 -501
  166. package/server/runtime/container-worker.js +111 -111
  167. package/server/runtime/container.js +448 -448
  168. package/server/runtime/distributed-worker.js +362 -362
  169. package/server/runtime/event-bus.js +210 -210
  170. package/server/runtime/index.js +253 -253
  171. package/server/runtime/queue.js +599 -599
  172. package/server/runtime/replay.js +666 -666
  173. package/server/runtime/sandbox.js +266 -266
  174. package/server/runtime/scheduler.js +534 -534
  175. package/server/runtime/session-engine.js +293 -293
  176. package/server/runtime/state-manager.js +188 -188
  177. package/server/security/cross-site-redactor.js +196 -0
  178. package/server/security/dry-run.js +180 -0
  179. package/server/security/human-gate-rate-limit.js +147 -0
  180. package/server/security/human-gate-transports.js +178 -0
  181. package/server/security/human-gate.js +281 -0
  182. package/server/security/index.js +368 -368
  183. package/server/security/intent-engine.js +245 -0
  184. package/server/security/reward-guard.js +171 -0
  185. package/server/security/rollback-store.js +239 -0
  186. package/server/security/token-scope.js +404 -0
  187. package/server/security/url-policy.js +139 -0
  188. package/server/services/agent-chat.js +506 -506
  189. package/server/services/agent-learning.js +601 -575
  190. package/server/services/agent-memory.js +625 -625
  191. package/server/services/agent-mesh.js +555 -539
  192. package/server/services/agent-symphony.js +717 -717
  193. package/server/services/agent-tasks.js +1807 -1807
  194. package/server/services/api-key-engine.js +292 -261
  195. package/server/services/cluster.js +894 -894
  196. package/server/services/commander.js +738 -738
  197. package/server/services/edge-compute.js +440 -440
  198. package/server/services/email.js +233 -204
  199. package/server/services/governance.js +466 -0
  200. package/server/services/hosted-runtime.js +205 -205
  201. package/server/services/lfd.js +635 -635
  202. package/server/services/local-ai.js +389 -389
  203. package/server/services/marketplace.js +270 -270
  204. package/server/services/metering.js +182 -182
  205. package/server/services/modules/affiliate-intelligence.js +93 -93
  206. package/server/services/modules/agent-firewall.js +90 -90
  207. package/server/services/modules/bounty.js +89 -89
  208. package/server/services/modules/collective-bargaining.js +92 -92
  209. package/server/services/modules/dark-pattern.js +66 -66
  210. package/server/services/modules/gov-intelligence.js +45 -45
  211. package/server/services/modules/neural.js +55 -55
  212. package/server/services/modules/notary.js +49 -49
  213. package/server/services/modules/price-time-machine.js +86 -86
  214. package/server/services/modules/protocol.js +104 -104
  215. package/server/services/negotiation.js +439 -439
  216. package/server/services/plans.js +214 -0
  217. package/server/services/plugins.js +771 -771
  218. package/server/services/premium.js +1 -1
  219. package/server/services/price-intelligence.js +566 -566
  220. package/server/services/price-shield.js +1137 -1137
  221. package/server/services/provider-clients.js +740 -0
  222. package/server/services/reputation.js +465 -465
  223. package/server/services/search-engine.js +357 -357
  224. package/server/services/security.js +513 -513
  225. package/server/services/self-healing.js +843 -843
  226. package/server/services/shieldqr.js +322 -0
  227. package/server/services/sovereign-shield.js +542 -0
  228. package/server/services/ssl-inspector.js +42 -0
  229. package/server/services/ssl-monitor.js +167 -0
  230. package/server/services/stripe.js +205 -192
  231. package/server/services/swarm.js +788 -788
  232. package/server/services/universal-scraper.js +662 -661
  233. package/server/services/verification.js +481 -481
  234. package/server/services/vision.js +1163 -1163
  235. package/server/services/wab-crypto.js +178 -0
  236. package/server/utils/cache.js +125 -125
  237. package/server/utils/migrate.js +81 -81
  238. package/server/utils/safe-fetch.js +228 -0
  239. package/server/utils/secureFields.js +50 -50
  240. package/server/ws.js +161 -161
  241. package/templates/artisan-marketplace.yaml +104 -104
  242. package/templates/book-price-scout.yaml +98 -98
  243. package/templates/electronics-price-tracker.yaml +108 -108
  244. package/templates/flight-deal-hunter.yaml +113 -113
  245. package/templates/freelancer-direct.yaml +116 -116
  246. package/templates/grocery-price-compare.yaml +93 -93
  247. package/templates/hotel-direct-booking.yaml +113 -113
  248. package/templates/local-services.yaml +98 -98
  249. package/templates/olive-oil-tunisia.yaml +88 -88
  250. package/templates/organic-farm-fresh.yaml +101 -101
  251. package/templates/restaurant-direct.yaml +97 -97
  252. package/public/score.html +0 -263
  253. package/server/migrations/006_growth_suite.sql +0 -138
  254. package/server/routes/growth.js +0 -962
  255. package/server/services/fairness-engine.js +0 -409
  256. package/server/services/fairness.js +0 -420
@@ -1,404 +1,404 @@
1
- 'use strict';
2
-
3
- /**
4
- * WAB LLM Abstraction Layer
5
- *
6
- * Model-agnostic LLM interface. Supports:
7
- * - OpenAI (GPT-4, GPT-3.5)
8
- * - Anthropic (Claude)
9
- * - Ollama (local models)
10
- * - Custom providers
11
- *
12
- * Provides a unified API with automatic fallback,
13
- * cost tracking, and response caching.
14
- */
15
-
16
- const { metrics, logger } = require('../observability');
17
-
18
- // ─── Provider Interface ─────────────────────────────────────────────────────
19
-
20
class LLMProvider {
  /**
   * Abstract base for an LLM backend.
   *
   * Concrete providers implement initialize()/complete() (and optionally
   * embed()); this base class only holds identity and default state.
   *
   * @param {string} name - Registry key for this provider.
   * @param {object} [config] - Provider-specific configuration.
   */
  constructor(name, config = {}) {
    this.name = name;
    this.config = config;
    this.available = false; // flipped to true by a successful initialize()
    this.models = [];       // model identifiers, populated by subclasses
  }

  /** Probe the backend and set this.available. Must be overridden. */
  async initialize() {
    throw new Error('Not implemented');
  }

  /** Run a completion request. Must be overridden. */
  async complete(prompt, options) {
    throw new Error('Not implemented');
  }

  /** Produce an embedding vector. Override where supported. */
  async embed(text) {
    throw new Error('Not implemented');
  }

  /** Enumerate the models this provider exposes. */
  async listModels() {
    return this.models;
  }
}
33
-
34
- // ─── OpenAI Provider ────────────────────────────────────────────────────────
35
-
36
class OpenAIProvider extends LLMProvider {
  /**
   * OpenAI chat-completion and embedding backend.
   *
   * Auth comes from config.apiKey or the OPENAI_API_KEY env var; baseUrl
   * can be overridden for OpenAI-compatible gateways.
   */
  constructor(config) {
    super('openai', config);
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
    this.models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo'];
  }

  /** Available iff an API key was supplied; no network probe is made. */
  async initialize() {
    this.available = !!this.apiKey;
    return this.available;
  }

  /**
   * Call /chat/completions and normalize the response.
   *
   * @param {string} prompt - User message text.
   * @param {object} [options] - model, systemPrompt, temperature, maxTokens.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   * @throws {Error} When uninitialized or the API returns a non-2xx status.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const model = options.model || 'gpt-4o-mini';
    const messages = [];
    if (options.systemPrompt) messages.push({ role: 'system', content: options.systemPrompt });
    messages.push({ role: 'user', content: prompt });

    const body = {
      model,
      messages,
      // ?? (not ||) so an explicit temperature of 0 is respected.
      temperature: options.temperature ?? 0.7,
      max_tokens: options.maxTokens || 2048,
    };

    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify(body),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenAI error ${res.status}: ${err}`);
    }

    const data = await res.json();
    return {
      text: data.choices[0]?.message?.content || '',
      model,
      provider: 'openai',
      usage: {
        promptTokens: data.usage?.prompt_tokens || 0,
        completionTokens: data.usage?.completion_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0,
      },
      finishReason: data.choices[0]?.finish_reason,
    };
  }

  /**
   * Call /embeddings for a single input.
   *
   * Generalized: options.model now selects the embedding model; the default
   * ('text-embedding-3-small') matches the previous hard-coded behavior, so
   * existing embed(text) callers are unaffected.
   *
   * @param {string} text - Input to embed.
   * @param {object} [options] - Optional; options.model picks the model.
   * @returns {Promise<{embedding: number[], model: string, provider: string}>}
   * @throws {Error} When uninitialized or the API returns a non-2xx status.
   */
  async embed(text, options = {}) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const model = options.model || 'text-embedding-3-small';
    const res = await fetch(`${this.baseUrl}/embeddings`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({ model, input: text }),
    });

    if (!res.ok) throw new Error(`OpenAI embed error ${res.status}`);
    const data = await res.json();
    return { embedding: data.data[0]?.embedding || [], model, provider: 'openai' };
  }
}
109
-
110
- // ─── Anthropic Provider ─────────────────────────────────────────────────────
111
-
112
class AnthropicProvider extends LLMProvider {
  /**
   * Anthropic Messages API backend.
   *
   * Auth comes from config.apiKey or the ANTHROPIC_API_KEY env var; baseUrl
   * can be overridden for proxies/gateways.
   */
  constructor(config) {
    super('anthropic', config);
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
    this.models = ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-3-5-sonnet-20241022'];
  }

  /** Available iff an API key was supplied; no network probe is made. */
  async initialize() {
    this.available = Boolean(this.apiKey);
    return this.available;
  }

  /**
   * Call /messages and normalize the response to the shared result shape.
   *
   * @param {string} prompt - User message text.
   * @param {object} [options] - model, systemPrompt, temperature, maxTokens.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   * @throws {Error} When uninitialized or the API returns a non-2xx status.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Anthropic provider not initialized');

    const model = options.model || 'claude-3-5-haiku-20241022';
    const request = {
      model,
      max_tokens: options.maxTokens || 2048,
      messages: [{ role: 'user', content: prompt }],
    };
    // Anthropic takes the system prompt as a top-level field, not a message.
    if (options.systemPrompt) request.system = options.systemPrompt;
    if (options.temperature !== undefined) request.temperature = options.temperature;

    const response = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': this.apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify(request),
    });

    if (!response.ok) {
      const detail = await response.text();
      throw new Error(`Anthropic error ${response.status}: ${detail}`);
    }

    const payload = await response.json();
    const inputTokens = payload.usage?.input_tokens || 0;
    const outputTokens = payload.usage?.output_tokens || 0;
    return {
      text: payload.content?.[0]?.text || '',
      model,
      provider: 'anthropic',
      usage: {
        promptTokens: inputTokens,
        completionTokens: outputTokens,
        totalTokens: inputTokens + outputTokens,
      },
      finishReason: payload.stop_reason,
    };
  }
}
166
-
167
- // ─── Ollama Provider (Local) ────────────────────────────────────────────────
168
-
169
class OllamaProvider extends LLMProvider {
  /**
   * Local Ollama backend. No API key; availability and the model list are
   * discovered from the daemon at initialize time.
   */
  constructor(config) {
    super('ollama', config);
    this.baseUrl = config.baseUrl || process.env.OLLAMA_URL || 'http://localhost:11434';
  }

  /**
   * Probe the local daemon via /api/tags with a 3-second timeout. On
   * success the installed model names are cached and the provider becomes
   * available; any failure leaves it unavailable.
   */
  async initialize() {
    try {
      const tags = await fetch(`${this.baseUrl}/api/tags`, { signal: AbortSignal.timeout(3000) });
      if (tags.ok) {
        const listing = await tags.json();
        this.models = (listing.models || []).map((entry) => entry.name);
        this.available = true;
      }
    } catch (_) {
      this.available = false;
    }
    return this.available;
  }

  /**
   * Call /api/generate (non-streaming) and normalize the response.
   * systemPrompt, when given, is prepended to the prompt since the generate
   * endpoint takes a single prompt string.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Ollama not available');

    const model = options.model || this.models[0] || 'llama3.2';
    const fullPrompt = options.systemPrompt ? `${options.systemPrompt}\n\n${prompt}` : prompt;
    const request = {
      model,
      prompt: fullPrompt,
      stream: false,
      options: {},
    };
    if (options.temperature !== undefined) request.options.temperature = options.temperature;

    const response = await fetch(`${this.baseUrl}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(request),
    });

    if (!response.ok) throw new Error(`Ollama error ${response.status}`);
    const payload = await response.json();

    const promptTokens = payload.prompt_eval_count || 0;
    const completionTokens = payload.eval_count || 0;
    return {
      text: payload.response || '',
      model,
      provider: 'ollama',
      usage: {
        promptTokens,
        completionTokens,
        totalTokens: promptTokens + completionTokens,
      },
      finishReason: payload.done ? 'stop' : 'length',
    };
  }

  /**
   * Call /api/embeddings, preferring the first installed model whose name
   * contains "embed" and falling back to 'nomic-embed-text'.
   */
  async embed(text) {
    if (!this.available) throw new Error('Ollama not available');

    const model = this.models.find((name) => name.includes('embed')) || 'nomic-embed-text';
    const response = await fetch(`${this.baseUrl}/api/embeddings`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, prompt: text }),
    });

    if (!response.ok) throw new Error(`Ollama embed error ${response.status}`);
    const payload = await response.json();
    return { embedding: payload.embedding || [], model, provider: 'ollama' };
  }
}
238
-
239
- // ─── LLM Manager (Unified Interface) ───────────────────────────────────────
240
-
241
class LLMManager {
  /**
   * Unified front-end over all registered LLM providers.
   *
   * Routes complete()/embed() calls to a chosen provider, falls back through
   * the other registered providers on failure, caches completions for five
   * minutes, and tracks aggregate usage statistics.
   */
  constructor() {
    this._providers = new Map();
    this._defaultProvider = null;
    this._fallbackOrder = [];   // registration order doubles as fallback priority
    this._cache = new Map();    // cacheKey -> { result, timestamp }
    this._maxCache = 500;       // soft cap; oldest entry evicted beyond this
    this._stats = { requests: 0, cacheHits: 0, failures: 0, totalTokens: 0 };
  }

  /**
   * Register a provider instance. The first registered provider becomes the
   * provisional default until initialize() picks the first available one.
   * @param {LLMProvider} provider
   */
  registerProvider(provider) {
    this._providers.set(provider.name, provider);
    if (!this._defaultProvider) this._defaultProvider = provider.name;
    this._fallbackOrder.push(provider.name);
  }

  /**
   * Initialize all providers; a provider that throws is marked unavailable.
   * @returns {Promise<Object<string, boolean>>} provider name -> availability.
   */
  async initialize() {
    const results = {};
    for (const [name, provider] of this._providers) {
      try {
        results[name] = await provider.initialize();
      } catch (_) {
        results[name] = false;
      }
    }

    // Set default to the first available provider in registration order.
    for (const name of this._fallbackOrder) {
      if (this._providers.get(name)?.available) {
        this._defaultProvider = name;
        break;
      }
    }

    return results;
  }

  /**
   * Complete a prompt, trying the requested (or default) provider first and
   * falling back through the remaining registered providers on error.
   *
   * @param {string} prompt
   * @param {object} [options] - provider, model, systemPrompt, temperature,
   *   maxTokens, cache (set false to bypass the 5-minute response cache).
   * @returns {Promise<object>} Normalized result; includes `cached: true` on
   *   a cache hit and `duration` on a fresh call.
   * @throws {Error} When every available provider fails.
   */
  async complete(prompt, options = {}) {
    this._stats.requests++;

    // Serve from cache when allowed and fresh (< 5 minutes old).
    if (options.cache !== false) {
      const cacheKey = this._cacheKey(prompt, options);
      const cached = this._cache.get(cacheKey);
      if (cached && (Date.now() - cached.timestamp < 300_000)) {
        this._stats.cacheHits++;
        return { ...cached.result, cached: true };
      }
    }

    const providerName = options.provider || this._defaultProvider;
    const providers = [providerName, ...this._fallbackOrder.filter(p => p !== providerName)];

    const endTimer = metrics.startTimer('llm.request.duration');

    for (const name of providers) {
      const provider = this._providers.get(name);
      if (!provider?.available) continue;

      try {
        const result = await provider.complete(prompt, options);

        // BUGFIX: stop the timer exactly once and reuse the value; the old
        // code called endTimer() a second time when building the return
        // value, double-recording the duration metric.
        const duration = endTimer();
        metrics.increment('llm.requests.success', 1, { provider: name });
        this._stats.totalTokens += result.usage?.totalTokens || 0;

        // Cache result (simple FIFO eviction once the cap is exceeded).
        if (options.cache !== false) {
          const cacheKey = this._cacheKey(prompt, options);
          this._cache.set(cacheKey, { result, timestamp: Date.now() });
          if (this._cache.size > this._maxCache) {
            const oldest = this._cache.keys().next().value;
            this._cache.delete(oldest);
          }
        }

        return { ...result, duration };
      } catch (err) {
        metrics.increment('llm.requests.failure', 1, { provider: name });
        this._stats.failures++;
        // Surface the failure before trying the next provider; the old code
        // swallowed it silently. Guarded in case logger lacks warn().
        logger?.warn?.(`LLM provider ${name} failed: ${err.message}`);
        continue;
      }
    }

    endTimer();
    throw new Error('All LLM providers failed');
  }

  /**
   * Generate embeddings via a single provider (no fallback, no cache).
   * @throws {Error} If the provider is unavailable or lacks embed support.
   */
  async embed(text, options = {}) {
    const providerName = options.provider || this._defaultProvider;
    const provider = this._providers.get(providerName);
    if (!provider?.available) throw new Error(`Provider ${providerName} not available`);
    if (!provider.embed) throw new Error(`Provider ${providerName} does not support embeddings`);
    return provider.embed(text);
  }

  /** List { model, provider } pairs across all available providers. */
  listModels() {
    const models = [];
    for (const [name, provider] of this._providers) {
      if (!provider.available) continue;
      for (const model of provider.models) {
        models.push({ model, provider: name });
      }
    }
    return models;
  }

  /** Snapshot of default provider, per-provider availability, and stats. */
  getStatus() {
    const providers = {};
    for (const [name, provider] of this._providers) {
      providers[name] = {
        available: provider.available,
        models: provider.models,
      };
    }
    return {
      defaultProvider: this._defaultProvider,
      providers,
      stats: { ...this._stats },
    };
  }

  /**
   * Cache key for a completion request.
   *
   * BUGFIX: the old key hashed only the first 200 characters of the prompt
   * and ignored systemPrompt/temperature/maxTokens, so distinct requests
   * sharing a long prefix could be served each other's cached responses.
   * The full prompt and the generation-affecting options are now hashed.
   */
  _cacheKey(prompt, options) {
    const key = [
      options.provider || '',
      options.model || '',
      options.systemPrompt || '',
      options.temperature ?? '',
      options.maxTokens ?? '',
      prompt,
    ].join(':');
    return require('crypto').createHash('sha256').update(key).digest('hex');
  }
}
387
-
388
- // ─── Singleton ──────────────────────────────────────────────────────────────
389
-
390
- const llm = new LLMManager();
391
-
392
- // Register default providers
393
- llm.registerProvider(new OpenAIProvider({}));
394
- llm.registerProvider(new AnthropicProvider({}));
395
- llm.registerProvider(new OllamaProvider({}));
396
-
397
- module.exports = {
398
- LLMProvider,
399
- OpenAIProvider,
400
- AnthropicProvider,
401
- OllamaProvider,
402
- LLMManager,
403
- llm,
404
- };
1
+ 'use strict';
2
+
3
+ /**
4
+ * WAB LLM Abstraction Layer
5
+ *
6
+ * Model-agnostic LLM interface. Supports:
7
+ * - OpenAI (GPT-4, GPT-3.5)
8
+ * - Anthropic (Claude)
9
+ * - Ollama (local models)
10
+ * - Custom providers
11
+ *
12
+ * Provides a unified API with automatic fallback,
13
+ * cost tracking, and response caching.
14
+ */
15
+
16
+ const { metrics, logger } = require('../observability');
17
+
18
+ // ─── Provider Interface ─────────────────────────────────────────────────────
19
+
20
class LLMProvider {
  /**
   * Abstract base for an LLM backend.
   *
   * Concrete providers implement initialize()/complete() (and optionally
   * embed()); this base class only holds identity and default state.
   *
   * @param {string} name - Registry key for this provider.
   * @param {object} [config] - Provider-specific configuration.
   */
  constructor(name, config = {}) {
    this.name = name;
    this.config = config;
    this.available = false; // flipped to true by a successful initialize()
    this.models = [];       // model identifiers, populated by subclasses
  }

  /** Probe the backend and set this.available. Must be overridden. */
  async initialize() {
    throw new Error('Not implemented');
  }

  /** Run a completion request. Must be overridden. */
  async complete(prompt, options) {
    throw new Error('Not implemented');
  }

  /** Produce an embedding vector. Override where supported. */
  async embed(text) {
    throw new Error('Not implemented');
  }

  /** Enumerate the models this provider exposes. */
  async listModels() {
    return this.models;
  }
}
33
+
34
+ // ─── OpenAI Provider ────────────────────────────────────────────────────────
35
+
36
class OpenAIProvider extends LLMProvider {
  /**
   * OpenAI chat-completion and embedding backend.
   *
   * Auth comes from config.apiKey or the OPENAI_API_KEY env var; baseUrl
   * can be overridden for OpenAI-compatible gateways.
   */
  constructor(config) {
    super('openai', config);
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
    this.models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo'];
  }

  /** Available iff an API key was supplied; no network probe is made. */
  async initialize() {
    this.available = !!this.apiKey;
    return this.available;
  }

  /**
   * Call /chat/completions and normalize the response.
   *
   * @param {string} prompt - User message text.
   * @param {object} [options] - model, systemPrompt, temperature, maxTokens.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   * @throws {Error} When uninitialized or the API returns a non-2xx status.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const model = options.model || 'gpt-4o-mini';
    const messages = [];
    if (options.systemPrompt) messages.push({ role: 'system', content: options.systemPrompt });
    messages.push({ role: 'user', content: prompt });

    const body = {
      model,
      messages,
      // ?? (not ||) so an explicit temperature of 0 is respected.
      temperature: options.temperature ?? 0.7,
      max_tokens: options.maxTokens || 2048,
    };

    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify(body),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenAI error ${res.status}: ${err}`);
    }

    const data = await res.json();
    return {
      text: data.choices[0]?.message?.content || '',
      model,
      provider: 'openai',
      usage: {
        promptTokens: data.usage?.prompt_tokens || 0,
        completionTokens: data.usage?.completion_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0,
      },
      finishReason: data.choices[0]?.finish_reason,
    };
  }

  /**
   * Call /embeddings for a single input.
   *
   * Generalized: options.model now selects the embedding model; the default
   * ('text-embedding-3-small') matches the previous hard-coded behavior, so
   * existing embed(text) callers are unaffected.
   *
   * @param {string} text - Input to embed.
   * @param {object} [options] - Optional; options.model picks the model.
   * @returns {Promise<{embedding: number[], model: string, provider: string}>}
   * @throws {Error} When uninitialized or the API returns a non-2xx status.
   */
  async embed(text, options = {}) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const model = options.model || 'text-embedding-3-small';
    const res = await fetch(`${this.baseUrl}/embeddings`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({ model, input: text }),
    });

    if (!res.ok) throw new Error(`OpenAI embed error ${res.status}`);
    const data = await res.json();
    return { embedding: data.data[0]?.embedding || [], model, provider: 'openai' };
  }
}
109
+
110
+ // ─── Anthropic Provider ─────────────────────────────────────────────────────
111
+
112
class AnthropicProvider extends LLMProvider {
  /**
   * Anthropic Messages API backend.
   *
   * Auth comes from config.apiKey or the ANTHROPIC_API_KEY env var; baseUrl
   * can be overridden for proxies/gateways.
   */
  constructor(config) {
    super('anthropic', config);
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
    this.models = ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-3-5-sonnet-20241022'];
  }

  /** Available iff an API key was supplied; no network probe is made. */
  async initialize() {
    this.available = Boolean(this.apiKey);
    return this.available;
  }

  /**
   * Call /messages and normalize the response to the shared result shape.
   *
   * @param {string} prompt - User message text.
   * @param {object} [options] - model, systemPrompt, temperature, maxTokens.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   * @throws {Error} When uninitialized or the API returns a non-2xx status.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Anthropic provider not initialized');

    const model = options.model || 'claude-3-5-haiku-20241022';
    const request = {
      model,
      max_tokens: options.maxTokens || 2048,
      messages: [{ role: 'user', content: prompt }],
    };
    // Anthropic takes the system prompt as a top-level field, not a message.
    if (options.systemPrompt) request.system = options.systemPrompt;
    if (options.temperature !== undefined) request.temperature = options.temperature;

    const response = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': this.apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify(request),
    });

    if (!response.ok) {
      const detail = await response.text();
      throw new Error(`Anthropic error ${response.status}: ${detail}`);
    }

    const payload = await response.json();
    const inputTokens = payload.usage?.input_tokens || 0;
    const outputTokens = payload.usage?.output_tokens || 0;
    return {
      text: payload.content?.[0]?.text || '',
      model,
      provider: 'anthropic',
      usage: {
        promptTokens: inputTokens,
        completionTokens: outputTokens,
        totalTokens: inputTokens + outputTokens,
      },
      finishReason: payload.stop_reason,
    };
  }
}
166
+
167
+ // ─── Ollama Provider (Local) ────────────────────────────────────────────────
168
+
169
class OllamaProvider extends LLMProvider {
  /**
   * Local Ollama backend. No API key; availability and the model list are
   * discovered from the daemon at initialize time.
   */
  constructor(config) {
    super('ollama', config);
    this.baseUrl = config.baseUrl || process.env.OLLAMA_URL || 'http://localhost:11434';
  }

  /**
   * Probe the local daemon via /api/tags with a 3-second timeout. On
   * success the installed model names are cached and the provider becomes
   * available; any failure leaves it unavailable.
   */
  async initialize() {
    try {
      const tags = await fetch(`${this.baseUrl}/api/tags`, { signal: AbortSignal.timeout(3000) });
      if (tags.ok) {
        const listing = await tags.json();
        this.models = (listing.models || []).map((entry) => entry.name);
        this.available = true;
      }
    } catch (_) {
      this.available = false;
    }
    return this.available;
  }

  /**
   * Call /api/generate (non-streaming) and normalize the response.
   * systemPrompt, when given, is prepended to the prompt since the generate
   * endpoint takes a single prompt string.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Ollama not available');

    const model = options.model || this.models[0] || 'llama3.2';
    const fullPrompt = options.systemPrompt ? `${options.systemPrompt}\n\n${prompt}` : prompt;
    const request = {
      model,
      prompt: fullPrompt,
      stream: false,
      options: {},
    };
    if (options.temperature !== undefined) request.options.temperature = options.temperature;

    const response = await fetch(`${this.baseUrl}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(request),
    });

    if (!response.ok) throw new Error(`Ollama error ${response.status}`);
    const payload = await response.json();

    const promptTokens = payload.prompt_eval_count || 0;
    const completionTokens = payload.eval_count || 0;
    return {
      text: payload.response || '',
      model,
      provider: 'ollama',
      usage: {
        promptTokens,
        completionTokens,
        totalTokens: promptTokens + completionTokens,
      },
      finishReason: payload.done ? 'stop' : 'length',
    };
  }

  /**
   * Call /api/embeddings, preferring the first installed model whose name
   * contains "embed" and falling back to 'nomic-embed-text'.
   */
  async embed(text) {
    if (!this.available) throw new Error('Ollama not available');

    const model = this.models.find((name) => name.includes('embed')) || 'nomic-embed-text';
    const response = await fetch(`${this.baseUrl}/api/embeddings`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, prompt: text }),
    });

    if (!response.ok) throw new Error(`Ollama embed error ${response.status}`);
    const payload = await response.json();
    return { embedding: payload.embedding || [], model, provider: 'ollama' };
  }
}
238
+
239
+ // ─── LLM Manager (Unified Interface) ───────────────────────────────────────
240
+
241
+ class LLMManager {
242
+ constructor() {
243
+ this._providers = new Map();
244
+ this._defaultProvider = null;
245
+ this._fallbackOrder = [];
246
+ this._cache = new Map();
247
+ this._maxCache = 500;
248
+ this._stats = { requests: 0, cacheHits: 0, failures: 0, totalTokens: 0 };
249
+ }
250
+
251
+ /**
252
+ * Register a provider
253
+ */
254
+ registerProvider(provider) {
255
+ this._providers.set(provider.name, provider);
256
+ if (!this._defaultProvider) this._defaultProvider = provider.name;
257
+ this._fallbackOrder.push(provider.name);
258
+ }
259
+
260
+ /**
261
+ * Initialize all providers
262
+ */
263
+ async initialize() {
264
+ const results = {};
265
+ for (const [name, provider] of this._providers) {
266
+ try {
267
+ results[name] = await provider.initialize();
268
+ } catch (_) {
269
+ results[name] = false;
270
+ }
271
+ }
272
+
273
+ // Set default to first available
274
+ for (const name of this._fallbackOrder) {
275
+ if (this._providers.get(name)?.available) {
276
+ this._defaultProvider = name;
277
+ break;
278
+ }
279
+ }
280
+
281
+ return results;
282
+ }
283
+
284
+ /**
285
+ * Complete a prompt (with automatic fallback)
286
+ */
287
+ async complete(prompt, options = {}) {
288
+ this._stats.requests++;
289
+
290
+ // Check cache
291
+ if (options.cache !== false) {
292
+ const cacheKey = this._cacheKey(prompt, options);
293
+ const cached = this._cache.get(cacheKey);
294
+ if (cached && (Date.now() - cached.timestamp < 300_000)) {
295
+ this._stats.cacheHits++;
296
+ return { ...cached.result, cached: true };
297
+ }
298
+ }
299
+
300
+ const providerName = options.provider || this._defaultProvider;
301
+ const providers = [providerName, ...this._fallbackOrder.filter(p => p !== providerName)];
302
+
303
+ const endTimer = metrics.startTimer('llm.request.duration');
304
+
305
+ for (const name of providers) {
306
+ const provider = this._providers.get(name);
307
+ if (!provider?.available) continue;
308
+
309
+ try {
310
+ const result = await provider.complete(prompt, options);
311
+
312
+ endTimer();
313
+ metrics.increment('llm.requests.success', 1, { provider: name });
314
+ this._stats.totalTokens += result.usage?.totalTokens || 0;
315
+
316
+ // Cache result
317
+ if (options.cache !== false) {
318
+ const cacheKey = this._cacheKey(prompt, options);
319
+ this._cache.set(cacheKey, { result, timestamp: Date.now() });
320
+ if (this._cache.size > this._maxCache) {
321
+ const oldest = this._cache.keys().next().value;
322
+ this._cache.delete(oldest);
323
+ }
324
+ }
325
+
326
+ return { ...result, duration: endTimer() };
327
+ } catch (err) {
328
+ metrics.increment('llm.requests.failure', 1, { provider: name });
329
+ this._stats.failures++;
330
+ // Try next provider
331
+ continue;
332
+ }
333
+ }
334
+
335
+ endTimer();
336
+ throw new Error('All LLM providers failed');
337
+ }
338
+
339
+ /**
340
+ * Generate embeddings
341
+ */
342
+ async embed(text, options = {}) {
343
+ const providerName = options.provider || this._defaultProvider;
344
+ const provider = this._providers.get(providerName);
345
+ if (!provider?.available) throw new Error(`Provider ${providerName} not available`);
346
+ if (!provider.embed) throw new Error(`Provider ${providerName} does not support embeddings`);
347
+ return provider.embed(text);
348
+ }
349
+
350
+ /**
351
+ * List available models across all providers
352
+ */
353
+ listModels() {
354
+ const models = [];
355
+ for (const [name, provider] of this._providers) {
356
+ if (!provider.available) continue;
357
+ for (const model of provider.models) {
358
+ models.push({ model, provider: name });
359
+ }
360
+ }
361
+ return models;
362
+ }
363
+
364
+ /**
365
+ * Get provider status
366
+ */
367
+ getStatus() {
368
+ const providers = {};
369
+ for (const [name, provider] of this._providers) {
370
+ providers[name] = {
371
+ available: provider.available,
372
+ models: provider.models,
373
+ };
374
+ }
375
+ return {
376
+ defaultProvider: this._defaultProvider,
377
+ providers,
378
+ stats: { ...this._stats },
379
+ };
380
+ }
381
+
382
+ _cacheKey(prompt, options) {
383
+ const key = `${options.provider || ''}:${options.model || ''}:${prompt.slice(0, 200)}`;
384
+ return require('crypto').createHash('md5').update(key).digest('hex');
385
+ }
386
+ }
387
+
388
+ // ─── Singleton ──────────────────────────────────────────────────────────────
389
+
390
+ const llm = new LLMManager();
391
+
392
+ // Register default providers
393
+ llm.registerProvider(new OpenAIProvider({}));
394
+ llm.registerProvider(new AnthropicProvider({}));
395
+ llm.registerProvider(new OllamaProvider({}));
396
+
397
+ module.exports = {
398
+ LLMProvider,
399
+ OpenAIProvider,
400
+ AnthropicProvider,
401
+ OllamaProvider,
402
+ LLMManager,
403
+ llm,
404
+ };