web-agent-bridge 2.4.0 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,404 @@
1
+ 'use strict';
2
+
3
+ /**
4
+ * WAB LLM Abstraction Layer
5
+ *
6
+ * Model-agnostic LLM interface. Supports:
7
+ * - OpenAI (GPT-4, GPT-3.5)
8
+ * - Anthropic (Claude)
9
+ * - Ollama (local models)
10
+ * - Custom providers
11
+ *
12
+ * Provides a unified API with automatic fallback,
13
+ * cost tracking, and response caching.
14
+ */
15
+
16
+ const { metrics, logger } = require('../observability');
17
+
18
+ // ─── Provider Interface ─────────────────────────────────────────────────────
19
+
20
class LLMProvider {
  /**
   * Abstract base class for all LLM providers.
   *
   * @param {string} name - Provider identifier (e.g. 'openai').
   * @param {object} [config] - Provider-specific configuration.
   */
  constructor(name, config = {}) {
    this.name = name;
    this.config = config;
    this.available = false;
    this.models = [];
  }

  // Subclasses must override these; the base versions always reject.
  async initialize() {
    throw new Error('Not implemented');
  }

  async complete(prompt, options) {
    throw new Error('Not implemented');
  }

  async embed(text) {
    throw new Error('Not implemented');
  }

  // Default: report whatever model list the provider discovered at init time.
  async listModels() {
    return this.models;
  }
}
33
+
34
+ // ─── OpenAI Provider ────────────────────────────────────────────────────────
35
+
36
class OpenAIProvider extends LLMProvider {
  /**
   * OpenAI chat-completion / embedding provider.
   *
   * @param {object} config - { apiKey?, baseUrl? }; apiKey falls back to
   *   the OPENAI_API_KEY environment variable.
   */
  constructor(config) {
    super('openai', config);
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
    this.models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo'];
  }

  // Availability only requires a key; no network round-trip is made here.
  async initialize() {
    this.available = Boolean(this.apiKey);
    return this.available;
  }

  // Shared auth/content headers for all OpenAI endpoints.
  _headers() {
    return {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.apiKey}`,
    };
  }

  /**
   * Run a chat completion.
   *
   * @param {string} prompt - User prompt.
   * @param {object} [options] - { model, systemPrompt, temperature, maxTokens }.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   * @throws {Error} if the provider is uninitialized or the API responds non-2xx.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const model = options.model || 'gpt-4o-mini';
    const messages = options.systemPrompt
      ? [
          { role: 'system', content: options.systemPrompt },
          { role: 'user', content: prompt },
        ]
      : [{ role: 'user', content: prompt }];

    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: this._headers(),
      body: JSON.stringify({
        model,
        messages,
        temperature: options.temperature ?? 0.7,
        max_tokens: options.maxTokens || 2048,
      }),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenAI error ${res.status}: ${err}`);
    }

    const data = await res.json();
    const choice = data.choices[0];

    return {
      text: choice?.message?.content || '',
      model,
      provider: 'openai',
      usage: {
        promptTokens: data.usage?.prompt_tokens || 0,
        completionTokens: data.usage?.completion_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0,
      },
      finishReason: choice?.finish_reason,
    };
  }

  /**
   * Generate an embedding with text-embedding-3-small.
   *
   * @param {string} text - Input text.
   * @returns {Promise<{embedding: number[], model, provider}>}
   */
  async embed(text) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const res = await fetch(`${this.baseUrl}/embeddings`, {
      method: 'POST',
      headers: this._headers(),
      body: JSON.stringify({ model: 'text-embedding-3-small', input: text }),
    });

    if (!res.ok) throw new Error(`OpenAI embed error ${res.status}`);

    const data = await res.json();
    return {
      embedding: data.data[0]?.embedding || [],
      model: 'text-embedding-3-small',
      provider: 'openai',
    };
  }
}
109
+
110
+ // ─── Anthropic Provider ─────────────────────────────────────────────────────
111
+
112
class AnthropicProvider extends LLMProvider {
  /**
   * Anthropic Messages API provider (Claude models).
   *
   * @param {object} config - { apiKey?, baseUrl? }; apiKey falls back to
   *   the ANTHROPIC_API_KEY environment variable.
   */
  constructor(config) {
    super('anthropic', config);
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
    this.models = ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-3-5-sonnet-20241022'];
  }

  // Availability only requires a key; no network round-trip is made here.
  async initialize() {
    this.available = Boolean(this.apiKey);
    return this.available;
  }

  /**
   * Run a completion via the Messages API.
   *
   * @param {string} prompt - User prompt.
   * @param {object} [options] - { model, systemPrompt, temperature, maxTokens }.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   * @throws {Error} if the provider is uninitialized or the API responds non-2xx.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Anthropic provider not initialized');

    const model = options.model || 'claude-3-5-haiku-20241022';

    // Anthropic requires max_tokens and takes the system prompt as a
    // top-level field rather than a message role.
    const payload = {
      model,
      max_tokens: options.maxTokens || 2048,
      messages: [{ role: 'user', content: prompt }],
    };
    if (options.systemPrompt) payload.system = options.systemPrompt;
    if (options.temperature !== undefined) payload.temperature = options.temperature;

    const res = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': this.apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify(payload),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`Anthropic error ${res.status}: ${err}`);
    }

    const data = await res.json();
    const inputTokens = data.usage?.input_tokens || 0;
    const outputTokens = data.usage?.output_tokens || 0;

    return {
      text: data.content?.[0]?.text || '',
      model,
      provider: 'anthropic',
      usage: {
        promptTokens: inputTokens,
        completionTokens: outputTokens,
        totalTokens: inputTokens + outputTokens,
      },
      finishReason: data.stop_reason,
    };
  }
}
166
+
167
+ // ─── Ollama Provider (Local) ────────────────────────────────────────────────
168
+
169
class OllamaProvider extends LLMProvider {
  /**
   * Local Ollama daemon provider.
   *
   * @param {object} config - { baseUrl? }; falls back to OLLAMA_URL, then
   *   the default localhost port.
   */
  constructor(config) {
    super('ollama', config);
    this.baseUrl = config.baseUrl || process.env.OLLAMA_URL || 'http://localhost:11434';
  }

  /**
   * Probe the daemon and discover installed models. An unreachable host
   * (3s timeout) simply marks the provider unavailable rather than throwing.
   */
  async initialize() {
    try {
      const res = await fetch(`${this.baseUrl}/api/tags`, { signal: AbortSignal.timeout(3000) });
      if (res.ok) {
        const data = await res.json();
        this.models = (data.models || []).map((m) => m.name);
        this.available = true;
      }
    } catch (_) {
      this.available = false;
    }
    return this.available;
  }

  /**
   * Run a (non-streaming) generation.
   *
   * @param {string} prompt - User prompt; any systemPrompt is prepended,
   *   since /api/generate takes a single prompt string.
   * @param {object} [options] - { model, systemPrompt, temperature }.
   * @returns {Promise<{text, model, provider, usage, finishReason}>}
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Ollama not available');

    const model = options.model || this.models[0] || 'llama3.2';
    const fullPrompt = options.systemPrompt ? `${options.systemPrompt}\n\n${prompt}` : prompt;

    const payload = { model, prompt: fullPrompt, stream: false, options: {} };
    if (options.temperature !== undefined) payload.options.temperature = options.temperature;

    const res = await fetch(`${this.baseUrl}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    });
    if (!res.ok) throw new Error(`Ollama error ${res.status}`);

    const data = await res.json();
    const promptTokens = data.prompt_eval_count || 0;
    const completionTokens = data.eval_count || 0;

    return {
      text: data.response || '',
      model,
      provider: 'ollama',
      usage: {
        promptTokens,
        completionTokens,
        totalTokens: promptTokens + completionTokens,
      },
      finishReason: data.done ? 'stop' : 'length',
    };
  }

  /**
   * Generate an embedding, preferring an installed model whose name
   * contains 'embed' and falling back to nomic-embed-text.
   *
   * @param {string} text - Input text.
   * @returns {Promise<{embedding: number[], model, provider}>}
   */
  async embed(text) {
    if (!this.available) throw new Error('Ollama not available');

    const model = this.models.find((name) => name.includes('embed')) || 'nomic-embed-text';
    const res = await fetch(`${this.baseUrl}/api/embeddings`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, prompt: text }),
    });
    if (!res.ok) throw new Error(`Ollama embed error ${res.status}`);

    const data = await res.json();
    return { embedding: data.embedding || [], model, provider: 'ollama' };
  }
}
238
+
239
+ // ─── LLM Manager (Unified Interface) ───────────────────────────────────────
240
+
241
class LLMManager {
  /**
   * Unified interface over multiple LLM providers with automatic
   * fallback, a 5-minute response cache, and usage statistics.
   */
  constructor() {
    this._providers = new Map();
    this._defaultProvider = null;
    this._fallbackOrder = [];
    this._cache = new Map();       // cacheKey -> { result, timestamp }
    this._maxCache = 500;
    this._cacheTtlMs = 300_000;    // cache entries valid for 5 minutes
    this._stats = { requests: 0, cacheHits: 0, failures: 0, totalTokens: 0 };
  }

  /**
   * Register a provider. Registration order defines the fallback order;
   * the first registered provider is the provisional default.
   */
  registerProvider(provider) {
    this._providers.set(provider.name, provider);
    if (!this._defaultProvider) this._defaultProvider = provider.name;
    this._fallbackOrder.push(provider.name);
  }

  /**
   * Initialize all registered providers, then set the default to the
   * first available one in registration order.
   *
   * @returns {Promise<Object<string, boolean>>} per-provider availability.
   */
  async initialize() {
    const results = {};
    for (const [name, provider] of this._providers) {
      try {
        results[name] = await provider.initialize();
      } catch (_) {
        // A provider that fails to initialize is simply unavailable.
        results[name] = false;
      }
    }

    for (const name of this._fallbackOrder) {
      if (this._providers.get(name)?.available) {
        this._defaultProvider = name;
        break;
      }
    }

    return results;
  }

  /**
   * Complete a prompt, trying the requested/default provider first and
   * falling back through the remaining providers on failure.
   *
   * @param {string} prompt - User prompt.
   * @param {object} [options] - { provider, model, systemPrompt, temperature,
   *   maxTokens, cache } — cache: false bypasses the response cache.
   * @returns {Promise<object>} provider result plus { duration } (or
   *   { cached: true } on a cache hit).
   * @throws {Error} 'All LLM providers failed' (cause = last provider error).
   */
  async complete(prompt, options = {}) {
    this._stats.requests++;

    const cacheKey = options.cache !== false ? this._cacheKey(prompt, options) : null;
    if (cacheKey) {
      const cached = this._cache.get(cacheKey);
      if (cached) {
        if (Date.now() - cached.timestamp < this._cacheTtlMs) {
          this._stats.cacheHits++;
          return { ...cached.result, cached: true };
        }
        // Expired — evict now instead of letting stale entries accumulate.
        this._cache.delete(cacheKey);
      }
    }

    const providerName = options.provider || this._defaultProvider;
    const providers = [providerName, ...this._fallbackOrder.filter((p) => p !== providerName)];

    const endTimer = metrics.startTimer('llm.request.duration');
    let lastErr = null;

    for (const name of providers) {
      const provider = this._providers.get(name);
      if (!provider?.available) continue;

      try {
        const result = await provider.complete(prompt, options);

        // Stop the timer exactly once (previously endTimer() was called
        // twice on success, double-recording the duration metric).
        const duration = endTimer();
        metrics.increment('llm.requests.success', 1, { provider: name });
        this._stats.totalTokens += result.usage?.totalTokens || 0;

        if (cacheKey) {
          this._cache.set(cacheKey, { result, timestamp: Date.now() });
          if (this._cache.size > this._maxCache) {
            // Map preserves insertion order, so the first key is the oldest.
            const oldest = this._cache.keys().next().value;
            this._cache.delete(oldest);
          }
        }

        return { ...result, duration };
      } catch (err) {
        lastErr = err;
        metrics.increment('llm.requests.failure', 1, { provider: name });
        this._stats.failures++;
        // Surface the failure before trying the next provider instead of
        // discarding it silently.
        logger?.warn?.(`LLM provider ${name} failed: ${err.message}`);
      }
    }

    endTimer();
    throw new Error('All LLM providers failed', { cause: lastErr });
  }

  /**
   * Generate embeddings via a single provider (no fallback).
   *
   * @throws {Error} if the provider is unavailable. NOTE(review): the
   *   `!provider.embed` check never fires for LLMProvider subclasses, since
   *   the base class defines embed() (it throws 'Not implemented') — e.g.
   *   AnthropicProvider reaches provider.embed() and rejects there.
   */
  async embed(text, options = {}) {
    const providerName = options.provider || this._defaultProvider;
    const provider = this._providers.get(providerName);
    if (!provider?.available) throw new Error(`Provider ${providerName} not available`);
    if (!provider.embed) throw new Error(`Provider ${providerName} does not support embeddings`);
    return provider.embed(text);
  }

  /**
   * List available models across all available providers.
   *
   * @returns {Array<{model: string, provider: string}>}
   */
  listModels() {
    const models = [];
    for (const [name, provider] of this._providers) {
      if (!provider.available) continue;
      for (const model of provider.models) {
        models.push({ model, provider: name });
      }
    }
    return models;
  }

  /**
   * Snapshot of provider availability and aggregate stats.
   */
  getStatus() {
    const providers = {};
    for (const [name, provider] of this._providers) {
      providers[name] = {
        available: provider.available,
        models: provider.models,
      };
    }
    return {
      defaultProvider: this._defaultProvider,
      providers,
      stats: { ...this._stats },
    };
  }

  /**
   * Build the cache key from the full prompt plus every option that affects
   * the output. The previous implementation hashed only the first 200 chars
   * of the prompt and ignored systemPrompt/temperature/maxTokens, so distinct
   * requests sharing a prefix could collide and return each other's cached
   * responses. Using the full joined string as the Map key removes both the
   * collision risk and the per-call require('crypto') hashing.
   */
  _cacheKey(prompt, options) {
    return [
      options.provider || '',
      options.model || '',
      options.systemPrompt || '',
      options.temperature ?? '',
      options.maxTokens ?? '',
      prompt,
    ].join('\u0000');
  }
}
387
+
388
+ // ─── Singleton ──────────────────────────────────────────────────────────────
389
+
390
+ const llm = new LLMManager();
391
+
392
+ // Register default providers
393
+ llm.registerProvider(new OpenAIProvider({}));
394
+ llm.registerProvider(new AnthropicProvider({}));
395
+ llm.registerProvider(new OllamaProvider({}));
396
+
397
+ module.exports = {
398
+ LLMProvider,
399
+ OpenAIProvider,
400
+ AnthropicProvider,
401
+ OllamaProvider,
402
+ LLMManager,
403
+ llm,
404
+ };