web-agent-bridge 2.4.0 → 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,404 @@
1
+ 'use strict';
2
+
3
+ /**
4
+ * WAB LLM Abstraction Layer
5
+ *
6
+ * Model-agnostic LLM interface. Supports:
7
+ * - OpenAI (GPT-4, GPT-3.5)
8
+ * - Anthropic (Claude)
9
+ * - Ollama (local models)
10
+ * - Custom providers
11
+ *
12
+ * Provides a unified API with automatic fallback,
13
+ * cost tracking, and response caching.
14
+ */
15
+
16
+ const { metrics, logger } = require('../observability');
17
+
18
+ // ─── Provider Interface ─────────────────────────────────────────────────────
19
+
20
/**
 * Abstract base class for LLM backends.
 *
 * Concrete providers override initialize(), complete() and (optionally)
 * embed(). `available` is flipped to true by initialize() once the backend
 * is configured/reachable; `models` lists the model identifiers the
 * backend can serve.
 */
class LLMProvider {
  constructor(name, config = {}) {
    this.name = name;
    this.config = config;
    this.available = false;
    this.models = [];
  }

  /** Probe/configure the backend; resolves to its availability. */
  async initialize() {
    throw new Error('Not implemented');
  }

  /** Run a completion for `prompt`; see concrete providers for options. */
  async complete(prompt, options) {
    throw new Error('Not implemented');
  }

  /** Produce an embedding vector for `text`. */
  async embed(text) {
    throw new Error('Not implemented');
  }

  /** Model identifiers this provider currently exposes. */
  async listModels() {
    return this.models;
  }
}
33
+
34
+ // ─── OpenAI Provider ────────────────────────────────────────────────────────
35
+
36
/**
 * Provider backed by the OpenAI Chat Completions and Embeddings APIs.
 *
 * Reads the API key from config.apiKey or OPENAI_API_KEY; baseUrl is
 * overridable for proxies/compatible endpoints.
 */
class OpenAIProvider extends LLMProvider {
  constructor(config) {
    super('openai', config);
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
    this.models = ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo'];
  }

  /** Available iff an API key was supplied — no network check is made. */
  async initialize() {
    this.available = !!this.apiKey;
    return this.available;
  }

  /**
   * Complete `prompt` via POST /chat/completions.
   *
   * @param {string} prompt user message content
   * @param {Object} [options] model, systemPrompt, temperature, maxTokens
   * @returns {Promise<Object>} { text, model, provider, usage, finishReason }
   * @throws {Error} when uninitialized or on a non-2xx API response
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const model = options.model || 'gpt-4o-mini';
    const messages = [];
    if (options.systemPrompt) messages.push({ role: 'system', content: options.systemPrompt });
    messages.push({ role: 'user', content: prompt });

    const body = {
      model,
      messages,
      temperature: options.temperature ?? 0.7,
      max_tokens: options.maxTokens || 2048,
    };

    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify(body),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenAI error ${res.status}: ${err}`);
    }

    const data = await res.json();
    return {
      text: data.choices[0]?.message?.content || '',
      model,
      provider: 'openai',
      usage: {
        promptTokens: data.usage?.prompt_tokens || 0,
        completionTokens: data.usage?.completion_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0,
      },
      finishReason: data.choices[0]?.finish_reason,
    };
  }

  /**
   * Embed `text` via POST /embeddings (text-embedding-3-small).
   *
   * @param {string} text input to embed
   * @returns {Promise<Object>} { embedding, model, provider }
   * @throws {Error} when uninitialized or on a non-2xx API response
   */
  async embed(text) {
    if (!this.available) throw new Error('OpenAI provider not initialized');

    const res = await fetch(`${this.baseUrl}/embeddings`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.apiKey}`,
      },
      body: JSON.stringify({ model: 'text-embedding-3-small', input: text }),
    });

    if (!res.ok) {
      // CONSISTENCY FIX: include the response body like complete() does;
      // previously only the status code was reported, hiding the API's
      // error detail.
      const err = await res.text();
      throw new Error(`OpenAI embed error ${res.status}: ${err}`);
    }
    const data = await res.json();
    return { embedding: data.data[0]?.embedding || [], model: 'text-embedding-3-small', provider: 'openai' };
  }
}
109
+
110
+ // ─── Anthropic Provider ─────────────────────────────────────────────────────
111
+
112
/**
 * Provider backed by the Anthropic Messages API.
 *
 * Reads the API key from config.apiKey or ANTHROPIC_API_KEY; baseUrl is
 * overridable for proxies.
 */
class AnthropicProvider extends LLMProvider {
  constructor(config) {
    super('anthropic', config);
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY;
    this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
    this.models = ['claude-sonnet-4-20250514', 'claude-3-5-haiku-20241022', 'claude-3-5-sonnet-20241022'];
  }

  /** Available iff an API key was supplied — no network check is made. */
  async initialize() {
    this.available = !!this.apiKey;
    return this.available;
  }

  /**
   * Complete `prompt` via POST /messages.
   *
   * @param {string} prompt user message content
   * @param {Object} [options] model, maxTokens, systemPrompt, temperature
   * @returns {Promise<Object>} { text, model, provider, usage, finishReason }
   * @throws {Error} when uninitialized or on a non-2xx API response
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Anthropic provider not initialized');

    const model = options.model || 'claude-3-5-haiku-20241022';
    const body = {
      model,
      max_tokens: options.maxTokens || 2048, // max_tokens is required by the Messages API
      messages: [{ role: 'user', content: prompt }],
    };
    if (options.systemPrompt) body.system = options.systemPrompt;
    if (options.temperature !== undefined) body.temperature = options.temperature;

    const res = await fetch(`${this.baseUrl}/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': this.apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify(body),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`Anthropic error ${res.status}: ${err}`);
    }

    const data = await res.json();
    // ROBUSTNESS FIX: the API returns `content` as an ARRAY of blocks.
    // Concatenate every text block instead of reading only content[0],
    // which could be a non-text block or drop trailing text blocks.
    const text = (data.content || [])
      .filter((block) => block.type === 'text')
      .map((block) => block.text)
      .join('');

    return {
      text,
      model,
      provider: 'anthropic',
      usage: {
        promptTokens: data.usage?.input_tokens || 0,
        completionTokens: data.usage?.output_tokens || 0,
        totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0),
      },
      finishReason: data.stop_reason,
    };
  }
}
166
+
167
+ // ─── Ollama Provider (Local) ────────────────────────────────────────────────
168
+
169
/**
 * Provider backed by a local Ollama daemon.
 *
 * Base URL comes from config.baseUrl, OLLAMA_URL, or the default local
 * daemon address. initialize() discovers the locally installed models.
 */
class OllamaProvider extends LLMProvider {
  constructor(config) {
    super('ollama', config);
    this.baseUrl = config.baseUrl || process.env.OLLAMA_URL || 'http://localhost:11434';
  }

  /**
   * Ping GET /api/tags with a 3s budget and record the installed models.
   * An unreachable daemon leaves the provider unavailable instead of
   * throwing — local inference is strictly optional.
   */
  async initialize() {
    try {
      const tagsRes = await fetch(`${this.baseUrl}/api/tags`, { signal: AbortSignal.timeout(3000) });
      if (tagsRes.ok) {
        const payload = await tagsRes.json();
        this.models = (payload.models || []).map((entry) => entry.name);
        this.available = true;
      }
    } catch (_) {
      this.available = false;
    }
    return this.available;
  }

  /**
   * Complete `prompt` via POST /api/generate (non-streaming).
   * A systemPrompt, when given, is prepended to the prompt text since the
   * generate endpoint takes a single prompt string.
   */
  async complete(prompt, options = {}) {
    if (!this.available) throw new Error('Ollama not available');

    const model = options.model || this.models[0] || 'llama3.2';
    const fullPrompt = options.systemPrompt
      ? `${options.systemPrompt}\n\n${prompt}`
      : prompt;

    const request = { model, prompt: fullPrompt, stream: false, options: {} };
    if (options.temperature !== undefined) {
      request.options.temperature = options.temperature;
    }

    const response = await fetch(`${this.baseUrl}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(request),
    });
    if (!response.ok) throw new Error(`Ollama error ${response.status}`);

    const payload = await response.json();
    const promptTokens = payload.prompt_eval_count || 0;
    const completionTokens = payload.eval_count || 0;

    return {
      text: payload.response || '',
      model,
      provider: 'ollama',
      usage: {
        promptTokens,
        completionTokens,
        totalTokens: promptTokens + completionTokens,
      },
      finishReason: payload.done ? 'stop' : 'length',
    };
  }

  /**
   * Embed `text` via POST /api/embeddings, preferring any locally
   * installed model whose name contains "embed".
   */
  async embed(text) {
    if (!this.available) throw new Error('Ollama not available');

    const model = this.models.find((name) => name.includes('embed')) || 'nomic-embed-text';
    const response = await fetch(`${this.baseUrl}/api/embeddings`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ model, prompt: text }),
    });
    if (!response.ok) throw new Error(`Ollama embed error ${response.status}`);

    const payload = await response.json();
    return { embedding: payload.embedding || [], model, provider: 'ollama' };
  }
}
238
+
239
+ // ─── LLM Manager (Unified Interface) ───────────────────────────────────────
240
+
241
/**
 * Unified, model-agnostic entry point over all registered providers.
 *
 * Responsibilities:
 *  - routing: requests go to options.provider or the default provider;
 *  - fallback: on failure, remaining providers are tried in registration order;
 *  - caching: completion results are cached for 5 minutes (FIFO eviction);
 *  - accounting: request/failure/cache-hit/token counters via getStatus().
 */
class LLMManager {
  constructor() {
    this._providers = new Map();   // name -> provider instance
    this._defaultProvider = null;  // name of the preferred provider
    this._fallbackOrder = [];      // registration order doubles as fallback order
    this._cache = new Map();       // cacheKey -> { result, timestamp }
    this._maxCache = 500;          // max cached completions before eviction
    this._stats = { requests: 0, cacheHits: 0, failures: 0, totalTokens: 0 };
  }

  /**
   * Register a provider. The first registration becomes the default until
   * initialize() promotes the first *available* provider.
   */
  registerProvider(provider) {
    this._providers.set(provider.name, provider);
    if (!this._defaultProvider) this._defaultProvider = provider.name;
    this._fallbackOrder.push(provider.name);
  }

  /**
   * Initialize every registered provider. A provider that throws is simply
   * marked unavailable; initialization of the others continues.
   * @returns {Promise<Object>} provider name -> availability boolean
   */
  async initialize() {
    const results = {};
    for (const [name, provider] of this._providers) {
      try {
        results[name] = await provider.initialize();
      } catch (_) {
        results[name] = false;
      }
    }

    // Promote the first available provider to default.
    for (const name of this._fallbackOrder) {
      if (this._providers.get(name)?.available) {
        this._defaultProvider = name;
        break;
      }
    }

    return results;
  }

  /**
   * Complete a prompt, falling back across providers on failure.
   *
   * @param {string} prompt user prompt
   * @param {Object} [options] provider, model, systemPrompt, temperature,
   *   maxTokens, cache (set to false to bypass the response cache)
   * @returns {Promise<Object>} provider result plus `duration`; results
   *   served from cache carry `cached: true` instead
   * @throws {Error} when every available provider fails; the last provider
   *   error is attached as `cause`
   */
  async complete(prompt, options = {}) {
    this._stats.requests++;

    // Serve from cache when allowed (5 minute TTL).
    if (options.cache !== false) {
      const cacheKey = this._cacheKey(prompt, options);
      const cached = this._cache.get(cacheKey);
      if (cached && (Date.now() - cached.timestamp < 300_000)) {
        this._stats.cacheHits++;
        return { ...cached.result, cached: true };
      }
    }

    const providerName = options.provider || this._defaultProvider;
    const providers = [providerName, ...this._fallbackOrder.filter(p => p !== providerName)];

    const endTimer = metrics.startTimer('llm.request.duration');
    let lastError = null;

    for (const name of providers) {
      const provider = this._providers.get(name);
      if (!provider?.available) continue;

      try {
        const result = await provider.complete(prompt, options);

        // BUGFIX: stop the timer exactly once. The previous code called
        // endTimer() here and again for the `duration` field, recording
        // the request-duration metric twice per successful request.
        const duration = endTimer();
        metrics.increment('llm.requests.success', 1, { provider: name });
        this._stats.totalTokens += result.usage?.totalTokens || 0;

        // Cache the result; evict the oldest entry once over capacity.
        if (options.cache !== false) {
          const cacheKey = this._cacheKey(prompt, options);
          this._cache.set(cacheKey, { result, timestamp: Date.now() });
          if (this._cache.size > this._maxCache) {
            const oldest = this._cache.keys().next().value;
            this._cache.delete(oldest);
          }
        }

        return { ...result, duration };
      } catch (err) {
        metrics.increment('llm.requests.failure', 1, { provider: name });
        this._stats.failures++;
        lastError = err;
        // Surface the failure before trying the next provider; previously
        // errors were swallowed silently even though `logger` is imported.
        logger.warn?.(`LLM provider ${name} failed: ${err.message}`);
      }
    }

    endTimer();
    throw new Error('All LLM providers failed', { cause: lastError });
  }

  /**
   * Generate embeddings via a single provider (no fallback).
   * @throws {Error} when the provider is missing/unavailable or lacks embed()
   */
  async embed(text, options = {}) {
    const providerName = options.provider || this._defaultProvider;
    const provider = this._providers.get(providerName);
    if (!provider?.available) throw new Error(`Provider ${providerName} not available`);
    // NOTE: LLMProvider subclasses always inherit an embed() (which throws
    // 'Not implemented'), so this guard only covers duck-typed providers.
    if (!provider.embed) throw new Error(`Provider ${providerName} does not support embeddings`);
    return provider.embed(text);
  }

  /**
   * List available models across all providers.
   * @returns {Array<{model: string, provider: string}>}
   */
  listModels() {
    const models = [];
    for (const [name, provider] of this._providers) {
      if (!provider.available) continue;
      for (const model of provider.models) {
        models.push({ model, provider: name });
      }
    }
    return models;
  }

  /** Snapshot of per-provider availability/models and aggregate stats. */
  getStatus() {
    const providers = {};
    for (const [name, provider] of this._providers) {
      providers[name] = {
        available: provider.available,
        models: provider.models,
      };
    }
    return {
      defaultProvider: this._defaultProvider,
      providers,
      stats: { ...this._stats },
    };
  }

  /**
   * Build a cache key from the full prompt plus every option that can
   * change the response.
   *
   * BUGFIX: the previous key was an md5 of `provider:model:` plus only the
   * FIRST 200 CHARACTERS of the prompt, so long prompts sharing a prefix —
   * or identical prompts with different systemPrompt/temperature/maxTokens —
   * collided and returned each other's cached results. Using the full
   * plain-text key also drops the inline require('crypto').
   */
  _cacheKey(prompt, options) {
    return [
      options.provider || '',
      options.model || '',
      options.systemPrompt || '',
      String(options.temperature ?? ''),
      String(options.maxTokens ?? ''),
      prompt,
    ].join('\u0000');
  }
}
387
+
388
+ // ─── Singleton ──────────────────────────────────────────────────────────────
389
+
390
+ const llm = new LLMManager();
391
+
392
+ // Register default providers
393
+ llm.registerProvider(new OpenAIProvider({}));
394
+ llm.registerProvider(new AnthropicProvider({}));
395
+ llm.registerProvider(new OllamaProvider({}));
396
+
397
+ module.exports = {
398
+ LLMProvider,
399
+ OpenAIProvider,
400
+ AnthropicProvider,
401
+ OllamaProvider,
402
+ LLMManager,
403
+ llm,
404
+ };
@@ -0,0 +1,158 @@
1
-- Agent OS persistence layer
-- Stores agents, tasks, deployments, registry data, and audit logs
--
-- SQLite dialect (AUTOINCREMENT, TEXT/INTEGER affinity). Columns with
-- '[]' / '{}' defaults hold JSON serialized as TEXT. Timestamp columns are
-- INTEGER — presumably epoch milliseconds; confirm against the writer code.
-- NOTE(review): the ON DELETE CASCADE clauses below only take effect when
-- the connection enables foreign keys (PRAGMA foreign_keys = ON) — confirm
-- the application sets this pragma.

-- Agent identities
CREATE TABLE IF NOT EXISTS os_agents (
  id TEXT PRIMARY KEY,
  name TEXT NOT NULL,
  type TEXT NOT NULL DEFAULT 'autonomous',
  status TEXT NOT NULL DEFAULT 'active',
  capabilities TEXT DEFAULT '[]',        -- JSON array
  api_key_hash TEXT,
  public_key TEXT,
  metadata TEXT DEFAULT '{}',            -- JSON object
  ip_allowlist TEXT DEFAULT '[]',        -- JSON array
  command_count INTEGER DEFAULT 0,
  created_at INTEGER NOT NULL,
  last_seen INTEGER
);

-- Agent sessions (token is the lookup key; rows vanish with their agent)
CREATE TABLE IF NOT EXISTS os_sessions (
  token TEXT PRIMARY KEY,
  agent_id TEXT NOT NULL,
  ip TEXT,
  expires_at INTEGER NOT NULL,
  created_at INTEGER NOT NULL,
  FOREIGN KEY (agent_id) REFERENCES os_agents(id) ON DELETE CASCADE
);

-- Tasks (queued work items; state/agent_id are the hot query columns)
CREATE TABLE IF NOT EXISTS os_tasks (
  id TEXT PRIMARY KEY,
  type TEXT NOT NULL,
  state TEXT NOT NULL DEFAULT 'queued',
  priority INTEGER DEFAULT 5,
  agent_id TEXT,
  params TEXT DEFAULT '{}',              -- JSON object
  result TEXT,
  error TEXT,
  retry_count INTEGER DEFAULT 0,
  max_retries INTEGER DEFAULT 3,
  depends_on TEXT DEFAULT '[]',          -- JSON array of task ids
  created_at INTEGER NOT NULL,
  started_at INTEGER,
  completed_at INTEGER,
  timeout INTEGER DEFAULT 30000          -- presumably milliseconds; confirm
);

CREATE INDEX IF NOT EXISTS idx_os_tasks_state ON os_tasks(state);
CREATE INDEX IF NOT EXISTS idx_os_tasks_agent ON os_tasks(agent_id);

-- Deployments
CREATE TABLE IF NOT EXISTS os_deployments (
  id TEXT PRIMARY KEY,
  agent_id TEXT NOT NULL,
  status TEXT NOT NULL DEFAULT 'active',
  config TEXT DEFAULT '{}',              -- JSON object
  sites TEXT DEFAULT '[]',               -- JSON array
  health_status TEXT DEFAULT 'unknown',
  last_health_check INTEGER,
  created_at INTEGER NOT NULL,
  FOREIGN KEY (agent_id) REFERENCES os_agents(id) ON DELETE CASCADE
);

-- Registry: commands
CREATE TABLE IF NOT EXISTS os_registry_commands (
  id TEXT PRIMARY KEY,
  site_id TEXT NOT NULL,
  name TEXT NOT NULL,
  description TEXT DEFAULT '',
  category TEXT DEFAULT 'general',
  version TEXT DEFAULT '1.0.0',
  input_schema TEXT DEFAULT '{}',        -- JSON schema object
  output_schema TEXT DEFAULT '{}',       -- JSON schema object
  capabilities TEXT DEFAULT '[]',        -- JSON array
  tags TEXT DEFAULT '[]',                -- JSON array
  usage_count INTEGER DEFAULT 0,
  last_used INTEGER,
  created_at INTEGER NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_os_reg_cmd_site ON os_registry_commands(site_id);
CREATE INDEX IF NOT EXISTS idx_os_reg_cmd_cat ON os_registry_commands(category);

-- Registry: sites (keyed by domain)
CREATE TABLE IF NOT EXISTS os_registry_sites (
  domain TEXT PRIMARY KEY,
  name TEXT,
  description TEXT DEFAULT '',
  tier TEXT DEFAULT 'free',
  protocol_version TEXT DEFAULT '1.0.0',
  capabilities TEXT DEFAULT '[]',        -- JSON array
  endpoints TEXT DEFAULT '{}',           -- JSON object
  verified INTEGER DEFAULT 0,            -- boolean as 0/1
  agent_visits INTEGER DEFAULT 0,
  last_seen INTEGER,
  created_at INTEGER NOT NULL
);

-- Registry: templates
CREATE TABLE IF NOT EXISTS os_registry_templates (
  id TEXT PRIMARY KEY,
  name TEXT NOT NULL,
  description TEXT DEFAULT '',
  category TEXT DEFAULT 'general',
  author TEXT DEFAULT 'system',
  version TEXT DEFAULT '1.0.0',
  steps TEXT DEFAULT '[]',               -- JSON array
  variables TEXT DEFAULT '{}',           -- JSON object
  required_capabilities TEXT DEFAULT '[]', -- JSON array
  tags TEXT DEFAULT '[]',                -- JSON array
  downloads INTEGER DEFAULT 0,
  created_at INTEGER NOT NULL
);

-- Audit log (immutable append-only)
-- No foreign key on agent_id: audit rows survive agent deletion by design,
-- presumably — confirm this is intentional.
CREATE TABLE IF NOT EXISTS os_audit_log (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  timestamp INTEGER NOT NULL,
  agent_id TEXT,
  action TEXT NOT NULL,
  resource TEXT,
  resource_id TEXT,
  details TEXT DEFAULT '{}',             -- JSON object
  ip TEXT,
  outcome TEXT DEFAULT 'success'
);

CREATE INDEX IF NOT EXISTS idx_os_audit_ts ON os_audit_log(timestamp);
CREATE INDEX IF NOT EXISTS idx_os_audit_agent ON os_audit_log(agent_id);

-- Capability grants ('*' site_id = grant applies to all sites)
CREATE TABLE IF NOT EXISTS os_capability_grants (
  id TEXT PRIMARY KEY,
  agent_id TEXT NOT NULL,
  capability TEXT NOT NULL,
  site_id TEXT DEFAULT '*',
  max_calls INTEGER,                     -- NULL = unlimited, presumably; confirm
  used_calls INTEGER DEFAULT 0,
  rate_limit TEXT,
  expires_at INTEGER,
  status TEXT DEFAULT 'active',
  created_at INTEGER NOT NULL,
  FOREIGN KEY (agent_id) REFERENCES os_agents(id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_os_cap_agent ON os_capability_grants(agent_id);

-- Policies
CREATE TABLE IF NOT EXISTS os_policies (
  id TEXT PRIMARY KEY,
  name TEXT NOT NULL,
  description TEXT DEFAULT '',
  priority INTEGER DEFAULT 0,
  rules TEXT DEFAULT '[]',               -- JSON array
  entity_bindings TEXT DEFAULT '[]',     -- JSON array
  created_at INTEGER NOT NULL
);