web-agent-bridge 2.2.0 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. package/README.ar.md +7 -0
  2. package/README.md +7 -0
  3. package/package.json +12 -4
  4. package/public/commander-dashboard.html +243 -0
  5. package/public/css/premium.css +317 -317
  6. package/public/demo.html +259 -259
  7. package/public/index.html +644 -592
  8. package/public/llms.txt +1 -0
  9. package/public/mesh-dashboard.html +328 -0
  10. package/public/premium-dashboard.html +2487 -2487
  11. package/public/premium.html +791 -791
  12. package/public/script/wab.min.js +181 -6
  13. package/script/ai-agent-bridge.js +196 -0
  14. package/sdk/agent-mesh.js +449 -0
  15. package/sdk/commander.js +262 -0
  16. package/sdk/index.js +260 -259
  17. package/sdk/package.json +1 -1
  18. package/server/index.js +13 -1
  19. package/server/migrations/002_premium_features.sql +418 -418
  20. package/server/models/db.js +24 -5
  21. package/server/routes/admin-premium.js +671 -671
  22. package/server/routes/commander.js +316 -0
  23. package/server/routes/mesh.js +469 -0
  24. package/server/routes/premium-v2.js +686 -686
  25. package/server/routes/premium.js +724 -724
  26. package/server/services/agent-learning.js +575 -0
  27. package/server/services/agent-memory.js +625 -625
  28. package/server/services/agent-mesh.js +539 -0
  29. package/server/services/agent-symphony.js +711 -0
  30. package/server/services/commander.js +738 -0
  31. package/server/services/edge-compute.js +440 -0
  32. package/server/services/local-ai.js +389 -0
  33. package/server/services/plugins.js +747 -747
  34. package/server/services/self-healing.js +843 -843
  35. package/server/services/swarm.js +788 -788
  36. package/server/services/vision.js +871 -871
  37. package/public/admin/dashboard.html +0 -848
  38. package/public/admin/login.html +0 -84
  39. package/public/video/tutorial.mp4 +0 -0
@@ -0,0 +1,575 @@
1
+ /**
2
+ * Agent Learning Engine — Local Reinforcement Learning
3
+ *
4
+ * Agents learn from user decisions, building behavioral models locally
5
+ * without sending data to external LLMs. The engine tracks:
6
+ * - Decision patterns (what the user chooses and when)
7
+ * - Reward signals (accepted/rejected/modified outcomes)
8
+ * - Policy weights (which factors matter most to this user)
9
+ * - Prediction accuracy over time
10
+ *
11
+ * Learning algorithms:
12
+ * - Multi-armed bandit (UCB1) for exploration/exploitation
13
+ * - Linear policy model with sigmoid activation and gradient descent
14
+ * - Temporal discount for preference freshness (recent > old)
15
+ * - Sequential pattern mining for behavior chains
16
+ * - Confidence estimation: volume × accuracy × recency
17
+ */
18
+
19
+ const crypto = require('crypto');
20
+ const { db } = require('../models/db');
21
+
22
+ // ─── Schema ──────────────────────────────────────────────────────────
23
+
24
+ db.exec(`
25
+ CREATE TABLE IF NOT EXISTS learning_decisions (
26
+ id TEXT PRIMARY KEY,
27
+ site_id TEXT NOT NULL,
28
+ agent_id TEXT NOT NULL,
29
+ domain TEXT NOT NULL,
30
+ action TEXT NOT NULL,
31
+ context TEXT DEFAULT '{}',
32
+ outcome TEXT DEFAULT 'pending',
33
+ reward REAL DEFAULT 0.0,
34
+ predicted_reward REAL,
35
+ features TEXT DEFAULT '{}',
36
+ created_at TEXT DEFAULT (datetime('now'))
37
+ );
38
+
39
+ CREATE TABLE IF NOT EXISTS learning_policies (
40
+ id TEXT PRIMARY KEY,
41
+ site_id TEXT NOT NULL,
42
+ agent_id TEXT NOT NULL,
43
+ domain TEXT NOT NULL,
44
+ feature TEXT NOT NULL,
45
+ weight REAL DEFAULT 0.0,
46
+ update_count INTEGER DEFAULT 0,
47
+ last_error REAL DEFAULT 0.0,
48
+ created_at TEXT DEFAULT (datetime('now')),
49
+ updated_at TEXT DEFAULT (datetime('now')),
50
+ UNIQUE(site_id, agent_id, domain, feature)
51
+ );
52
+
53
+ CREATE TABLE IF NOT EXISTS learning_patterns (
54
+ id TEXT PRIMARY KEY,
55
+ site_id TEXT NOT NULL,
56
+ agent_id TEXT NOT NULL,
57
+ pattern_type TEXT NOT NULL,
58
+ sequence TEXT NOT NULL,
59
+ frequency INTEGER DEFAULT 1,
60
+ confidence REAL DEFAULT 0.5,
61
+ last_seen TEXT DEFAULT (datetime('now')),
62
+ created_at TEXT DEFAULT (datetime('now'))
63
+ );
64
+
65
+ CREATE TABLE IF NOT EXISTS learning_bandit_arms (
66
+ id TEXT PRIMARY KEY,
67
+ site_id TEXT NOT NULL,
68
+ agent_id TEXT NOT NULL,
69
+ domain TEXT NOT NULL,
70
+ action TEXT NOT NULL,
71
+ pulls INTEGER DEFAULT 0,
72
+ total_reward REAL DEFAULT 0.0,
73
+ avg_reward REAL DEFAULT 0.0,
74
+ ucb_score REAL DEFAULT 0.0,
75
+ created_at TEXT DEFAULT (datetime('now')),
76
+ updated_at TEXT DEFAULT (datetime('now')),
77
+ UNIQUE(site_id, agent_id, domain, action)
78
+ );
79
+
80
+ CREATE TABLE IF NOT EXISTS learning_sessions (
81
+ id TEXT PRIMARY KEY,
82
+ site_id TEXT NOT NULL,
83
+ agent_id TEXT NOT NULL,
84
+ decisions_made INTEGER DEFAULT 0,
85
+ correct_predictions INTEGER DEFAULT 0,
86
+ accuracy REAL DEFAULT 0.0,
87
+ started_at TEXT DEFAULT (datetime('now')),
88
+ ended_at TEXT
89
+ );
90
+
91
+ CREATE INDEX IF NOT EXISTS idx_learn_dec_site ON learning_decisions(site_id, agent_id);
92
+ CREATE INDEX IF NOT EXISTS idx_learn_dec_domain ON learning_decisions(domain);
93
+ CREATE INDEX IF NOT EXISTS idx_learn_dec_outcome ON learning_decisions(outcome);
94
+ CREATE INDEX IF NOT EXISTS idx_learn_pol_lookup ON learning_policies(site_id, agent_id, domain);
95
+ CREATE INDEX IF NOT EXISTS idx_learn_pat_seq ON learning_patterns(site_id, agent_id, pattern_type);
96
+ CREATE INDEX IF NOT EXISTS idx_learn_bandit ON learning_bandit_arms(site_id, agent_id, domain);
97
+ `);
98
+
99
// ─── Config ──────────────────────────────────────────────────────────

// Learning hyper-parameters. Module-level constants shared by the policy,
// bandit, and confidence code below.
const LEARNING_RATE = 0.1;     // gradient-descent step size for policy weights
const DISCOUNT_FACTOR = 0.95;  // temporal discount per decision step
const DECAY_RATE = 0.01;       // confidence recency decay per hour
const UCB_EXPLORATION = 1.414; // ≈ √2, the standard UCB1 exploration constant
const MIN_CONFIDENCE = 0.01;   // floor so confidence never collapses to zero
const MAX_SEQUENCE_LENGTH = 5; // longest mined action chain
107
+
108
// ─── Prepared Statements ─────────────────────────────────────────────

// All SQL is prepared once at module load and reused. Grouped by table:
// decisions, policies, patterns, bandit arms, sessions, then aggregate
// stats and the per-domain reset deletes.
const stmts = {
  // Decisions: raw learning signal.
  insertDecision: db.prepare('INSERT INTO learning_decisions (id, site_id, agent_id, domain, action, context, predicted_reward, features) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'),
  updateOutcome: db.prepare('UPDATE learning_decisions SET outcome = ?, reward = ? WHERE id = ?'),
  getDecision: db.prepare('SELECT * FROM learning_decisions WHERE id = ?'),
  // Newest-first; callers that want chronological order must reverse.
  getRecentDecisions: db.prepare('SELECT * FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND domain = ? ORDER BY created_at DESC LIMIT ?'),
  getDecisionsByOutcome: db.prepare("SELECT * FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND outcome = ? ORDER BY created_at DESC LIMIT ?"),
  getAllDomainDecisions: db.prepare('SELECT * FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND domain = ? ORDER BY created_at DESC'),
  countDecisions: db.prepare('SELECT COUNT(*) as count FROM learning_decisions WHERE site_id = ? AND agent_id = ?'),
  getRecentRewards: db.prepare("SELECT reward, created_at FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND outcome != 'pending' ORDER BY created_at DESC LIMIT ?"),

  // Policies: one weight per feature. Upsert binds the new weight twice
  // (once for INSERT, once for the ON CONFLICT UPDATE).
  upsertPolicy: db.prepare("INSERT INTO learning_policies (id, site_id, agent_id, domain, feature, weight) VALUES (?, ?, ?, ?, ?, ?) ON CONFLICT(site_id, agent_id, domain, feature) DO UPDATE SET weight = ?, update_count = update_count + 1, last_error = ?, updated_at = datetime('now')"),
  // Ordered by |weight| so the most influential features come first.
  getPolicies: db.prepare('SELECT * FROM learning_policies WHERE site_id = ? AND agent_id = ? AND domain = ? ORDER BY ABS(weight) DESC'),
  getPolicy: db.prepare('SELECT * FROM learning_policies WHERE site_id = ? AND agent_id = ? AND domain = ? AND feature = ?'),

  // Patterns: mined action sequences.
  insertPattern: db.prepare('INSERT INTO learning_patterns (id, site_id, agent_id, pattern_type, sequence, confidence) VALUES (?, ?, ?, ?, ?, ?)'),
  findPattern: db.prepare('SELECT * FROM learning_patterns WHERE site_id = ? AND agent_id = ? AND sequence = ?'),
  updatePattern: db.prepare("UPDATE learning_patterns SET frequency = frequency + 1, confidence = ?, last_seen = datetime('now') WHERE id = ?"),
  getTopPatterns: db.prepare('SELECT * FROM learning_patterns WHERE site_id = ? AND agent_id = ? AND pattern_type = ? ORDER BY frequency DESC, confidence DESC LIMIT ?'),

  // Bandit arms: UCB1 state. upsertArm is a no-op if the arm already exists.
  upsertArm: db.prepare('INSERT INTO learning_bandit_arms (id, site_id, agent_id, domain, action) VALUES (?, ?, ?, ?, ?) ON CONFLICT(site_id, agent_id, domain, action) DO NOTHING'),
  getArms: db.prepare('SELECT * FROM learning_bandit_arms WHERE site_id = ? AND agent_id = ? AND domain = ? ORDER BY ucb_score DESC'),
  getArm: db.prepare('SELECT * FROM learning_bandit_arms WHERE site_id = ? AND agent_id = ? AND domain = ? AND action = ?'),
  updateArm: db.prepare("UPDATE learning_bandit_arms SET pulls = pulls + 1, total_reward = total_reward + ?, avg_reward = ?, ucb_score = ?, updated_at = datetime('now') WHERE site_id = ? AND agent_id = ? AND domain = ? AND action = ?"),

  // Sessions: prediction-accuracy tracking.
  insertSession: db.prepare('INSERT INTO learning_sessions (id, site_id, agent_id) VALUES (?, ?, ?)'),
  updateSession: db.prepare("UPDATE learning_sessions SET decisions_made = ?, correct_predictions = ?, accuracy = ?, ended_at = datetime('now') WHERE id = ?"),
  getSessionHistory: db.prepare('SELECT * FROM learning_sessions WHERE site_id = ? AND agent_id = ? ORDER BY started_at DESC LIMIT ?'),

  // Aggregate stats: six scalar subqueries, each binding (site_id, agent_id),
  // so callers must pass the pair six times (12 parameters total).
  getStats: db.prepare(`SELECT
    (SELECT COUNT(*) FROM learning_decisions WHERE site_id = ? AND agent_id = ?) as total_decisions,
    (SELECT COUNT(*) FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND outcome = 'accepted') as accepted,
    (SELECT COUNT(*) FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND outcome = 'rejected') as rejected,
    (SELECT AVG(reward) FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND outcome != 'pending') as avg_reward,
    (SELECT COUNT(DISTINCT domain) FROM learning_policies WHERE site_id = ? AND agent_id = ?) as policy_domains,
    (SELECT COUNT(*) FROM learning_patterns WHERE site_id = ? AND agent_id = ?) as total_patterns`),

  // Resets: patterns are agent-wide; the rest are scoped to a domain.
  deletePolicies: db.prepare('DELETE FROM learning_policies WHERE site_id = ? AND agent_id = ? AND domain = ?'),
  deletePatterns: db.prepare('DELETE FROM learning_patterns WHERE site_id = ? AND agent_id = ?'),
  deleteArms: db.prepare('DELETE FROM learning_bandit_arms WHERE site_id = ? AND agent_id = ? AND domain = ?'),
  deleteDecisions: db.prepare('DELETE FROM learning_decisions WHERE site_id = ? AND agent_id = ? AND domain = ?'),
};
151
+
152
// ─── Core Learning API ───────────────────────────────────────────────

/**
 * Record a decision the agent is about to make, together with the model's
 * predicted reward for it.
 *
 * @param {string} siteId
 * @param {string} agentId
 * @param {string} domain - decision domain (e.g. a task category)
 * @param {string} action - the action being taken
 * @param {object} [context] - raw context; mined into features
 * @param {object} [features] - explicit features, override extracted ones
 * @returns {{decisionId: string, predictedReward: number, confidence: number}}
 */
function recordDecision(siteId, agentId, domain, action, context = {}, features = {}) {
  const decisionId = crypto.randomUUID();
  // Explicit features win over anything derived from context.
  const featureVector = Object.assign(_extractFeatures(context), features);
  const predictedReward = _predict(siteId, agentId, domain, featureVector);

  stmts.insertDecision.run(
    decisionId, siteId, agentId, domain, action,
    JSON.stringify(context), predictedReward, JSON.stringify(featureVector)
  );

  // Guarantee a bandit arm exists for this (domain, action) pair.
  stmts.upsertArm.run(crypto.randomUUID(), siteId, agentId, domain, action);

  return {
    decisionId,
    predictedReward,
    confidence: _getConfidence(siteId, agentId, domain),
  };
}
170
+
171
/**
 * Provide feedback on a decision — the outcome and actual reward.
 * This is the core learning signal: it updates the policy weights
 * (gradient descent), the bandit arm for the chosen action, and the
 * mined behavior patterns.
 *
 * Fix: `accuracy` is now clamped to [0, 1]. The original returned a
 * negative "accuracy" whenever |prediction error| exceeded 1, which can
 * happen because rewards are not constrained to [0, 1].
 *
 * @param {string} decisionId - id returned by recordDecision
 * @param {string} outcome - e.g. 'accepted' | 'rejected' | 'modified'
 * @param {number} reward - actual observed reward
 * @returns {{decisionId: string, predictionError: number,
 *            updatedConfidence: number, accuracy: number}}
 * @throws {Error} if the decision id is unknown
 */
function feedback(decisionId, outcome, reward) {
  const decision = stmts.getDecision.get(decisionId);
  if (!decision) throw new Error('Decision not found');

  stmts.updateOutcome.run(outcome, reward, decisionId);

  const features = JSON.parse(decision.features || '{}');
  // predicted_reward may be NULL for legacy rows; treat as 0.
  const predError = reward - (decision.predicted_reward || 0);

  // Update policy weights via gradient descent with temporal discount.
  _updatePolicies(decision.site_id, decision.agent_id, decision.domain, features, predError);

  // Fold the actual reward into the bandit arm's UCB1 state.
  _updateBanditArm(decision.site_id, decision.agent_id, decision.domain, decision.action, reward);

  // Refresh mined action-sequence patterns from recent decisions.
  _minePatterns(decision.site_id, decision.agent_id, decision.domain);

  return {
    decisionId,
    predictionError: Math.round(predError * 1000) / 1000,
    updatedConfidence: _getConfidence(decision.site_id, decision.agent_id, decision.domain),
    // Clamped so accuracy is always a sane [0, 1] value.
    accuracy: Math.round(Math.max(0, 1 - Math.abs(predError)) * 1000) / 1000,
  };
}
200
+
201
/**
 * Batch feedback — apply multiple outcomes inside a single transaction.
 * A failure on one entry is captured in its result slot rather than
 * aborting the whole batch.
 *
 * @param {Array<{decisionId: string, outcome: string, reward: number}>} feedbackList
 * @returns {Array<object>} one result (or {decisionId, error}) per entry
 */
function batchFeedback(feedbackList) {
  const results = [];
  const applyAll = db.transaction(() => {
    feedbackList.forEach((entry) => {
      try {
        results.push(feedback(entry.decisionId, entry.outcome, entry.reward));
      } catch (err) {
        // Per-entry error is reported, not rethrown, so siblings still apply.
        results.push({ decisionId: entry.decisionId, error: err.message });
      }
    });
  });
  applyAll();
  return results;
}
218
+
219
/**
 * Get the best action for a domain using learned policies + bandit scores.
 * UCB scores are normalized to [0,1] before blending with the policy
 * prediction; the blend leans toward the policy as confidence grows.
 *
 * Fix: `_getConfidence` runs a 50-row query and only reads stored decision
 * history, which does not change while scoring — the original re-ran it
 * inside the per-action map. It is now computed once and reused for the
 * blend weights and the returned confidence.
 *
 * @param {string} siteId
 * @param {string} agentId
 * @param {string} domain
 * @param {string[]} availableActions - candidate actions to rank
 * @param {object} [context] - context mined into features
 * @returns {{recommended: string, rankings: object[], confidence: number,
 *            explorationLevel: string, patterns: object[]}}
 */
function recommend(siteId, agentId, domain, availableActions, context = {}) {
  const features = _extractFeatures(context);

  // Hoisted: invariant during scoring (see Fix note above).
  const confidence = _getConfidence(siteId, agentId, domain);
  const policyWeight = 0.4 + confidence * 0.4; // [0.4, 0.8]
  const banditWeight = 1 - policyWeight;       // [0.2, 0.6]

  // Fetch all arms once to build a lookup and normalization bounds.
  const allArms = stmts.getArms.all(siteId, agentId, domain);
  const armMap = {};
  for (const arm of allArms) armMap[arm.action] = arm;

  // Normalization bounds over arms that have been pulled at least once.
  let minUCB = Infinity, maxUCB = -Infinity;
  for (const arm of allArms) {
    if (arm.pulls > 0) {
      if (arm.ucb_score < minUCB) minUCB = arm.ucb_score;
      if (arm.ucb_score > maxUCB) maxUCB = arm.ucb_score;
    }
  }
  const ucbRange = maxUCB - minUCB;

  const scored = availableActions.map((action) => {
    const arm = armMap[action] || _getOrCreateArm(siteId, agentId, domain, action);
    const policyScore = _predict(siteId, agentId, domain, { ...features, [`action:${action}`]: 1 });

    // Normalize the bandit score to [0,1].
    let normalizedBandit;
    if (arm.pulls === 0) {
      normalizedBandit = 1.0; // unexplored arms get maximum exploration bonus
    } else if (ucbRange > 0) {
      normalizedBandit = (arm.ucb_score - minUCB) / ucbRange;
    } else {
      normalizedBandit = arm.avg_reward; // single arm — use raw avg
    }

    const blended = policyWeight * policyScore + banditWeight * normalizedBandit;

    return {
      action,
      score: Math.round(blended * 1000) / 1000,
      policyScore: Math.round(policyScore * 1000) / 1000,
      banditScore: Math.round(normalizedBandit * 1000) / 1000,
      pulls: arm.pulls,
      avgReward: Math.round((arm.avg_reward || 0) * 1000) / 1000,
    };
  });

  scored.sort((a, b) => b.score - a.score);

  const topPatterns = stmts.getTopPatterns.all(siteId, agentId, 'action_sequence', 5);

  return {
    recommended: scored[0]?.action || availableActions[0],
    rankings: scored,
    confidence,
    explorationLevel: confidence < 0.3 ? 'high' : confidence < 0.6 ? 'medium' : 'low',
    patterns: topPatterns.map((p) => ({
      sequence: p.sequence, frequency: p.frequency, confidence: p.confidence
    })),
  };
}
286
+
287
/**
 * Get learned preference summary for a domain: significant policy weights,
 * accept/reject rates, dominant actions, and mined behavior patterns.
 *
 * @param {string} siteId
 * @param {string} agentId
 * @param {string} domain
 * @returns {object} preference profile with rates, top actions, and confidence
 */
function getPreferences(siteId, agentId, domain) {
  const policies = stmts.getPolicies.all(siteId, agentId, domain);
  const decisions = stmts.getRecentDecisions.all(siteId, agentId, domain, 50);
  const patterns = stmts.getTopPatterns.all(siteId, agentId, 'action_sequence', 10);

  // Single pass over decisions: outcome tallies, reward sum, action counts.
  const total = decisions.length;
  let acceptedCount = 0;
  let rejectedCount = 0;
  let rewardSum = 0;
  const actionFreqs = {};
  for (const d of decisions) {
    if (d.outcome === 'accepted') acceptedCount += 1;
    else if (d.outcome === 'rejected') rejectedCount += 1;
    rewardSum += d.reward;
    actionFreqs[d.action] = (actionFreqs[d.action] || 0) + 1;
  }

  // Preference profile: only weights with meaningful magnitude.
  const profile = {};
  for (const p of policies) {
    const magnitude = Math.abs(p.weight);
    if (magnitude <= 0.05) continue; // near-zero weights are uninformative
    profile[p.feature] = {
      weight: Math.round(p.weight * 1000) / 1000,
      direction: p.weight > 0 ? 'preferred' : 'avoided',
      strength: magnitude > 0.5 ? 'strong' : magnitude > 0.2 ? 'moderate' : 'weak',
      updates: p.update_count,
    };
  }

  return {
    domain,
    profile,
    acceptRate: total > 0 ? Math.round((acceptedCount / total) * 1000) / 1000 : 0,
    rejectRate: total > 0 ? Math.round((rejectedCount / total) * 1000) / 1000 : 0,
    totalDecisions: total,
    avgReward: total > 0 ? Math.round((rewardSum / total) * 1000) / 1000 : 0,
    topActions: Object.entries(actionFreqs)
      .sort(([, a], [, b]) => b - a)
      .slice(0, 5)
      .map(([action, count]) => ({ action, count, percentage: Math.round((count / total) * 100) })),
    topPatterns: patterns.map((p) => ({ sequence: p.sequence, frequency: p.frequency })),
    confidence: _getConfidence(siteId, agentId, domain),
  };
}
334
+
335
/**
 * Get reward history — recent rewards in chronological order, for charting.
 *
 * @param {string} siteId
 * @param {string} agentId
 * @param {number} [limit=30] - max number of resolved decisions to return
 * @returns {Array<{reward: number, created_at: string}>}
 */
function getRewardHistory(siteId, agentId, limit = 30) {
  const newestFirst = stmts.getRecentRewards.all(siteId, agentId, limit);
  return newestFirst.reverse(); // charts want oldest → newest
}
341
+
342
// ─── Learning Sessions ───────────────────────────────────────────────

/**
 * Open a new learning session for prediction-accuracy tracking.
 * @returns {{sessionId: string}}
 */
function startSession(siteId, agentId) {
  const sessionId = crypto.randomUUID();
  stmts.insertSession.run(sessionId, siteId, agentId);
  return { sessionId };
}
349
+
350
/**
 * Close a session, persisting how many predictions it got right.
 * @param {string} sessionId
 * @param {number} decisionsMade
 * @param {number} correctPredictions
 * @returns {{accuracy: number}} accuracy rounded to 3 decimals
 */
function endSession(sessionId, decisionsMade, correctPredictions) {
  // Guard against division by zero when no decisions were made.
  const ratio = decisionsMade > 0 ? correctPredictions / decisionsMade : 0;
  stmts.updateSession.run(decisionsMade, correctPredictions, ratio, sessionId);
  return { accuracy: Math.round(ratio * 1000) / 1000 };
}
355
+
356
// ─── Reset ───────────────────────────────────────────────────────────

/**
 * Reset all learned data for a specific domain — policies, bandit arms,
 * and raw decisions — atomically. Patterns are agent-wide and untouched;
 * use resetPatterns() for those.
 *
 * @returns {{reset: true, domain: string}}
 */
function resetDomain(siteId, agentId, domain) {
  db.transaction(() => {
    stmts.deletePolicies.run(siteId, agentId, domain);
    stmts.deleteArms.run(siteId, agentId, domain);
    stmts.deleteDecisions.run(siteId, agentId, domain);
  })();
  return { reset: true, domain };
}
370
+
371
/**
 * Reset all mined behavior patterns for an agent (across every domain).
 * @returns {{reset: true}}
 */
function resetPatterns(siteId, agentId) {
  stmts.deletePatterns.run(siteId, agentId);
  return { reset: true };
}
378
+
379
// ─── Stats ───────────────────────────────────────────────────────────

/**
 * Aggregate learning statistics for an agent: decision counts by outcome,
 * average reward, recent session accuracy, and reward history for charts.
 */
function getStats(siteId, agentId) {
  // The stats query binds (site_id, agent_id) once per subquery — six times.
  const pair = [siteId, agentId];
  const row = stmts.getStats.get(...pair, ...pair, ...pair, ...pair, ...pair, ...pair);

  const sessions = stmts.getSessionHistory.all(siteId, agentId, 10);
  let accuracySum = 0;
  for (const session of sessions) accuracySum += session.accuracy;
  const recentAccuracy = sessions.length > 0 ? accuracySum / sessions.length : 0;

  const rewardHistory = stmts.getRecentRewards.all(siteId, agentId, 30).reverse();

  return {
    ...row,
    // AVG() is NULL when no resolved decisions exist; report 0 instead.
    avg_reward: row.avg_reward !== null ? Math.round(row.avg_reward * 1000) / 1000 : 0,
    recentAccuracy: Math.round(recentAccuracy * 1000) / 1000,
    sessionsCount: sessions.length,
    acceptRate: row.total_decisions > 0
      ? Math.round((row.accepted / row.total_decisions) * 1000) / 1000
      : 0,
    rewardHistory,
  };
}
398
+
399
// ─── Internal: Prediction via Linear Model ───────────────────────────

/**
 * Predict expected reward for a feature vector from the learned linear
 * policy weights, squashed through a sigmoid to (0, 1).
 *
 * Fix: removed the `matchedFeatures` counter — it was incremented but
 * never read (dead local).
 *
 * @returns {number} predicted reward; 0.5 (neutral) when no policies exist
 */
function _predict(siteId, agentId, domain, features) {
  const policies = stmts.getPolicies.all(siteId, agentId, domain);
  if (policies.length === 0) return 0.5; // no data yet — neutral prediction

  let score = 0;
  for (const p of policies) {
    const raw = features[p.feature];
    if (raw === undefined) continue;
    // Booleans (and other truthy/falsy values) collapse to 1/0.
    const fv = typeof raw === 'number' ? raw : (raw ? 1 : 0);

    // Temporal discount: policies with fewer than 10 updates are damped.
    // NOTE(review): update_count === 0 gets NO damping while update_count === 1
    // gets the strongest — looks inconsistent, but preserved as-is; confirm
    // intent before changing.
    const recencyBoost = p.update_count > 0
      ? Math.pow(DISCOUNT_FACTOR, Math.max(0, 10 - p.update_count))
      : 1;
    score += p.weight * fv * recencyBoost;
  }

  // Sigmoid squash to (0, 1).
  return 1 / (1 + Math.exp(-score));
}
422
+
423
/**
 * Gradient-descent update of per-feature policy weights from a prediction
 * error. Existing weights are decayed before the step (bounded growth) and
 * the result is clamped to [-5, 5].
 *
 * @param {object} features - feature name → numeric/boolean value
 * @param {number} error - actual reward minus predicted reward
 */
function _updatePolicies(siteId, agentId, domain, features, error) {
  for (const [feature, value] of Object.entries(features)) {
    const fv = typeof value === 'number' ? value : (value ? 1 : 0);
    if (fv === 0) continue; // zero-valued features carry no gradient

    const step = error * fv * LEARNING_RATE;
    const existing = stmts.getPolicy.get(siteId, agentId, domain, feature);

    // Decay the previous weight before stepping, then clamp as a safety rail.
    const decayed = existing ? existing.weight * DISCOUNT_FACTOR : 0;
    const next = Math.min(5, Math.max(-5, decayed + step));

    // Upsert binds the new weight twice: INSERT value and ON CONFLICT value.
    stmts.upsertPolicy.run(
      crypto.randomUUID(), siteId, agentId, domain, feature, next,
      next, Math.abs(error)
    );
  }
}
444
+
445
// ─── Internal: Multi-Armed Bandit ────────────────────────────────────

/**
 * Idempotently ensure a bandit arm row exists and return it. Falls back to
 * a zeroed in-memory arm if the row still cannot be read.
 */
function _getOrCreateArm(siteId, agentId, domain, action) {
  stmts.upsertArm.run(crypto.randomUUID(), siteId, agentId, domain, action);
  return (
    stmts.getArm.get(siteId, agentId, domain, action) ||
    { pulls: 0, ucb_score: 0, avg_reward: 0, total_reward: 0 }
  );
}
452
+
453
/**
 * Fold an observed reward into the UCB1 state of a bandit arm.
 *
 * Fix: when the arm row was missing, the original created it and returned
 * early, silently dropping the reward. Now the arm is created and the
 * reward is applied to it in the same call.
 *
 * @param {number} reward - actual observed reward for this pull
 */
function _updateBanditArm(siteId, agentId, domain, action, reward) {
  let arm = stmts.getArm.get(siteId, agentId, domain, action);
  if (!arm) {
    stmts.upsertArm.run(crypto.randomUUID(), siteId, agentId, domain, action);
    arm = stmts.getArm.get(siteId, agentId, domain, action);
    if (!arm) return; // row still unreadable — give up rather than crash
  }

  const newPulls = arm.pulls + 1;
  const newTotalReward = arm.total_reward + reward;
  const newAvgReward = newTotalReward / newPulls;

  // UCB1: avg_reward + C * sqrt(ln(N) / n_i), where N is the total pulls
  // across all arms in this domain (including the current pull).
  const arms = stmts.getArms.all(siteId, agentId, domain);
  const totalPulls = arms.reduce((sum, a) => sum + a.pulls, 0) + 1;

  const exploration = UCB_EXPLORATION * Math.sqrt(Math.log(totalPulls) / newPulls);
  const ucbScore = newAvgReward + exploration;

  stmts.updateArm.run(reward, newAvgReward, ucbScore, siteId, agentId, domain, action);
}
474
+
475
// ─── Internal: Pattern Mining ────────────────────────────────────────

/**
 * Mine sequential action patterns from the most recent decisions in a
 * domain. Sequences of length 2..MAX_SEQUENCE_LENGTH are recorded; repeat
 * sightings bump frequency and push confidence asymptotically toward 1.
 */
function _minePatterns(siteId, agentId, domain) {
  const recent = stmts.getRecentDecisions.all(siteId, agentId, domain, 20);
  if (recent.length < 3) return;

  const maxLen = Math.min(MAX_SEQUENCE_LENGTH, recent.length);
  for (let len = 2; len <= maxLen; len++) {
    // Rows come back newest-first; reverse the window to chronological order.
    const actions = recent.slice(0, len).map((d) => d.action);
    actions.reverse();
    const sequence = actions.join(' → ');

    const existing = stmts.findPattern.get(siteId, agentId, sequence);
    if (existing) {
      // Asymptotic approach to 1.0 — gains shrink as confidence rises.
      const newConf = Math.min(0.99, existing.confidence + 0.05 * (1 - existing.confidence));
      stmts.updatePattern.run(newConf, existing.id);
    } else {
      stmts.insertPattern.run(crypto.randomUUID(), siteId, agentId, 'action_sequence', sequence, 0.3);
    }
  }
}
495
+
496
// ─── Internal: Feature Extraction ────────────────────────────────────

/**
 * Convert a raw decision context into a flat feature vector for the linear
 * model. Known fields get normalized/bucketized features; any remaining
 * numeric context values are passed through unchanged.
 *
 * @param {object} context
 * @returns {object} feature name → number
 */
function _extractFeatures(context) {
  const features = {};
  const { price, quantity, discount, rating, category, brand, timeOfDay, isRepeat, urgency, inStock } = context;

  if (price !== undefined) {
    features.price = price;
    // Discrete buckets let the linear model learn price thresholds.
    const bucket =
      price < 10 ? 'cheap' :
      price < 50 ? 'moderate' :
      price < 200 ? 'premium' : 'luxury';
    features[`price_bucket:${bucket}`] = 1;
  }
  if (quantity !== undefined) features.quantity = quantity;
  if (discount !== undefined) {
    features.discount = discount;
    features.has_discount = discount > 0 ? 1 : 0;
  }
  if (rating !== undefined) {
    features.rating = rating;
    features.high_rated = rating >= 4.0 ? 1 : 0;
  }
  if (category) features[`category:${category}`] = 1;
  if (brand) features[`brand:${brand}`] = 1;
  if (timeOfDay !== undefined) {
    // One-hot daypart encoding (hour of day, 0-23).
    features.morning = timeOfDay < 12 ? 1 : 0;
    features.afternoon = timeOfDay >= 12 && timeOfDay < 18 ? 1 : 0;
    features.evening = timeOfDay >= 18 ? 1 : 0;
  }
  if (isRepeat !== undefined) features.repeat_visit = isRepeat ? 1 : 0;
  if (urgency !== undefined) features.urgency = urgency;
  if (inStock !== undefined) features.in_stock = inStock ? 1 : 0;

  // Forward any remaining numeric context values as raw features
  // (this is also how e.g. timeOfDay itself ends up in the vector).
  for (const [key, value] of Object.entries(context)) {
    if (typeof value === 'number' && features[key] === undefined) {
      features[key] = value;
    }
  }

  return features;
}
538
+
539
// ─── Internal: Confidence Estimation ─────────────────────────────────

/**
 * Estimate confidence in the learned model for a domain as a weighted
 * blend: volume (0.3) + accuracy (0.5) + recency (0.2), bounded to
 * [MIN_CONFIDENCE, 0.99]. Returns 0 with no decisions at all and
 * MIN_CONFIDENCE when every decision is still pending.
 *
 * Fixes vs. original: the volume component is linear saturation
 * (n / 30 capped at 1) — the old comment incorrectly said "log scale";
 * and the unreachable `withOutcome.length > 0 ? … : 0.5` fallback was
 * removed (that case is handled by the MIN_CONFIDENCE early return above).
 */
function _getConfidence(siteId, agentId, domain) {
  const decisions = stmts.getRecentDecisions.all(siteId, agentId, domain, 50);
  if (decisions.length === 0) return 0;

  const withOutcome = decisions.filter((d) => d.outcome !== 'pending');
  if (withOutcome.length === 0) return MIN_CONFIDENCE;

  // Volume: linear ramp saturating at 30 resolved decisions.
  const volumeConf = Math.min(1, withOutcome.length / 30);

  // Accuracy: mean closeness of predictions to actual rewards. Rows with a
  // NULL predicted_reward contribute 0, conservatively lowering confidence.
  let accuracySum = 0;
  for (const d of withOutcome) {
    if (d.predicted_reward !== null) {
      accuracySum += Math.max(0, 1 - Math.abs(d.reward - d.predicted_reward));
    }
  }
  const accuracyConf = accuracySum / withOutcome.length; // length > 0 here

  // Recency: exponential decay from the newest resolved decision (rows are
  // ordered newest-first, so index 0 is the latest).
  // NOTE(review): SQLite datetime('now') is UTC but has no 'Z' suffix, so
  // `new Date(...)` parses it as local time — confirm this skew is intended.
  const latestTs = new Date(withOutcome[0].created_at).getTime();
  const ageHours = (Date.now() - latestTs) / 3600000;
  const recencyConf = Math.exp(-DECAY_RATE * ageHours);

  return Math.max(MIN_CONFIDENCE, Math.min(0.99,
    volumeConf * 0.3 + accuracyConf * 0.5 + recencyConf * 0.2
  ));
}
570
+
571
+ module.exports = {
572
+ recordDecision, feedback, batchFeedback, recommend, getPreferences,
573
+ getRewardHistory, startSession, endSession,
574
+ resetDomain, resetPatterns, getStats,
575
+ };