genesis-ai-cli 10.8.2 → 11.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/active-inference/autonomous-loop.d.ts +21 -0
- package/dist/src/active-inference/autonomous-loop.js +84 -1
- package/dist/src/active-inference/core.js +17 -12
- package/dist/src/active-inference/experience-replay.d.ts +124 -0
- package/dist/src/active-inference/experience-replay.js +227 -0
- package/dist/src/active-inference/index.d.ts +1 -0
- package/dist/src/active-inference/index.js +6 -1
- package/dist/src/active-inference/observations.d.ts +20 -0
- package/dist/src/active-inference/observations.js +61 -1
- package/dist/src/active-inference/types.d.ts +21 -0
- package/dist/src/index.js +9 -2
- package/dist/src/services/competitive-intel.d.ts +140 -0
- package/dist/src/services/competitive-intel.js +414 -0
- package/package.json +1 -1
|
@@ -18,6 +18,10 @@ export interface AutonomousLoopConfig {
|
|
|
18
18
|
persistModelPath: string;
|
|
19
19
|
persistEveryN: number;
|
|
20
20
|
loadOnStart: boolean;
|
|
21
|
+
replayEveryN: number;
|
|
22
|
+
replayBatchSize: number;
|
|
23
|
+
dreamEveryN: number;
|
|
24
|
+
dreamBatchSize: number;
|
|
21
25
|
verbose: boolean;
|
|
22
26
|
}
|
|
23
27
|
export declare const DEFAULT_LOOP_CONFIG: AutonomousLoopConfig;
|
|
@@ -44,6 +48,8 @@ export declare class AutonomousLoop {
|
|
|
44
48
|
private plateauCycles;
|
|
45
49
|
private deepAIFActive;
|
|
46
50
|
private deepAIF;
|
|
51
|
+
private replayBuffer;
|
|
52
|
+
private previousObservation;
|
|
47
53
|
constructor(config?: Partial<AutonomousLoopConfig>);
|
|
48
54
|
/**
|
|
49
55
|
* Run the autonomous loop
|
|
@@ -67,6 +73,21 @@ export declare class AutonomousLoop {
|
|
|
67
73
|
* Subscribe to stop events
|
|
68
74
|
*/
|
|
69
75
|
onStop(handler: (reason: string, stats: LoopStats) => void): () => void;
|
|
76
|
+
/**
|
|
77
|
+
* Run experience replay: sample a batch from buffer and re-learn.
|
|
78
|
+
* This strengthens A/B matrix updates for important past experiences.
|
|
79
|
+
*/
|
|
80
|
+
private runExperienceReplay;
|
|
81
|
+
/**
|
|
82
|
+
* Run dream consolidation: deep replay of high-surprise experiences.
|
|
83
|
+
* This is the "sleep" phase where the model integrates difficult experiences.
|
|
84
|
+
*
|
|
85
|
+
* Differences from regular replay:
|
|
86
|
+
* - Focuses on highest-surprise experiences
|
|
87
|
+
* - Runs multiple iterations per experience
|
|
88
|
+
* - Prunes consolidated experiences afterward
|
|
89
|
+
*/
|
|
90
|
+
private runDreamConsolidation;
|
|
70
91
|
/**
|
|
71
92
|
* Save learned model to disk.
|
|
72
93
|
* Persists A/B matrices, beliefs, and action counts between sessions.
|
|
@@ -48,6 +48,7 @@ const observations_js_1 = require("./observations.js");
|
|
|
48
48
|
const actions_js_1 = require("./actions.js");
|
|
49
49
|
const types_js_1 = require("./types.js");
|
|
50
50
|
const deep_aif_js_1 = require("./deep-aif.js");
|
|
51
|
+
const experience_replay_js_1 = require("./experience-replay.js");
|
|
51
52
|
const fs = __importStar(require("fs"));
|
|
52
53
|
const path = __importStar(require("path"));
|
|
53
54
|
exports.DEFAULT_LOOP_CONFIG = {
|
|
@@ -60,6 +61,10 @@ exports.DEFAULT_LOOP_CONFIG = {
|
|
|
60
61
|
persistModelPath: '.genesis/learned-model.json',
|
|
61
62
|
persistEveryN: 10, // Save every 10 cycles
|
|
62
63
|
loadOnStart: true, // Resume learning from previous session
|
|
64
|
+
replayEveryN: 5, // Replay every 5 cycles
|
|
65
|
+
replayBatchSize: 8, // 8 experiences per replay
|
|
66
|
+
dreamEveryN: 50, // Dream consolidation every 50 cycles
|
|
67
|
+
dreamBatchSize: 16, // 16 high-surprise experiences per dream
|
|
63
68
|
verbose: false,
|
|
64
69
|
};
|
|
65
70
|
// ============================================================================
|
|
@@ -85,11 +90,15 @@ class AutonomousLoop {
|
|
|
85
90
|
plateauCycles = 0; // Consecutive cycles with near-zero learning velocity
|
|
86
91
|
deepAIFActive = false; // Whether Deep-AIF has been activated
|
|
87
92
|
deepAIF = null;
|
|
93
|
+
// v11.0: Experience replay buffer
|
|
94
|
+
replayBuffer;
|
|
95
|
+
previousObservation = null;
|
|
88
96
|
constructor(config = {}) {
|
|
89
97
|
this.config = { ...exports.DEFAULT_LOOP_CONFIG, ...config };
|
|
90
98
|
this.engine = (0, core_js_1.createActiveInferenceEngine)();
|
|
91
99
|
this.observations = (0, observations_js_1.createObservationGatherer)();
|
|
92
100
|
this.actions = (0, actions_js_1.createActionExecutorManager)();
|
|
101
|
+
this.replayBuffer = (0, experience_replay_js_1.createExperienceReplayBuffer)();
|
|
93
102
|
// Subscribe to engine events
|
|
94
103
|
this.engine.on(this.handleEngineEvent.bind(this));
|
|
95
104
|
}
|
|
@@ -198,7 +207,23 @@ class AutonomousLoop {
|
|
|
198
207
|
this.observations.recordToolResult(result.success, result.duration);
|
|
199
208
|
// 5. v10.8: Record learning event
|
|
200
209
|
const surprise = this.engine.getStats().averageSurprise;
|
|
201
|
-
|
|
210
|
+
const outcome = result.success ? 'positive' : 'negative';
|
|
211
|
+
this.engine.recordLearningEvent(action, surprise, outcome);
|
|
212
|
+
// 5a. v11.0: Store experience in replay buffer
|
|
213
|
+
if (this.previousObservation) {
|
|
214
|
+
this.replayBuffer.store({
|
|
215
|
+
timestamp: Date.now(),
|
|
216
|
+
observation: this.previousObservation,
|
|
217
|
+
action,
|
|
218
|
+
actionIdx: types_js_1.ACTIONS.indexOf(action),
|
|
219
|
+
nextObservation: obs,
|
|
220
|
+
surprise,
|
|
221
|
+
outcome,
|
|
222
|
+
beliefs: { ...beliefs },
|
|
223
|
+
nextBeliefs: this.engine.getBeliefs(),
|
|
224
|
+
});
|
|
225
|
+
}
|
|
226
|
+
this.previousObservation = obs;
|
|
202
227
|
// 5b. v10.8.1: Meta-learning triggers (every 20 cycles)
|
|
203
228
|
if (this.cycleCount % 20 === 0 && this.cycleCount >= 40) {
|
|
204
229
|
const patterns = this.engine.analyzeLearningPatterns();
|
|
@@ -223,6 +248,14 @@ class AutonomousLoop {
|
|
|
223
248
|
this.plateauCycles = 0;
|
|
224
249
|
}
|
|
225
250
|
}
|
|
251
|
+
// 5c. v11.0: Experience replay (offline learning from past experiences)
|
|
252
|
+
if (this.config.replayEveryN > 0 && this.cycleCount % this.config.replayEveryN === 0) {
|
|
253
|
+
this.runExperienceReplay();
|
|
254
|
+
}
|
|
255
|
+
// 5d. v11.0: Dream consolidation (deep replay of high-surprise events)
|
|
256
|
+
if (this.config.dreamEveryN > 0 && this.cycleCount % this.config.dreamEveryN === 0 && this.cycleCount > 0) {
|
|
257
|
+
this.runDreamConsolidation();
|
|
258
|
+
}
|
|
226
259
|
// 6. v10.8: Persist model periodically
|
|
227
260
|
if (this.config.persistEveryN > 0 && this.cycleCount % this.config.persistEveryN === 0) {
|
|
228
261
|
this.saveModel();
|
|
@@ -299,6 +332,56 @@ class AutonomousLoop {
|
|
|
299
332
|
};
|
|
300
333
|
}
|
|
301
334
|
// ============================================================================
|
|
335
|
+
// v11.0: Experience Replay & Dream Consolidation
|
|
336
|
+
// ============================================================================
|
|
337
|
+
/**
|
|
338
|
+
* Run experience replay: sample a batch from buffer and re-learn.
|
|
339
|
+
* This strengthens A/B matrix updates for important past experiences.
|
|
340
|
+
*/
|
|
341
|
+
runExperienceReplay() {
|
|
342
|
+
const batch = this.replayBuffer.sampleBatch(this.config.replayBatchSize);
|
|
343
|
+
if (batch.experiences.length === 0)
|
|
344
|
+
return;
|
|
345
|
+
if (this.config.verbose) {
|
|
346
|
+
console.log(`[AI Loop] Replay: ${batch.experiences.length} experiences (avg surprise: ${batch.avgSurprise.toFixed(2)})`);
|
|
347
|
+
}
|
|
348
|
+
// Re-learn from each experience (offline update)
|
|
349
|
+
for (const exp of batch.experiences) {
|
|
350
|
+
// Feed the observation pair back through the engine's learning
|
|
351
|
+
// This is equivalent to "replaying" the experience in the model
|
|
352
|
+
this.engine.learn(exp.observation, exp.nextObservation, exp.beliefs, exp.actionIdx);
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
/**
|
|
356
|
+
* Run dream consolidation: deep replay of high-surprise experiences.
|
|
357
|
+
* This is the "sleep" phase where the model integrates difficult experiences.
|
|
358
|
+
*
|
|
359
|
+
* Differences from regular replay:
|
|
360
|
+
* - Focuses on highest-surprise experiences
|
|
361
|
+
* - Runs multiple iterations per experience
|
|
362
|
+
* - Prunes consolidated experiences afterward
|
|
363
|
+
*/
|
|
364
|
+
runDreamConsolidation() {
|
|
365
|
+
const highSurprise = this.replayBuffer.sampleHighSurprise(this.config.dreamBatchSize);
|
|
366
|
+
if (highSurprise.length === 0)
|
|
367
|
+
return;
|
|
368
|
+
if (this.config.verbose) {
|
|
369
|
+
const avgS = highSurprise.reduce((s, e) => s + e.surprise, 0) / highSurprise.length;
|
|
370
|
+
console.log(`[AI Loop] Dream: consolidating ${highSurprise.length} high-surprise experiences (avg: ${avgS.toFixed(2)})`);
|
|
371
|
+
}
|
|
372
|
+
// Deep replay: 3 iterations per experience for stronger consolidation
|
|
373
|
+
for (let iter = 0; iter < 3; iter++) {
|
|
374
|
+
for (const exp of highSurprise) {
|
|
375
|
+
this.engine.learn(exp.observation, exp.nextObservation, exp.beliefs, exp.actionIdx);
|
|
376
|
+
}
|
|
377
|
+
}
|
|
378
|
+
// Prune fully consolidated experiences
|
|
379
|
+
const pruned = this.replayBuffer.pruneConsolidated();
|
|
380
|
+
if (pruned > 0 && this.config.verbose) {
|
|
381
|
+
console.log(`[AI Loop] Dream: pruned ${pruned} consolidated experiences`);
|
|
382
|
+
}
|
|
383
|
+
}
|
|
384
|
+
// ============================================================================
|
|
302
385
|
// v10.8: Model Persistence (save/load learned matrices)
|
|
303
386
|
// ============================================================================
|
|
304
387
|
/**
|
|
@@ -661,21 +661,26 @@ class ActiveInferenceEngine {
|
|
|
661
661
|
// ============================================================================
|
|
662
662
|
computeLikelihoods(observation) {
|
|
663
663
|
// Compute P(observation | state) for each factor
|
|
664
|
-
//
|
|
665
|
-
|
|
666
|
-
//
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
664
|
+
// v11.0: Precision-weighted likelihoods
|
|
665
|
+
// likelihood_eff = precision * log P(o|s)
|
|
666
|
+
// When precision → 0, observation is ignored (uniform likelihood)
|
|
667
|
+
// When precision → 1, full weight on observation
|
|
668
|
+
const prec = observation.precision ?? { energy: 1, phi: 1, tool: 1, coherence: 1, task: 1, economic: 1 };
|
|
669
|
+
// Energy observation → viability likelihood (weighted by precision)
|
|
670
|
+
const viabilityLik = this.A.energy[observation.energy].map(p => prec.energy * safeLog(p));
|
|
671
|
+
// Phi observation → worldState likelihood (weighted by precision)
|
|
672
|
+
const worldStateLik = this.A.phi[observation.phi].map(p => prec.phi * safeLog(p));
|
|
673
|
+
// Tool observation → coupling likelihood (weighted by precision)
|
|
674
|
+
const couplingLik = this.A.tool[observation.tool].map(p => prec.tool * safeLog(p));
|
|
675
|
+
// Task observation → goalProgress likelihood (weighted by precision)
|
|
676
|
+
const goalProgressLik = this.A.task[observation.task].map(p => prec.task * safeLog(p));
|
|
677
|
+
// Coherence affects worldState (weighted by coherence precision)
|
|
673
678
|
const coherenceLik = this.A.coherence[observation.coherence];
|
|
674
679
|
for (let i = 0; i < worldStateLik.length; i++) {
|
|
675
|
-
worldStateLik[i] += safeLog(coherenceLik[i] || 0.1);
|
|
680
|
+
worldStateLik[i] += prec.coherence * safeLog(coherenceLik[i] || 0.1);
|
|
676
681
|
}
|
|
677
|
-
// v10.8.2: Economic observation → economic state likelihood
|
|
678
|
-
const economicLik = this.A.economic[observation.economic ?? 2].map(p => safeLog(p));
|
|
682
|
+
// v10.8.2: Economic observation → economic state likelihood (weighted)
|
|
683
|
+
const economicLik = this.A.economic[observation.economic ?? 2].map(p => prec.economic * safeLog(p));
|
|
679
684
|
return {
|
|
680
685
|
viability: viabilityLik,
|
|
681
686
|
worldState: worldStateLik,
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Genesis v11.0 - Experience Replay Buffer
|
|
3
|
+
*
|
|
4
|
+
* Implements prioritized experience replay for Active Inference.
|
|
5
|
+
* Inspired by Deep AIF papers (2025) showing that purely online
|
|
6
|
+
* Dirichlet updates are insufficient for long-horizon tasks.
|
|
7
|
+
*
|
|
8
|
+
* Key features:
|
|
9
|
+
* - Prioritized by surprise (TD-error analog in AIF)
|
|
10
|
+
* - Connects to dream mode for offline consolidation
|
|
11
|
+
* - Adaptive capacity based on learning velocity
|
|
12
|
+
*
|
|
13
|
+
* References:
|
|
14
|
+
* - "Deep Active Inference for Delayed/Long-Horizon Tasks" (2025)
|
|
15
|
+
* - Prioritized Experience Replay (Schaul et al. 2015)
|
|
16
|
+
* - Sleep replay in biological neural systems (Diekelmann & Born 2010)
|
|
17
|
+
*/
|
|
18
|
+
import { Observation, ActionType, Beliefs } from './types.js';
|
|
19
|
+
export interface Experience {
|
|
20
|
+
id: number;
|
|
21
|
+
timestamp: number;
|
|
22
|
+
observation: Observation;
|
|
23
|
+
action: ActionType;
|
|
24
|
+
actionIdx: number;
|
|
25
|
+
nextObservation: Observation;
|
|
26
|
+
surprise: number;
|
|
27
|
+
outcome: 'positive' | 'negative' | 'neutral';
|
|
28
|
+
beliefs: Beliefs;
|
|
29
|
+
nextBeliefs: Beliefs;
|
|
30
|
+
priority: number;
|
|
31
|
+
replayCount: number;
|
|
32
|
+
}
|
|
33
|
+
export interface ReplayBatch {
|
|
34
|
+
experiences: Experience[];
|
|
35
|
+
batchSize: number;
|
|
36
|
+
totalPriority: number;
|
|
37
|
+
avgSurprise: number;
|
|
38
|
+
}
|
|
39
|
+
export interface ReplayStats {
|
|
40
|
+
bufferSize: number;
|
|
41
|
+
totalStored: number;
|
|
42
|
+
totalReplayed: number;
|
|
43
|
+
avgSurprise: number;
|
|
44
|
+
avgPriority: number;
|
|
45
|
+
highSurpriseCount: number;
|
|
46
|
+
consolidatedCount: number;
|
|
47
|
+
}
|
|
48
|
+
export interface ReplayConfig {
|
|
49
|
+
maxCapacity: number;
|
|
50
|
+
priorityExponent: number;
|
|
51
|
+
importanceSamplingBeta: number;
|
|
52
|
+
surpriseThreshold: number;
|
|
53
|
+
minReplayBatch: number;
|
|
54
|
+
maxReplayBatch: number;
|
|
55
|
+
consolidationThreshold: number;
|
|
56
|
+
}
|
|
57
|
+
export declare const DEFAULT_REPLAY_CONFIG: ReplayConfig;
|
|
58
|
+
export declare class ExperienceReplayBuffer {
|
|
59
|
+
private buffer;
|
|
60
|
+
private config;
|
|
61
|
+
private nextId;
|
|
62
|
+
private totalStored;
|
|
63
|
+
private totalReplayed;
|
|
64
|
+
constructor(config?: Partial<ReplayConfig>);
|
|
65
|
+
/**
|
|
66
|
+
* Store a new experience in the buffer.
|
|
67
|
+
* Priority is initially set based on surprise.
|
|
68
|
+
*/
|
|
69
|
+
store(experience: Omit<Experience, 'id' | 'priority' | 'replayCount'>): void;
|
|
70
|
+
/**
|
|
71
|
+
* Sample a batch of experiences for replay, prioritized by surprise.
|
|
72
|
+
* Uses proportional prioritization: P(i) = p_i^α / Σ p_k^α
|
|
73
|
+
*/
|
|
74
|
+
sampleBatch(batchSize?: number): ReplayBatch;
|
|
75
|
+
/**
|
|
76
|
+
* Sample high-surprise experiences specifically (for dream consolidation).
|
|
77
|
+
* These are the most important experiences to replay during "sleep".
|
|
78
|
+
*/
|
|
79
|
+
sampleHighSurprise(maxCount?: number): Experience[];
|
|
80
|
+
/**
|
|
81
|
+
* Update priority of an experience after replay.
|
|
82
|
+
* New priority = new surprise after re-evaluating with current model.
|
|
83
|
+
*/
|
|
84
|
+
updatePriority(experienceId: number, newSurprise: number): void;
|
|
85
|
+
/**
|
|
86
|
+
* Get consolidated experiences (replayed enough times).
|
|
87
|
+
* These can be safely evicted or archived.
|
|
88
|
+
*/
|
|
89
|
+
getConsolidated(): Experience[];
|
|
90
|
+
/**
|
|
91
|
+
* Remove consolidated experiences from the buffer.
|
|
92
|
+
* Call after dream consolidation to free space.
|
|
93
|
+
*/
|
|
94
|
+
pruneConsolidated(): number;
|
|
95
|
+
/**
|
|
96
|
+
* Get buffer statistics.
|
|
97
|
+
*/
|
|
98
|
+
getStats(): ReplayStats;
|
|
99
|
+
/**
|
|
100
|
+
* Export buffer for persistence.
|
|
101
|
+
*/
|
|
102
|
+
export(): {
|
|
103
|
+
buffer: Experience[];
|
|
104
|
+
config: ReplayConfig;
|
|
105
|
+
stats: {
|
|
106
|
+
totalStored: number;
|
|
107
|
+
totalReplayed: number;
|
|
108
|
+
};
|
|
109
|
+
};
|
|
110
|
+
/**
|
|
111
|
+
* Import previously saved buffer.
|
|
112
|
+
*/
|
|
113
|
+
import(data: {
|
|
114
|
+
buffer?: Experience[];
|
|
115
|
+
stats?: {
|
|
116
|
+
totalStored?: number;
|
|
117
|
+
totalReplayed?: number;
|
|
118
|
+
};
|
|
119
|
+
}): void;
|
|
120
|
+
private computeInitialPriority;
|
|
121
|
+
private evict;
|
|
122
|
+
}
|
|
123
|
+
export declare function createExperienceReplayBuffer(config?: Partial<ReplayConfig>): ExperienceReplayBuffer;
|
|
124
|
+
export declare function getExperienceReplayBuffer(config?: Partial<ReplayConfig>): ExperienceReplayBuffer;
|
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Genesis v11.0 - Experience Replay Buffer
|
|
4
|
+
*
|
|
5
|
+
* Implements prioritized experience replay for Active Inference.
|
|
6
|
+
* Inspired by Deep AIF papers (2025) showing that purely online
|
|
7
|
+
* Dirichlet updates are insufficient for long-horizon tasks.
|
|
8
|
+
*
|
|
9
|
+
* Key features:
|
|
10
|
+
* - Prioritized by surprise (TD-error analog in AIF)
|
|
11
|
+
* - Connects to dream mode for offline consolidation
|
|
12
|
+
* - Adaptive capacity based on learning velocity
|
|
13
|
+
*
|
|
14
|
+
* References:
|
|
15
|
+
* - "Deep Active Inference for Delayed/Long-Horizon Tasks" (2025)
|
|
16
|
+
* - Prioritized Experience Replay (Schaul et al. 2015)
|
|
17
|
+
* - Sleep replay in biological neural systems (Diekelmann & Born 2010)
|
|
18
|
+
*/
|
|
19
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
20
|
+
exports.ExperienceReplayBuffer = exports.DEFAULT_REPLAY_CONFIG = void 0;
|
|
21
|
+
exports.createExperienceReplayBuffer = createExperienceReplayBuffer;
|
|
22
|
+
exports.getExperienceReplayBuffer = getExperienceReplayBuffer;
|
|
23
|
+
exports.DEFAULT_REPLAY_CONFIG = {
|
|
24
|
+
maxCapacity: 2000,
|
|
25
|
+
priorityExponent: 0.6, // Moderate prioritization
|
|
26
|
+
importanceSamplingBeta: 0.4, // Partial importance sampling correction
|
|
27
|
+
surpriseThreshold: 3.0,
|
|
28
|
+
minReplayBatch: 8,
|
|
29
|
+
maxReplayBatch: 32,
|
|
30
|
+
consolidationThreshold: 5,
|
|
31
|
+
};
|
|
32
|
+
// ============================================================================
|
|
33
|
+
// Experience Replay Buffer
|
|
34
|
+
// ============================================================================
|
|
35
|
+
class ExperienceReplayBuffer {
|
|
36
|
+
buffer = [];
|
|
37
|
+
config;
|
|
38
|
+
nextId = 0;
|
|
39
|
+
totalStored = 0;
|
|
40
|
+
totalReplayed = 0;
|
|
41
|
+
constructor(config = {}) {
|
|
42
|
+
this.config = { ...exports.DEFAULT_REPLAY_CONFIG, ...config };
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Store a new experience in the buffer.
|
|
46
|
+
* Priority is initially set based on surprise.
|
|
47
|
+
*/
|
|
48
|
+
store(experience) {
|
|
49
|
+
const entry = {
|
|
50
|
+
...experience,
|
|
51
|
+
id: this.nextId++,
|
|
52
|
+
priority: this.computeInitialPriority(experience.surprise, experience.outcome),
|
|
53
|
+
replayCount: 0,
|
|
54
|
+
};
|
|
55
|
+
if (this.buffer.length >= this.config.maxCapacity) {
|
|
56
|
+
// Evict lowest-priority, most-replayed experience
|
|
57
|
+
this.evict();
|
|
58
|
+
}
|
|
59
|
+
this.buffer.push(entry);
|
|
60
|
+
this.totalStored++;
|
|
61
|
+
}
|
|
62
|
+
/**
|
|
63
|
+
* Sample a batch of experiences for replay, prioritized by surprise.
|
|
64
|
+
* Uses proportional prioritization: P(i) = p_i^α / Σ p_k^α
|
|
65
|
+
*/
|
|
66
|
+
sampleBatch(batchSize) {
|
|
67
|
+
const size = Math.min(batchSize ?? this.config.minReplayBatch, this.buffer.length, this.config.maxReplayBatch);
|
|
68
|
+
if (this.buffer.length === 0 || size === 0) {
|
|
69
|
+
return { experiences: [], batchSize: 0, totalPriority: 0, avgSurprise: 0 };
|
|
70
|
+
}
|
|
71
|
+
// Compute sampling probabilities
|
|
72
|
+
const priorities = this.buffer.map(e => Math.pow(e.priority + 1e-6, this.config.priorityExponent));
|
|
73
|
+
const totalPriority = priorities.reduce((s, p) => s + p, 0);
|
|
74
|
+
const probabilities = priorities.map(p => p / totalPriority);
|
|
75
|
+
// Sample without replacement
|
|
76
|
+
const sampled = [];
|
|
77
|
+
const used = new Set();
|
|
78
|
+
for (let i = 0; i < size; i++) {
|
|
79
|
+
let r = Math.random();
|
|
80
|
+
let cumsum = 0;
|
|
81
|
+
for (let j = 0; j < probabilities.length; j++) {
|
|
82
|
+
if (used.has(j))
|
|
83
|
+
continue;
|
|
84
|
+
cumsum += probabilities[j];
|
|
85
|
+
if (r < cumsum || j === probabilities.length - 1) {
|
|
86
|
+
sampled.push(this.buffer[j]);
|
|
87
|
+
used.add(j);
|
|
88
|
+
this.buffer[j].replayCount++;
|
|
89
|
+
this.totalReplayed++;
|
|
90
|
+
break;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
return {
|
|
95
|
+
experiences: sampled,
|
|
96
|
+
batchSize: sampled.length,
|
|
97
|
+
totalPriority,
|
|
98
|
+
avgSurprise: sampled.reduce((s, e) => s + e.surprise, 0) / sampled.length,
|
|
99
|
+
};
|
|
100
|
+
}
|
|
101
|
+
/**
|
|
102
|
+
* Sample high-surprise experiences specifically (for dream consolidation).
|
|
103
|
+
* These are the most important experiences to replay during "sleep".
|
|
104
|
+
*/
|
|
105
|
+
sampleHighSurprise(maxCount = 16) {
|
|
106
|
+
const highSurprise = this.buffer
|
|
107
|
+
.filter(e => e.surprise > this.config.surpriseThreshold)
|
|
108
|
+
.sort((a, b) => b.surprise - a.surprise)
|
|
109
|
+
.slice(0, maxCount);
|
|
110
|
+
for (const exp of highSurprise) {
|
|
111
|
+
exp.replayCount++;
|
|
112
|
+
this.totalReplayed++;
|
|
113
|
+
}
|
|
114
|
+
return highSurprise;
|
|
115
|
+
}
|
|
116
|
+
/**
|
|
117
|
+
* Update priority of an experience after replay.
|
|
118
|
+
* New priority = new surprise after re-evaluating with current model.
|
|
119
|
+
*/
|
|
120
|
+
updatePriority(experienceId, newSurprise) {
|
|
121
|
+
const exp = this.buffer.find(e => e.id === experienceId);
|
|
122
|
+
if (exp) {
|
|
123
|
+
exp.priority = this.computeInitialPriority(newSurprise, exp.outcome);
|
|
124
|
+
exp.surprise = newSurprise; // Update surprise estimate
|
|
125
|
+
}
|
|
126
|
+
}
|
|
127
|
+
/**
|
|
128
|
+
* Get consolidated experiences (replayed enough times).
|
|
129
|
+
* These can be safely evicted or archived.
|
|
130
|
+
*/
|
|
131
|
+
getConsolidated() {
|
|
132
|
+
return this.buffer.filter(e => e.replayCount >= this.config.consolidationThreshold);
|
|
133
|
+
}
|
|
134
|
+
/**
|
|
135
|
+
* Remove consolidated experiences from the buffer.
|
|
136
|
+
* Call after dream consolidation to free space.
|
|
137
|
+
*/
|
|
138
|
+
pruneConsolidated() {
|
|
139
|
+
const before = this.buffer.length;
|
|
140
|
+
this.buffer = this.buffer.filter(e => e.replayCount < this.config.consolidationThreshold);
|
|
141
|
+
return before - this.buffer.length;
|
|
142
|
+
}
|
|
143
|
+
/**
|
|
144
|
+
* Get buffer statistics.
|
|
145
|
+
*/
|
|
146
|
+
getStats() {
|
|
147
|
+
const avgSurprise = this.buffer.length > 0
|
|
148
|
+
? this.buffer.reduce((s, e) => s + e.surprise, 0) / this.buffer.length
|
|
149
|
+
: 0;
|
|
150
|
+
const avgPriority = this.buffer.length > 0
|
|
151
|
+
? this.buffer.reduce((s, e) => s + e.priority, 0) / this.buffer.length
|
|
152
|
+
: 0;
|
|
153
|
+
return {
|
|
154
|
+
bufferSize: this.buffer.length,
|
|
155
|
+
totalStored: this.totalStored,
|
|
156
|
+
totalReplayed: this.totalReplayed,
|
|
157
|
+
avgSurprise,
|
|
158
|
+
avgPriority,
|
|
159
|
+
highSurpriseCount: this.buffer.filter(e => e.surprise > this.config.surpriseThreshold).length,
|
|
160
|
+
consolidatedCount: this.getConsolidated().length,
|
|
161
|
+
};
|
|
162
|
+
}
|
|
163
|
+
/**
|
|
164
|
+
* Export buffer for persistence.
|
|
165
|
+
*/
|
|
166
|
+
export() {
|
|
167
|
+
return {
|
|
168
|
+
buffer: [...this.buffer],
|
|
169
|
+
config: this.config,
|
|
170
|
+
stats: { totalStored: this.totalStored, totalReplayed: this.totalReplayed },
|
|
171
|
+
};
|
|
172
|
+
}
|
|
173
|
+
/**
|
|
174
|
+
* Import previously saved buffer.
|
|
175
|
+
*/
|
|
176
|
+
import(data) {
|
|
177
|
+
if (data.buffer)
|
|
178
|
+
this.buffer = data.buffer;
|
|
179
|
+
if (data.stats?.totalStored)
|
|
180
|
+
this.totalStored = data.stats.totalStored;
|
|
181
|
+
if (data.stats?.totalReplayed)
|
|
182
|
+
this.totalReplayed = data.stats.totalReplayed;
|
|
183
|
+
this.nextId = Math.max(0, ...this.buffer.map(e => e.id)) + 1;
|
|
184
|
+
}
|
|
185
|
+
// ============================================================================
|
|
186
|
+
// Private Helpers
|
|
187
|
+
// ============================================================================
|
|
188
|
+
computeInitialPriority(surprise, outcome) {
|
|
189
|
+
// Priority based on surprise + outcome bonus
|
|
190
|
+
// Negative outcomes get extra priority (learn from mistakes)
|
|
191
|
+
let priority = surprise;
|
|
192
|
+
if (outcome === 'negative')
|
|
193
|
+
priority *= 1.5;
|
|
194
|
+
if (outcome === 'positive' && surprise > this.config.surpriseThreshold)
|
|
195
|
+
priority *= 1.2;
|
|
196
|
+
return Math.max(0.01, priority);
|
|
197
|
+
}
|
|
198
|
+
evict() {
|
|
199
|
+
// Remove the lowest-priority, most-replayed experience
|
|
200
|
+
let worstIdx = 0;
|
|
201
|
+
let worstScore = Infinity;
|
|
202
|
+
for (let i = 0; i < this.buffer.length; i++) {
|
|
203
|
+
// Score: lower is worse (more evictable)
|
|
204
|
+
// Low priority + high replay count = safe to evict
|
|
205
|
+
const score = this.buffer[i].priority / (1 + this.buffer[i].replayCount);
|
|
206
|
+
if (score < worstScore) {
|
|
207
|
+
worstScore = score;
|
|
208
|
+
worstIdx = i;
|
|
209
|
+
}
|
|
210
|
+
}
|
|
211
|
+
this.buffer.splice(worstIdx, 1);
|
|
212
|
+
}
|
|
213
|
+
}
|
|
214
|
+
exports.ExperienceReplayBuffer = ExperienceReplayBuffer;
|
|
215
|
+
// ============================================================================
|
|
216
|
+
// Factory
|
|
217
|
+
// ============================================================================
|
|
218
|
+
let replayInstance = null;
|
|
219
|
+
function createExperienceReplayBuffer(config) {
|
|
220
|
+
return new ExperienceReplayBuffer(config);
|
|
221
|
+
}
|
|
222
|
+
function getExperienceReplayBuffer(config) {
|
|
223
|
+
if (!replayInstance) {
|
|
224
|
+
replayInstance = createExperienceReplayBuffer(config);
|
|
225
|
+
}
|
|
226
|
+
return replayInstance;
|
|
227
|
+
}
|
|
@@ -30,4 +30,5 @@ export { AutonomousLoop, createAutonomousLoop, getAutonomousLoop, resetAutonomou
|
|
|
30
30
|
export { integrateActiveInference, createIntegratedSystem, createKernelObservationBridge, registerKernelActions, registerDaemonTask, createMCPObservationBridge, createMCPInferenceLoop, type IntegrationConfig, type IntegratedSystem, type MCPObservationConfig, } from './integration.js';
|
|
31
31
|
export { ValueAugmentedEngine, createValueAugmentedEngine, createFullyIntegratedEngine, createValueIntegratedLoop, type ValueIntegrationConfig, type ValueIntegrationEvent, type ValueIntegratedLoopConfig, DEFAULT_VALUE_INTEGRATION_CONFIG, } from './value-integration.js';
|
|
32
32
|
export { integrateMemory, getMemoryMetrics, getWorkspaceState, type MemoryIntegrationConfig, DEFAULT_MEMORY_INTEGRATION_CONFIG, } from './memory-integration.js';
|
|
33
|
+
export { ExperienceReplayBuffer, createExperienceReplayBuffer, getExperienceReplayBuffer, type Experience, type ReplayBatch, type ReplayStats, type ReplayConfig, } from './experience-replay.js';
|
|
33
34
|
export { EconomicIntegration, getEconomicIntegration, recordLLMCost, recordRevenue, CostTracker, RevenueTracker, ServiceRegistry, type EconomicObservation, type CostRecord, type RevenueRecord, type ServiceDefinition, type EconomicGoal, } from './economic-integration.js';
|
|
@@ -38,7 +38,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
|
38
38
|
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
39
39
|
};
|
|
40
40
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
41
|
-
exports.ServiceRegistry = exports.RevenueTracker = exports.CostTracker = exports.recordRevenue = exports.recordLLMCost = exports.getEconomicIntegration = exports.EconomicIntegration = exports.DEFAULT_MEMORY_INTEGRATION_CONFIG = exports.getWorkspaceState = exports.getMemoryMetrics = exports.integrateMemory = exports.DEFAULT_VALUE_INTEGRATION_CONFIG = exports.createValueIntegratedLoop = exports.createFullyIntegratedEngine = exports.createValueAugmentedEngine = exports.ValueAugmentedEngine = exports.createMCPInferenceLoop = exports.createMCPObservationBridge = exports.registerDaemonTask = exports.registerKernelActions = exports.createKernelObservationBridge = exports.createIntegratedSystem = exports.integrateActiveInference = exports.DEFAULT_LOOP_CONFIG = exports.resetAutonomousLoop = exports.getAutonomousLoop = exports.createAutonomousLoop = exports.AutonomousLoop = exports.registerAction = exports.executeAction = exports.getActionExecutorManager = exports.createActionExecutorManager = exports.ActionExecutorManager = exports.getObservationGatherer = exports.createObservationGatherer = exports.ObservationGatherer = exports.createActiveInferenceEngine = exports.ActiveInferenceEngine = void 0;
|
|
41
|
+
exports.ServiceRegistry = exports.RevenueTracker = exports.CostTracker = exports.recordRevenue = exports.recordLLMCost = exports.getEconomicIntegration = exports.EconomicIntegration = exports.getExperienceReplayBuffer = exports.createExperienceReplayBuffer = exports.ExperienceReplayBuffer = exports.DEFAULT_MEMORY_INTEGRATION_CONFIG = exports.getWorkspaceState = exports.getMemoryMetrics = exports.integrateMemory = exports.DEFAULT_VALUE_INTEGRATION_CONFIG = exports.createValueIntegratedLoop = exports.createFullyIntegratedEngine = exports.createValueAugmentedEngine = exports.ValueAugmentedEngine = exports.createMCPInferenceLoop = exports.createMCPObservationBridge = exports.registerDaemonTask = exports.registerKernelActions = exports.createKernelObservationBridge = exports.createIntegratedSystem = exports.integrateActiveInference = exports.DEFAULT_LOOP_CONFIG = exports.resetAutonomousLoop = exports.getAutonomousLoop = exports.createAutonomousLoop = exports.AutonomousLoop = exports.registerAction = exports.executeAction = exports.getActionExecutorManager = exports.createActionExecutorManager = exports.ActionExecutorManager = exports.getObservationGatherer = exports.createObservationGatherer = exports.ObservationGatherer = exports.createActiveInferenceEngine = exports.ActiveInferenceEngine = void 0;
|
|
42
42
|
// Export types
|
|
43
43
|
__exportStar(require("./types.js"), exports);
|
|
44
44
|
// Export core components
|
|
@@ -86,6 +86,11 @@ Object.defineProperty(exports, "integrateMemory", { enumerable: true, get: funct
|
|
|
86
86
|
Object.defineProperty(exports, "getMemoryMetrics", { enumerable: true, get: function () { return memory_integration_js_1.getMemoryMetrics; } });
|
|
87
87
|
Object.defineProperty(exports, "getWorkspaceState", { enumerable: true, get: function () { return memory_integration_js_1.getWorkspaceState; } });
|
|
88
88
|
Object.defineProperty(exports, "DEFAULT_MEMORY_INTEGRATION_CONFIG", { enumerable: true, get: function () { return memory_integration_js_1.DEFAULT_MEMORY_INTEGRATION_CONFIG; } });
|
|
89
|
+
// Export Experience Replay Buffer (Genesis 11.0)
|
|
90
|
+
var experience_replay_js_1 = require("./experience-replay.js");
|
|
91
|
+
Object.defineProperty(exports, "ExperienceReplayBuffer", { enumerable: true, get: function () { return experience_replay_js_1.ExperienceReplayBuffer; } });
|
|
92
|
+
Object.defineProperty(exports, "createExperienceReplayBuffer", { enumerable: true, get: function () { return experience_replay_js_1.createExperienceReplayBuffer; } });
|
|
93
|
+
Object.defineProperty(exports, "getExperienceReplayBuffer", { enumerable: true, get: function () { return experience_replay_js_1.getExperienceReplayBuffer; } });
|
|
89
94
|
// Export Economic Integration (Genesis 9.3 - Autopoietic Self-Funding)
|
|
90
95
|
var economic_integration_js_1 = require("./economic-integration.js");
|
|
91
96
|
Object.defineProperty(exports, "EconomicIntegration", { enumerable: true, get: function () { return economic_integration_js_1.EconomicIntegration; } });
|
|
@@ -37,6 +37,8 @@ export declare class ObservationGatherer {
|
|
|
37
37
|
private mcpToolResults;
|
|
38
38
|
private lastStripeBalance;
|
|
39
39
|
private realSourcesInitialized;
|
|
40
|
+
private precisionHistory;
|
|
41
|
+
private readonly PRECISION_WINDOW;
|
|
40
42
|
/**
|
|
41
43
|
* Configure observation sources
|
|
42
44
|
*/
|
|
@@ -66,6 +68,24 @@ export declare class ObservationGatherer {
|
|
|
66
68
|
* v10.8: Now uses real MCP data when available
|
|
67
69
|
*/
|
|
68
70
|
gather(): Promise<Observation>;
|
|
71
|
+
/**
|
|
72
|
+
* v11.0: Compute precision weights for each observation modality.
|
|
73
|
+
* Precision = how much to trust this observation channel.
|
|
74
|
+
*
|
|
75
|
+
* Based on:
|
|
76
|
+
* - Stability: low variance in recent observations → high precision
|
|
77
|
+
* - Reliability: sensor success rate → high precision
|
|
78
|
+
* - Availability: channel responding → high precision
|
|
79
|
+
*/
|
|
80
|
+
private computePrecision;
|
|
81
|
+
/**
|
|
82
|
+
* Compute precision for a single channel from its history.
|
|
83
|
+
*
|
|
84
|
+
* For continuous channels: precision = 1 / (1 + variance)
|
|
85
|
+
* For binary channels: precision = success_rate
|
|
86
|
+
* Minimum precision = 0.1 (never fully ignore a channel)
|
|
87
|
+
*/
|
|
88
|
+
private channelPrecision;
|
|
69
89
|
/**
|
|
70
90
|
* Create observation from raw values (for testing)
|
|
71
91
|
*/
|
|
@@ -31,6 +31,9 @@ class ObservationGatherer {
|
|
|
31
31
|
mcpToolResults = [];
|
|
32
32
|
lastStripeBalance = -1; // -1 = never checked
|
|
33
33
|
realSourcesInitialized = false;
|
|
34
|
+
// v11.0: Precision tracking per modality
|
|
35
|
+
precisionHistory = { energy: [], phi: [], tool: [], coherence: [], task: [], economic: [] };
|
|
36
|
+
PRECISION_WINDOW = 20; // Track last N observations for precision
|
|
34
37
|
/**
|
|
35
38
|
* Configure observation sources
|
|
36
39
|
*/
|
|
@@ -190,7 +193,7 @@ class ObservationGatherer {
|
|
|
190
193
|
}
|
|
191
194
|
}
|
|
192
195
|
// Map to discrete observations
|
|
193
|
-
|
|
196
|
+
const obs = {
|
|
194
197
|
energy: this.mapEnergy(kernelState.energy),
|
|
195
198
|
phi: this.mapPhi(phiState),
|
|
196
199
|
tool: this.mapTool(sensorResult),
|
|
@@ -198,6 +201,63 @@ class ObservationGatherer {
|
|
|
198
201
|
task: this.mapTask(kernelState.taskStatus),
|
|
199
202
|
economic: economicObs,
|
|
200
203
|
};
|
|
204
|
+
// v11.0: Compute precision weights from reliability history
|
|
205
|
+
obs.precision = this.computePrecision(kernelState, phiState, sensorResult, worldModelState);
|
|
206
|
+
return obs;
|
|
207
|
+
}
|
|
208
|
+
/**
|
|
209
|
+
* v11.0: Compute precision weights for each observation modality.
|
|
210
|
+
* Precision = how much to trust this observation channel.
|
|
211
|
+
*
|
|
212
|
+
* Based on:
|
|
213
|
+
* - Stability: low variance in recent observations → high precision
|
|
214
|
+
* - Reliability: sensor success rate → high precision
|
|
215
|
+
* - Availability: channel responding → high precision
|
|
216
|
+
*/
|
|
217
|
+
computePrecision(kernel, phi, sensor, world) {
|
|
218
|
+
// Track observation reliability
|
|
219
|
+
this.precisionHistory.energy.push(kernel.energy);
|
|
220
|
+
this.precisionHistory.phi.push(phi.phi);
|
|
221
|
+
this.precisionHistory.tool.push(sensor.success ? 1 : 0);
|
|
222
|
+
this.precisionHistory.coherence.push(world.consistent ? 1 : 0);
|
|
223
|
+
this.precisionHistory.task.push(kernel.taskStatus === 'running' ? 1 : 0.5);
|
|
224
|
+
this.precisionHistory.economic.push(this.lastStripeBalance >= 0 ? 1 : 0.3);
|
|
225
|
+
// Trim to window size
|
|
226
|
+
for (const key of Object.keys(this.precisionHistory)) {
|
|
227
|
+
if (this.precisionHistory[key].length > this.PRECISION_WINDOW) {
|
|
228
|
+
this.precisionHistory[key] = this.precisionHistory[key].slice(-this.PRECISION_WINDOW);
|
|
229
|
+
}
|
|
230
|
+
}
|
|
231
|
+
return {
|
|
232
|
+
energy: this.channelPrecision(this.precisionHistory.energy, 'continuous'),
|
|
233
|
+
phi: this.channelPrecision(this.precisionHistory.phi, 'continuous'),
|
|
234
|
+
tool: this.channelPrecision(this.precisionHistory.tool, 'binary'),
|
|
235
|
+
coherence: this.channelPrecision(this.precisionHistory.coherence, 'binary'),
|
|
236
|
+
task: this.channelPrecision(this.precisionHistory.task, 'continuous'),
|
|
237
|
+
economic: this.channelPrecision(this.precisionHistory.economic, 'binary'),
|
|
238
|
+
};
|
|
239
|
+
}
|
|
240
|
+
/**
|
|
241
|
+
* Compute precision for a single channel from its history.
|
|
242
|
+
*
|
|
243
|
+
* For continuous channels: precision = 1 / (1 + variance)
|
|
244
|
+
* For binary channels: precision = success_rate
|
|
245
|
+
* Minimum precision = 0.1 (never fully ignore a channel)
|
|
246
|
+
*/
|
|
247
|
+
channelPrecision(history, type) {
|
|
248
|
+
if (history.length < 3)
|
|
249
|
+
return 0.5; // Insufficient data, moderate precision
|
|
250
|
+
if (type === 'binary') {
|
|
251
|
+
// Binary: precision = success rate (clamped)
|
|
252
|
+
const successRate = history.reduce((s, v) => s + v, 0) / history.length;
|
|
253
|
+
return Math.max(0.1, Math.min(1.0, successRate));
|
|
254
|
+
}
|
|
255
|
+
// Continuous: precision = 1 / (1 + normalized_variance)
|
|
256
|
+
const mean = history.reduce((s, v) => s + v, 0) / history.length;
|
|
257
|
+
const variance = history.reduce((s, v) => s + (v - mean) ** 2, 0) / history.length;
|
|
258
|
+
// Normalize variance by expected range [0, 1]
|
|
259
|
+
const normalizedVar = Math.min(variance, 1.0);
|
|
260
|
+
return Math.max(0.1, 1.0 / (1.0 + 4.0 * normalizedVar));
|
|
201
261
|
}
|
|
202
262
|
/**
|
|
203
263
|
* Create observation from raw values (for testing)
|
|
@@ -55,6 +55,27 @@ export interface Observation {
|
|
|
55
55
|
coherence: CoherenceObs;
|
|
56
56
|
task: TaskObs;
|
|
57
57
|
economic?: EconomicObs;
|
|
58
|
+
precision?: ObservationPrecision;
|
|
59
|
+
}
|
|
60
|
+
/**
 * v11.0: Precision-weighted observations (Friston 2025, UAV Deep AIF papers)
 *
 * Each modality has a precision weight that modulates its influence on belief updates.
 * Precision is learned from observation reliability:
 * - High precision (→1): sensor is reliable, trust its signal
 * - Low precision (→0): sensor is unreliable, downweight in inference
 *
 * This implements the precision-weighting scheme from:
 * - "Deep Active Inference for Long-Horizon Tasks" (2025)
 * - "Precision-weighted message passing in AIF" (Parr & Friston 2019)
 *
 * All weights are produced by ObservationGatherer.channelPrecision and are
 * clamped to [0.1, 1] — a channel is never fully ignored.
 */
export interface ObservationPrecision {
    /** Kernel energy channel (continuous: 1/(1+4·variance)-based). */
    energy: number;
    /** Phi (integration) channel (continuous: variance-based). */
    phi: number;
    /** Tool-execution channel (binary: sensor success rate). */
    tool: number;
    /** World-model coherence channel (binary: consistency rate). */
    coherence: number;
    /** Task-status channel (continuous: variance-based). */
    task: number;
    /** Economic channel (binary: Stripe-balance availability rate). */
    economic: number;
}
|
|
59
80
|
export declare const OBSERVATION_DIMS: {
|
|
60
81
|
readonly energy: 5;
|
package/dist/src/index.js
CHANGED
|
@@ -876,7 +876,9 @@ ${c('Examples:', 'cyan')}
|
|
|
876
876
|
return;
|
|
877
877
|
}
|
|
878
878
|
// Standard mode (no Kernel/Daemon integration)
|
|
879
|
-
|
|
879
|
+
// v11.0: Value-JEPA augmented loop is now the default (adds trajectory planning)
|
|
880
|
+
const noValue = options['no-value'] === 'true';
|
|
881
|
+
const loopConfig = {
|
|
880
882
|
cycleInterval: interval,
|
|
881
883
|
maxCycles: 0,
|
|
882
884
|
verbose,
|
|
@@ -884,7 +886,12 @@ ${c('Examples:', 'cyan')}
|
|
|
884
886
|
stopOnEnergyCritical: !noEnergyStop,
|
|
885
887
|
loadOnStart: true,
|
|
886
888
|
persistEveryN: 10,
|
|
887
|
-
|
|
889
|
+
replayEveryN: 5,
|
|
890
|
+
dreamEveryN: 50,
|
|
891
|
+
};
|
|
892
|
+
const loop = noValue
|
|
893
|
+
? (0, index_js_4.createAutonomousLoop)(loopConfig)
|
|
894
|
+
: (0, index_js_4.createValueIntegratedLoop)(loopConfig).loop;
|
|
888
895
|
// Handle subcommands
|
|
889
896
|
if (subcommand === 'beliefs') {
|
|
890
897
|
const state = loop.getMostLikelyState();
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Genesis v11.0 - Competitive Intelligence Service
|
|
3
|
+
*
|
|
4
|
+
* First revenue-generating service. Monitors competitor websites,
|
|
5
|
+
* detects changes, analyzes their significance, and produces
|
|
6
|
+
* actionable intelligence digests.
|
|
7
|
+
*
|
|
8
|
+
* Differentiator vs Visualping/Crayon/Klue:
|
|
9
|
+
* - Not just "page changed" but "WHY it changed and what it means"
|
|
10
|
+
* - Builds temporal knowledge graph of competitor moves
|
|
11
|
+
* - Uses Active Inference to prioritize which competitors to monitor
|
|
12
|
+
* - Learns what changes matter to the user over time
|
|
13
|
+
*
|
|
14
|
+
* MCP servers used: brave-search, firecrawl, memory, openai/anthropic
|
|
15
|
+
*
|
|
16
|
+
* Revenue model: $49-199/month per monitored set
|
|
17
|
+
*/
|
|
18
|
+
/** A monitored competitor: identity, watched pages, and accumulated change history. */
export interface Competitor {
    name: string;
    /** Primary domain; also the key under which the competitor is tracked. */
    domain: string;
    pages: CompetitorPage[];
    /** Epoch ms of the last completed check, if any. */
    lastChecked?: number;
    changeHistory: ChangeEvent[];
}
/** One watched URL on a competitor site, with its last-seen snapshot. */
export interface CompetitorPage {
    url: string;
    /** Category of the page; drives how changes are interpreted. */
    type: 'pricing' | 'changelog' | 'blog' | 'jobs' | 'features' | 'landing' | 'docs';
    /** Full content from the previous scrape (used for diffing). */
    lastContent?: string;
    /** Hash of lastContent for cheap equality checks. */
    lastHash?: string;
    lastChecked?: number;
}
/** A single detected change on a competitor page, with LLM analysis attached. */
export interface ChangeEvent {
    /** Epoch ms when the change was detected (not when it occurred). */
    timestamp: number;
    pageUrl: string;
    pageType: string;
    changeType: 'added' | 'removed' | 'modified' | 'major_rewrite';
    /** Mechanical diff summary (lines added/removed, % changed). */
    summary: string;
    /** Business significance as judged by the LLM analyst. */
    significance: 'low' | 'medium' | 'high' | 'critical';
    /** LLM-written explanation of why the change matters. */
    analysis: string;
    rawDiff?: string;
}
/** A periodic intelligence report aggregating changes across all competitors. */
export interface IntelDigest {
    /** Epoch ms when the digest was generated. */
    generated: number;
    /** Time window the digest covers (epoch ms). */
    period: {
        from: number;
        to: number;
    };
    competitors: Array<{
        name: string;
        changes: ChangeEvent[];
        /** Human-readable activity label (e.g. "High activity"). */
        trend: string;
    }>;
    keyInsights: string[];
    recommendations: string[];
}
/** Service configuration; see DEFAULT_INTEL_CONFIG for defaults. */
export interface CompetitiveIntelConfig {
    competitors: Array<{
        name: string;
        domain: string;
        /** Explicit page URLs; when omitted, common pages are inferred from the domain. */
        pages?: string[];
    }>;
    /** Interval between automatic checks, in milliseconds. */
    checkIntervalMs: number;
    /** Interval between digests, in milliseconds. */
    digestIntervalMs: number;
    /** LLM model name used for change analysis and insight generation. */
    llmModel: string;
    /** Upper bound on watched pages per competitor. */
    maxPagesPerCompetitor: number;
}
export declare const DEFAULT_INTEL_CONFIG: CompetitiveIntelConfig;
|
|
68
|
+
export declare class CompetitiveIntelService {
    /** Effective configuration (defaults merged with constructor overrides). */
    private config;
    /** Tracked competitors, keyed by domain. */
    private competitors;
    /** True while periodic monitoring is active. */
    private running;
    /** Most recently generated digest, if any. */
    private lastDigest?;
    /** Handle for the periodic check timer (set while running). */
    private checkTimer?;
    constructor(config?: Partial<CompetitiveIntelConfig>);
    /**
     * Start monitoring competitors.
     */
    start(): void;
    /**
     * Stop monitoring.
     */
    stop(): void;
    /**
     * Add a competitor to monitor.
     */
    addCompetitor(name: string, domain: string, pages?: string[]): void;
    /**
     * Force check all competitors now.
     */
    checkAll(): Promise<ChangeEvent[]>;
    /**
     * Generate a digest of recent changes.
     */
    generateDigest(periodHours?: number): Promise<IntelDigest>;
    /**
     * Get all tracked competitors.
     */
    getCompetitors(): Competitor[];
    /**
     * Get recent changes across all competitors.
     */
    getRecentChanges(hours?: number): ChangeEvent[];
    /**
     * Check a single competitor for changes.
     */
    private checkCompetitor;
    /**
     * Call LLM with MCP fallback to direct HTTP.
     */
    private callLLM;
    /**
     * Analyze a detected change using LLM.
     */
    private analyzeChange;
    /**
     * Generate strategic insights from recent changes.
     */
    private generateInsights;
    /** Register competitors listed in the constructor config. */
    private initCompetitors;
    /** Build the page list for a domain (explicit list or common-path inference). */
    private inferPages;
    /** Guess a page's category from keywords in its URL. */
    private inferPageType;
    /** Line-based diff between two content snapshots. */
    private computeDiff;
    /** Summarize a change list into an activity-level label. */
    private analyzeTrend;
    /** Cheap 32-bit string hash used for snapshot fingerprints. */
    private simpleHash;
}
/** Create a fresh, independent service instance. */
export declare function createCompetitiveIntelService(config?: Partial<CompetitiveIntelConfig>): CompetitiveIntelService;
/** Return the shared singleton instance, creating it on first use. */
export declare function getCompetitiveIntelService(config?: Partial<CompetitiveIntelConfig>): CompetitiveIntelService;
/**
 * Action handler for Active Inference integration.
 * Called when the AIF loop selects opportunity.scan with competitive-intel context.
 */
export declare function runCompetitiveIntelScan(config: {
    competitors: Array<{
        name: string;
        domain: string;
    }>;
}): Promise<{
    changes: ChangeEvent[];
    digest?: IntelDigest;
}>;
|
|
@@ -0,0 +1,414 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Genesis v11.0 - Competitive Intelligence Service
|
|
4
|
+
*
|
|
5
|
+
* First revenue-generating service. Monitors competitor websites,
|
|
6
|
+
* detects changes, analyzes their significance, and produces
|
|
7
|
+
* actionable intelligence digests.
|
|
8
|
+
*
|
|
9
|
+
* Differentiator vs Visualping/Crayon/Klue:
|
|
10
|
+
* - Not just "page changed" but "WHY it changed and what it means"
|
|
11
|
+
* - Builds temporal knowledge graph of competitor moves
|
|
12
|
+
* - Uses Active Inference to prioritize which competitors to monitor
|
|
13
|
+
* - Learns what changes matter to the user over time
|
|
14
|
+
*
|
|
15
|
+
* MCP servers used: brave-search, firecrawl, memory, openai/anthropic
|
|
16
|
+
*
|
|
17
|
+
* Revenue model: $49-199/month per monitored set
|
|
18
|
+
*/
|
|
19
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
20
|
+
exports.CompetitiveIntelService = exports.DEFAULT_INTEL_CONFIG = void 0;
|
|
21
|
+
exports.createCompetitiveIntelService = createCompetitiveIntelService;
|
|
22
|
+
exports.getCompetitiveIntelService = getCompetitiveIntelService;
|
|
23
|
+
exports.runCompetitiveIntelScan = runCompetitiveIntelScan;
|
|
24
|
+
const index_js_1 = require("../mcp/index.js");
|
|
25
|
+
// Default configuration: no competitors pre-registered, checks every 6 hours,
// daily digests, a low-cost LLM model, and at most 6 watched pages per competitor.
exports.DEFAULT_INTEL_CONFIG = {
    competitors: [],
    checkIntervalMs: 6 * 60 * 60 * 1000, // 6 hours
    digestIntervalMs: 24 * 60 * 60 * 1000, // Daily
    llmModel: 'gpt-4o-mini',
    maxPagesPerCompetitor: 6,
};
|
|
32
|
+
// ============================================================================
|
|
33
|
+
// Competitive Intelligence Engine
|
|
34
|
+
// ============================================================================
|
|
35
|
+
class CompetitiveIntelService {
    /** Effective configuration (defaults merged with constructor overrides). */
    config;
    /** Tracked competitors, keyed by domain. */
    competitors = new Map();
    /** True while periodic monitoring is active. */
    running = false;
    /** Most recently generated digest, if any. */
    lastDigest;
    /** Handle for the periodic check timer (set while running). */
    checkTimer;
    constructor(config = {}) {
        this.config = { ...exports.DEFAULT_INTEL_CONFIG, ...config };
        this.initCompetitors();
    }
    // ============================================================================
    // Public API
    // ============================================================================
    /**
     * Start monitoring competitors.
     * Runs one check immediately, then repeats every checkIntervalMs.
     * Idempotent while already running.
     */
    start() {
        if (this.running)
            return;
        this.running = true;
        // Run first check immediately (fire-and-forget; failures are logged, not thrown)
        this.checkAll().catch(e => console.error('[CompIntel] Initial check failed:', e));
        // Schedule periodic checks
        this.checkTimer = setInterval(() => {
            this.checkAll().catch(e => console.error('[CompIntel] Check failed:', e));
        }, this.config.checkIntervalMs);
    }
    /**
     * Stop monitoring and clear the periodic timer.
     */
    stop() {
        this.running = false;
        if (this.checkTimer) {
            clearInterval(this.checkTimer);
            this.checkTimer = undefined;
        }
    }
    /**
     * Add a competitor to monitor.
     * When no explicit pages are given, common pages (pricing, changelog, ...)
     * are inferred from the domain. The page list is capped at
     * config.maxPagesPerCompetitor (previously declared but never enforced).
     */
    addCompetitor(name, domain, pages) {
        const competitor = {
            name,
            domain,
            // FIX: enforce the documented maxPagesPerCompetitor limit
            pages: this.inferPages(domain, pages).slice(0, this.config.maxPagesPerCompetitor),
            changeHistory: [],
        };
        this.competitors.set(domain, competitor);
    }
    /**
     * Force check all competitors now.
     * Competitors are checked sequentially (not in parallel) to avoid
     * hammering the scraping backend. Returns all newly detected changes.
     */
    async checkAll() {
        const allChanges = [];
        for (const [, competitor] of this.competitors) {
            const changes = await this.checkCompetitor(competitor);
            allChanges.push(...changes);
        }
        return allChanges;
    }
    /**
     * Generate a digest of recent changes over the given period (in hours).
     * Only competitors with at least one change in the period are included.
     */
    async generateDigest(periodHours = 24) {
        const now = Date.now();
        const periodStart = now - periodHours * 60 * 60 * 1000;
        const competitorDigests = [];
        for (const [, competitor] of this.competitors) {
            const recentChanges = competitor.changeHistory.filter(c => c.timestamp > periodStart);
            if (recentChanges.length > 0) {
                competitorDigests.push({
                    name: competitor.name,
                    changes: recentChanges,
                    trend: this.analyzeTrend(recentChanges),
                });
            }
        }
        // Generate key insights using LLM
        const insights = await this.generateInsights(competitorDigests);
        const digest = {
            generated: now,
            period: { from: periodStart, to: now },
            competitors: competitorDigests,
            keyInsights: insights.insights,
            recommendations: insights.recommendations,
        };
        this.lastDigest = digest;
        return digest;
    }
    /**
     * Get all tracked competitors.
     */
    getCompetitors() {
        return [...this.competitors.values()];
    }
    /**
     * Get recent changes across all competitors, newest first.
     */
    getRecentChanges(hours = 24) {
        const cutoff = Date.now() - hours * 60 * 60 * 1000;
        const all = [];
        for (const [, comp] of this.competitors) {
            all.push(...comp.changeHistory.filter(c => c.timestamp > cutoff));
        }
        return all.sort((a, b) => b.timestamp - a.timestamp);
    }
    // ============================================================================
    // Core Logic
    // ============================================================================
    /**
     * Check a single competitor for changes.
     * Scrapes each watched page, diffs it against the stored snapshot, and
     * asks the LLM to assess any detected change. A page that was previously
     * reachable but now fails to load is itself reported as a 'removed' change.
     */
    async checkCompetitor(competitor) {
        const changes = [];
        const mcp = (0, index_js_1.getMCPClient)();
        for (const page of competitor.pages) {
            try {
                // Scrape current content via Firecrawl
                const result = await mcp.call('firecrawl', 'firecrawl_scrape', {
                    url: page.url,
                    formats: ['markdown'],
                    onlyMainContent: true,
                });
                // Tolerate different MCP result envelopes; fall back to raw JSON.
                const currentContent = result?.data?.markdown
                    || result?.markdown
                    || JSON.stringify(result).slice(0, 5000);
                // Compare with previous content
                if (page.lastContent) {
                    const diff = this.computeDiff(page.lastContent, currentContent);
                    if (diff.changed) {
                        // Analyze the change with LLM
                        const analysis = await this.analyzeChange(competitor.name, page, diff.summary, page.lastContent, currentContent);
                        const event = {
                            timestamp: Date.now(),
                            pageUrl: page.url,
                            pageType: page.type,
                            changeType: diff.changeType,
                            summary: diff.summary,
                            significance: analysis.significance,
                            analysis: analysis.analysis,
                        };
                        changes.push(event);
                        competitor.changeHistory.push(event);
                    }
                }
                // Update stored content
                page.lastContent = currentContent;
                page.lastHash = this.simpleHash(currentContent);
                page.lastChecked = Date.now();
            }
            catch (e) {
                // Page failed to load — note but don't crash
                if (page.lastContent) {
                    // Page was previously accessible, now failing = potential change
                    changes.push({
                        timestamp: Date.now(),
                        pageUrl: page.url,
                        pageType: page.type,
                        changeType: 'removed',
                        summary: `Page no longer accessible: ${e.message}`,
                        significance: 'medium',
                        analysis: 'Page may have been removed or restructured.',
                    });
                }
            }
        }
        competitor.lastChecked = Date.now();
        return changes;
    }
    /**
     * Call LLM with MCP fallback to direct HTTP.
     * Returns the model's message content, or '{}' when no provider is
     * available or the request fails (callers parse JSON defensively).
     */
    async callLLM(messages, maxTokens = 200) {
        // Try MCP first
        try {
            const mcp = (0, index_js_1.getMCPClient)();
            const result = await mcp.call('openai', 'openai_chat', {
                model: this.config.llmModel,
                messages,
                temperature: 0.3,
                max_tokens: maxTokens,
            });
            const r = result;
            const content = r?.data?.choices?.[0]?.message?.content || r?.choices?.[0]?.message?.content;
            if (content)
                return content;
        }
        catch { /* fall through to direct */ }
        // Fallback: direct OpenAI HTTP call
        const apiKey = process.env.OPENAI_API_KEY;
        if (!apiKey)
            return '{}';
        const resp = await fetch('https://api.openai.com/v1/chat/completions', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${apiKey}`,
            },
            body: JSON.stringify({
                model: this.config.llmModel,
                messages,
                temperature: 0.3,
                max_tokens: maxTokens,
            }),
        });
        if (!resp.ok)
            return '{}';
        const data = await resp.json();
        return data?.choices?.[0]?.message?.content || '{}';
    }
    /**
     * Analyze a detected change using LLM.
     * Returns a moderate default ({significance: 'medium'}) when the LLM
     * is unavailable or returns unparseable output.
     */
    async analyzeChange(competitorName, page, diffSummary, oldContent, newContent) {
        try {
            const content = await this.callLLM([
                {
                    role: 'system',
                    content: `You are a competitive intelligence analyst. Analyze changes on competitor websites and determine their business significance. Respond with JSON: {"significance": "low|medium|high|critical", "analysis": "brief analysis"}`
                },
                {
                    role: 'user',
                    content: `Competitor: ${competitorName}\nPage type: ${page.type}\nURL: ${page.url}\n\nChange detected: ${diffSummary}\n\nOld content (first 1000 chars): ${oldContent.slice(0, 1000)}\n\nNew content (first 1000 chars): ${newContent.slice(0, 1000)}\n\nWhat is the business significance of this change?`
                }
            ]);
            // Extract the first {...} span; model output may include extra prose.
            const parsed = JSON.parse(content.match(/\{[\s\S]*\}/)?.[0] || '{}');
            return {
                significance: parsed.significance || 'medium',
                analysis: parsed.analysis || 'Unable to analyze change.',
            };
        }
        catch {
            return { significance: 'medium', analysis: 'Analysis unavailable (LLM error).' };
        }
    }
    /**
     * Generate strategic insights from recent changes.
     * Degrades gracefully to placeholder text when the LLM is unavailable.
     */
    async generateInsights(competitorData) {
        if (competitorData.length === 0) {
            return { insights: ['No significant changes detected.'], recommendations: [] };
        }
        try {
            const changesSummary = competitorData.map(c => `${c.name}: ${c.changes.map(ch => `[${ch.significance}] ${ch.summary}`).join('; ')}`).join('\n');
            const content = await this.callLLM([
                {
                    role: 'system',
                    content: 'You are a strategic analyst. Given competitor changes, produce key insights and recommendations. Respond with JSON: {"insights": ["..."], "recommendations": ["..."]}'
                },
                {
                    role: 'user',
                    content: `Recent competitor changes:\n${changesSummary}\n\nProvide 2-4 key insights and 1-3 actionable recommendations.`
                }
            ], 500);
            const parsed = JSON.parse(content.match(/\{[\s\S]*\}/)?.[0] || '{}');
            return {
                insights: parsed.insights || ['Analysis unavailable.'],
                recommendations: parsed.recommendations || [],
            };
        }
        catch {
            return { insights: ['Analysis unavailable.'], recommendations: [] };
        }
    }
    // ============================================================================
    // Helpers
    // ============================================================================
    /** Register all competitors listed in the constructor config. */
    initCompetitors() {
        for (const comp of this.config.competitors) {
            this.addCompetitor(comp.name, comp.domain, comp.pages);
        }
    }
    /** Build the page list for a domain: explicit URLs or common-path inference. */
    inferPages(domain, explicitPages) {
        if (explicitPages) {
            return explicitPages.map(url => ({
                url,
                type: this.inferPageType(url),
            }));
        }
        // Auto-infer common pages
        const base = domain.startsWith('http') ? domain : `https://${domain}`;
        return [
            { url: `${base}/pricing`, type: 'pricing' },
            { url: `${base}/changelog`, type: 'changelog' },
            { url: `${base}/blog`, type: 'blog' },
            { url: `${base}/careers`, type: 'jobs' },
            { url: `${base}/features`, type: 'features' },
            { url: base, type: 'landing' },
        ];
    }
    /** Guess a page's category from keywords in its URL. */
    inferPageType(url) {
        const lower = url.toLowerCase();
        if (lower.includes('pric'))
            return 'pricing';
        if (lower.includes('changelog') || lower.includes('release'))
            return 'changelog';
        if (lower.includes('blog') || lower.includes('news'))
            return 'blog';
        if (lower.includes('career') || lower.includes('job'))
            return 'jobs';
        if (lower.includes('feature'))
            return 'features';
        if (lower.includes('doc'))
            return 'docs';
        return 'landing';
    }
    /**
     * Line-based diff between two content snapshots.
     * Classifies the change by the add/remove ratio and overall churn.
     */
    computeDiff(old, current) {
        if (old === current)
            return { changed: false, changeType: 'modified', summary: '' };
        const oldLines = old.split('\n');
        const newLines = current.split('\n');
        // FIX: use Set membership (O(n)) instead of Array.includes inside filter,
        // which was O(n^2) on large pages. Counts are identical to the old code.
        const oldSet = new Set(oldLines);
        const newSet = new Set(newLines);
        const added = newLines.filter(l => !oldSet.has(l)).length;
        const removed = oldLines.filter(l => !newSet.has(l)).length;
        const totalChange = added + removed;
        const totalLines = Math.max(oldLines.length, newLines.length);
        const changeRatio = totalChange / Math.max(totalLines, 1);
        let changeType;
        if (changeRatio > 0.7)
            changeType = 'major_rewrite';
        else if (removed > added * 2)
            changeType = 'removed';
        else if (added > removed * 2)
            changeType = 'added';
        else
            changeType = 'modified';
        return {
            changed: true,
            changeType,
            summary: `${added} lines added, ${removed} lines removed (${(changeRatio * 100).toFixed(0)}% changed)`,
        };
    }
    /** Summarize a change list into an activity-level label for the digest. */
    analyzeTrend(changes) {
        if (changes.length === 0)
            return 'No activity';
        const critical = changes.filter(c => c.significance === 'critical' || c.significance === 'high').length;
        if (critical > 2)
            return 'Major moves detected';
        if (changes.length > 5)
            return 'High activity';
        if (changes.length > 2)
            return 'Moderate activity';
        return 'Low activity';
    }
    /** Cheap deterministic 32-bit string hash (base-36) for snapshot fingerprints. */
    simpleHash(str) {
        let hash = 0;
        for (let i = 0; i < str.length; i++) {
            const char = str.charCodeAt(i);
            hash = ((hash << 5) - hash) + char;
            hash = hash & hash; // Convert to 32bit integer
        }
        return hash.toString(36);
    }
}
|
|
388
|
+
exports.CompetitiveIntelService = CompetitiveIntelService;
|
|
389
|
+
// ============================================================================
|
|
390
|
+
// Factory & Integration
|
|
391
|
+
// ============================================================================
|
|
392
|
+
// Module-level singleton handed out by getCompetitiveIntelService().
let serviceInstance = null;
/** Create a fresh, independent service instance. */
function createCompetitiveIntelService(config) {
    return new CompetitiveIntelService(config);
}
/**
 * Return the shared singleton instance, creating it on first use.
 * Note: `config` only takes effect on the call that creates the instance.
 */
function getCompetitiveIntelService(config) {
    if (serviceInstance === null) {
        serviceInstance = createCompetitiveIntelService(config);
    }
    return serviceInstance;
}
|
|
402
|
+
/**
 * Action handler for Active Inference integration.
 * Called when the AIF loop selects opportunity.scan with competitive-intel context.
 *
 * Builds a throwaway service for the given competitors, checks them once,
 * and produces a 24-hour digest only when something actually changed.
 */
async function runCompetitiveIntelScan(config) {
    const service = createCompetitiveIntelService({ competitors: config.competitors });
    const changes = await service.checkAll();
    const digest = changes.length > 0
        ? await service.generateDigest(24)
        : undefined;
    return { changes, digest };
}
|
package/package.json
CHANGED