genesis-ai-cli 7.24.2 → 8.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/src/active-inference/deep-aif.d.ts +109 -0
  2. package/dist/src/active-inference/deep-aif.js +495 -0
  3. package/dist/src/agents/emergence-monitor.d.ts +152 -0
  4. package/dist/src/agents/emergence-monitor.js +827 -0
  5. package/dist/src/allostasis/index.d.ts +124 -0
  6. package/dist/src/allostasis/index.js +710 -0
  7. package/dist/src/causal/index.d.ts +121 -0
  8. package/dist/src/causal/index.js +775 -0
  9. package/dist/src/embodiment/haptic-feedback.d.ts +311 -0
  10. package/dist/src/embodiment/haptic-feedback.js +1018 -0
  11. package/dist/src/embodiment/robotic-control.d.ts +276 -0
  12. package/dist/src/embodiment/robotic-control.js +893 -0
  13. package/dist/src/governance/self-modification.d.ts +149 -0
  14. package/dist/src/governance/self-modification.js +448 -0
  15. package/dist/src/learning/capability-discovery.d.ts +118 -0
  16. package/dist/src/learning/capability-discovery.js +480 -0
  17. package/dist/src/learning/meta-rl.d.ts +122 -0
  18. package/dist/src/learning/meta-rl.js +434 -0
  19. package/dist/src/mcp-servers/simulation-mcp.d.ts +296 -0
  20. package/dist/src/mcp-servers/simulation-mcp.js +926 -0
  21. package/dist/src/metacognition/index.d.ts +122 -0
  22. package/dist/src/metacognition/index.js +625 -0
  23. package/dist/src/perception/multi-modal.d.ts +120 -0
  24. package/dist/src/perception/multi-modal.js +823 -0
  25. package/dist/src/reasoning/neurosymbolic.d.ts +197 -0
  26. package/dist/src/reasoning/neurosymbolic.js +621 -0
  27. package/dist/src/reasoning/socratic-questioning.d.ts +111 -0
  28. package/dist/src/reasoning/socratic-questioning.js +471 -0
  29. package/dist/src/world-model/contrastive-learning.d.ts +121 -0
  30. package/dist/src/world-model/contrastive-learning.js +468 -0
  31. package/dist/src/world-model/hierarchical-latent.d.ts +85 -0
  32. package/dist/src/world-model/hierarchical-latent.js +402 -0
  33. package/dist/src/world-model/mamba-ssm.d.ts +137 -0
  34. package/dist/src/world-model/mamba-ssm.js +470 -0
  35. package/dist/src/world-model/physics-informed.d.ts +86 -0
  36. package/dist/src/world-model/physics-informed.js +465 -0
  37. package/package.json +1 -1
@@ -0,0 +1,109 @@
1
+ /**
2
+ * Deep Active Inference - Continuous Belief States
3
+ *
4
+ * Based on: "Deep Active Inference for Partially Observable MDPs" (arxiv 2009.03622)
5
+ *
6
+ * Extends the discrete active inference with:
7
+ * - Continuous latent belief states (instead of discrete 5-level)
8
+ * - Neural network encoders/decoders
9
+ * - Learned transition models
10
+ * - Amortized inference for scalability
11
+ */
12
+ import { EventEmitter } from 'events';
13
+ export interface LatentState {
14
+ mean: number[];
15
+ logVar: number[];
16
+ dimension: number;
17
+ }
18
+ export interface DeepAIFConfig {
19
+ latentDim: number;
20
+ hiddenDim: number;
21
+ numLayers: number;
22
+ learningRate: number;
23
+ kldWeight: number;
24
+ freeEnergyHorizon: number;
25
+ temperature: number;
26
+ usePrecisionWeighting: boolean;
27
+ }
28
+ export interface Observation {
29
+ modality: string;
30
+ data: number[];
31
+ precision: number;
32
+ timestamp: number;
33
+ }
34
+ export interface Action {
35
+ type: string;
36
+ parameters: Record<string, any>;
37
+ embedding: number[];
38
+ }
39
+ export interface PolicyEvaluation {
40
+ policy: Action[];
41
+ expectedFreeEnergy: number;
42
+ epistemicValue: number;
43
+ pragmaticValue: number;
44
+ riskValue: number;
45
+ }
46
+ export interface BeliefUpdate {
47
+ prior: LatentState;
48
+ posterior: LatentState;
49
+ predictionError: number;
50
+ freeEnergy: number;
51
+ kldivergence: number;
52
+ }
53
+ export declare class DeepActiveInference extends EventEmitter {
54
+ private config;
55
+ private encoder;
56
+ private decoder;
57
+ private transition;
58
+ private policy;
59
+ private currentBelief;
60
+ private beliefHistory;
61
+ private observationBuffer;
62
+ private actionRegistry;
63
+ private freeEnergyHistory;
64
+ constructor(config?: Partial<DeepAIFConfig>);
65
+ private initializeActionRegistry;
66
+ /**
67
+ * Process new observation and update beliefs
68
+ */
69
+ updateBelief(observation: Observation): BeliefUpdate;
70
+ /**
71
+ * Select action by minimizing Expected Free Energy
72
+ */
73
+ selectAction(): {
74
+ action: Action;
75
+ evaluation: PolicyEvaluation;
76
+ };
77
+ /**
78
+ * Evaluate a policy (sequence of actions)
79
+ */
80
+ evaluatePolicy(policy: Action[]): PolicyEvaluation;
81
+ /**
82
+ * Predict future state given action
83
+ */
84
+ predictFuture(action: Action, steps?: number): LatentState[];
85
+ private observationToVector;
86
+ private combineBelief;
87
+ private klDivergence;
88
+ private reconstructionError;
89
+ private entropy;
90
+ private computeGoalAlignment;
91
+ getCurrentBelief(): LatentState;
92
+ getFreeEnergy(): number;
93
+ getAverageFreeEnergy(window?: number): number;
94
+ registerAction(action: Action): void;
95
+ getBeliefSummary(): Record<string, any>;
96
+ /**
97
+ * Create observation from MCP tool result
98
+ */
99
+ createObservation(modality: string, data: any, precision?: number): Observation;
100
+ /**
101
+ * Full inference cycle: observe → update → select → act
102
+ */
103
+ cycle(observation: Observation): Promise<{
104
+ belief: BeliefUpdate;
105
+ action: Action;
106
+ evaluation: PolicyEvaluation;
107
+ }>;
108
+ }
109
+ export default DeepActiveInference;
@@ -0,0 +1,495 @@
1
+ "use strict";
2
+ /**
3
+ * Deep Active Inference - Continuous Belief States
4
+ *
5
+ * Based on: "Deep Active Inference for Partially Observable MDPs" (arxiv 2009.03622)
6
+ *
7
+ * Extends the discrete active inference with:
8
+ * - Continuous latent belief states (instead of discrete 5-level)
9
+ * - Neural network encoders/decoders
10
+ * - Learned transition models
11
+ * - Amortized inference for scalability
12
+ */
13
+ Object.defineProperty(exports, "__esModule", { value: true });
14
+ exports.DeepActiveInference = void 0;
15
+ const events_1 = require("events");
16
+ // ============================================================================
17
+ // NEURAL NETWORK PRIMITIVES (Pure TypeScript, no external deps)
18
+ // ============================================================================
19
+ /**
20
+ * Simple dense layer with ReLU/Linear activation
21
+ */
22
+ class DenseLayer {
23
+ weights;
24
+ bias;
25
+ activation;
26
+ constructor(inputDim, outputDim, activation = 'relu') {
27
+ this.activation = activation;
28
+ // Xavier initialization
29
+ const scale = Math.sqrt(2.0 / (inputDim + outputDim));
30
+ this.weights = Array(outputDim).fill(0).map(() => Array(inputDim).fill(0).map(() => (Math.random() - 0.5) * 2 * scale));
31
+ this.bias = Array(outputDim).fill(0);
32
+ }
33
+ forward(input) {
34
+ const output = this.weights.map((row, i) => {
35
+ const sum = row.reduce((acc, w, j) => acc + w * input[j], 0) + this.bias[i];
36
+ switch (this.activation) {
37
+ case 'relu': return Math.max(0, sum);
38
+ case 'tanh': return Math.tanh(sum);
39
+ case 'softplus': return Math.log(1 + Math.exp(sum));
40
+ default: return sum;
41
+ }
42
+ });
43
+ return output;
44
+ }
45
+ // Gradient computation for backprop
46
+ backward(input, gradOutput) {
47
+ const preActivation = this.weights.map((row, i) => row.reduce((acc, w, j) => acc + w * input[j], 0) + this.bias[i]);
48
+ // Activation gradient
49
+ const activationGrad = preActivation.map((val, i) => {
50
+ switch (this.activation) {
51
+ case 'relu': return val > 0 ? gradOutput[i] : 0;
52
+ case 'tanh': return gradOutput[i] * (1 - Math.tanh(val) ** 2);
53
+ case 'softplus': return gradOutput[i] * (1 / (1 + Math.exp(-val)));
54
+ default: return gradOutput[i];
55
+ }
56
+ });
57
+ const gradWeights = this.weights.map((row, i) => row.map((_, j) => activationGrad[i] * input[j]));
58
+ const gradBias = activationGrad;
59
+ const gradInput = input.map((_, j) => this.weights.reduce((acc, row, i) => acc + row[j] * activationGrad[i], 0));
60
+ return { gradInput, gradWeights, gradBias };
61
+ }
62
+ updateWeights(gradWeights, gradBias, lr) {
63
+ this.weights = this.weights.map((row, i) => row.map((w, j) => w - lr * gradWeights[i][j]));
64
+ this.bias = this.bias.map((b, i) => b - lr * gradBias[i]);
65
+ }
66
+ }
67
+ /**
68
+ * Multi-layer perceptron
69
+ */
70
+ class MLP {
71
+ layers;
72
+ constructor(dims, hiddenActivation = 'relu', outputActivation = 'linear') {
73
+ this.layers = [];
74
+ for (let i = 0; i < dims.length - 1; i++) {
75
+ const isLast = i === dims.length - 2;
76
+ this.layers.push(new DenseLayer(dims[i], dims[i + 1], isLast ? outputActivation : hiddenActivation));
77
+ }
78
+ }
79
+ forward(input) {
80
+ return this.layers.reduce((x, layer) => layer.forward(x), input);
81
+ }
82
+ }
83
+ // ============================================================================
84
+ // ENCODER: Observation → Latent (Variational)
85
+ // ============================================================================
86
+ class VariationalEncoder {
87
+ encoder;
88
+ meanLayer;
89
+ logVarLayer;
90
+ constructor(inputDim, hiddenDim, latentDim, numLayers) {
91
+ const dims = [inputDim, ...Array(numLayers).fill(hiddenDim)];
92
+ this.encoder = new MLP(dims, 'relu', 'tanh');
93
+ this.meanLayer = new DenseLayer(hiddenDim, latentDim, 'linear');
94
+ this.logVarLayer = new DenseLayer(hiddenDim, latentDim, 'linear');
95
+ }
96
+ encode(observation) {
97
+ const hidden = this.encoder.forward(observation);
98
+ const mean = this.meanLayer.forward(hidden);
99
+ const logVar = this.logVarLayer.forward(hidden);
100
+ return {
101
+ mean,
102
+ logVar,
103
+ dimension: mean.length
104
+ };
105
+ }
106
+ // Reparameterization trick for differentiable sampling
107
+ sample(state) {
108
+ const epsilon = state.mean.map(() => gaussianRandom());
109
+ return state.mean.map((m, i) => m + Math.exp(0.5 * state.logVar[i]) * epsilon[i]);
110
+ }
111
+ }
112
+ // ============================================================================
113
+ // DECODER: Latent → Observation Prediction
114
+ // ============================================================================
115
+ class ObservationDecoder {
116
+ decoder;
117
+ constructor(latentDim, hiddenDim, outputDim, numLayers) {
118
+ const dims = [latentDim, ...Array(numLayers).fill(hiddenDim), outputDim];
119
+ this.decoder = new MLP(dims, 'relu', 'linear');
120
+ }
121
+ decode(latent) {
122
+ return this.decoder.forward(latent);
123
+ }
124
+ }
125
+ // ============================================================================
126
+ // TRANSITION MODEL: Latent × Action → Next Latent
127
+ // ============================================================================
128
+ class TransitionModel {
129
+ model;
130
+ meanLayer;
131
+ logVarLayer;
132
+ constructor(latentDim, actionDim, hiddenDim, numLayers) {
133
+ const inputDim = latentDim + actionDim;
134
+ const dims = [inputDim, ...Array(numLayers).fill(hiddenDim)];
135
+ this.model = new MLP(dims, 'relu', 'tanh');
136
+ this.meanLayer = new DenseLayer(hiddenDim, latentDim, 'linear');
137
+ this.logVarLayer = new DenseLayer(hiddenDim, latentDim, 'softplus'); // Softplus for positive variance
138
+ }
139
+ predict(currentLatent, action) {
140
+ const input = [...currentLatent, ...action];
141
+ const hidden = this.model.forward(input);
142
+ const mean = this.meanLayer.forward(hidden);
143
+ const logVar = this.logVarLayer.forward(hidden).map(v => Math.log(v + 1e-6)); // Convert to logVar
144
+ return {
145
+ mean,
146
+ logVar,
147
+ dimension: mean.length
148
+ };
149
+ }
150
+ }
151
+ // ============================================================================
152
+ // POLICY NETWORK: Latent → Action Distribution
153
+ // ============================================================================
154
+ class PolicyNetwork {
155
+ network;
156
+ actionEmbeddings;
157
+ constructor(latentDim, hiddenDim, numActions, actionDim, numLayers) {
158
+ const dims = [latentDim, ...Array(numLayers).fill(hiddenDim), numActions];
159
+ this.network = new MLP(dims, 'relu', 'linear');
160
+ this.actionEmbeddings = new Map();
161
+ }
162
+ getActionProbabilities(latent, temperature = 1.0) {
163
+ const logits = this.network.forward(latent);
164
+ return softmax(logits.map(l => l / temperature));
165
+ }
166
+ registerAction(actionType, embedding) {
167
+ this.actionEmbeddings.set(actionType, embedding);
168
+ }
169
+ getActionEmbedding(actionType) {
170
+ return this.actionEmbeddings.get(actionType) || Array(16).fill(0);
171
+ }
172
+ }
173
+ // ============================================================================
174
+ // DEEP ACTIVE INFERENCE ENGINE
175
+ // ============================================================================
176
+ class DeepActiveInference extends events_1.EventEmitter {
177
+ config;
178
+ encoder;
179
+ decoder;
180
+ transition;
181
+ policy;
182
+ currentBelief;
183
+ beliefHistory;
184
+ observationBuffer;
185
+ actionRegistry;
186
+ freeEnergyHistory;
187
+ constructor(config = {}) {
188
+ super();
189
+ this.config = {
190
+ latentDim: 64,
191
+ hiddenDim: 256,
192
+ numLayers: 3,
193
+ learningRate: 0.001,
194
+ kldWeight: 0.1,
195
+ freeEnergyHorizon: 5,
196
+ temperature: 1.0,
197
+ usePrecisionWeighting: true,
198
+ ...config
199
+ };
200
+ // Initialize networks
201
+ const obsDim = 128; // Default observation dimension
202
+ const actionDim = 16; // Action embedding dimension
203
+ const numActions = 8; // Number of discrete actions
204
+ this.encoder = new VariationalEncoder(obsDim, this.config.hiddenDim, this.config.latentDim, this.config.numLayers);
205
+ this.decoder = new ObservationDecoder(this.config.latentDim, this.config.hiddenDim, obsDim, this.config.numLayers);
206
+ this.transition = new TransitionModel(this.config.latentDim, actionDim, this.config.hiddenDim, this.config.numLayers);
207
+ this.policy = new PolicyNetwork(this.config.latentDim, this.config.hiddenDim, numActions, actionDim, this.config.numLayers);
208
+ // Initialize belief to prior (zero mean, unit variance)
209
+ this.currentBelief = {
210
+ mean: Array(this.config.latentDim).fill(0),
211
+ logVar: Array(this.config.latentDim).fill(0),
212
+ dimension: this.config.latentDim
213
+ };
214
+ this.beliefHistory = [];
215
+ this.observationBuffer = [];
216
+ this.actionRegistry = new Map();
217
+ this.freeEnergyHistory = [];
218
+ this.initializeActionRegistry();
219
+ }
220
+ initializeActionRegistry() {
221
+ const defaultActions = [
222
+ { type: 'sense.mcp', parameters: {}, embedding: randomVector(16) },
223
+ { type: 'recall.memory', parameters: {}, embedding: randomVector(16) },
224
+ { type: 'plan.goals', parameters: {}, embedding: randomVector(16) },
225
+ { type: 'verify.ethics', parameters: {}, embedding: randomVector(16) },
226
+ { type: 'execute.task', parameters: {}, embedding: randomVector(16) },
227
+ { type: 'execute.code', parameters: {}, embedding: randomVector(16) },
228
+ { type: 'adapt.code', parameters: {}, embedding: randomVector(16) },
229
+ { type: 'self.reflect', parameters: {}, embedding: randomVector(16) }
230
+ ];
231
+ defaultActions.forEach(action => {
232
+ this.actionRegistry.set(action.type, action);
233
+ this.policy.registerAction(action.type, action.embedding);
234
+ });
235
+ }
236
+ // ===========================================================================
237
+ // CORE ACTIVE INFERENCE LOOP
238
+ // ===========================================================================
239
+ /**
240
+ * Process new observation and update beliefs
241
+ */
242
+ updateBelief(observation) {
243
+ // Encode observation to get likelihood distribution
244
+ const obsVector = this.observationToVector(observation);
245
+ const likelihood = this.encoder.encode(obsVector);
246
+ // Precision-weighted belief update
247
+ const precision = this.config.usePrecisionWeighting ? observation.precision : 1.0;
248
+ // Compute posterior using precision-weighted combination
249
+ const posterior = this.combineBelief(this.currentBelief, likelihood, precision);
250
+ // Compute free energy components
251
+ const kld = this.klDivergence(posterior, this.currentBelief);
252
+ const reconstructed = this.decoder.decode(this.encoder.sample(posterior));
253
+ const reconError = this.reconstructionError(obsVector, reconstructed);
254
+ const freeEnergy = reconError + this.config.kldWeight * kld;
255
+ // Store history
256
+ const prior = { ...this.currentBelief };
257
+ this.currentBelief = posterior;
258
+ this.beliefHistory.push(posterior);
259
+ this.observationBuffer.push(observation);
260
+ this.freeEnergyHistory.push(freeEnergy);
261
+ const update = {
262
+ prior,
263
+ posterior,
264
+ predictionError: reconError,
265
+ freeEnergy,
266
+ kldivergence: kld
267
+ };
268
+ this.emit('beliefUpdate', update);
269
+ return update;
270
+ }
271
+ /**
272
+ * Select action by minimizing Expected Free Energy
273
+ */
274
+ selectAction() {
275
+ const actions = Array.from(this.actionRegistry.values());
276
+ const evaluations = [];
277
+ // Evaluate each possible action
278
+ for (const action of actions) {
279
+ const evaluation = this.evaluatePolicy([action]);
280
+ evaluations.push(evaluation);
281
+ }
282
+ // Select action with lowest expected free energy
283
+ const bestIdx = evaluations.reduce((best, curr, idx) => curr.expectedFreeEnergy < evaluations[best].expectedFreeEnergy ? idx : best, 0);
284
+ const selectedAction = actions[bestIdx];
285
+ const evaluation = evaluations[bestIdx];
286
+ this.emit('actionSelected', { action: selectedAction, evaluation });
287
+ return { action: selectedAction, evaluation };
288
+ }
289
+ /**
290
+ * Evaluate a policy (sequence of actions)
291
+ */
292
+ evaluatePolicy(policy) {
293
+ let totalEFE = 0;
294
+ let epistemicValue = 0;
295
+ let pragmaticValue = 0;
296
+ let riskValue = 0;
297
+ let predictedBelief = { ...this.currentBelief };
298
+ for (let t = 0; t < Math.min(policy.length, this.config.freeEnergyHorizon); t++) {
299
+ const action = policy[t];
300
+ const actionEmbedding = this.actionRegistry.get(action.type)?.embedding || randomVector(16);
301
+ // Predict next belief state
302
+ const nextBelief = this.transition.predict(this.encoder.sample(predictedBelief), actionEmbedding);
303
+ // Compute Expected Free Energy components
304
+ // 1. Epistemic value (information gain) - reduction in uncertainty
305
+ const entropyReduction = this.entropy(predictedBelief) - this.entropy(nextBelief);
306
+ epistemicValue += entropyReduction;
307
+ // 2. Pragmatic value (goal achievement) - alignment with preferred states
308
+ const goalAlignment = this.computeGoalAlignment(nextBelief);
309
+ pragmaticValue += goalAlignment;
310
+ // 3. Risk (uncertainty about outcomes)
311
+ const risk = this.entropy(nextBelief);
312
+ riskValue += risk;
313
+ // EFE = -epistemic - pragmatic + risk
314
+ const stepEFE = -entropyReduction - goalAlignment + 0.5 * risk;
315
+ totalEFE += stepEFE * Math.pow(0.99, t); // Discount future
316
+ predictedBelief = nextBelief;
317
+ }
318
+ return {
319
+ policy,
320
+ expectedFreeEnergy: totalEFE,
321
+ epistemicValue,
322
+ pragmaticValue,
323
+ riskValue
324
+ };
325
+ }
326
+ /**
327
+ * Predict future state given action
328
+ */
329
+ predictFuture(action, steps = 1) {
330
+ const predictions = [];
331
+ let currentState = this.encoder.sample(this.currentBelief);
332
+ const actionEmbedding = this.actionRegistry.get(action.type)?.embedding || randomVector(16);
333
+ for (let i = 0; i < steps; i++) {
334
+ const nextBelief = this.transition.predict(currentState, actionEmbedding);
335
+ predictions.push(nextBelief);
336
+ currentState = this.encoder.sample(nextBelief);
337
+ }
338
+ return predictions;
339
+ }
340
+ // ===========================================================================
341
+ // HELPER METHODS
342
+ // ===========================================================================
343
+ observationToVector(obs) {
344
+ // Pad or truncate to expected dimension
345
+ const targetDim = 128;
346
+ const vector = [...obs.data];
347
+ while (vector.length < targetDim)
348
+ vector.push(0);
349
+ return vector.slice(0, targetDim);
350
+ }
351
+ combineBelief(prior, likelihood, precision) {
352
+ // Precision-weighted combination of prior and likelihood
353
+ const priorPrec = prior.logVar.map(lv => Math.exp(-lv));
354
+ const likPrec = likelihood.logVar.map(lv => Math.exp(-lv) * precision);
355
+ const combinedPrec = priorPrec.map((pp, i) => pp + likPrec[i]);
356
+ const combinedVar = combinedPrec.map(p => 1 / p);
357
+ const combinedLogVar = combinedVar.map(v => Math.log(v + 1e-10));
358
+ const combinedMean = prior.mean.map((pm, i) => {
359
+ const priorContrib = pm * priorPrec[i];
360
+ const likContrib = likelihood.mean[i] * likPrec[i];
361
+ return (priorContrib + likContrib) / combinedPrec[i];
362
+ });
363
+ return {
364
+ mean: combinedMean,
365
+ logVar: combinedLogVar,
366
+ dimension: prior.dimension
367
+ };
368
+ }
369
+ klDivergence(q, p) {
370
+ // KL(q || p) for multivariate Gaussians
371
+ let kld = 0;
372
+ for (let i = 0; i < q.dimension; i++) {
373
+ const varQ = Math.exp(q.logVar[i]);
374
+ const varP = Math.exp(p.logVar[i]);
375
+ kld += Math.log(varP / varQ) + (varQ + (q.mean[i] - p.mean[i]) ** 2) / varP - 1;
376
+ }
377
+ return 0.5 * kld;
378
+ }
379
+ reconstructionError(original, reconstructed) {
380
+ return original.reduce((sum, val, i) => sum + (val - reconstructed[i]) ** 2, 0) / original.length;
381
+ }
382
+ entropy(belief) {
383
+ // Entropy of multivariate Gaussian: 0.5 * log(det(2πeΣ))
384
+ const logDet = belief.logVar.reduce((sum, lv) => sum + lv, 0);
385
+ return 0.5 * belief.dimension * (1 + Math.log(2 * Math.PI)) + 0.5 * logDet;
386
+ }
387
+ computeGoalAlignment(belief) {
388
+ // Simple goal alignment: how close is belief mean to goal state
389
+ // Goal state assumed to be positive in first few dimensions (viability)
390
+ const goalDims = Math.min(4, belief.dimension);
391
+ let alignment = 0;
392
+ for (let i = 0; i < goalDims; i++) {
393
+ alignment += Math.tanh(belief.mean[i]); // Saturating activation
394
+ }
395
+ return alignment / goalDims;
396
+ }
397
+ // ===========================================================================
398
+ // PUBLIC API
399
+ // ===========================================================================
400
+ getCurrentBelief() {
401
+ return { ...this.currentBelief };
402
+ }
403
+ getFreeEnergy() {
404
+ return this.freeEnergyHistory[this.freeEnergyHistory.length - 1] || 0;
405
+ }
406
+ getAverageFreeEnergy(window = 10) {
407
+ const recent = this.freeEnergyHistory.slice(-window);
408
+ return recent.reduce((a, b) => a + b, 0) / recent.length || 0;
409
+ }
410
+ registerAction(action) {
411
+ this.actionRegistry.set(action.type, action);
412
+ this.policy.registerAction(action.type, action.embedding);
413
+ }
414
+ getBeliefSummary() {
415
+ const belief = this.currentBelief;
416
+ return {
417
+ meanNorm: Math.sqrt(belief.mean.reduce((s, v) => s + v * v, 0)),
418
+ averageUncertainty: belief.logVar.reduce((s, v) => s + Math.exp(v), 0) / belief.dimension,
419
+ entropy: this.entropy(belief),
420
+ dimension: belief.dimension,
421
+ historyLength: this.beliefHistory.length,
422
+ currentFreeEnergy: this.getFreeEnergy(),
423
+ averageFreeEnergy: this.getAverageFreeEnergy()
424
+ };
425
+ }
426
+ /**
427
+ * Create observation from MCP tool result
428
+ */
429
+ createObservation(modality, data, precision = 0.8) {
430
+ // Convert data to vector representation
431
+ let vector;
432
+ if (Array.isArray(data)) {
433
+ vector = data.map(d => typeof d === 'number' ? d : hashToNumber(JSON.stringify(d)));
434
+ }
435
+ else if (typeof data === 'object') {
436
+ vector = Object.values(data).map(v => typeof v === 'number' ? v : hashToNumber(JSON.stringify(v)));
437
+ }
438
+ else if (typeof data === 'string') {
439
+ vector = stringToVector(data, 128);
440
+ }
441
+ else {
442
+ vector = [typeof data === 'number' ? data : 0];
443
+ }
444
+ return {
445
+ modality,
446
+ data: vector,
447
+ precision,
448
+ timestamp: Date.now()
449
+ };
450
+ }
451
+ /**
452
+ * Full inference cycle: observe → update → select → act
453
+ */
454
+ async cycle(observation) {
455
+ const belief = this.updateBelief(observation);
456
+ const { action, evaluation } = this.selectAction();
457
+ this.emit('cycle', { belief, action, evaluation });
458
+ return { belief, action, evaluation };
459
+ }
460
+ }
461
+ exports.DeepActiveInference = DeepActiveInference;
462
+ // ============================================================================
463
+ // UTILITY FUNCTIONS
464
+ // ============================================================================
465
+ function gaussianRandom() {
466
+ // Box-Muller transform
467
+ const u1 = Math.random();
468
+ const u2 = Math.random();
469
+ return Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math.PI * u2);
470
+ }
471
+ function softmax(logits) {
472
+ const maxLogit = Math.max(...logits);
473
+ const exps = logits.map(l => Math.exp(l - maxLogit));
474
+ const sum = exps.reduce((a, b) => a + b, 0);
475
+ return exps.map(e => e / sum);
476
+ }
477
+ function randomVector(dim) {
478
+ return Array(dim).fill(0).map(() => (Math.random() - 0.5) * 2);
479
+ }
480
+ function hashToNumber(str) {
481
+ let hash = 0;
482
+ for (let i = 0; i < str.length; i++) {
483
+ hash = ((hash << 5) - hash) + str.charCodeAt(i);
484
+ hash = hash & hash;
485
+ }
486
+ return Math.tanh(hash / 1000000);
487
+ }
488
+ function stringToVector(str, dim) {
489
+ const vector = Array(dim).fill(0);
490
+ for (let i = 0; i < str.length && i < dim; i++) {
491
+ vector[i] = (str.charCodeAt(i) - 64) / 64; // Normalize
492
+ }
493
+ return vector;
494
+ }
495
+ exports.default = DeepActiveInference;