genesis-ai-cli 7.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (227)
  1. package/.env.example +78 -0
  2. package/README.md +282 -0
  3. package/dist/src/active-inference/actions.d.ts +75 -0
  4. package/dist/src/active-inference/actions.js +250 -0
  5. package/dist/src/active-inference/autonomous-loop.d.ts +103 -0
  6. package/dist/src/active-inference/autonomous-loop.js +289 -0
  7. package/dist/src/active-inference/core.d.ts +85 -0
  8. package/dist/src/active-inference/core.js +555 -0
  9. package/dist/src/active-inference/demo-autonomous-loop.d.ts +8 -0
  10. package/dist/src/active-inference/demo-autonomous-loop.js +338 -0
  11. package/dist/src/active-inference/demo-value-integration.d.ts +8 -0
  12. package/dist/src/active-inference/demo-value-integration.js +174 -0
  13. package/dist/src/active-inference/index.d.ts +32 -0
  14. package/dist/src/active-inference/index.js +88 -0
  15. package/dist/src/active-inference/integration.d.ts +114 -0
  16. package/dist/src/active-inference/integration.js +698 -0
  17. package/dist/src/active-inference/memory-integration.d.ts +51 -0
  18. package/dist/src/active-inference/memory-integration.js +232 -0
  19. package/dist/src/active-inference/observations.d.ts +67 -0
  20. package/dist/src/active-inference/observations.js +147 -0
  21. package/dist/src/active-inference/test-active-inference.d.ts +8 -0
  22. package/dist/src/active-inference/test-active-inference.js +320 -0
  23. package/dist/src/active-inference/test-value-integration.d.ts +6 -0
  24. package/dist/src/active-inference/test-value-integration.js +168 -0
  25. package/dist/src/active-inference/types.d.ts +150 -0
  26. package/dist/src/active-inference/types.js +59 -0
  27. package/dist/src/active-inference/value-integration.d.ts +164 -0
  28. package/dist/src/active-inference/value-integration.js +459 -0
  29. package/dist/src/agents/base-agent.d.ts +53 -0
  30. package/dist/src/agents/base-agent.js +178 -0
  31. package/dist/src/agents/builder.d.ts +67 -0
  32. package/dist/src/agents/builder.js +537 -0
  33. package/dist/src/agents/critic.d.ts +35 -0
  34. package/dist/src/agents/critic.js +322 -0
  35. package/dist/src/agents/ethicist.d.ts +54 -0
  36. package/dist/src/agents/ethicist.js +393 -0
  37. package/dist/src/agents/explorer.d.ts +26 -0
  38. package/dist/src/agents/explorer.js +216 -0
  39. package/dist/src/agents/feeling.d.ts +41 -0
  40. package/dist/src/agents/feeling.js +320 -0
  41. package/dist/src/agents/index.d.ts +111 -0
  42. package/dist/src/agents/index.js +222 -0
  43. package/dist/src/agents/memory.d.ts +69 -0
  44. package/dist/src/agents/memory.js +404 -0
  45. package/dist/src/agents/message-bus.d.ts +88 -0
  46. package/dist/src/agents/message-bus.js +267 -0
  47. package/dist/src/agents/narrator.d.ts +90 -0
  48. package/dist/src/agents/narrator.js +473 -0
  49. package/dist/src/agents/planner.d.ts +38 -0
  50. package/dist/src/agents/planner.js +341 -0
  51. package/dist/src/agents/predictor.d.ts +73 -0
  52. package/dist/src/agents/predictor.js +506 -0
  53. package/dist/src/agents/sensor.d.ts +88 -0
  54. package/dist/src/agents/sensor.js +377 -0
  55. package/dist/src/agents/test-agents.d.ts +6 -0
  56. package/dist/src/agents/test-agents.js +73 -0
  57. package/dist/src/agents/types.d.ts +194 -0
  58. package/dist/src/agents/types.js +7 -0
  59. package/dist/src/brain/index.d.ts +185 -0
  60. package/dist/src/brain/index.js +843 -0
  61. package/dist/src/brain/trace.d.ts +91 -0
  62. package/dist/src/brain/trace.js +327 -0
  63. package/dist/src/brain/types.d.ts +165 -0
  64. package/dist/src/brain/types.js +51 -0
  65. package/dist/src/cli/chat.d.ts +237 -0
  66. package/dist/src/cli/chat.js +1959 -0
  67. package/dist/src/cli/dispatcher.d.ts +182 -0
  68. package/dist/src/cli/dispatcher.js +718 -0
  69. package/dist/src/cli/human-loop.d.ts +170 -0
  70. package/dist/src/cli/human-loop.js +543 -0
  71. package/dist/src/cli/index.d.ts +12 -0
  72. package/dist/src/cli/index.js +28 -0
  73. package/dist/src/cli/interactive.d.ts +141 -0
  74. package/dist/src/cli/interactive.js +757 -0
  75. package/dist/src/cli/ui.d.ts +205 -0
  76. package/dist/src/cli/ui.js +632 -0
  77. package/dist/src/consciousness/attention-schema.d.ts +154 -0
  78. package/dist/src/consciousness/attention-schema.js +432 -0
  79. package/dist/src/consciousness/global-workspace.d.ts +149 -0
  80. package/dist/src/consciousness/global-workspace.js +422 -0
  81. package/dist/src/consciousness/index.d.ts +186 -0
  82. package/dist/src/consciousness/index.js +476 -0
  83. package/dist/src/consciousness/phi-calculator.d.ts +119 -0
  84. package/dist/src/consciousness/phi-calculator.js +445 -0
  85. package/dist/src/consciousness/phi-decisions.d.ts +169 -0
  86. package/dist/src/consciousness/phi-decisions.js +383 -0
  87. package/dist/src/consciousness/phi-monitor.d.ts +153 -0
  88. package/dist/src/consciousness/phi-monitor.js +465 -0
  89. package/dist/src/consciousness/types.d.ts +260 -0
  90. package/dist/src/consciousness/types.js +44 -0
  91. package/dist/src/daemon/dream-mode.d.ts +115 -0
  92. package/dist/src/daemon/dream-mode.js +470 -0
  93. package/dist/src/daemon/index.d.ts +162 -0
  94. package/dist/src/daemon/index.js +542 -0
  95. package/dist/src/daemon/maintenance.d.ts +139 -0
  96. package/dist/src/daemon/maintenance.js +549 -0
  97. package/dist/src/daemon/process.d.ts +82 -0
  98. package/dist/src/daemon/process.js +442 -0
  99. package/dist/src/daemon/scheduler.d.ts +90 -0
  100. package/dist/src/daemon/scheduler.js +494 -0
  101. package/dist/src/daemon/types.d.ts +213 -0
  102. package/dist/src/daemon/types.js +50 -0
  103. package/dist/src/epistemic/index.d.ts +74 -0
  104. package/dist/src/epistemic/index.js +225 -0
  105. package/dist/src/grounding/epistemic-stack.d.ts +100 -0
  106. package/dist/src/grounding/epistemic-stack.js +408 -0
  107. package/dist/src/grounding/feedback.d.ts +98 -0
  108. package/dist/src/grounding/feedback.js +276 -0
  109. package/dist/src/grounding/index.d.ts +123 -0
  110. package/dist/src/grounding/index.js +224 -0
  111. package/dist/src/grounding/verifier.d.ts +149 -0
  112. package/dist/src/grounding/verifier.js +484 -0
  113. package/dist/src/healing/detector.d.ts +110 -0
  114. package/dist/src/healing/detector.js +436 -0
  115. package/dist/src/healing/fixer.d.ts +138 -0
  116. package/dist/src/healing/fixer.js +572 -0
  117. package/dist/src/healing/index.d.ts +23 -0
  118. package/dist/src/healing/index.js +43 -0
  119. package/dist/src/hooks/index.d.ts +135 -0
  120. package/dist/src/hooks/index.js +317 -0
  121. package/dist/src/index.d.ts +23 -0
  122. package/dist/src/index.js +1266 -0
  123. package/dist/src/kernel/index.d.ts +155 -0
  124. package/dist/src/kernel/index.js +795 -0
  125. package/dist/src/kernel/invariants.d.ts +153 -0
  126. package/dist/src/kernel/invariants.js +355 -0
  127. package/dist/src/kernel/test-kernel.d.ts +6 -0
  128. package/dist/src/kernel/test-kernel.js +108 -0
  129. package/dist/src/kernel/test-real-mcp.d.ts +10 -0
  130. package/dist/src/kernel/test-real-mcp.js +295 -0
  131. package/dist/src/llm/index.d.ts +146 -0
  132. package/dist/src/llm/index.js +428 -0
  133. package/dist/src/llm/router.d.ts +136 -0
  134. package/dist/src/llm/router.js +510 -0
  135. package/dist/src/mcp/index.d.ts +85 -0
  136. package/dist/src/mcp/index.js +657 -0
  137. package/dist/src/mcp/resilient.d.ts +139 -0
  138. package/dist/src/mcp/resilient.js +417 -0
  139. package/dist/src/memory/cache.d.ts +118 -0
  140. package/dist/src/memory/cache.js +356 -0
  141. package/dist/src/memory/cognitive-workspace.d.ts +231 -0
  142. package/dist/src/memory/cognitive-workspace.js +521 -0
  143. package/dist/src/memory/consolidation.d.ts +99 -0
  144. package/dist/src/memory/consolidation.js +443 -0
  145. package/dist/src/memory/episodic.d.ts +114 -0
  146. package/dist/src/memory/episodic.js +394 -0
  147. package/dist/src/memory/forgetting.d.ts +134 -0
  148. package/dist/src/memory/forgetting.js +324 -0
  149. package/dist/src/memory/index.d.ts +211 -0
  150. package/dist/src/memory/index.js +367 -0
  151. package/dist/src/memory/indexer.d.ts +123 -0
  152. package/dist/src/memory/indexer.js +479 -0
  153. package/dist/src/memory/procedural.d.ts +136 -0
  154. package/dist/src/memory/procedural.js +479 -0
  155. package/dist/src/memory/semantic.d.ts +132 -0
  156. package/dist/src/memory/semantic.js +497 -0
  157. package/dist/src/memory/types.d.ts +193 -0
  158. package/dist/src/memory/types.js +15 -0
  159. package/dist/src/orchestrator.d.ts +65 -0
  160. package/dist/src/orchestrator.js +317 -0
  161. package/dist/src/persistence/index.d.ts +257 -0
  162. package/dist/src/persistence/index.js +763 -0
  163. package/dist/src/pipeline/executor.d.ts +51 -0
  164. package/dist/src/pipeline/executor.js +695 -0
  165. package/dist/src/pipeline/index.d.ts +7 -0
  166. package/dist/src/pipeline/index.js +11 -0
  167. package/dist/src/self-production.d.ts +67 -0
  168. package/dist/src/self-production.js +205 -0
  169. package/dist/src/subagents/executor.d.ts +58 -0
  170. package/dist/src/subagents/executor.js +283 -0
  171. package/dist/src/subagents/index.d.ts +37 -0
  172. package/dist/src/subagents/index.js +53 -0
  173. package/dist/src/subagents/registry.d.ts +23 -0
  174. package/dist/src/subagents/registry.js +167 -0
  175. package/dist/src/subagents/types.d.ts +79 -0
  176. package/dist/src/subagents/types.js +14 -0
  177. package/dist/src/tools/bash.d.ts +139 -0
  178. package/dist/src/tools/bash.js +583 -0
  179. package/dist/src/tools/edit.d.ts +125 -0
  180. package/dist/src/tools/edit.js +424 -0
  181. package/dist/src/tools/git.d.ts +179 -0
  182. package/dist/src/tools/git.js +504 -0
  183. package/dist/src/tools/index.d.ts +21 -0
  184. package/dist/src/tools/index.js +163 -0
  185. package/dist/src/types.d.ts +145 -0
  186. package/dist/src/types.js +7 -0
  187. package/dist/src/world-model/decoder.d.ts +163 -0
  188. package/dist/src/world-model/decoder.js +517 -0
  189. package/dist/src/world-model/digital-twin.d.ts +219 -0
  190. package/dist/src/world-model/digital-twin.js +695 -0
  191. package/dist/src/world-model/encoder.d.ts +141 -0
  192. package/dist/src/world-model/encoder.js +564 -0
  193. package/dist/src/world-model/index.d.ts +221 -0
  194. package/dist/src/world-model/index.js +772 -0
  195. package/dist/src/world-model/predictor.d.ts +161 -0
  196. package/dist/src/world-model/predictor.js +681 -0
  197. package/dist/src/world-model/test-value-jepa.d.ts +8 -0
  198. package/dist/src/world-model/test-value-jepa.js +430 -0
  199. package/dist/src/world-model/types.d.ts +341 -0
  200. package/dist/src/world-model/types.js +69 -0
  201. package/dist/src/world-model/value-jepa.d.ts +247 -0
  202. package/dist/src/world-model/value-jepa.js +622 -0
  203. package/dist/test/brain.test.d.ts +11 -0
  204. package/dist/test/brain.test.js +358 -0
  205. package/dist/test/cli/dispatcher.test.d.ts +4 -0
  206. package/dist/test/cli/dispatcher.test.js +332 -0
  207. package/dist/test/cli/human-loop.test.d.ts +4 -0
  208. package/dist/test/cli/human-loop.test.js +270 -0
  209. package/dist/test/grounding/feedback.test.d.ts +4 -0
  210. package/dist/test/grounding/feedback.test.js +462 -0
  211. package/dist/test/grounding/verifier.test.d.ts +4 -0
  212. package/dist/test/grounding/verifier.test.js +442 -0
  213. package/dist/test/grounding.test.d.ts +6 -0
  214. package/dist/test/grounding.test.js +246 -0
  215. package/dist/test/healing/detector.test.d.ts +4 -0
  216. package/dist/test/healing/detector.test.js +266 -0
  217. package/dist/test/healing/fixer.test.d.ts +4 -0
  218. package/dist/test/healing/fixer.test.js +369 -0
  219. package/dist/test/integration.test.d.ts +5 -0
  220. package/dist/test/integration.test.js +290 -0
  221. package/dist/test/tools/bash.test.d.ts +4 -0
  222. package/dist/test/tools/bash.test.js +348 -0
  223. package/dist/test/tools/edit.test.d.ts +4 -0
  224. package/dist/test/tools/edit.test.js +350 -0
  225. package/dist/test/tools/git.test.d.ts +4 -0
  226. package/dist/test/tools/git.test.js +350 -0
  227. package/package.json +60 -0
package/dist/src/world-model/value-jepa.js
@@ -0,0 +1,622 @@
+ "use strict";
+ /**
+  * Genesis 6.2 - Value-Guided JEPA (Joint Embedding Predictive Architecture)
+  *
+  * Extends the world model with value functions for decision-making.
+  *
+  * Scientific foundations:
+  * - LeCun (2022): JEPA - predict in latent space, not pixel space
+  * - Friston: Free Energy Principle - minimize expected free energy
+  * - Hafner (2019): Dreamer - value learning in world models
+  * - Schmidhuber: Curiosity-driven learning via prediction error
+  *
+  * The value function evaluates predicted states to guide:
+  * 1. Action selection (which action leads to best outcome?)
+  * 2. Trajectory planning (which path maximizes value?)
+  * 3. Dream consolidation (which memories are most valuable?)
+  *
+  * Integration with Active Inference:
+  * - G(π) = E[log P(o|s)] - KL[Q(s)||P(s)] + Value(s)
+  * - Expected Free Energy includes value term for goal-directedness
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ValueGuidedJEPA = exports.ValueFunction = exports.DEFAULT_VALUE_CONFIG = void 0;
+ exports.createValueFunction = createValueFunction;
+ exports.getValueFunction = getValueFunction;
+ exports.resetValueFunction = resetValueFunction;
+ exports.createValueGuidedJEPA = createValueGuidedJEPA;
+ // ============================================================================
+ // Default Configuration
+ // ============================================================================
+ exports.DEFAULT_VALUE_CONFIG = {
+     gamma: 0.99,
+     weights: {
+         survival: 0.35, // Highest priority: stay alive
+         integrity: 0.25, // Maintain system coherence
+         progress: 0.20, // Make progress on goals
+         novelty: 0.10, // Explore and learn
+         efficiency: 0.10, // Be efficient
+     },
+     riskAversion: 0.5,
+     curiosityBonus: 0.1,
+     temperature: 1.0,
+     horizon: 10,
+ };
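
For orientation, here is a minimal usage sketch (editor's addition, not part of the diff). The require path is illustrative, and the latent-state shape assumes only the `vector`, `entropy`, and `confidence` fields that this file actually reads.

```js
// Hypothetical usage of the value function defined in this file (path illustrative).
const { createValueFunction } = require('genesis-ai-cli/dist/src/world-model/value-jepa.js');

// Custom config fields are merged over DEFAULT_VALUE_CONFIG.
const vf = createValueFunction({ riskAversion: 0.3 });

const state = {
    vector: Array.from({ length: 64 }, () => Math.random() - 0.5),
    entropy: 0.4,    // used as the novelty proxy
    confidence: 0.8, // used as the efficiency proxy
};

// horizon 2 => discount = 0.99^2 = 0.9801 applied to the weighted component sum
const v = vf.estimate(state, 2);
console.log(v.value, v.components, v.valueConfidence);
```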
+ // ============================================================================
+ // Value Function
+ // ============================================================================
+ class ValueFunction {
+     config;
+     // Learned parameters (simplified - in production would use neural net)
+     survivalBasis = [];
+     integrityBasis = [];
+     progressBasis = [];
+     // Statistics for normalization
+     valueStats = {
+         mean: 0,
+         std: 1,
+         min: -1,
+         max: 1,
+         count: 0,
+     };
+     constructor(config = {}) {
+         this.config = { ...exports.DEFAULT_VALUE_CONFIG, ...config };
+         this.initializeBasis();
+     }
+     /**
+      * Initialize basis functions for value estimation
+      */
+     initializeBasis() {
+         // Simple linear basis - in production would be learned
+         const dim = 64; // Latent dimension
+         this.survivalBasis = Array(dim).fill(0).map(() => Math.random() * 0.1);
+         this.integrityBasis = Array(dim).fill(0).map(() => Math.random() * 0.1);
+         this.progressBasis = Array(dim).fill(0).map(() => Math.random() * 0.1);
+     }
+     /**
+      * Estimate value of a latent state
+      */
+     estimate(state, horizon = 0) {
+         const components = this.computeComponents(state);
+         // Weighted sum of components
+         const rawValue = this.config.weights.survival * components.survival +
+             this.config.weights.integrity * components.integrity +
+             this.config.weights.progress * components.progress +
+             this.config.weights.novelty * components.novelty +
+             this.config.weights.efficiency * components.efficiency;
+         // Apply discount
+         const discount = Math.pow(this.config.gamma, horizon);
+         const value = rawValue * discount;
+         // Estimate uncertainty from state uncertainty
+         const valueUncertainty = this.estimateUncertainty(state);
+         return {
+             value: this.clamp(value, -1, 1),
+             components,
+             valueUncertainty,
+             valueConfidence: 1 / (1 + valueUncertainty),
+             discount,
+             horizon,
+         };
+     }
+     /**
+      * Compute individual value components
+      */
+     computeComponents(state) {
+         const vec = state.vector;
+         // Survival: based on energy-related dimensions (assumed first few)
+         const survivalSignal = this.dotProduct(vec.slice(0, 16), this.survivalBasis.slice(0, 16));
+         const survival = this.sigmoid(survivalSignal);
+         // Integrity: based on coherence of latent representation
+         const integritySignal = this.computeCoherence(vec);
+         const integrity = this.sigmoid(integritySignal);
+         // Progress: based on goal-related dimensions
+         const progressSignal = this.dotProduct(vec.slice(16, 32), this.progressBasis.slice(0, 16));
+         const progress = this.sigmoid(progressSignal);
+         // Novelty: prediction error / surprise (use entropy as proxy)
+         const novelty = state.entropy ?? 0.5;
+         // Efficiency: inverse of confidence loss (high confidence = efficient encoding)
+         const efficiency = state.confidence ?? 0.5;
+         return {
+             survival: this.clamp(survival, 0, 1),
+             integrity: this.clamp(integrity, 0, 1),
+             progress: this.clamp(progress, 0, 1),
+             novelty: this.clamp(novelty, 0, 1),
+             efficiency: this.clamp(efficiency, 0, 1),
+         };
+     }
+     /**
+      * Estimate uncertainty in value estimate
+      */
+     estimateUncertainty(state) {
+         // Base uncertainty from state (use 1 - confidence as uncertainty proxy)
+         const stateUncertainty = 1 - (state.confidence ?? 0.5);
+         // Epistemic uncertainty (how well do we know this region?)
+         const noveltyFactor = this.computeNovelty(state);
+         // Combined uncertainty
+         return Math.min(1, stateUncertainty * 0.5 + noveltyFactor * 0.5);
+     }
+     /**
+      * Compute novelty of state (how different from seen states)
+      */
+     computeNovelty(state) {
+         // Simplified: use vector magnitude variation as proxy
+         const magnitude = Math.sqrt(this.dotProduct(state.vector, state.vector));
+         return Math.abs(magnitude - 1); // Deviation from unit sphere
+     }
+     /**
+      * Compute coherence of latent vector
+      */
+     computeCoherence(vec) {
+         // Coherence: how structured is the representation
+         // High coherence = smooth, low-frequency patterns
+         let coherence = 0;
+         for (let i = 1; i < vec.length; i++) {
+             coherence += Math.abs(vec[i] - vec[i - 1]);
+         }
+         return -coherence / vec.length; // Lower difference = higher coherence
+     }
+     // ============================================================================
+     // Q-Value and Advantage
+     // ============================================================================
+     /**
+      * Compute Q-value for state-action pair
+      */
+     computeQValue(state, action, predictedState) {
+         // V(s) - current state value
+         const currentValue = this.estimate(state);
+         // V(s') - predicted next state value
+         const nextValue = this.estimate(predictedState.state, 1);
+         // Q(s, a) = r + γV(s')
+         // Reward is implicit in value difference
+         const qValue = nextValue.value;
+         // Advantage A(s, a) = Q(s, a) - V(s)
+         const advantage = qValue - currentValue.value;
+         // Uncertainty propagates
+         const qUncertainty = Math.sqrt(currentValue.valueUncertainty ** 2 + nextValue.valueUncertainty ** 2);
+         return {
+             action,
+             qValue,
+             advantage,
+             qUncertainty,
+             predictedState,
+         };
+     }
+     /**
+      * Rank actions by Q-value
+      */
+     rankActions(actionValues) {
+         // Risk-adjusted ranking
+         return [...actionValues].sort((a, b) => {
+             const aRisk = a.qValue - this.config.riskAversion * a.qUncertainty;
+             const bRisk = b.qValue - this.config.riskAversion * b.qUncertainty;
+             return bRisk - aRisk; // Descending
+         });
+     }
+     /**
+      * Sample action using softmax over Q-values
+      */
+     sampleAction(actionValues) {
+         const temp = this.config.temperature;
+         // Compute softmax probabilities
+         const qValues = actionValues.map(av => av.qValue / temp);
+         const maxQ = Math.max(...qValues);
+         const expQ = qValues.map(q => Math.exp(q - maxQ));
+         const sumExpQ = expQ.reduce((a, b) => a + b, 0);
+         const probs = expQ.map(e => e / sumExpQ);
+         // Sample
+         const r = Math.random();
+         let cumProb = 0;
+         for (let i = 0; i < probs.length; i++) {
+             cumProb += probs[i];
+             if (r < cumProb) {
+                 return actionValues[i];
+             }
+         }
+         return actionValues[actionValues.length - 1];
+     }
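
The ranking rule above trades expected return against uncertainty, so a safer action can outrank a nominally better one. A standalone sketch with hypothetical numbers (editor's addition):

```js
// Risk-adjusted ranking, extracted from rankActions (hypothetical action values).
const riskAversion = 0.5; // DEFAULT_VALUE_CONFIG.riskAversion
const candidates = [
    { action: 'rewrite-module', qValue: 0.60, qUncertainty: 0.40 }, // score 0.60 - 0.5*0.40 = 0.40
    { action: 'run-tests',      qValue: 0.55, qUncertainty: 0.10 }, // score 0.55 - 0.5*0.10 = 0.50
];
const ranked = [...candidates].sort((a, b) =>
    (b.qValue - riskAversion * b.qUncertainty) -
    (a.qValue - riskAversion * a.qUncertainty));
console.log(ranked[0].action); // 'run-tests' wins despite the lower raw Q-value
```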
+     // ============================================================================
+     // Trajectory Valuation
+     // ============================================================================
+     /**
+      * Evaluate a trajectory with cumulative value
+      */
+     evaluateTrajectory(trajectory) {
+         const stepValues = [];
+         let totalValue = 0;
+         let sumSquared = 0;
+         let minValue = Infinity;
+         let maxValue = -Infinity;
+         // Evaluate each step (trajectory.states contains PredictedState, need .state for LatentState)
+         for (let t = 0; t < trajectory.states.length; t++) {
+             const value = this.estimate(trajectory.states[t].state, t);
+             stepValues.push(value);
+             totalValue += value.value;
+             sumSquared += value.value ** 2;
+             minValue = Math.min(minValue, value.value);
+             maxValue = Math.max(maxValue, value.value);
+         }
+         const n = trajectory.states.length;
+         const expectedValue = totalValue / Math.max(1, n);
+         const variance = (sumSquared / Math.max(1, n)) - (expectedValue ** 2);
+         const valueVariance = Math.max(0, variance);
+         // Risk-adjusted value
+         const riskAdjustedValue = expectedValue - this.config.riskAversion * Math.sqrt(valueVariance);
+         return {
+             ...trajectory,
+             totalValue,
+             expectedValue,
+             valueVariance,
+             stepValues,
+             minValue: minValue === Infinity ? 0 : minValue,
+             maxValue: maxValue === -Infinity ? 0 : maxValue,
+             riskAdjustedValue,
+         };
+     }
+     /**
+      * Select best trajectory from candidates
+      */
+     selectBestTrajectory(trajectories) {
+         const valued = trajectories.map(t => this.evaluateTrajectory(t));
+         // Sort by risk-adjusted value
+         valued.sort((a, b) => b.riskAdjustedValue - a.riskAdjustedValue);
+         return valued[0];
+     }
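
The risk adjustment penalizes volatile trajectories. With hypothetical step values the arithmetic works out as follows (editor's sketch):

```js
// evaluateTrajectory's risk adjustment on hypothetical per-step values.
const stepVals = [0.8, 0.2, 0.8];
const n = stepVals.length;
const mean = stepVals.reduce((a, b) => a + b, 0) / n;                     // 0.6
const variance = stepVals.reduce((a, b) => a + b * b, 0) / n - mean ** 2; // 0.08
const riskAdjusted = mean - 0.5 * Math.sqrt(variance);                    // ~0.459
console.log({ mean, variance, riskAdjusted });
// A steady [0.6, 0.6, 0.6] trajectory scores the full 0.6 with no penalty.
```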
+     // ============================================================================
+     // Active Inference Integration
+     // ============================================================================
+     /**
+      * Compute Expected Free Energy for Active Inference
+      *
+      * G(π) = ambiguity + risk - pragmatic_value - epistemic_value - instrumental_value
+      */
+     computeExpectedFreeEnergy(currentState, policy, predictedStates, preferences /* preferred/goal state */) {
+         // 1. Ambiguity: Expected uncertainty about observations
+         const ambiguity = this.computeAmbiguity(predictedStates);
+         // 2. Risk: KL divergence from preferences
+         const risk = this.computeRisk(predictedStates, preferences);
+         // 3. Pragmatic Value: Expected utility given preferences
+         const pragmaticValue = this.computePragmaticValue(predictedStates, preferences);
+         // 4. Epistemic Value: Information gain
+         const epistemicValue = this.computeEpistemicValue(currentState, predictedStates);
+         // 5. Instrumental Value: V(s) contribution
+         const instrumentalValue = this.computeInstrumentalValue(predictedStates);
+         // Expected Free Energy (to be minimized)
+         // Lower is better: ambiguity and risk are bad, values are good
+         const expectedFreeEnergy = ambiguity +
+             risk -
+             pragmaticValue -
+             epistemicValue -
+             instrumentalValue;
+         return {
+             expectedFreeEnergy,
+             ambiguity,
+             risk,
+             pragmaticValue,
+             epistemicValue,
+             instrumentalValue,
+         };
+     }
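
Since all five terms fold into one scalar, a toy calculation makes the sign conventions concrete (editor's sketch, hypothetical term values):

```js
// Sign conventions of the EFE score above: lower G is better.
const G = ({ ambiguity, risk, pragmatic, epistemic, instrumental }) =>
    ambiguity + risk - pragmatic - epistemic - instrumental;
console.log(G({ ambiguity: 0.3, risk: 0.2, pragmatic: 0.6, epistemic: 0.05, instrumental: 0.4 })); // -0.55
console.log(G({ ambiguity: 0.6, risk: 0.5, pragmatic: 0.2, epistemic: 0.0,  instrumental: 0.1 })); //  0.8
// The first (informative, on-preference) policy would be selected.
```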
+     /**
+      * Ambiguity: Expected observation uncertainty
+      */
+     computeAmbiguity(predictedStates) {
+         let totalAmbiguity = 0;
+         for (let t = 0; t < predictedStates.length; t++) {
+             const discount = Math.pow(this.config.gamma, t);
+             // Use PredictedState.uncertainty (epistemic) or fallback to 1-confidence of the latent state
+             const uncertainty = predictedStates[t].uncertainty ?? (1 - (predictedStates[t].state.confidence ?? 0.5));
+             totalAmbiguity += discount * uncertainty;
+         }
+         return totalAmbiguity;
+     }
+     /**
+      * Risk: Divergence from preferred states
+      */
+     computeRisk(predictedStates, preferences) {
+         let totalRisk = 0;
+         for (let t = 0; t < predictedStates.length; t++) {
+             const discount = Math.pow(this.config.gamma, t);
+             const distance = this.stateDistance(predictedStates[t].state, preferences);
+             totalRisk += discount * distance;
+         }
+         return totalRisk;
+     }
+     /**
+      * Pragmatic Value: Alignment with preferences
+      */
+     computePragmaticValue(predictedStates, preferences) {
+         let totalPragmatic = 0;
+         for (let t = 0; t < predictedStates.length; t++) {
+             const discount = Math.pow(this.config.gamma, t);
+             const similarity = 1 - this.stateDistance(predictedStates[t].state, preferences);
+             totalPragmatic += discount * similarity;
+         }
+         return totalPragmatic;
+     }
+     /**
+      * Epistemic Value: Information gain from exploration
+      */
+     computeEpistemicValue(currentState, predictedStates) {
+         // Information gain = reduction in uncertainty (using 1 - confidence as uncertainty proxy)
+         const currentUncertainty = 1 - (currentState.confidence ?? 0.5);
+         let totalEpistemic = 0;
+         for (let t = 0; t < predictedStates.length; t++) {
+             const discount = Math.pow(this.config.gamma, t);
+             const predictedUncertainty = predictedStates[t].uncertainty ?? (1 - (predictedStates[t].state.confidence ?? 0.5));
+             // Positive if we expect to reduce uncertainty
+             const infoGain = Math.max(0, currentUncertainty - predictedUncertainty);
+             totalEpistemic += discount * infoGain * this.config.curiosityBonus;
+         }
+         return totalEpistemic;
+     }
+     /**
+      * Instrumental Value: V(s) contribution
+      */
+     computeInstrumentalValue(predictedStates) {
+         let totalInstrumental = 0;
+         for (let t = 0; t < predictedStates.length; t++) {
+             const value = this.estimate(predictedStates[t].state, t);
+             totalInstrumental += value.value;
+         }
+         return totalInstrumental;
+     }
+     /**
+      * Distance between two latent states
+      */
+     stateDistance(a, b) {
+         // Cosine distance
+         const dot = this.dotProduct(a.vector, b.vector);
+         const normA = Math.sqrt(this.dotProduct(a.vector, a.vector));
+         const normB = Math.sqrt(this.dotProduct(b.vector, b.vector));
+         const cosSim = dot / (normA * normB + 1e-8);
+         return (1 - cosSim) / 2; // Normalize to [0, 1]
+     }
+     // ============================================================================
+     // Learning (Simplified)
+     // ============================================================================
+     /**
+      * Update value function from observed returns
+      */
+     update(state, observedReturn, learningRate = 0.01) {
+         const predicted = this.estimate(state);
+         const error = observedReturn - predicted.value;
+         // Simple gradient update on basis vectors
+         // In production: backprop through neural network
+         for (let i = 0; i < Math.min(16, state.vector.length); i++) {
+             this.survivalBasis[i] += learningRate * error * state.vector[i] * this.config.weights.survival;
+             this.progressBasis[i] += learningRate * error * state.vector[i] * this.config.weights.progress;
+         }
+         // Update statistics
+         this.updateStats(observedReturn);
+     }
+     /**
+      * Update running statistics for normalization
+      */
+     updateStats(value) {
+         this.valueStats.count++;
+         const n = this.valueStats.count;
+         // Running mean and variance (Welford's algorithm)
+         const delta = value - this.valueStats.mean;
+         this.valueStats.mean += delta / n;
+         if (n > 1) {
+             const delta2 = value - this.valueStats.mean;
+             const newVar = ((n - 2) * this.valueStats.std ** 2 + delta * delta2) / (n - 1);
+             this.valueStats.std = Math.sqrt(Math.max(0, newVar));
+         }
+         this.valueStats.min = Math.min(this.valueStats.min, value);
+         this.valueStats.max = Math.max(this.valueStats.max, value);
+     }
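
The variance recurrence in updateStats is algebraically the Welford update divided by n-1; a quick check against the direct two-pass formula, with made-up samples, confirms the agreement (editor's sketch):

```js
// Sanity check: Welford-style running variance vs. the direct two-pass formula.
const xs = [0.2, -0.4, 0.9, 0.1]; // arbitrary sample returns
let mean = 0, m2 = 0;
xs.forEach((x, i) => {
    const delta = x - mean;
    mean += delta / (i + 1);
    m2 += delta * (x - mean); // running sum of squared deviations
});
const runningVar = m2 / (xs.length - 1); // sample variance
const directMean = xs.reduce((a, b) => a + b, 0) / xs.length;
const directVar = xs.reduce((a, b) => a + (b - directMean) ** 2, 0) / (xs.length - 1);
console.log(Math.abs(runningVar - directVar) < 1e-12); // true
```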
+     // ============================================================================
+     // Utilities
+     // ============================================================================
+     dotProduct(a, b) {
+         let sum = 0;
+         const len = Math.min(a.length, b.length);
+         for (let i = 0; i < len; i++) {
+             sum += a[i] * b[i];
+         }
+         return sum;
+     }
+     sigmoid(x) {
+         return 1 / (1 + Math.exp(-x));
+     }
+     clamp(x, min, max) {
+         return Math.max(min, Math.min(max, x));
+     }
+     // ============================================================================
+     // Getters
+     // ============================================================================
+     getConfig() {
+         return { ...this.config };
+     }
+     getStats() {
+         return { ...this.valueStats };
+     }
+     /**
+      * Set preference weights
+      */
+     setWeights(weights) {
+         this.config.weights = { ...this.config.weights, ...weights };
+         // Normalize weights
+         const sum = Object.values(this.config.weights).reduce((a, b) => a + b, 0);
+         for (const key of Object.keys(this.config.weights)) {
+             this.config.weights[key] /= sum;
+         }
+     }
+ }
+ exports.ValueFunction = ValueFunction;
+ // ============================================================================
+ // Factory
+ // ============================================================================
+ function createValueFunction(config) {
+     return new ValueFunction(config);
+ }
+ // Singleton
+ let valueFunctionInstance = null;
+ function getValueFunction(config) {
+     if (!valueFunctionInstance) {
+         valueFunctionInstance = createValueFunction(config);
+     }
+     return valueFunctionInstance;
+ }
+ function resetValueFunction() {
+     valueFunctionInstance = null;
+ }
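
Worth noting about the singleton above: the config argument only takes effect on the first call; later calls return the cached instance. A sketch assuming a CommonJS require of this module (editor's addition):

```js
const { getValueFunction, resetValueFunction } = require('./value-jepa.js');

const a = getValueFunction({ riskAversion: 0.9 });
const b = getValueFunction({ riskAversion: 0.1 }); // config ignored: instance is cached
console.log(a === b);                    // true
console.log(a.getConfig().riskAversion); // 0.9

resetValueFunction(); // the next getValueFunction() call builds a fresh instance
```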
+ /**
+  * Value-Guided JEPA Predictor
+  *
+  * Wraps the WorldModelPredictor with value-guided prediction
+  */
+ class ValueGuidedJEPA {
+     predictor;
+     valueFunction;
+     constructor(predictor, valueFn) {
+         this.predictor = predictor;
+         this.valueFunction = valueFn ?? createValueFunction();
+     }
+     /**
+      * Predict next state and compute value
+      */
+     async predictWithValue(state, action) {
+         const predicted = await this.predictor.predict(state, action);
+         const value = this.valueFunction.estimate(predicted.state, 1);
+         return {
+             ...predicted,
+             value,
+         };
+     }
+     /**
+      * Generate value-guided trajectory
+      *
+      * Uses rollout with value-based action selection
+      */
+     async planWithValue(initialState, horizon, candidateActions /* actions to consider at each step */, preferences) {
+         const predictedStates = [];
+         const actions = [];
+         const stepValues = [this.valueFunction.estimate(initialState, 0)];
+         let currentState = initialState;
+         for (let t = 0; t < horizon && t < candidateActions.length; t++) {
+             const candidates = candidateActions[t];
+             // Evaluate each action
+             const actionValues = [];
+             for (const action of candidates) {
+                 const predicted = await this.predictor.predict(currentState, action);
+                 const av = this.valueFunction.computeQValue(currentState, action, predicted);
+                 actionValues.push(av);
+             }
+             // Select best action
+             const ranked = this.valueFunction.rankActions(actionValues);
+             const best = ranked[0];
+             // Advance state
+             actions.push(best.action);
+             predictedStates.push(best.predictedState);
+             currentState = best.predictedState.state;
+             stepValues.push(this.valueFunction.estimate(currentState, t + 1));
+         }
+         // Build trajectory (matching the actual Trajectory interface)
+         const trajectory = {
+             id: `traj-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
+             initialState,
+             states: predictedStates,
+             actions,
+             totalProbability: predictedStates.reduce((p, s) => p * s.probability, 1),
+             horizon: predictedStates.length,
+             simulationTime: 0,
+         };
+         // Evaluate full trajectory
+         const valued = this.valueFunction.evaluateTrajectory(trajectory);
+         // Add step values
+         return {
+             ...valued,
+             stepValues,
+         };
+     }
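
A hypothetical planning call (editor's sketch): the action objects are illustrative, and the only predictor contract relied on is `predict(state, action)` resolving to a PredictedState with `state`, `probability`, and `uncertainty` fields, as used throughout this file.

```js
const jepa = await createValueGuidedJEPA();

const plan = await jepa.planWithValue(
    initialLatentState, // placeholder LatentState { vector, confidence, entropy }
    3,                  // horizon
    [                   // candidate actions per step (shapes illustrative)
        [{ type: 'edit' }, { type: 'test' }],
        [{ type: 'test' }, { type: 'commit' }],
        [{ type: 'commit' }],
    ],
);
// stepValues has horizon + 1 entries: the initial state plus one per step.
console.log(plan.actions, plan.riskAdjustedValue, plan.stepValues.length);
```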
+     /**
+      * Select action via Expected Free Energy minimization
+      *
+      * This is the core Active Inference decision rule
+      */
+     async selectActionActiveInference(currentState, candidateActions, preferences, horizon = 3) {
+         let bestAction = candidateActions[0];
+         let bestFE = {
+             expectedFreeEnergy: Infinity,
+             ambiguity: 0,
+             risk: 0,
+             pragmaticValue: 0,
+             epistemicValue: 0,
+             instrumentalValue: 0,
+         };
+         for (const action of candidateActions) {
+             // Simulate trajectory under this action
+             const predictedStates = [];
+             let state = currentState;
+             for (let t = 0; t < horizon; t++) {
+                 const predicted = await this.predictor.predict(state, action);
+                 predictedStates.push(predicted);
+                 state = predicted.state;
+             }
+             // Compute Expected Free Energy
+             const fe = this.valueFunction.computeExpectedFreeEnergy(currentState, [action], predictedStates, preferences);
+             // Lower is better
+             if (fe.expectedFreeEnergy < bestFE.expectedFreeEnergy) {
+                 bestFE = fe;
+                 bestAction = action;
+             }
+         }
+         return { action: bestAction, freeEnergy: bestFE };
+     }
+     /**
+      * Dream-time consolidation with value-based prioritization
+      *
+      * Memories with higher value prediction error are replayed more
+      */
+     async dreamConsolidate(memories, epochs = 10) {
+         let totalReplays = 0;
+         let totalError = 0;
+         for (let epoch = 0; epoch < epochs; epoch++) {
+             // Prioritize by value prediction error
+             const prioritized = memories.map((m) => {
+                 const predictedValue = this.valueFunction.estimate(m.nextState, 1);
+                 const error = Math.abs(m.reward - predictedValue.value);
+                 return { memory: m, priority: error };
+             });
+             // Sort by priority (higher error = more replay)
+             prioritized.sort((a, b) => b.priority - a.priority);
+             // Replay top memories
+             const replayCount = Math.ceil(memories.length * 0.3); // Replay top 30%
+             for (let i = 0; i < replayCount; i++) {
+                 const { memory, priority } = prioritized[i];
+                 // Update value function
+                 this.valueFunction.update(memory.nextState, memory.reward, 0.01);
+                 totalReplays++;
+                 totalError += priority;
+             }
+         }
+         return {
+             replayCount: totalReplays,
+             avgError: totalError / Math.max(1, totalReplays),
+         };
+     }
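
The only fields dreamConsolidate reads from a memory are `nextState` and `reward`. A minimal sketch with fabricated entries (editor's addition):

```js
// Hypothetical replay-buffer entries for dreamConsolidate.
const memories = [
    { nextState: { vector: Array(64).fill(0.1),  confidence: 0.9 }, reward: 0.7 },
    { nextState: { vector: Array(64).fill(-0.2), confidence: 0.4 }, reward: -0.3 },
    { nextState: { vector: Array(64).fill(0.0),  confidence: 0.6 }, reward: 0.1 },
];
const { replayCount, avgError } = await jepa.dreamConsolidate(memories, 5);
// Each epoch replays the top ~30% of memories by |reward - predicted value|,
// so replayCount here is 5 epochs * ceil(3 * 0.3) = 5.
console.log(replayCount, avgError);
```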
+     // ============================================================================
+     // Getters
+     // ============================================================================
+     getValueFunction() {
+         return this.valueFunction;
+     }
+     getPredictor() {
+         return this.predictor;
+     }
+ }
+ exports.ValueGuidedJEPA = ValueGuidedJEPA;
+ // ============================================================================
+ // Factory for Value-Guided JEPA
+ // ============================================================================
+ async function createValueGuidedJEPA(valueFnConfig) {
+     // Import predictor dynamically to avoid circular deps
+     const { createWorldModelPredictor } = await import('./predictor.js');
+     const predictor = createWorldModelPredictor();
+     const valueFn = createValueFunction(valueFnConfig);
+     return new ValueGuidedJEPA(predictor, valueFn);
+ }
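
Tying it together, a sketch of the Active Inference decision rule exposed via the factory (editor's addition); `currentLatentState` and `goalLatentState` are placeholders for encoder output, and the action shapes are illustrative.

```js
// Hypothetical end-to-end use of the factory and the EFE decision rule.
const jepa = await createValueGuidedJEPA({ curiosityBonus: 0.2 });

const { action, freeEnergy } = await jepa.selectActionActiveInference(
    currentLatentState,                         // placeholder LatentState
    [{ type: 'explore' }, { type: 'exploit' }], // illustrative candidate actions
    goalLatentState,                            // preferences: the preferred/goal state
    3,                                          // imagined rollout horizon
);
console.log(action, freeEnergy.expectedFreeEnergy); // the action minimizing G
```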
package/dist/test/brain.test.d.ts
@@ -0,0 +1,11 @@
+ /**
+  * Tests for Brain Module (Phase 10: Neural Integration Layer)
+  *
+  * Tests the Cognitive Integration Layer:
+  * - Command routing (Supervisor pattern)
+  * - State management
+  * - Module transitions
+  * - Metrics tracking
+  * - Event system
+  */
+ export {};