genesis-ai-cli 7.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (227)
  1. package/.env.example +78 -0
  2. package/README.md +282 -0
  3. package/dist/src/active-inference/actions.d.ts +75 -0
  4. package/dist/src/active-inference/actions.js +250 -0
  5. package/dist/src/active-inference/autonomous-loop.d.ts +103 -0
  6. package/dist/src/active-inference/autonomous-loop.js +289 -0
  7. package/dist/src/active-inference/core.d.ts +85 -0
  8. package/dist/src/active-inference/core.js +555 -0
  9. package/dist/src/active-inference/demo-autonomous-loop.d.ts +8 -0
  10. package/dist/src/active-inference/demo-autonomous-loop.js +338 -0
  11. package/dist/src/active-inference/demo-value-integration.d.ts +8 -0
  12. package/dist/src/active-inference/demo-value-integration.js +174 -0
  13. package/dist/src/active-inference/index.d.ts +32 -0
  14. package/dist/src/active-inference/index.js +88 -0
  15. package/dist/src/active-inference/integration.d.ts +114 -0
  16. package/dist/src/active-inference/integration.js +698 -0
  17. package/dist/src/active-inference/memory-integration.d.ts +51 -0
  18. package/dist/src/active-inference/memory-integration.js +232 -0
  19. package/dist/src/active-inference/observations.d.ts +67 -0
  20. package/dist/src/active-inference/observations.js +147 -0
  21. package/dist/src/active-inference/test-active-inference.d.ts +8 -0
  22. package/dist/src/active-inference/test-active-inference.js +320 -0
  23. package/dist/src/active-inference/test-value-integration.d.ts +6 -0
  24. package/dist/src/active-inference/test-value-integration.js +168 -0
  25. package/dist/src/active-inference/types.d.ts +150 -0
  26. package/dist/src/active-inference/types.js +59 -0
  27. package/dist/src/active-inference/value-integration.d.ts +164 -0
  28. package/dist/src/active-inference/value-integration.js +459 -0
  29. package/dist/src/agents/base-agent.d.ts +53 -0
  30. package/dist/src/agents/base-agent.js +178 -0
  31. package/dist/src/agents/builder.d.ts +67 -0
  32. package/dist/src/agents/builder.js +537 -0
  33. package/dist/src/agents/critic.d.ts +35 -0
  34. package/dist/src/agents/critic.js +322 -0
  35. package/dist/src/agents/ethicist.d.ts +54 -0
  36. package/dist/src/agents/ethicist.js +393 -0
  37. package/dist/src/agents/explorer.d.ts +26 -0
  38. package/dist/src/agents/explorer.js +216 -0
  39. package/dist/src/agents/feeling.d.ts +41 -0
  40. package/dist/src/agents/feeling.js +320 -0
  41. package/dist/src/agents/index.d.ts +111 -0
  42. package/dist/src/agents/index.js +222 -0
  43. package/dist/src/agents/memory.d.ts +69 -0
  44. package/dist/src/agents/memory.js +404 -0
  45. package/dist/src/agents/message-bus.d.ts +88 -0
  46. package/dist/src/agents/message-bus.js +267 -0
  47. package/dist/src/agents/narrator.d.ts +90 -0
  48. package/dist/src/agents/narrator.js +473 -0
  49. package/dist/src/agents/planner.d.ts +38 -0
  50. package/dist/src/agents/planner.js +341 -0
  51. package/dist/src/agents/predictor.d.ts +73 -0
  52. package/dist/src/agents/predictor.js +506 -0
  53. package/dist/src/agents/sensor.d.ts +88 -0
  54. package/dist/src/agents/sensor.js +377 -0
  55. package/dist/src/agents/test-agents.d.ts +6 -0
  56. package/dist/src/agents/test-agents.js +73 -0
  57. package/dist/src/agents/types.d.ts +194 -0
  58. package/dist/src/agents/types.js +7 -0
  59. package/dist/src/brain/index.d.ts +185 -0
  60. package/dist/src/brain/index.js +843 -0
  61. package/dist/src/brain/trace.d.ts +91 -0
  62. package/dist/src/brain/trace.js +327 -0
  63. package/dist/src/brain/types.d.ts +165 -0
  64. package/dist/src/brain/types.js +51 -0
  65. package/dist/src/cli/chat.d.ts +237 -0
  66. package/dist/src/cli/chat.js +1959 -0
  67. package/dist/src/cli/dispatcher.d.ts +182 -0
  68. package/dist/src/cli/dispatcher.js +718 -0
  69. package/dist/src/cli/human-loop.d.ts +170 -0
  70. package/dist/src/cli/human-loop.js +543 -0
  71. package/dist/src/cli/index.d.ts +12 -0
  72. package/dist/src/cli/index.js +28 -0
  73. package/dist/src/cli/interactive.d.ts +141 -0
  74. package/dist/src/cli/interactive.js +757 -0
  75. package/dist/src/cli/ui.d.ts +205 -0
  76. package/dist/src/cli/ui.js +632 -0
  77. package/dist/src/consciousness/attention-schema.d.ts +154 -0
  78. package/dist/src/consciousness/attention-schema.js +432 -0
  79. package/dist/src/consciousness/global-workspace.d.ts +149 -0
  80. package/dist/src/consciousness/global-workspace.js +422 -0
  81. package/dist/src/consciousness/index.d.ts +186 -0
  82. package/dist/src/consciousness/index.js +476 -0
  83. package/dist/src/consciousness/phi-calculator.d.ts +119 -0
  84. package/dist/src/consciousness/phi-calculator.js +445 -0
  85. package/dist/src/consciousness/phi-decisions.d.ts +169 -0
  86. package/dist/src/consciousness/phi-decisions.js +383 -0
  87. package/dist/src/consciousness/phi-monitor.d.ts +153 -0
  88. package/dist/src/consciousness/phi-monitor.js +465 -0
  89. package/dist/src/consciousness/types.d.ts +260 -0
  90. package/dist/src/consciousness/types.js +44 -0
  91. package/dist/src/daemon/dream-mode.d.ts +115 -0
  92. package/dist/src/daemon/dream-mode.js +470 -0
  93. package/dist/src/daemon/index.d.ts +162 -0
  94. package/dist/src/daemon/index.js +542 -0
  95. package/dist/src/daemon/maintenance.d.ts +139 -0
  96. package/dist/src/daemon/maintenance.js +549 -0
  97. package/dist/src/daemon/process.d.ts +82 -0
  98. package/dist/src/daemon/process.js +442 -0
  99. package/dist/src/daemon/scheduler.d.ts +90 -0
  100. package/dist/src/daemon/scheduler.js +494 -0
  101. package/dist/src/daemon/types.d.ts +213 -0
  102. package/dist/src/daemon/types.js +50 -0
  103. package/dist/src/epistemic/index.d.ts +74 -0
  104. package/dist/src/epistemic/index.js +225 -0
  105. package/dist/src/grounding/epistemic-stack.d.ts +100 -0
  106. package/dist/src/grounding/epistemic-stack.js +408 -0
  107. package/dist/src/grounding/feedback.d.ts +98 -0
  108. package/dist/src/grounding/feedback.js +276 -0
  109. package/dist/src/grounding/index.d.ts +123 -0
  110. package/dist/src/grounding/index.js +224 -0
  111. package/dist/src/grounding/verifier.d.ts +149 -0
  112. package/dist/src/grounding/verifier.js +484 -0
  113. package/dist/src/healing/detector.d.ts +110 -0
  114. package/dist/src/healing/detector.js +436 -0
  115. package/dist/src/healing/fixer.d.ts +138 -0
  116. package/dist/src/healing/fixer.js +572 -0
  117. package/dist/src/healing/index.d.ts +23 -0
  118. package/dist/src/healing/index.js +43 -0
  119. package/dist/src/hooks/index.d.ts +135 -0
  120. package/dist/src/hooks/index.js +317 -0
  121. package/dist/src/index.d.ts +23 -0
  122. package/dist/src/index.js +1266 -0
  123. package/dist/src/kernel/index.d.ts +155 -0
  124. package/dist/src/kernel/index.js +795 -0
  125. package/dist/src/kernel/invariants.d.ts +153 -0
  126. package/dist/src/kernel/invariants.js +355 -0
  127. package/dist/src/kernel/test-kernel.d.ts +6 -0
  128. package/dist/src/kernel/test-kernel.js +108 -0
  129. package/dist/src/kernel/test-real-mcp.d.ts +10 -0
  130. package/dist/src/kernel/test-real-mcp.js +295 -0
  131. package/dist/src/llm/index.d.ts +146 -0
  132. package/dist/src/llm/index.js +428 -0
  133. package/dist/src/llm/router.d.ts +136 -0
  134. package/dist/src/llm/router.js +510 -0
  135. package/dist/src/mcp/index.d.ts +85 -0
  136. package/dist/src/mcp/index.js +657 -0
  137. package/dist/src/mcp/resilient.d.ts +139 -0
  138. package/dist/src/mcp/resilient.js +417 -0
  139. package/dist/src/memory/cache.d.ts +118 -0
  140. package/dist/src/memory/cache.js +356 -0
  141. package/dist/src/memory/cognitive-workspace.d.ts +231 -0
  142. package/dist/src/memory/cognitive-workspace.js +521 -0
  143. package/dist/src/memory/consolidation.d.ts +99 -0
  144. package/dist/src/memory/consolidation.js +443 -0
  145. package/dist/src/memory/episodic.d.ts +114 -0
  146. package/dist/src/memory/episodic.js +394 -0
  147. package/dist/src/memory/forgetting.d.ts +134 -0
  148. package/dist/src/memory/forgetting.js +324 -0
  149. package/dist/src/memory/index.d.ts +211 -0
  150. package/dist/src/memory/index.js +367 -0
  151. package/dist/src/memory/indexer.d.ts +123 -0
  152. package/dist/src/memory/indexer.js +479 -0
  153. package/dist/src/memory/procedural.d.ts +136 -0
  154. package/dist/src/memory/procedural.js +479 -0
  155. package/dist/src/memory/semantic.d.ts +132 -0
  156. package/dist/src/memory/semantic.js +497 -0
  157. package/dist/src/memory/types.d.ts +193 -0
  158. package/dist/src/memory/types.js +15 -0
  159. package/dist/src/orchestrator.d.ts +65 -0
  160. package/dist/src/orchestrator.js +317 -0
  161. package/dist/src/persistence/index.d.ts +257 -0
  162. package/dist/src/persistence/index.js +763 -0
  163. package/dist/src/pipeline/executor.d.ts +51 -0
  164. package/dist/src/pipeline/executor.js +695 -0
  165. package/dist/src/pipeline/index.d.ts +7 -0
  166. package/dist/src/pipeline/index.js +11 -0
  167. package/dist/src/self-production.d.ts +67 -0
  168. package/dist/src/self-production.js +205 -0
  169. package/dist/src/subagents/executor.d.ts +58 -0
  170. package/dist/src/subagents/executor.js +283 -0
  171. package/dist/src/subagents/index.d.ts +37 -0
  172. package/dist/src/subagents/index.js +53 -0
  173. package/dist/src/subagents/registry.d.ts +23 -0
  174. package/dist/src/subagents/registry.js +167 -0
  175. package/dist/src/subagents/types.d.ts +79 -0
  176. package/dist/src/subagents/types.js +14 -0
  177. package/dist/src/tools/bash.d.ts +139 -0
  178. package/dist/src/tools/bash.js +583 -0
  179. package/dist/src/tools/edit.d.ts +125 -0
  180. package/dist/src/tools/edit.js +424 -0
  181. package/dist/src/tools/git.d.ts +179 -0
  182. package/dist/src/tools/git.js +504 -0
  183. package/dist/src/tools/index.d.ts +21 -0
  184. package/dist/src/tools/index.js +163 -0
  185. package/dist/src/types.d.ts +145 -0
  186. package/dist/src/types.js +7 -0
  187. package/dist/src/world-model/decoder.d.ts +163 -0
  188. package/dist/src/world-model/decoder.js +517 -0
  189. package/dist/src/world-model/digital-twin.d.ts +219 -0
  190. package/dist/src/world-model/digital-twin.js +695 -0
  191. package/dist/src/world-model/encoder.d.ts +141 -0
  192. package/dist/src/world-model/encoder.js +564 -0
  193. package/dist/src/world-model/index.d.ts +221 -0
  194. package/dist/src/world-model/index.js +772 -0
  195. package/dist/src/world-model/predictor.d.ts +161 -0
  196. package/dist/src/world-model/predictor.js +681 -0
  197. package/dist/src/world-model/test-value-jepa.d.ts +8 -0
  198. package/dist/src/world-model/test-value-jepa.js +430 -0
  199. package/dist/src/world-model/types.d.ts +341 -0
  200. package/dist/src/world-model/types.js +69 -0
  201. package/dist/src/world-model/value-jepa.d.ts +247 -0
  202. package/dist/src/world-model/value-jepa.js +622 -0
  203. package/dist/test/brain.test.d.ts +11 -0
  204. package/dist/test/brain.test.js +358 -0
  205. package/dist/test/cli/dispatcher.test.d.ts +4 -0
  206. package/dist/test/cli/dispatcher.test.js +332 -0
  207. package/dist/test/cli/human-loop.test.d.ts +4 -0
  208. package/dist/test/cli/human-loop.test.js +270 -0
  209. package/dist/test/grounding/feedback.test.d.ts +4 -0
  210. package/dist/test/grounding/feedback.test.js +462 -0
  211. package/dist/test/grounding/verifier.test.d.ts +4 -0
  212. package/dist/test/grounding/verifier.test.js +442 -0
  213. package/dist/test/grounding.test.d.ts +6 -0
  214. package/dist/test/grounding.test.js +246 -0
  215. package/dist/test/healing/detector.test.d.ts +4 -0
  216. package/dist/test/healing/detector.test.js +266 -0
  217. package/dist/test/healing/fixer.test.d.ts +4 -0
  218. package/dist/test/healing/fixer.test.js +369 -0
  219. package/dist/test/integration.test.d.ts +5 -0
  220. package/dist/test/integration.test.js +290 -0
  221. package/dist/test/tools/bash.test.d.ts +4 -0
  222. package/dist/test/tools/bash.test.js +348 -0
  223. package/dist/test/tools/edit.test.d.ts +4 -0
  224. package/dist/test/tools/edit.test.js +350 -0
  225. package/dist/test/tools/git.test.d.ts +4 -0
  226. package/dist/test/tools/git.test.js +350 -0
  227. package/package.json +60 -0
@@ -0,0 +1,59 @@
"use strict";
/**
 * Genesis 6.1 - Active Inference Types
 *
 * Based on pymdp and the Free Energy Principle (Friston).
 *
 * Core concepts:
 * - Hidden states: what the system believes about the world
 * - Observations: what the system perceives
 * - Actions: what the system can do
 * - Preferences: what the system wants (C matrix)
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.DEFAULT_CONFIG = exports.ACTION_COUNT = exports.ACTIONS = exports.OBSERVATION_LABELS = exports.OBSERVATION_DIMS = exports.HIDDEN_STATE_LABELS = exports.HIDDEN_STATE_DIMS = void 0;
// Cardinality of each hidden-state factor.
const HIDDEN_STATE_DIMS = {
    viability: 5,
    worldState: 4,
    coupling: 5,
    goalProgress: 4,
};
// Human-readable level names, index-aligned with HIDDEN_STATE_DIMS.
const HIDDEN_STATE_LABELS = {
    viability: ['critical', 'low', 'medium', 'high', 'optimal'],
    worldState: ['unknown', 'stable', 'changing', 'hostile'],
    coupling: ['none', 'weak', 'medium', 'strong', 'synced'],
    goalProgress: ['blocked', 'slow', 'onTrack', 'achieved'],
};
// Cardinality of each observation modality.
const OBSERVATION_DIMS = {
    energy: 5,
    phi: 4,
    tool: 3,
    coherence: 3,
    task: 4,
};
// Human-readable level names, index-aligned with OBSERVATION_DIMS.
const OBSERVATION_LABELS = {
    energy: ['depleted', 'low', 'medium', 'high', 'full'],
    phi: ['dormant', 'low', 'medium', 'high'],
    tool: ['failed', 'partial', 'success'],
    coherence: ['broken', 'degraded', 'consistent'],
    task: ['none', 'pending', 'active', 'completed'],
};
// Discrete action repertoire available to the agent.
const ACTIONS = [
    'sense.mcp',
    'recall.memory',
    'plan.goals',
    'verify.ethics',
    'execute.task',
    'dream.cycle',
    'rest.idle',
    'recharge',
];
// Engine defaults (iteration counts, horizon, temperature, learning rates).
const DEFAULT_CONFIG = {
    inferenceIterations: 16,
    policyHorizon: 3,
    actionTemperature: 1.0,
    priorWeight: 0.1,
    learningRateA: 0.01,
    learningRateB: 0.01,
};
exports.HIDDEN_STATE_DIMS = HIDDEN_STATE_DIMS;
exports.HIDDEN_STATE_LABELS = HIDDEN_STATE_LABELS;
exports.OBSERVATION_DIMS = OBSERVATION_DIMS;
exports.OBSERVATION_LABELS = OBSERVATION_LABELS;
exports.ACTIONS = ACTIONS;
exports.ACTION_COUNT = ACTIONS.length;
exports.DEFAULT_CONFIG = DEFAULT_CONFIG;
@@ -0,0 +1,164 @@
/**
 * Genesis 6.2 - Value-Guided Active Inference Integration
 *
 * Connects the Value-Guided JEPA world model to the Active Inference engine.
 *
 * This integration enables:
 * 1. Value-augmented policy selection (EFE + V(s))
 * 2. World model predictions for trajectory simulation
 * 3. Value learning from Action outcomes
 * 4. Hybrid discrete-continuous inference
 *
 * Architecture:
 * ```
 * Observations → Active Inference Engine → Discrete Beliefs
 *                       ↓
 * World Model Encoder → Latent State
 *                       ↓
 * Value Function → V(s), Q(s,a)
 *                       ↓
 * Policy = softmax(-EFE + λV)
 * ```
 */
import { ActiveInferenceEngine } from './core.js';
import { Observation, Beliefs, Policy, ActionType } from './types.js';
import { ValueFunction, ValueGuidedJEPA, type ValueEstimate, type FreeEnergyDecomposition, type ValueFunctionConfig } from '../world-model/value-jepa.js';
import type { LatentState } from '../world-model/types.js';
export interface ValueIntegrationConfig {
    /** λ in `softmax(-EFE + λV)` — weight of value vs. expected free energy. */
    valueWeight: number;
    /** When true (and JEPA is initialized), use world-model rollouts for action values. */
    useWorldModelPredictions: boolean;
    /** Rollout depth passed to the world model when computing full EFE. */
    predictionHorizon: number;
    /** Learning rate for value-function updates from outcomes. */
    valueLearningRate: number;
    /** Emit diagnostic console logging during policy computation. */
    verbose: boolean;
    /** Options forwarded to the value function / JEPA constructors. */
    valueFunctionConfig: Partial<ValueFunctionConfig>;
}
export declare const DEFAULT_VALUE_INTEGRATION_CONFIG: ValueIntegrationConfig;
export declare class ValueAugmentedEngine {
    private aiEngine;
    private valueFunction;
    private jepa;
    private config;
    private latentState;
    private stats;
    private eventHandlers;
    constructor(aiEngine?: ActiveInferenceEngine, config?: Partial<ValueIntegrationConfig>);
    /**
     * Initialize with JEPA for full world model integration
     */
    initializeJEPA(): Promise<void>;
    /**
     * Full inference cycle with value augmentation
     */
    step(observation: Observation): Promise<{
        action: ActionType;
        beliefs: Beliefs;
        value: ValueEstimate;
        policy: Policy;
    }>;
    /**
     * Compute policy with value augmentation
     *
     * Policy ∝ exp(-EFE + λ * V(s'))
     *
     * Where V(s') is the expected value of the next state under each action.
     */
    computeValueAugmentedPolicy(beliefs: Beliefs): Promise<Policy>;
    /**
     * Compute value augmentation for a specific action
     */
    private computeActionValueAugment;
    /**
     * Heuristic value for actions when JEPA is not available
     */
    private heuristicActionValue;
    /**
     * Convert Active Inference action to World Model action
     */
    private createWorldModelAction;
    /**
     * Convert beliefs to latent state
     */
    private beliefsToLatentState;
    /**
     * Sample action from policy
     */
    private sampleFromPolicy;
    /**
     * Entropy of a probability distribution
     */
    private entropy;
    /**
     * Update value function from observed outcome
     */
    updateFromOutcome(previousLatent: LatentState, action: ActionType, outcome: {
        success: boolean;
        reward?: number;
        newObservation: Observation;
    }): void;
    /**
     * Convert observation to immediate reward signal
     */
    private observationToReward;
    /**
     * Compute full Expected Free Energy using Value-Guided JEPA
     *
     * This replaces the POMDP-based EFE with world model predictions
     */
    computeFullEFE(preferredState?: LatentState): Promise<Map<ActionType, FreeEnergyDecomposition>>;
    /**
     * Create preferred/goal latent state
     */
    private createPreferredState;
    getAIEngine(): ActiveInferenceEngine;
    getValueFunction(): ValueFunction;
    getJEPA(): ValueGuidedJEPA | null;
    getLatentState(): LatentState | null;
    /** Aggregated engine statistics (value-function stats plus AI engine stats). */
    getStats(): {
        averageValue: number;
        valueStats: {
            mean: number;
            std: number;
            min: number;
            max: number;
            count: number;
        };
        aiStats: {
            inferenceCount: number;
            averageSurprise: number;
            actionCounts: {
                [k: string]: number;
            };
        };
        cycleCount: number;
        totalValue: number;
        valueUpdates: number;
        trajectoryPredictions: number;
    };
    getConfig(): ValueIntegrationConfig;
    /** Subscribe to integration events; returns an unsubscribe function. */
    on(handler: (event: ValueIntegrationEvent) => void): () => void;
    private emit;
}
/** Event emitted by ValueAugmentedEngine at lifecycle milestones. */
export interface ValueIntegrationEvent {
    type: 'jepa_initialized' | 'step_complete' | 'value_updated' | 'efe_computed';
    timestamp: Date;
    data?: unknown;
}
export declare function createValueAugmentedEngine(config?: Partial<ValueIntegrationConfig>): ValueAugmentedEngine;
/**
 * Create fully integrated engine with JEPA
 */
export declare function createFullyIntegratedEngine(config?: Partial<ValueIntegrationConfig>): Promise<ValueAugmentedEngine>;
import { AutonomousLoop, type AutonomousLoopConfig } from './autonomous-loop.js';
/**
 * Configuration for value-integrated autonomous loop
 */
export interface ValueIntegratedLoopConfig extends AutonomousLoopConfig {
    valueIntegration: Partial<ValueIntegrationConfig>;
}
/**
 * Create an autonomous loop with value-guided decision making
 */
export declare function createValueIntegratedLoop(config?: Partial<ValueIntegratedLoopConfig>): {
    loop: AutonomousLoop;
    valueEngine: ValueAugmentedEngine;
};
@@ -0,0 +1,459 @@
"use strict";
/**
 * Genesis 6.2 - Value-Guided Active Inference Integration
 *
 * Connects the Value-Guided JEPA world model to the Active Inference engine.
 *
 * This integration enables:
 * 1. Value-augmented policy selection (EFE + V(s))
 * 2. World model predictions for trajectory simulation
 * 3. Value learning from Action outcomes
 * 4. Hybrid discrete-continuous inference
 *
 * Architecture:
 * ```
 * Observations → Active Inference Engine → Discrete Beliefs
 *                       ↓
 * World Model Encoder → Latent State
 *                       ↓
 * Value Function → V(s), Q(s,a)
 *                       ↓
 * Policy = softmax(-EFE + λV)
 * ```
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.ValueAugmentedEngine = exports.DEFAULT_VALUE_INTEGRATION_CONFIG = void 0;
// The factory functions below are hoisted function declarations defined later
// in this file (beyond this excerpt), so referencing them here is safe.
exports.createValueAugmentedEngine = createValueAugmentedEngine;
exports.createFullyIntegratedEngine = createFullyIntegratedEngine;
exports.createValueIntegratedLoop = createValueIntegratedLoop;
const core_js_1 = require("./core.js");
const types_js_1 = require("./types.js");
const value_jepa_js_1 = require("../world-model/value-jepa.js");
// Default knobs: λ = 0.5 value weight, 3-step rollouts, quiet logging.
exports.DEFAULT_VALUE_INTEGRATION_CONFIG = {
    valueWeight: 0.5,
    useWorldModelPredictions: true,
    predictionHorizon: 3,
    valueLearningRate: 0.01,
    verbose: false,
    valueFunctionConfig: {},
};
/**
 * Mapping from Active Inference action to World Model action
 * (unknown actions fall back to 'observe' at the call site).
 */
const AI_TO_WM_ACTION = {
    'sense.mcp': 'observe',
    'recall.memory': 'query',
    'plan.goals': 'query',
    'verify.ethics': 'query',
    'execute.task': 'execute',
    'dream.cycle': 'transform',
    'rest.idle': 'observe',
    'recharge': 'transform',
};
// ============================================================================
// Value-Augmented Active Inference Engine
// ============================================================================
56
class ValueAugmentedEngine {
    // Discrete Active Inference (POMDP) engine; source of beliefs and base policy.
    aiEngine;
    // Value function V(s) over latent states; updated from outcomes.
    valueFunction;
    // Value-Guided JEPA world model; null until initializeJEPA() resolves.
    jepa = null;
    // Effective configuration (defaults merged with constructor overrides).
    config;
    // Current latent state (synchronized with beliefs)
    latentState = null;
    // Statistics
    stats = {
        cycleCount: 0,
        totalValue: 0,
        valueUpdates: 0,
        trajectoryPredictions: 0,
    };
    // Event handlers
    eventHandlers = [];
72
    /**
     * @param aiEngine optional pre-built Active Inference engine; a fresh one
     *                 is created when omitted.
     * @param config   partial overrides merged over DEFAULT_VALUE_INTEGRATION_CONFIG.
     */
    constructor(aiEngine, config = {}) {
        this.config = { ...exports.DEFAULT_VALUE_INTEGRATION_CONFIG, ...config };
        this.aiEngine = aiEngine ?? (0, core_js_1.createActiveInferenceEngine)();
        this.valueFunction = (0, value_jepa_js_1.createValueFunction)(this.config.valueFunctionConfig);
    }
77
    /**
     * Initialize with JEPA for full world model integration.
     * Until this resolves, policy augmentation falls back to heuristics.
     * Emits a 'jepa_initialized' event on completion.
     */
    async initializeJEPA() {
        this.jepa = await (0, value_jepa_js_1.createValueGuidedJEPA)(this.config.valueFunctionConfig);
        this.emit({ type: 'jepa_initialized', timestamp: new Date() });
    }
84
+ // ============================================================================
85
+ // Core Integration
86
+ // ============================================================================
87
    /**
     * Full inference cycle with value augmentation.
     *
     * Pipeline: infer beliefs → encode latent state → estimate V(s) →
     * value-augmented policy → sample action. Emits 'step_complete'.
     *
     * @param observation discrete observation vector for this cycle
     * @returns the sampled action plus beliefs, value estimate, and policy
     */
    async step(observation) {
        this.stats.cycleCount++;
        // 1. Update beliefs via Active Inference
        const beliefs = this.aiEngine.inferStates(observation);
        // 2. Convert beliefs to latent state
        this.latentState = this.beliefsToLatentState(beliefs, observation);
        // 3. Compute value of current state
        const currentValue = this.valueFunction.estimate(this.latentState);
        // 4. Compute value-augmented policy
        const policy = await this.computeValueAugmentedPolicy(beliefs);
        // 5. Sample action
        const action = this.sampleFromPolicy(policy);
        // Track statistics
        this.stats.totalValue += currentValue.value;
        this.emit({
            type: 'step_complete',
            timestamp: new Date(),
            data: { action, beliefs, value: currentValue, policy },
        });
        return { action, beliefs, value: currentValue, policy };
    }
111
    /**
     * Compute policy with value augmentation
     *
     * Policy ∝ exp(-EFE + λ * V(s'))
     *
     * Where V(s') is the expected value of the next state under each action.
     *
     * NOTE(review): the `beliefs` parameter is currently unused here — the
     * method reads state via this.aiEngine / this.latentState instead.
     * Kept for interface compatibility (see the .d.ts signature).
     */
    async computeValueAugmentedPolicy(beliefs) {
        // Get base EFE policy from Active Inference engine
        const basePolicy = this.aiEngine.inferPolicies();
        if (!this.latentState) {
            return basePolicy; // No value augmentation without latent state
        }
        // Compute value augmentation for each action
        const valueAugments = [];
        for (let a = 0; a < types_js_1.ACTION_COUNT; a++) {
            const actionType = types_js_1.ACTIONS[a];
            const augment = await this.computeActionValueAugment(actionType);
            valueAugments[a] = augment;
        }
        // Combine: policy ∝ exp(log(basePolicy) + λ * valueAugment)
        // (1e-10 floor avoids log(0) for zero-probability actions)
        const logPolicy = basePolicy.map((p, i) => Math.log(Math.max(p, 1e-10)) + this.config.valueWeight * valueAugments[i]);
        // Softmax to get final policy (max-subtraction for numerical stability)
        const maxLogP = Math.max(...logPolicy);
        const expPolicy = logPolicy.map(lp => Math.exp(lp - maxLogP));
        const sumExp = expPolicy.reduce((a, b) => a + b, 0);
        const policy = expPolicy.map(e => e / sumExp);
        if (this.config.verbose) {
            console.log('[Value Integration] Base policy:', basePolicy.map(p => p.toFixed(3)));
            console.log('[Value Integration] Value augments:', valueAugments.map(v => v.toFixed(3)));
            console.log('[Value Integration] Final policy:', policy.map(p => p.toFixed(3)));
        }
        return policy;
    }
145
+ /**
146
+ * Compute value augmentation for a specific action
147
+ */
148
+ async computeActionValueAugment(actionType) {
149
+ if (!this.latentState)
150
+ return 0;
151
+ if (this.jepa && this.config.useWorldModelPredictions) {
152
+ // Use JEPA for trajectory prediction
153
+ const wmAction = this.createWorldModelAction(actionType);
154
+ const predicted = await this.jepa.predictWithValue(this.latentState, wmAction);
155
+ this.stats.trajectoryPredictions++;
156
+ // Return expected value of next state
157
+ return predicted.value.value;
158
+ }
159
+ else {
160
+ // Simple heuristic based on action type
161
+ return this.heuristicActionValue(actionType);
162
+ }
163
+ }
164
+ /**
165
+ * Heuristic value for actions when JEPA is not available
166
+ */
167
+ heuristicActionValue(actionType) {
168
+ const beliefs = this.aiEngine.getBeliefs();
169
+ const state = this.aiEngine.getMostLikelyState();
170
+ // Value based on current state and action appropriateness
171
+ switch (actionType) {
172
+ case 'recharge':
173
+ // High value when energy is low
174
+ return state.viability === 'critical' ? 0.9 :
175
+ state.viability === 'low' ? 0.6 :
176
+ state.viability === 'medium' ? 0.2 : 0;
177
+ case 'rest.idle':
178
+ // Value at attractor (Wu Wei)
179
+ return state.viability === 'optimal' &&
180
+ state.worldState === 'stable' ? 0.8 : 0.1;
181
+ case 'execute.task':
182
+ // High value when ready to execute
183
+ return state.viability === 'optimal' &&
184
+ state.goalProgress !== 'achieved' ? 0.7 : 0.2;
185
+ case 'sense.mcp':
186
+ // Value for sensing when coupling is weak
187
+ return state.coupling === 'none' ? 0.5 :
188
+ state.coupling === 'weak' ? 0.4 : 0.2;
189
+ case 'plan.goals':
190
+ // Value when not blocked but not yet on track
191
+ return state.goalProgress === 'blocked' ? 0.6 :
192
+ state.goalProgress === 'slow' ? 0.5 : 0.2;
193
+ case 'recall.memory':
194
+ return state.worldState === 'unknown' ? 0.5 : 0.2;
195
+ case 'dream.cycle':
196
+ return state.worldState === 'changing' ? 0.4 : 0.1;
197
+ case 'verify.ethics':
198
+ return 0.3; // Always moderately valuable
199
+ default:
200
+ return 0;
201
+ }
202
+ }
203
+ /**
204
+ * Convert Active Inference action to World Model action
205
+ */
206
+ createWorldModelAction(actionType) {
207
+ const wmType = AI_TO_WM_ACTION[actionType] ?? 'observe';
208
+ return {
209
+ id: `ai-${actionType}-${Date.now()}`,
210
+ type: wmType,
211
+ parameters: { sourceAction: actionType },
212
+ agent: 'active-inference',
213
+ timestamp: new Date(),
214
+ };
215
+ }
216
    /**
     * Convert beliefs (plus the raw observation) to a 64-dim latent state.
     *
     * Layout: 18 dims of belief probabilities, 5 dims of normalized
     * observations, then derived cross-product features, then small random
     * noise padding up to dim 64. NOTE(review): the random padding makes the
     * latent vector nondeterministic for identical inputs — confirm this is
     * intended before relying on reproducibility.
     */
    beliefsToLatentState(beliefs, observation) {
        // Create a latent vector from beliefs and observations
        // This is a simplified mapping - in practice would use learned encoder
        const vector = [];
        // Encode beliefs (each factor as part of the vector)
        vector.push(...beliefs.viability); // 5 dims
        vector.push(...beliefs.worldState); // 4 dims
        vector.push(...beliefs.coupling); // 5 dims
        vector.push(...beliefs.goalProgress); // 4 dims = 18 dims total
        // Encode observations (each divided by its max index to land in [0, 1])
        vector.push(observation.energy / 4); // Normalized
        vector.push(observation.phi / 3);
        vector.push(observation.tool / 2);
        vector.push(observation.coherence / 2);
        vector.push(observation.task / 3); // +5 dims = 23 dims
        // Pad to standard latent dimension (64)
        const targetDim = 64;
        while (vector.length < targetDim) {
            // Fill with derived features
            const idx = vector.length;
            if (idx < 30) {
                // Cross-products of beliefs
                const i = idx % beliefs.viability.length;
                const j = idx % beliefs.worldState.length;
                vector.push(beliefs.viability[i] * beliefs.worldState[j]);
            }
            else {
                // Entropy-based features
                vector.push(Math.random() * 0.1); // Small noise
            }
        }
        // Compute confidence from belief certainty
        const beliefEntropy = this.entropy(beliefs.viability) +
            this.entropy(beliefs.worldState) +
            this.entropy(beliefs.coupling) +
            this.entropy(beliefs.goalProgress);
        // Max possible entropy: sum of log(cardinality) over the four factors.
        const maxEntropy = Math.log(5) + Math.log(4) + Math.log(5) + Math.log(4);
        const confidence = 1 - (beliefEntropy / maxEntropy);
        return {
            vector,
            dimensions: targetDim,
            sourceModality: 'state',
            sourceId: `ai-beliefs-${Date.now()}`,
            timestamp: new Date(),
            confidence,
            entropy: beliefEntropy / maxEntropy,
        };
    }
267
+ /**
268
+ * Sample action from policy
269
+ */
270
+ sampleFromPolicy(policy) {
271
+ const r = Math.random();
272
+ let cumsum = 0;
273
+ for (let i = 0; i < policy.length; i++) {
274
+ cumsum += policy[i];
275
+ if (r < cumsum) {
276
+ return types_js_1.ACTIONS[i];
277
+ }
278
+ }
279
+ return types_js_1.ACTIONS[types_js_1.ACTIONS.length - 1];
280
+ }
281
+ /**
282
+ * Entropy of a probability distribution
283
+ */
284
+ entropy(probs) {
285
+ return -probs.reduce((acc, p) => {
286
+ if (p > 1e-10) {
287
+ return acc + p * Math.log(p);
288
+ }
289
+ return acc;
290
+ }, 0);
291
+ }
292
+ // ============================================================================
293
+ // Value Learning
294
+ // ============================================================================
295
+ /**
296
+ * Update value function from observed outcome
297
+ */
298
+ updateFromOutcome(previousLatent, action, outcome) {
299
+ if (!previousLatent)
300
+ return;
301
+ // Compute observed return
302
+ const observedReturn = outcome.reward ??
303
+ (outcome.success ? 0.5 : -0.2) +
304
+ this.observationToReward(outcome.newObservation);
305
+ // Update value function
306
+ this.valueFunction.update(previousLatent, observedReturn, this.config.valueLearningRate);
307
+ this.stats.valueUpdates++;
308
+ this.emit({
309
+ type: 'value_updated',
310
+ timestamp: new Date(),
311
+ data: { action, observedReturn, success: outcome.success },
312
+ });
313
+ }
314
+ /**
315
+ * Convert observation to immediate reward signal
316
+ */
317
+ observationToReward(observation) {
318
+ // Reward based on observation quality
319
+ let reward = 0;
320
+ // Energy reward
321
+ reward += (observation.energy - 2) * 0.2; // -0.4 to +0.4
322
+ // Phi reward
323
+ reward += (observation.phi - 1.5) * 0.1; // -0.15 to +0.15
324
+ // Tool success reward
325
+ reward += (observation.tool - 1) * 0.1; // -0.1 to +0.1
326
+ // Task progress reward
327
+ reward += (observation.task - 1) * 0.15; // -0.15 to +0.3
328
+ return Math.max(-1, Math.min(1, reward));
329
+ }
330
+ // ============================================================================
331
+ // Advanced: Active Inference with Full EFE from Value Function
332
+ // ============================================================================
333
+ /**
334
+ * Compute full Expected Free Energy using Value-Guided JEPA
335
+ *
336
+ * This replaces the POMDP-based EFE with world model predictions
337
+ */
338
+ async computeFullEFE(preferredState) {
339
+ if (!this.jepa || !this.latentState) {
340
+ throw new Error('JEPA and latent state required for full EFE');
341
+ }
342
+ const efeMap = new Map();
343
+ // Default preferred state: high energy, stable, synced, achieved
344
+ const preferences = preferredState ?? this.createPreferredState();
345
+ for (const actionType of types_js_1.ACTIONS) {
346
+ const wmAction = this.createWorldModelAction(actionType);
347
+ const { freeEnergy } = await this.jepa.selectActionActiveInference(this.latentState, [wmAction], preferences, this.config.predictionHorizon);
348
+ efeMap.set(actionType, freeEnergy);
349
+ }
350
+ return efeMap;
351
+ }
352
+ /**
353
+ * Create preferred/goal latent state
354
+ */
355
+ createPreferredState() {
356
+ // Preferred state: high energy, stable world, strong coupling, goal achieved
357
+ const preferredBeliefs = {
358
+ viability: [0, 0, 0.1, 0.3, 0.6], // Prefer optimal
359
+ worldState: [0, 0.7, 0.2, 0.1], // Prefer stable
360
+ coupling: [0, 0, 0.1, 0.3, 0.6], // Prefer synced
361
+ goalProgress: [0, 0, 0.2, 0.8], // Prefer achieved
362
+ };
363
+ const preferredObs = {
364
+ energy: 4,
365
+ phi: 3,
366
+ tool: 2,
367
+ coherence: 2,
368
+ task: 3,
369
+ };
370
+ return this.beliefsToLatentState(preferredBeliefs, preferredObs);
371
+ }
372
+ // ============================================================================
373
+ // Getters
374
+ // ============================================================================
375
+ getAIEngine() {
376
+ return this.aiEngine;
377
+ }
378
+ getValueFunction() {
379
+ return this.valueFunction;
380
+ }
381
+ getJEPA() {
382
+ return this.jepa;
383
+ }
384
+ getLatentState() {
385
+ return this.latentState;
386
+ }
387
+ getStats() {
388
+ return {
389
+ ...this.stats,
390
+ averageValue: this.stats.cycleCount > 0
391
+ ? this.stats.totalValue / this.stats.cycleCount
392
+ : 0,
393
+ valueStats: this.valueFunction.getStats(),
394
+ aiStats: this.aiEngine.getStats(),
395
+ };
396
+ }
397
+ getConfig() {
398
+ return { ...this.config };
399
+ }
400
+ // ============================================================================
401
+ // Event Handling
402
+ // ============================================================================
403
+ on(handler) {
404
+ this.eventHandlers.push(handler);
405
+ return () => {
406
+ const idx = this.eventHandlers.indexOf(handler);
407
+ if (idx >= 0)
408
+ this.eventHandlers.splice(idx, 1);
409
+ };
410
+ }
411
+ emit(event) {
412
+ for (const handler of this.eventHandlers) {
413
+ try {
414
+ handler(event);
415
+ }
416
+ catch (e) {
417
+ console.error('Value integration event handler error:', e);
418
+ }
419
+ }
420
+ }
421
+ }
422
exports.ValueAugmentedEngine = ValueAugmentedEngine;
// ============================================================================
// Factory
// ============================================================================
/**
 * Factory: build a ValueAugmentedEngine from an optional configuration.
 * No pre-existing AI engine is supplied (the constructor creates one).
 *
 * @param config - Optional configuration overrides.
 * @returns {ValueAugmentedEngine} A new engine instance.
 */
function createValueAugmentedEngine(config) {
    return new ValueAugmentedEngine(undefined, config);
}
/**
 * Create a fully integrated engine: constructs a ValueAugmentedEngine and
 * eagerly initializes its JEPA world model before returning.
 *
 * @param config - Optional configuration overrides.
 * @returns {Promise<ValueAugmentedEngine>} Engine with JEPA initialized.
 */
async function createFullyIntegratedEngine(config) {
    const integrated = new ValueAugmentedEngine(undefined, config);
    await integrated.initializeJEPA();
    return integrated;
}
// ============================================================================
// Utility: Wrap existing AutonomousLoop with value integration
// ============================================================================
const autonomous_loop_js_1 = require("./autonomous-loop.js");
/**
 * Create an autonomous loop whose inference step is driven by a
 * value-augmented engine (value-guided decision making).
 *
 * @param config - The `valueIntegration` key configures the value engine;
 *   all remaining keys configure the autonomous loop itself.
 * @returns {{ loop, valueEngine }} The wired loop and its value engine.
 */
function createValueIntegratedLoop(config = {}) {
    const { valueIntegration = {}, ...loopConfig } = config;
    const valueEngine = createValueAugmentedEngine(valueIntegration);
    const loop = (0, autonomous_loop_js_1.createAutonomousLoop)(loopConfig);
    // Route the loop's per-step inference through the value engine.
    loop.setCustomStepFunction(async (obs) => {
        const { action, beliefs } = await valueEngine.step(obs);
        return { action, beliefs };
    });
    return { loop, valueEngine };
}