genesis-ai-cli 7.4.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +78 -0
- package/README.md +282 -0
- package/dist/src/active-inference/actions.d.ts +75 -0
- package/dist/src/active-inference/actions.js +250 -0
- package/dist/src/active-inference/autonomous-loop.d.ts +103 -0
- package/dist/src/active-inference/autonomous-loop.js +289 -0
- package/dist/src/active-inference/core.d.ts +85 -0
- package/dist/src/active-inference/core.js +555 -0
- package/dist/src/active-inference/demo-autonomous-loop.d.ts +8 -0
- package/dist/src/active-inference/demo-autonomous-loop.js +338 -0
- package/dist/src/active-inference/demo-value-integration.d.ts +8 -0
- package/dist/src/active-inference/demo-value-integration.js +174 -0
- package/dist/src/active-inference/index.d.ts +32 -0
- package/dist/src/active-inference/index.js +88 -0
- package/dist/src/active-inference/integration.d.ts +114 -0
- package/dist/src/active-inference/integration.js +698 -0
- package/dist/src/active-inference/memory-integration.d.ts +51 -0
- package/dist/src/active-inference/memory-integration.js +232 -0
- package/dist/src/active-inference/observations.d.ts +67 -0
- package/dist/src/active-inference/observations.js +147 -0
- package/dist/src/active-inference/test-active-inference.d.ts +8 -0
- package/dist/src/active-inference/test-active-inference.js +320 -0
- package/dist/src/active-inference/test-value-integration.d.ts +6 -0
- package/dist/src/active-inference/test-value-integration.js +168 -0
- package/dist/src/active-inference/types.d.ts +150 -0
- package/dist/src/active-inference/types.js +59 -0
- package/dist/src/active-inference/value-integration.d.ts +164 -0
- package/dist/src/active-inference/value-integration.js +459 -0
- package/dist/src/agents/base-agent.d.ts +53 -0
- package/dist/src/agents/base-agent.js +178 -0
- package/dist/src/agents/builder.d.ts +67 -0
- package/dist/src/agents/builder.js +537 -0
- package/dist/src/agents/critic.d.ts +35 -0
- package/dist/src/agents/critic.js +322 -0
- package/dist/src/agents/ethicist.d.ts +54 -0
- package/dist/src/agents/ethicist.js +393 -0
- package/dist/src/agents/explorer.d.ts +26 -0
- package/dist/src/agents/explorer.js +216 -0
- package/dist/src/agents/feeling.d.ts +41 -0
- package/dist/src/agents/feeling.js +320 -0
- package/dist/src/agents/index.d.ts +111 -0
- package/dist/src/agents/index.js +222 -0
- package/dist/src/agents/memory.d.ts +69 -0
- package/dist/src/agents/memory.js +404 -0
- package/dist/src/agents/message-bus.d.ts +88 -0
- package/dist/src/agents/message-bus.js +267 -0
- package/dist/src/agents/narrator.d.ts +90 -0
- package/dist/src/agents/narrator.js +473 -0
- package/dist/src/agents/planner.d.ts +38 -0
- package/dist/src/agents/planner.js +341 -0
- package/dist/src/agents/predictor.d.ts +73 -0
- package/dist/src/agents/predictor.js +506 -0
- package/dist/src/agents/sensor.d.ts +88 -0
- package/dist/src/agents/sensor.js +377 -0
- package/dist/src/agents/test-agents.d.ts +6 -0
- package/dist/src/agents/test-agents.js +73 -0
- package/dist/src/agents/types.d.ts +194 -0
- package/dist/src/agents/types.js +7 -0
- package/dist/src/brain/index.d.ts +185 -0
- package/dist/src/brain/index.js +843 -0
- package/dist/src/brain/trace.d.ts +91 -0
- package/dist/src/brain/trace.js +327 -0
- package/dist/src/brain/types.d.ts +165 -0
- package/dist/src/brain/types.js +51 -0
- package/dist/src/cli/chat.d.ts +237 -0
- package/dist/src/cli/chat.js +1959 -0
- package/dist/src/cli/dispatcher.d.ts +182 -0
- package/dist/src/cli/dispatcher.js +718 -0
- package/dist/src/cli/human-loop.d.ts +170 -0
- package/dist/src/cli/human-loop.js +543 -0
- package/dist/src/cli/index.d.ts +12 -0
- package/dist/src/cli/index.js +28 -0
- package/dist/src/cli/interactive.d.ts +141 -0
- package/dist/src/cli/interactive.js +757 -0
- package/dist/src/cli/ui.d.ts +205 -0
- package/dist/src/cli/ui.js +632 -0
- package/dist/src/consciousness/attention-schema.d.ts +154 -0
- package/dist/src/consciousness/attention-schema.js +432 -0
- package/dist/src/consciousness/global-workspace.d.ts +149 -0
- package/dist/src/consciousness/global-workspace.js +422 -0
- package/dist/src/consciousness/index.d.ts +186 -0
- package/dist/src/consciousness/index.js +476 -0
- package/dist/src/consciousness/phi-calculator.d.ts +119 -0
- package/dist/src/consciousness/phi-calculator.js +445 -0
- package/dist/src/consciousness/phi-decisions.d.ts +169 -0
- package/dist/src/consciousness/phi-decisions.js +383 -0
- package/dist/src/consciousness/phi-monitor.d.ts +153 -0
- package/dist/src/consciousness/phi-monitor.js +465 -0
- package/dist/src/consciousness/types.d.ts +260 -0
- package/dist/src/consciousness/types.js +44 -0
- package/dist/src/daemon/dream-mode.d.ts +115 -0
- package/dist/src/daemon/dream-mode.js +470 -0
- package/dist/src/daemon/index.d.ts +162 -0
- package/dist/src/daemon/index.js +542 -0
- package/dist/src/daemon/maintenance.d.ts +139 -0
- package/dist/src/daemon/maintenance.js +549 -0
- package/dist/src/daemon/process.d.ts +82 -0
- package/dist/src/daemon/process.js +442 -0
- package/dist/src/daemon/scheduler.d.ts +90 -0
- package/dist/src/daemon/scheduler.js +494 -0
- package/dist/src/daemon/types.d.ts +213 -0
- package/dist/src/daemon/types.js +50 -0
- package/dist/src/epistemic/index.d.ts +74 -0
- package/dist/src/epistemic/index.js +225 -0
- package/dist/src/grounding/epistemic-stack.d.ts +100 -0
- package/dist/src/grounding/epistemic-stack.js +408 -0
- package/dist/src/grounding/feedback.d.ts +98 -0
- package/dist/src/grounding/feedback.js +276 -0
- package/dist/src/grounding/index.d.ts +123 -0
- package/dist/src/grounding/index.js +224 -0
- package/dist/src/grounding/verifier.d.ts +149 -0
- package/dist/src/grounding/verifier.js +484 -0
- package/dist/src/healing/detector.d.ts +110 -0
- package/dist/src/healing/detector.js +436 -0
- package/dist/src/healing/fixer.d.ts +138 -0
- package/dist/src/healing/fixer.js +572 -0
- package/dist/src/healing/index.d.ts +23 -0
- package/dist/src/healing/index.js +43 -0
- package/dist/src/hooks/index.d.ts +135 -0
- package/dist/src/hooks/index.js +317 -0
- package/dist/src/index.d.ts +23 -0
- package/dist/src/index.js +1266 -0
- package/dist/src/kernel/index.d.ts +155 -0
- package/dist/src/kernel/index.js +795 -0
- package/dist/src/kernel/invariants.d.ts +153 -0
- package/dist/src/kernel/invariants.js +355 -0
- package/dist/src/kernel/test-kernel.d.ts +6 -0
- package/dist/src/kernel/test-kernel.js +108 -0
- package/dist/src/kernel/test-real-mcp.d.ts +10 -0
- package/dist/src/kernel/test-real-mcp.js +295 -0
- package/dist/src/llm/index.d.ts +146 -0
- package/dist/src/llm/index.js +428 -0
- package/dist/src/llm/router.d.ts +136 -0
- package/dist/src/llm/router.js +510 -0
- package/dist/src/mcp/index.d.ts +85 -0
- package/dist/src/mcp/index.js +657 -0
- package/dist/src/mcp/resilient.d.ts +139 -0
- package/dist/src/mcp/resilient.js +417 -0
- package/dist/src/memory/cache.d.ts +118 -0
- package/dist/src/memory/cache.js +356 -0
- package/dist/src/memory/cognitive-workspace.d.ts +231 -0
- package/dist/src/memory/cognitive-workspace.js +521 -0
- package/dist/src/memory/consolidation.d.ts +99 -0
- package/dist/src/memory/consolidation.js +443 -0
- package/dist/src/memory/episodic.d.ts +114 -0
- package/dist/src/memory/episodic.js +394 -0
- package/dist/src/memory/forgetting.d.ts +134 -0
- package/dist/src/memory/forgetting.js +324 -0
- package/dist/src/memory/index.d.ts +211 -0
- package/dist/src/memory/index.js +367 -0
- package/dist/src/memory/indexer.d.ts +123 -0
- package/dist/src/memory/indexer.js +479 -0
- package/dist/src/memory/procedural.d.ts +136 -0
- package/dist/src/memory/procedural.js +479 -0
- package/dist/src/memory/semantic.d.ts +132 -0
- package/dist/src/memory/semantic.js +497 -0
- package/dist/src/memory/types.d.ts +193 -0
- package/dist/src/memory/types.js +15 -0
- package/dist/src/orchestrator.d.ts +65 -0
- package/dist/src/orchestrator.js +317 -0
- package/dist/src/persistence/index.d.ts +257 -0
- package/dist/src/persistence/index.js +763 -0
- package/dist/src/pipeline/executor.d.ts +51 -0
- package/dist/src/pipeline/executor.js +695 -0
- package/dist/src/pipeline/index.d.ts +7 -0
- package/dist/src/pipeline/index.js +11 -0
- package/dist/src/self-production.d.ts +67 -0
- package/dist/src/self-production.js +205 -0
- package/dist/src/subagents/executor.d.ts +58 -0
- package/dist/src/subagents/executor.js +283 -0
- package/dist/src/subagents/index.d.ts +37 -0
- package/dist/src/subagents/index.js +53 -0
- package/dist/src/subagents/registry.d.ts +23 -0
- package/dist/src/subagents/registry.js +167 -0
- package/dist/src/subagents/types.d.ts +79 -0
- package/dist/src/subagents/types.js +14 -0
- package/dist/src/tools/bash.d.ts +139 -0
- package/dist/src/tools/bash.js +583 -0
- package/dist/src/tools/edit.d.ts +125 -0
- package/dist/src/tools/edit.js +424 -0
- package/dist/src/tools/git.d.ts +179 -0
- package/dist/src/tools/git.js +504 -0
- package/dist/src/tools/index.d.ts +21 -0
- package/dist/src/tools/index.js +163 -0
- package/dist/src/types.d.ts +145 -0
- package/dist/src/types.js +7 -0
- package/dist/src/world-model/decoder.d.ts +163 -0
- package/dist/src/world-model/decoder.js +517 -0
- package/dist/src/world-model/digital-twin.d.ts +219 -0
- package/dist/src/world-model/digital-twin.js +695 -0
- package/dist/src/world-model/encoder.d.ts +141 -0
- package/dist/src/world-model/encoder.js +564 -0
- package/dist/src/world-model/index.d.ts +221 -0
- package/dist/src/world-model/index.js +772 -0
- package/dist/src/world-model/predictor.d.ts +161 -0
- package/dist/src/world-model/predictor.js +681 -0
- package/dist/src/world-model/test-value-jepa.d.ts +8 -0
- package/dist/src/world-model/test-value-jepa.js +430 -0
- package/dist/src/world-model/types.d.ts +341 -0
- package/dist/src/world-model/types.js +69 -0
- package/dist/src/world-model/value-jepa.d.ts +247 -0
- package/dist/src/world-model/value-jepa.js +622 -0
- package/dist/test/brain.test.d.ts +11 -0
- package/dist/test/brain.test.js +358 -0
- package/dist/test/cli/dispatcher.test.d.ts +4 -0
- package/dist/test/cli/dispatcher.test.js +332 -0
- package/dist/test/cli/human-loop.test.d.ts +4 -0
- package/dist/test/cli/human-loop.test.js +270 -0
- package/dist/test/grounding/feedback.test.d.ts +4 -0
- package/dist/test/grounding/feedback.test.js +462 -0
- package/dist/test/grounding/verifier.test.d.ts +4 -0
- package/dist/test/grounding/verifier.test.js +442 -0
- package/dist/test/grounding.test.d.ts +6 -0
- package/dist/test/grounding.test.js +246 -0
- package/dist/test/healing/detector.test.d.ts +4 -0
- package/dist/test/healing/detector.test.js +266 -0
- package/dist/test/healing/fixer.test.d.ts +4 -0
- package/dist/test/healing/fixer.test.js +369 -0
- package/dist/test/integration.test.d.ts +5 -0
- package/dist/test/integration.test.js +290 -0
- package/dist/test/tools/bash.test.d.ts +4 -0
- package/dist/test/tools/bash.test.js +348 -0
- package/dist/test/tools/edit.test.d.ts +4 -0
- package/dist/test/tools/edit.test.js +350 -0
- package/dist/test/tools/git.test.d.ts +4 -0
- package/dist/test/tools/git.test.js +350 -0
- package/package.json +60 -0
package/dist/src/active-inference/types.js
@@ -0,0 +1,59 @@
+"use strict";
+/**
+ * Genesis 6.1 - Active Inference Types
+ *
+ * Based on pymdp and Free Energy Principle (Friston)
+ *
+ * Core concepts:
+ * - Hidden states: What the system believes about the world
+ * - Observations: What the system perceives
+ * - Actions: What the system can do
+ * - Preferences: What the system wants (C matrix)
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.DEFAULT_CONFIG = exports.ACTION_COUNT = exports.ACTIONS = exports.OBSERVATION_LABELS = exports.OBSERVATION_DIMS = exports.HIDDEN_STATE_LABELS = exports.HIDDEN_STATE_DIMS = void 0;
+exports.HIDDEN_STATE_DIMS = {
+    viability: 5,
+    worldState: 4,
+    coupling: 5,
+    goalProgress: 4,
+};
+exports.HIDDEN_STATE_LABELS = {
+    viability: ['critical', 'low', 'medium', 'high', 'optimal'],
+    worldState: ['unknown', 'stable', 'changing', 'hostile'],
+    coupling: ['none', 'weak', 'medium', 'strong', 'synced'],
+    goalProgress: ['blocked', 'slow', 'onTrack', 'achieved'],
+};
+exports.OBSERVATION_DIMS = {
+    energy: 5,
+    phi: 4,
+    tool: 3,
+    coherence: 3,
+    task: 4,
+};
+exports.OBSERVATION_LABELS = {
+    energy: ['depleted', 'low', 'medium', 'high', 'full'],
+    phi: ['dormant', 'low', 'medium', 'high'],
+    tool: ['failed', 'partial', 'success'],
+    coherence: ['broken', 'degraded', 'consistent'],
+    task: ['none', 'pending', 'active', 'completed'],
+};
+exports.ACTIONS = [
+    'sense.mcp',
+    'recall.memory',
+    'plan.goals',
+    'verify.ethics',
+    'execute.task',
+    'dream.cycle',
+    'rest.idle',
+    'recharge',
+];
+exports.ACTION_COUNT = exports.ACTIONS.length;
+exports.DEFAULT_CONFIG = {
+    inferenceIterations: 16,
+    policyHorizon: 3,
+    actionTemperature: 1.0,
+    priorWeight: 0.1,
+    learningRateA: 0.01,
+    learningRateB: 0.01,
+};
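The types module above pins down the discrete state space: four hidden-state factors, five observation modalities, eight actions, and the default inference configuration. A minimal sketch of how those constants could be consumed, assuming a local Obs shape with one integer index per modality (the package's own Observation type lives in types.d.ts) and an externally supplied vector of negative expected free energies:

import { ACTIONS, OBSERVATION_LABELS, DEFAULT_CONFIG } from './types.js';

// Local stand-in for the package's Observation type: one discrete index per modality.
type Obs = { energy: number; phi: number; tool: number; coherence: number; task: number };

// Map each discrete index back to its human-readable label from OBSERVATION_LABELS.
function describeObservation(obs: Obs): string {
    return [
        `energy=${OBSERVATION_LABELS.energy[obs.energy]}`,
        `phi=${OBSERVATION_LABELS.phi[obs.phi]}`,
        `tool=${OBSERVATION_LABELS.tool[obs.tool]}`,
        `coherence=${OBSERVATION_LABELS.coherence[obs.coherence]}`,
        `task=${OBSERVATION_LABELS.task[obs.task]}`,
    ].join(', ');
}

// Temperature-scaled softmax over negative expected free energy, one entry per action in ACTIONS.
function softmaxPolicy(negEfe: number[], temperature = DEFAULT_CONFIG.actionTemperature): number[] {
    const scaled = negEfe.map(v => v / temperature);
    const max = Math.max(...scaled);
    const exps = scaled.map(v => Math.exp(v - max));
    const sum = exps.reduce((a, b) => a + b, 0);
    return exps.map(e => e / sum);
}

console.log(describeObservation({ energy: 3, phi: 2, tool: 2, coherence: 2, task: 2 }));
// energy=high, phi=medium, tool=success, coherence=consistent, task=active
console.log(softmaxPolicy(new Array(ACTIONS.length).fill(0))); // uniform over the 8 actions

With actionTemperature at its default of 1.0 the softmax is plain exp(-EFE) normalization; raising the temperature flattens the resulting policy.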
package/dist/src/active-inference/value-integration.d.ts
@@ -0,0 +1,164 @@
+/**
+ * Genesis 6.2 - Value-Guided Active Inference Integration
+ *
+ * Connects the Value-Guided JEPA world model to the Active Inference engine.
+ *
+ * This integration enables:
+ * 1. Value-augmented policy selection (EFE + V(s))
+ * 2. World model predictions for trajectory simulation
+ * 3. Value learning from Action outcomes
+ * 4. Hybrid discrete-continuous inference
+ *
+ * Architecture:
+ * ```
+ * Observations → Active Inference Engine → Discrete Beliefs
+ *                                               ↓
+ *                        World Model Encoder → Latent State
+ *                                               ↓
+ *                        Value Function → V(s), Q(s,a)
+ *                                               ↓
+ *                        Policy = softmax(-EFE + λV)
+ * ```
+ */
+import { ActiveInferenceEngine } from './core.js';
+import { Observation, Beliefs, Policy, ActionType } from './types.js';
+import { ValueFunction, ValueGuidedJEPA, type ValueEstimate, type FreeEnergyDecomposition, type ValueFunctionConfig } from '../world-model/value-jepa.js';
+import type { LatentState } from '../world-model/types.js';
+export interface ValueIntegrationConfig {
+    valueWeight: number;
+    useWorldModelPredictions: boolean;
+    predictionHorizon: number;
+    valueLearningRate: number;
+    verbose: boolean;
+    valueFunctionConfig: Partial<ValueFunctionConfig>;
+}
+export declare const DEFAULT_VALUE_INTEGRATION_CONFIG: ValueIntegrationConfig;
+export declare class ValueAugmentedEngine {
+    private aiEngine;
+    private valueFunction;
+    private jepa;
+    private config;
+    private latentState;
+    private stats;
+    private eventHandlers;
+    constructor(aiEngine?: ActiveInferenceEngine, config?: Partial<ValueIntegrationConfig>);
+    /**
+     * Initialize with JEPA for full world model integration
+     */
+    initializeJEPA(): Promise<void>;
+    /**
+     * Full inference cycle with value augmentation
+     */
+    step(observation: Observation): Promise<{
+        action: ActionType;
+        beliefs: Beliefs;
+        value: ValueEstimate;
+        policy: Policy;
+    }>;
+    /**
+     * Compute policy with value augmentation
+     *
+     * Policy ∝ exp(-EFE + λ * V(s'))
+     *
+     * Where V(s') is the expected value of the next state under each action.
+     */
+    computeValueAugmentedPolicy(beliefs: Beliefs): Promise<Policy>;
+    /**
+     * Compute value augmentation for a specific action
+     */
+    private computeActionValueAugment;
+    /**
+     * Heuristic value for actions when JEPA is not available
+     */
+    private heuristicActionValue;
+    /**
+     * Convert Active Inference action to World Model action
+     */
+    private createWorldModelAction;
+    /**
+     * Convert beliefs to latent state
+     */
+    private beliefsToLatentState;
+    /**
+     * Sample action from policy
+     */
+    private sampleFromPolicy;
+    /**
+     * Entropy of a probability distribution
+     */
+    private entropy;
+    /**
+     * Update value function from observed outcome
+     */
+    updateFromOutcome(previousLatent: LatentState, action: ActionType, outcome: {
+        success: boolean;
+        reward?: number;
+        newObservation: Observation;
+    }): void;
+    /**
+     * Convert observation to immediate reward signal
+     */
+    private observationToReward;
+    /**
+     * Compute full Expected Free Energy using Value-Guided JEPA
+     *
+     * This replaces the POMDP-based EFE with world model predictions
+     */
+    computeFullEFE(preferredState?: LatentState): Promise<Map<ActionType, FreeEnergyDecomposition>>;
+    /**
+     * Create preferred/goal latent state
+     */
+    private createPreferredState;
+    getAIEngine(): ActiveInferenceEngine;
+    getValueFunction(): ValueFunction;
+    getJEPA(): ValueGuidedJEPA | null;
+    getLatentState(): LatentState | null;
+    getStats(): {
+        averageValue: number;
+        valueStats: {
+            mean: number;
+            std: number;
+            min: number;
+            max: number;
+            count: number;
+        };
+        aiStats: {
+            inferenceCount: number;
+            averageSurprise: number;
+            actionCounts: {
+                [k: string]: number;
+            };
+        };
+        cycleCount: number;
+        totalValue: number;
+        valueUpdates: number;
+        trajectoryPredictions: number;
+    };
+    getConfig(): ValueIntegrationConfig;
+    on(handler: (event: ValueIntegrationEvent) => void): () => void;
+    private emit;
+}
+export interface ValueIntegrationEvent {
+    type: 'jepa_initialized' | 'step_complete' | 'value_updated' | 'efe_computed';
+    timestamp: Date;
+    data?: unknown;
+}
+export declare function createValueAugmentedEngine(config?: Partial<ValueIntegrationConfig>): ValueAugmentedEngine;
+/**
+ * Create fully integrated engine with JEPA
+ */
+export declare function createFullyIntegratedEngine(config?: Partial<ValueIntegrationConfig>): Promise<ValueAugmentedEngine>;
+import { AutonomousLoop, type AutonomousLoopConfig } from './autonomous-loop.js';
+/**
+ * Configuration for value-integrated autonomous loop
+ */
+export interface ValueIntegratedLoopConfig extends AutonomousLoopConfig {
+    valueIntegration: Partial<ValueIntegrationConfig>;
+}
+/**
+ * Create an autonomous loop with value-guided decision making
+ */
+export declare function createValueIntegratedLoop(config?: Partial<ValueIntegratedLoopConfig>): {
+    loop: AutonomousLoop;
+    valueEngine: ValueAugmentedEngine;
+};
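The declaration file above is the public surface of the integration: ValueAugmentedEngine plus the createValueAugmentedEngine, createFullyIntegratedEngine, and createValueIntegratedLoop factories. A hedged usage sketch built only from these declarations; the observation indices are invented, and it assumes the Observation type is the five discrete indices used elsewhere in this diff and that ValueEstimate exposes a numeric value field (as the implementation below suggests):

import { createFullyIntegratedEngine } from './value-integration.js';

async function main(): Promise<void> {
    // Factory declared above: builds the engine and awaits initializeJEPA().
    const engine = await createFullyIntegratedEngine({ valueWeight: 0.5, verbose: true });

    // Subscribe to integration events; on() returns an unsubscribe function.
    const unsubscribe = engine.on(event => console.log(event.type, event.timestamp.toISOString()));

    // One inference cycle: update beliefs, score the latent state, sample an action.
    const observation = { energy: 3, phi: 2, tool: 2, coherence: 2, task: 2 };
    const { action, value } = await engine.step(observation);
    const latentAtDecision = engine.getLatentState();
    console.log('chosen action:', action, 'V(s):', value.value);

    // After the action has actually been executed elsewhere, report the outcome
    // so the value function can learn from it.
    if (latentAtDecision) {
        engine.updateFromOutcome(latentAtDecision, action, {
            success: true,
            newObservation: { energy: 4, phi: 2, tool: 2, coherence: 2, task: 3 },
        });
    }

    unsubscribe();
}

main().catch(console.error);

createValueIntegratedLoop follows the same pattern but wires the engine into an AutonomousLoop through a custom step function, as the implementation below shows.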
package/dist/src/active-inference/value-integration.js
@@ -0,0 +1,459 @@
+"use strict";
+/**
+ * Genesis 6.2 - Value-Guided Active Inference Integration
+ *
+ * Connects the Value-Guided JEPA world model to the Active Inference engine.
+ *
+ * This integration enables:
+ * 1. Value-augmented policy selection (EFE + V(s))
+ * 2. World model predictions for trajectory simulation
+ * 3. Value learning from Action outcomes
+ * 4. Hybrid discrete-continuous inference
+ *
+ * Architecture:
+ * ```
+ * Observations → Active Inference Engine → Discrete Beliefs
+ *                                               ↓
+ *                        World Model Encoder → Latent State
+ *                                               ↓
+ *                        Value Function → V(s), Q(s,a)
+ *                                               ↓
+ *                        Policy = softmax(-EFE + λV)
+ * ```
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ValueAugmentedEngine = exports.DEFAULT_VALUE_INTEGRATION_CONFIG = void 0;
+exports.createValueAugmentedEngine = createValueAugmentedEngine;
+exports.createFullyIntegratedEngine = createFullyIntegratedEngine;
+exports.createValueIntegratedLoop = createValueIntegratedLoop;
+const core_js_1 = require("./core.js");
+const types_js_1 = require("./types.js");
+const value_jepa_js_1 = require("../world-model/value-jepa.js");
+exports.DEFAULT_VALUE_INTEGRATION_CONFIG = {
+    valueWeight: 0.5,
+    useWorldModelPredictions: true,
+    predictionHorizon: 3,
+    valueLearningRate: 0.01,
+    verbose: false,
+    valueFunctionConfig: {},
+};
+/**
+ * Mapping from Active Inference action to World Model action
+ */
+const AI_TO_WM_ACTION = {
+    'sense.mcp': 'observe',
+    'recall.memory': 'query',
+    'plan.goals': 'query',
+    'verify.ethics': 'query',
+    'execute.task': 'execute',
+    'dream.cycle': 'transform',
+    'rest.idle': 'observe',
+    'recharge': 'transform',
+};
+// ============================================================================
+// Value-Augmented Active Inference Engine
+// ============================================================================
+class ValueAugmentedEngine {
+    aiEngine;
+    valueFunction;
+    jepa = null;
+    config;
+    // Current latent state (synchronized with beliefs)
+    latentState = null;
+    // Statistics
+    stats = {
+        cycleCount: 0,
+        totalValue: 0,
+        valueUpdates: 0,
+        trajectoryPredictions: 0,
+    };
+    // Event handlers
+    eventHandlers = [];
+    constructor(aiEngine, config = {}) {
+        this.config = { ...exports.DEFAULT_VALUE_INTEGRATION_CONFIG, ...config };
+        this.aiEngine = aiEngine ?? (0, core_js_1.createActiveInferenceEngine)();
+        this.valueFunction = (0, value_jepa_js_1.createValueFunction)(this.config.valueFunctionConfig);
+    }
+    /**
+     * Initialize with JEPA for full world model integration
+     */
+    async initializeJEPA() {
+        this.jepa = await (0, value_jepa_js_1.createValueGuidedJEPA)(this.config.valueFunctionConfig);
+        this.emit({ type: 'jepa_initialized', timestamp: new Date() });
+    }
+    // ============================================================================
+    // Core Integration
+    // ============================================================================
+    /**
+     * Full inference cycle with value augmentation
+     */
+    async step(observation) {
+        this.stats.cycleCount++;
+        // 1. Update beliefs via Active Inference
+        const beliefs = this.aiEngine.inferStates(observation);
+        // 2. Convert beliefs to latent state
+        this.latentState = this.beliefsToLatentState(beliefs, observation);
+        // 3. Compute value of current state
+        const currentValue = this.valueFunction.estimate(this.latentState);
+        // 4. Compute value-augmented policy
+        const policy = await this.computeValueAugmentedPolicy(beliefs);
+        // 5. Sample action
+        const action = this.sampleFromPolicy(policy);
+        // Track statistics
+        this.stats.totalValue += currentValue.value;
+        this.emit({
+            type: 'step_complete',
+            timestamp: new Date(),
+            data: { action, beliefs, value: currentValue, policy },
+        });
+        return { action, beliefs, value: currentValue, policy };
+    }
+    /**
+     * Compute policy with value augmentation
+     *
+     * Policy ∝ exp(-EFE + λ * V(s'))
+     *
+     * Where V(s') is the expected value of the next state under each action.
+     */
+    async computeValueAugmentedPolicy(beliefs) {
+        // Get base EFE policy from Active Inference engine
+        const basePolicy = this.aiEngine.inferPolicies();
+        if (!this.latentState) {
+            return basePolicy; // No value augmentation without latent state
+        }
+        // Compute value augmentation for each action
+        const valueAugments = [];
+        for (let a = 0; a < types_js_1.ACTION_COUNT; a++) {
+            const actionType = types_js_1.ACTIONS[a];
+            const augment = await this.computeActionValueAugment(actionType);
+            valueAugments[a] = augment;
+        }
+        // Combine: policy ∝ exp(log(basePolicy) + λ * valueAugment)
+        const logPolicy = basePolicy.map((p, i) => Math.log(Math.max(p, 1e-10)) + this.config.valueWeight * valueAugments[i]);
+        // Softmax to get final policy
+        const maxLogP = Math.max(...logPolicy);
+        const expPolicy = logPolicy.map(lp => Math.exp(lp - maxLogP));
+        const sumExp = expPolicy.reduce((a, b) => a + b, 0);
+        const policy = expPolicy.map(e => e / sumExp);
+        if (this.config.verbose) {
+            console.log('[Value Integration] Base policy:', basePolicy.map(p => p.toFixed(3)));
+            console.log('[Value Integration] Value augments:', valueAugments.map(v => v.toFixed(3)));
+            console.log('[Value Integration] Final policy:', policy.map(p => p.toFixed(3)));
+        }
+        return policy;
+    }
+    /**
+     * Compute value augmentation for a specific action
+     */
+    async computeActionValueAugment(actionType) {
+        if (!this.latentState)
+            return 0;
+        if (this.jepa && this.config.useWorldModelPredictions) {
+            // Use JEPA for trajectory prediction
+            const wmAction = this.createWorldModelAction(actionType);
+            const predicted = await this.jepa.predictWithValue(this.latentState, wmAction);
+            this.stats.trajectoryPredictions++;
+            // Return expected value of next state
+            return predicted.value.value;
+        }
+        else {
+            // Simple heuristic based on action type
+            return this.heuristicActionValue(actionType);
+        }
+    }
+    /**
+     * Heuristic value for actions when JEPA is not available
+     */
+    heuristicActionValue(actionType) {
+        const beliefs = this.aiEngine.getBeliefs();
+        const state = this.aiEngine.getMostLikelyState();
+        // Value based on current state and action appropriateness
+        switch (actionType) {
+            case 'recharge':
+                // High value when energy is low
+                return state.viability === 'critical' ? 0.9 :
+                    state.viability === 'low' ? 0.6 :
+                        state.viability === 'medium' ? 0.2 : 0;
+            case 'rest.idle':
+                // Value at attractor (Wu Wei)
+                return state.viability === 'optimal' &&
+                    state.worldState === 'stable' ? 0.8 : 0.1;
+            case 'execute.task':
+                // High value when ready to execute
+                return state.viability === 'optimal' &&
+                    state.goalProgress !== 'achieved' ? 0.7 : 0.2;
+            case 'sense.mcp':
+                // Value for sensing when coupling is weak
+                return state.coupling === 'none' ? 0.5 :
+                    state.coupling === 'weak' ? 0.4 : 0.2;
+            case 'plan.goals':
+                // Value when not blocked but not yet on track
+                return state.goalProgress === 'blocked' ? 0.6 :
+                    state.goalProgress === 'slow' ? 0.5 : 0.2;
+            case 'recall.memory':
+                return state.worldState === 'unknown' ? 0.5 : 0.2;
+            case 'dream.cycle':
+                return state.worldState === 'changing' ? 0.4 : 0.1;
+            case 'verify.ethics':
+                return 0.3; // Always moderately valuable
+            default:
+                return 0;
+        }
+    }
+    /**
+     * Convert Active Inference action to World Model action
+     */
+    createWorldModelAction(actionType) {
+        const wmType = AI_TO_WM_ACTION[actionType] ?? 'observe';
+        return {
+            id: `ai-${actionType}-${Date.now()}`,
+            type: wmType,
+            parameters: { sourceAction: actionType },
+            agent: 'active-inference',
+            timestamp: new Date(),
+        };
+    }
+    /**
+     * Convert beliefs to latent state
+     */
+    beliefsToLatentState(beliefs, observation) {
+        // Create a latent vector from beliefs and observations
+        // This is a simplified mapping - in practice would use learned encoder
+        const vector = [];
+        // Encode beliefs (each factor as part of the vector)
+        vector.push(...beliefs.viability); // 5 dims
+        vector.push(...beliefs.worldState); // 4 dims
+        vector.push(...beliefs.coupling); // 5 dims
+        vector.push(...beliefs.goalProgress); // 4 dims = 18 dims total
+        // Encode observations
+        vector.push(observation.energy / 4); // Normalized
+        vector.push(observation.phi / 3);
+        vector.push(observation.tool / 2);
+        vector.push(observation.coherence / 2);
+        vector.push(observation.task / 3); // +5 dims = 23 dims
+        // Pad to standard latent dimension (64)
+        const targetDim = 64;
+        while (vector.length < targetDim) {
+            // Fill with derived features
+            const idx = vector.length;
+            if (idx < 30) {
+                // Cross-products of beliefs
+                const i = idx % beliefs.viability.length;
+                const j = idx % beliefs.worldState.length;
+                vector.push(beliefs.viability[i] * beliefs.worldState[j]);
+            }
+            else {
+                // Entropy-based features
+                vector.push(Math.random() * 0.1); // Small noise
+            }
+        }
+        // Compute confidence from belief certainty
+        const beliefEntropy = this.entropy(beliefs.viability) +
+            this.entropy(beliefs.worldState) +
+            this.entropy(beliefs.coupling) +
+            this.entropy(beliefs.goalProgress);
+        const maxEntropy = Math.log(5) + Math.log(4) + Math.log(5) + Math.log(4);
+        const confidence = 1 - (beliefEntropy / maxEntropy);
+        return {
+            vector,
+            dimensions: targetDim,
+            sourceModality: 'state',
+            sourceId: `ai-beliefs-${Date.now()}`,
+            timestamp: new Date(),
+            confidence,
+            entropy: beliefEntropy / maxEntropy,
+        };
+    }
+    /**
+     * Sample action from policy
+     */
+    sampleFromPolicy(policy) {
+        const r = Math.random();
+        let cumsum = 0;
+        for (let i = 0; i < policy.length; i++) {
+            cumsum += policy[i];
+            if (r < cumsum) {
+                return types_js_1.ACTIONS[i];
+            }
+        }
+        return types_js_1.ACTIONS[types_js_1.ACTIONS.length - 1];
+    }
+    /**
+     * Entropy of a probability distribution
+     */
+    entropy(probs) {
+        return -probs.reduce((acc, p) => {
+            if (p > 1e-10) {
+                return acc + p * Math.log(p);
+            }
+            return acc;
+        }, 0);
+    }
+    // ============================================================================
+    // Value Learning
+    // ============================================================================
+    /**
+     * Update value function from observed outcome
+     */
+    updateFromOutcome(previousLatent, action, outcome) {
+        if (!previousLatent)
+            return;
+        // Compute observed return
+        const observedReturn = outcome.reward ??
+            (outcome.success ? 0.5 : -0.2) +
+                this.observationToReward(outcome.newObservation);
+        // Update value function
+        this.valueFunction.update(previousLatent, observedReturn, this.config.valueLearningRate);
+        this.stats.valueUpdates++;
+        this.emit({
+            type: 'value_updated',
+            timestamp: new Date(),
+            data: { action, observedReturn, success: outcome.success },
+        });
+    }
+    /**
+     * Convert observation to immediate reward signal
+     */
+    observationToReward(observation) {
+        // Reward based on observation quality
+        let reward = 0;
+        // Energy reward
+        reward += (observation.energy - 2) * 0.2; // -0.4 to +0.4
+        // Phi reward
+        reward += (observation.phi - 1.5) * 0.1; // -0.15 to +0.15
+        // Tool success reward
+        reward += (observation.tool - 1) * 0.1; // -0.1 to +0.1
+        // Task progress reward
+        reward += (observation.task - 1) * 0.15; // -0.15 to +0.3
+        return Math.max(-1, Math.min(1, reward));
+    }
+    // ============================================================================
+    // Advanced: Active Inference with Full EFE from Value Function
+    // ============================================================================
+    /**
+     * Compute full Expected Free Energy using Value-Guided JEPA
+     *
+     * This replaces the POMDP-based EFE with world model predictions
+     */
+    async computeFullEFE(preferredState) {
+        if (!this.jepa || !this.latentState) {
+            throw new Error('JEPA and latent state required for full EFE');
+        }
+        const efeMap = new Map();
+        // Default preferred state: high energy, stable, synced, achieved
+        const preferences = preferredState ?? this.createPreferredState();
+        for (const actionType of types_js_1.ACTIONS) {
+            const wmAction = this.createWorldModelAction(actionType);
+            const { freeEnergy } = await this.jepa.selectActionActiveInference(this.latentState, [wmAction], preferences, this.config.predictionHorizon);
+            efeMap.set(actionType, freeEnergy);
+        }
+        return efeMap;
+    }
+    /**
+     * Create preferred/goal latent state
+     */
+    createPreferredState() {
+        // Preferred state: high energy, stable world, strong coupling, goal achieved
+        const preferredBeliefs = {
+            viability: [0, 0, 0.1, 0.3, 0.6], // Prefer optimal
+            worldState: [0, 0.7, 0.2, 0.1], // Prefer stable
+            coupling: [0, 0, 0.1, 0.3, 0.6], // Prefer synced
+            goalProgress: [0, 0, 0.2, 0.8], // Prefer achieved
+        };
+        const preferredObs = {
+            energy: 4,
+            phi: 3,
+            tool: 2,
+            coherence: 2,
+            task: 3,
+        };
+        return this.beliefsToLatentState(preferredBeliefs, preferredObs);
+    }
+    // ============================================================================
+    // Getters
+    // ============================================================================
+    getAIEngine() {
+        return this.aiEngine;
+    }
+    getValueFunction() {
+        return this.valueFunction;
+    }
+    getJEPA() {
+        return this.jepa;
+    }
+    getLatentState() {
+        return this.latentState;
+    }
+    getStats() {
+        return {
+            ...this.stats,
+            averageValue: this.stats.cycleCount > 0
+                ? this.stats.totalValue / this.stats.cycleCount
+                : 0,
+            valueStats: this.valueFunction.getStats(),
+            aiStats: this.aiEngine.getStats(),
+        };
+    }
+    getConfig() {
+        return { ...this.config };
+    }
+    // ============================================================================
+    // Event Handling
+    // ============================================================================
+    on(handler) {
+        this.eventHandlers.push(handler);
+        return () => {
+            const idx = this.eventHandlers.indexOf(handler);
+            if (idx >= 0)
+                this.eventHandlers.splice(idx, 1);
+        };
+    }
+    emit(event) {
+        for (const handler of this.eventHandlers) {
+            try {
+                handler(event);
+            }
+            catch (e) {
+                console.error('Value integration event handler error:', e);
+            }
+        }
+    }
+}
+exports.ValueAugmentedEngine = ValueAugmentedEngine;
+// ============================================================================
+// Factory
+// ============================================================================
+function createValueAugmentedEngine(config) {
+    return new ValueAugmentedEngine(undefined, config);
+}
+/**
+ * Create fully integrated engine with JEPA
+ */
+async function createFullyIntegratedEngine(config) {
+    const engine = new ValueAugmentedEngine(undefined, config);
+    await engine.initializeJEPA();
+    return engine;
+}
+// ============================================================================
+// Utility: Wrap existing AutonomousLoop with value integration
+// ============================================================================
+const autonomous_loop_js_1 = require("./autonomous-loop.js");
+/**
+ * Create an autonomous loop with value-guided decision making
+ */
+function createValueIntegratedLoop(config = {}) {
+    const { valueIntegration = {}, ...loopConfig } = config;
+    // Create value-augmented engine
+    const valueEngine = createValueAugmentedEngine(valueIntegration);
+    // Create autonomous loop
+    const loop = (0, autonomous_loop_js_1.createAutonomousLoop)(loopConfig);
+    // Hook value engine into the loop's inference step
+    loop.setCustomStepFunction(async (obs) => {
+        const result = await valueEngine.step(obs);
+        return {
+            action: result.action,
+            beliefs: result.beliefs,
+        };
+    });
+    return { loop, valueEngine };
+}
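The core of the implementation above is the combination step in computeValueAugmentedPolicy: the EFE-derived base policy is moved into log space, shifted by valueWeight (λ) times each action's predicted value, and renormalized with a numerically stable softmax. A standalone sketch of that arithmetic with toy numbers, not taken from the package:

// Sketch of the policy-combination step: policy ∝ exp(log(basePolicy) + λ · V̂(a)).
function combinePolicyWithValues(basePolicy: number[], valueAugments: number[], valueWeight: number): number[] {
    // Work in log space so a near-zero base probability cannot blow up.
    const logPolicy = basePolicy.map((p, i) => Math.log(Math.max(p, 1e-10)) + valueWeight * valueAugments[i]);
    // Numerically stable softmax.
    const max = Math.max(...logPolicy);
    const exps = logPolicy.map(lp => Math.exp(lp - max));
    const sum = exps.reduce((a, b) => a + b, 0);
    return exps.map(e => e / sum);
}

// Toy example over three actions: a flat EFE-based prior, but the second action
// has a higher predicted value, so λ = 0.5 tilts the policy toward it.
const base = [1 / 3, 1 / 3, 1 / 3];
const augments = [0.1, 0.7, 0.2];
console.log(combinePolicyWithValues(base, augments, 0.5).map(p => p.toFixed(3)));
console.log(combinePolicyWithValues(base, augments, 0).map(p => p.toFixed(3))); // λ = 0: uniform again

With λ = 0 the base policy is returned unchanged up to renormalization, which matches the engine's fallback to plain Active Inference whenever no latent state or JEPA world model is available.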