genesis-ai-cli 7.4.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +78 -0
- package/README.md +282 -0
- package/dist/src/active-inference/actions.d.ts +75 -0
- package/dist/src/active-inference/actions.js +250 -0
- package/dist/src/active-inference/autonomous-loop.d.ts +103 -0
- package/dist/src/active-inference/autonomous-loop.js +289 -0
- package/dist/src/active-inference/core.d.ts +85 -0
- package/dist/src/active-inference/core.js +555 -0
- package/dist/src/active-inference/demo-autonomous-loop.d.ts +8 -0
- package/dist/src/active-inference/demo-autonomous-loop.js +338 -0
- package/dist/src/active-inference/demo-value-integration.d.ts +8 -0
- package/dist/src/active-inference/demo-value-integration.js +174 -0
- package/dist/src/active-inference/index.d.ts +32 -0
- package/dist/src/active-inference/index.js +88 -0
- package/dist/src/active-inference/integration.d.ts +114 -0
- package/dist/src/active-inference/integration.js +698 -0
- package/dist/src/active-inference/memory-integration.d.ts +51 -0
- package/dist/src/active-inference/memory-integration.js +232 -0
- package/dist/src/active-inference/observations.d.ts +67 -0
- package/dist/src/active-inference/observations.js +147 -0
- package/dist/src/active-inference/test-active-inference.d.ts +8 -0
- package/dist/src/active-inference/test-active-inference.js +320 -0
- package/dist/src/active-inference/test-value-integration.d.ts +6 -0
- package/dist/src/active-inference/test-value-integration.js +168 -0
- package/dist/src/active-inference/types.d.ts +150 -0
- package/dist/src/active-inference/types.js +59 -0
- package/dist/src/active-inference/value-integration.d.ts +164 -0
- package/dist/src/active-inference/value-integration.js +459 -0
- package/dist/src/agents/base-agent.d.ts +53 -0
- package/dist/src/agents/base-agent.js +178 -0
- package/dist/src/agents/builder.d.ts +67 -0
- package/dist/src/agents/builder.js +537 -0
- package/dist/src/agents/critic.d.ts +35 -0
- package/dist/src/agents/critic.js +322 -0
- package/dist/src/agents/ethicist.d.ts +54 -0
- package/dist/src/agents/ethicist.js +393 -0
- package/dist/src/agents/explorer.d.ts +26 -0
- package/dist/src/agents/explorer.js +216 -0
- package/dist/src/agents/feeling.d.ts +41 -0
- package/dist/src/agents/feeling.js +320 -0
- package/dist/src/agents/index.d.ts +111 -0
- package/dist/src/agents/index.js +222 -0
- package/dist/src/agents/memory.d.ts +69 -0
- package/dist/src/agents/memory.js +404 -0
- package/dist/src/agents/message-bus.d.ts +88 -0
- package/dist/src/agents/message-bus.js +267 -0
- package/dist/src/agents/narrator.d.ts +90 -0
- package/dist/src/agents/narrator.js +473 -0
- package/dist/src/agents/planner.d.ts +38 -0
- package/dist/src/agents/planner.js +341 -0
- package/dist/src/agents/predictor.d.ts +73 -0
- package/dist/src/agents/predictor.js +506 -0
- package/dist/src/agents/sensor.d.ts +88 -0
- package/dist/src/agents/sensor.js +377 -0
- package/dist/src/agents/test-agents.d.ts +6 -0
- package/dist/src/agents/test-agents.js +73 -0
- package/dist/src/agents/types.d.ts +194 -0
- package/dist/src/agents/types.js +7 -0
- package/dist/src/brain/index.d.ts +185 -0
- package/dist/src/brain/index.js +843 -0
- package/dist/src/brain/trace.d.ts +91 -0
- package/dist/src/brain/trace.js +327 -0
- package/dist/src/brain/types.d.ts +165 -0
- package/dist/src/brain/types.js +51 -0
- package/dist/src/cli/chat.d.ts +237 -0
- package/dist/src/cli/chat.js +1959 -0
- package/dist/src/cli/dispatcher.d.ts +182 -0
- package/dist/src/cli/dispatcher.js +718 -0
- package/dist/src/cli/human-loop.d.ts +170 -0
- package/dist/src/cli/human-loop.js +543 -0
- package/dist/src/cli/index.d.ts +12 -0
- package/dist/src/cli/index.js +28 -0
- package/dist/src/cli/interactive.d.ts +141 -0
- package/dist/src/cli/interactive.js +757 -0
- package/dist/src/cli/ui.d.ts +205 -0
- package/dist/src/cli/ui.js +632 -0
- package/dist/src/consciousness/attention-schema.d.ts +154 -0
- package/dist/src/consciousness/attention-schema.js +432 -0
- package/dist/src/consciousness/global-workspace.d.ts +149 -0
- package/dist/src/consciousness/global-workspace.js +422 -0
- package/dist/src/consciousness/index.d.ts +186 -0
- package/dist/src/consciousness/index.js +476 -0
- package/dist/src/consciousness/phi-calculator.d.ts +119 -0
- package/dist/src/consciousness/phi-calculator.js +445 -0
- package/dist/src/consciousness/phi-decisions.d.ts +169 -0
- package/dist/src/consciousness/phi-decisions.js +383 -0
- package/dist/src/consciousness/phi-monitor.d.ts +153 -0
- package/dist/src/consciousness/phi-monitor.js +465 -0
- package/dist/src/consciousness/types.d.ts +260 -0
- package/dist/src/consciousness/types.js +44 -0
- package/dist/src/daemon/dream-mode.d.ts +115 -0
- package/dist/src/daemon/dream-mode.js +470 -0
- package/dist/src/daemon/index.d.ts +162 -0
- package/dist/src/daemon/index.js +542 -0
- package/dist/src/daemon/maintenance.d.ts +139 -0
- package/dist/src/daemon/maintenance.js +549 -0
- package/dist/src/daemon/process.d.ts +82 -0
- package/dist/src/daemon/process.js +442 -0
- package/dist/src/daemon/scheduler.d.ts +90 -0
- package/dist/src/daemon/scheduler.js +494 -0
- package/dist/src/daemon/types.d.ts +213 -0
- package/dist/src/daemon/types.js +50 -0
- package/dist/src/epistemic/index.d.ts +74 -0
- package/dist/src/epistemic/index.js +225 -0
- package/dist/src/grounding/epistemic-stack.d.ts +100 -0
- package/dist/src/grounding/epistemic-stack.js +408 -0
- package/dist/src/grounding/feedback.d.ts +98 -0
- package/dist/src/grounding/feedback.js +276 -0
- package/dist/src/grounding/index.d.ts +123 -0
- package/dist/src/grounding/index.js +224 -0
- package/dist/src/grounding/verifier.d.ts +149 -0
- package/dist/src/grounding/verifier.js +484 -0
- package/dist/src/healing/detector.d.ts +110 -0
- package/dist/src/healing/detector.js +436 -0
- package/dist/src/healing/fixer.d.ts +138 -0
- package/dist/src/healing/fixer.js +572 -0
- package/dist/src/healing/index.d.ts +23 -0
- package/dist/src/healing/index.js +43 -0
- package/dist/src/hooks/index.d.ts +135 -0
- package/dist/src/hooks/index.js +317 -0
- package/dist/src/index.d.ts +23 -0
- package/dist/src/index.js +1266 -0
- package/dist/src/kernel/index.d.ts +155 -0
- package/dist/src/kernel/index.js +795 -0
- package/dist/src/kernel/invariants.d.ts +153 -0
- package/dist/src/kernel/invariants.js +355 -0
- package/dist/src/kernel/test-kernel.d.ts +6 -0
- package/dist/src/kernel/test-kernel.js +108 -0
- package/dist/src/kernel/test-real-mcp.d.ts +10 -0
- package/dist/src/kernel/test-real-mcp.js +295 -0
- package/dist/src/llm/index.d.ts +146 -0
- package/dist/src/llm/index.js +428 -0
- package/dist/src/llm/router.d.ts +136 -0
- package/dist/src/llm/router.js +510 -0
- package/dist/src/mcp/index.d.ts +85 -0
- package/dist/src/mcp/index.js +657 -0
- package/dist/src/mcp/resilient.d.ts +139 -0
- package/dist/src/mcp/resilient.js +417 -0
- package/dist/src/memory/cache.d.ts +118 -0
- package/dist/src/memory/cache.js +356 -0
- package/dist/src/memory/cognitive-workspace.d.ts +231 -0
- package/dist/src/memory/cognitive-workspace.js +521 -0
- package/dist/src/memory/consolidation.d.ts +99 -0
- package/dist/src/memory/consolidation.js +443 -0
- package/dist/src/memory/episodic.d.ts +114 -0
- package/dist/src/memory/episodic.js +394 -0
- package/dist/src/memory/forgetting.d.ts +134 -0
- package/dist/src/memory/forgetting.js +324 -0
- package/dist/src/memory/index.d.ts +211 -0
- package/dist/src/memory/index.js +367 -0
- package/dist/src/memory/indexer.d.ts +123 -0
- package/dist/src/memory/indexer.js +479 -0
- package/dist/src/memory/procedural.d.ts +136 -0
- package/dist/src/memory/procedural.js +479 -0
- package/dist/src/memory/semantic.d.ts +132 -0
- package/dist/src/memory/semantic.js +497 -0
- package/dist/src/memory/types.d.ts +193 -0
- package/dist/src/memory/types.js +15 -0
- package/dist/src/orchestrator.d.ts +65 -0
- package/dist/src/orchestrator.js +317 -0
- package/dist/src/persistence/index.d.ts +257 -0
- package/dist/src/persistence/index.js +763 -0
- package/dist/src/pipeline/executor.d.ts +51 -0
- package/dist/src/pipeline/executor.js +695 -0
- package/dist/src/pipeline/index.d.ts +7 -0
- package/dist/src/pipeline/index.js +11 -0
- package/dist/src/self-production.d.ts +67 -0
- package/dist/src/self-production.js +205 -0
- package/dist/src/subagents/executor.d.ts +58 -0
- package/dist/src/subagents/executor.js +283 -0
- package/dist/src/subagents/index.d.ts +37 -0
- package/dist/src/subagents/index.js +53 -0
- package/dist/src/subagents/registry.d.ts +23 -0
- package/dist/src/subagents/registry.js +167 -0
- package/dist/src/subagents/types.d.ts +79 -0
- package/dist/src/subagents/types.js +14 -0
- package/dist/src/tools/bash.d.ts +139 -0
- package/dist/src/tools/bash.js +583 -0
- package/dist/src/tools/edit.d.ts +125 -0
- package/dist/src/tools/edit.js +424 -0
- package/dist/src/tools/git.d.ts +179 -0
- package/dist/src/tools/git.js +504 -0
- package/dist/src/tools/index.d.ts +21 -0
- package/dist/src/tools/index.js +163 -0
- package/dist/src/types.d.ts +145 -0
- package/dist/src/types.js +7 -0
- package/dist/src/world-model/decoder.d.ts +163 -0
- package/dist/src/world-model/decoder.js +517 -0
- package/dist/src/world-model/digital-twin.d.ts +219 -0
- package/dist/src/world-model/digital-twin.js +695 -0
- package/dist/src/world-model/encoder.d.ts +141 -0
- package/dist/src/world-model/encoder.js +564 -0
- package/dist/src/world-model/index.d.ts +221 -0
- package/dist/src/world-model/index.js +772 -0
- package/dist/src/world-model/predictor.d.ts +161 -0
- package/dist/src/world-model/predictor.js +681 -0
- package/dist/src/world-model/test-value-jepa.d.ts +8 -0
- package/dist/src/world-model/test-value-jepa.js +430 -0
- package/dist/src/world-model/types.d.ts +341 -0
- package/dist/src/world-model/types.js +69 -0
- package/dist/src/world-model/value-jepa.d.ts +247 -0
- package/dist/src/world-model/value-jepa.js +622 -0
- package/dist/test/brain.test.d.ts +11 -0
- package/dist/test/brain.test.js +358 -0
- package/dist/test/cli/dispatcher.test.d.ts +4 -0
- package/dist/test/cli/dispatcher.test.js +332 -0
- package/dist/test/cli/human-loop.test.d.ts +4 -0
- package/dist/test/cli/human-loop.test.js +270 -0
- package/dist/test/grounding/feedback.test.d.ts +4 -0
- package/dist/test/grounding/feedback.test.js +462 -0
- package/dist/test/grounding/verifier.test.d.ts +4 -0
- package/dist/test/grounding/verifier.test.js +442 -0
- package/dist/test/grounding.test.d.ts +6 -0
- package/dist/test/grounding.test.js +246 -0
- package/dist/test/healing/detector.test.d.ts +4 -0
- package/dist/test/healing/detector.test.js +266 -0
- package/dist/test/healing/fixer.test.d.ts +4 -0
- package/dist/test/healing/fixer.test.js +369 -0
- package/dist/test/integration.test.d.ts +5 -0
- package/dist/test/integration.test.js +290 -0
- package/dist/test/tools/bash.test.d.ts +4 -0
- package/dist/test/tools/bash.test.js +348 -0
- package/dist/test/tools/edit.test.d.ts +4 -0
- package/dist/test/tools/edit.test.js +350 -0
- package/dist/test/tools/git.test.d.ts +4 -0
- package/dist/test/tools/git.test.js +350 -0
- package/package.json +60 -0
package/dist/src/world-model/test-value-jepa.js
@@ -0,0 +1,430 @@
+"use strict";
+/**
+ * Genesis 6.2 - Value-Guided JEPA Tests
+ *
+ * Tests for the value function and value-guided prediction.
+ *
+ * Run: node --test dist/world-model/test-value-jepa.js
+ */
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const node_test_1 = require("node:test");
+const node_assert_1 = __importDefault(require("node:assert"));
+const value_jepa_js_1 = require("./value-jepa.js");
+// ============================================================================
+// Test Utilities
+// ============================================================================
+function createTestState(values) {
+    const dim = 64;
+    const vector = Array(dim).fill(0).map((_, i) => {
+        // First 16 dims: energy-related
+        if (i < 16)
+            return values?.energy ?? 0.5;
+        // Next 16 dims: progress-related
+        if (i < 32)
+            return values?.progress ?? 0.5;
+        // Rest: random
+        return Math.random() * 0.5;
+    });
+    return {
+        vector,
+        dimensions: dim,
+        sourceModality: 'state',
+        sourceId: `test-${Date.now()}`,
+        timestamp: new Date(),
+        confidence: values?.confidence ?? 0.7,
+        entropy: values?.entropy ?? 0.3,
+    };
+}
+function createTestAction(type = 'execute') {
+    return {
+        id: `act-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
+        type,
+        parameters: {},
+        agent: 'test',
+        timestamp: new Date(),
+    };
+}
+function createTestPrediction(state, action) {
+    return {
+        state,
+        action: action ?? createTestAction(),
+        probability: 0.8,
+        uncertainty: 0.2,
+        alternativeStates: [],
+        predictionTime: 10,
+    };
+}
+function createTestTrajectory(states, actions) {
+    const predictedStates = states.map((s, i) => createTestPrediction(s, actions?.[i]));
+    return {
+        id: `traj-${Date.now()}`,
+        initialState: states[0],
+        states: predictedStates,
+        actions: actions ?? states.slice(1).map(() => createTestAction()),
+        totalProbability: 1,
+        horizon: states.length,
+        simulationTime: 0,
+    };
+}
+// ============================================================================
+// ValueFunction Tests
+// ============================================================================
+(0, node_test_1.describe)('ValueFunction', () => {
+    (0, node_test_1.beforeEach)(() => {
+        (0, value_jepa_js_1.resetValueFunction)();
+    });
+    (0, node_test_1.describe)('initialization', () => {
+        (0, node_test_1.it)('should create with default config', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const config = vf.getConfig();
+            node_assert_1.default.strictEqual(config.gamma, value_jepa_js_1.DEFAULT_VALUE_CONFIG.gamma);
+            node_assert_1.default.strictEqual(config.riskAversion, value_jepa_js_1.DEFAULT_VALUE_CONFIG.riskAversion);
+            node_assert_1.default.ok(config.weights.survival > 0);
+        });
+        (0, node_test_1.it)('should accept custom config', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)({
+                gamma: 0.9,
+                riskAversion: 0.8,
+            });
+            const config = vf.getConfig();
+            node_assert_1.default.strictEqual(config.gamma, 0.9);
+            node_assert_1.default.strictEqual(config.riskAversion, 0.8);
+        });
+        (0, node_test_1.it)('should provide singleton via getValueFunction', () => {
+            const vf1 = (0, value_jepa_js_1.getValueFunction)();
+            const vf2 = (0, value_jepa_js_1.getValueFunction)();
+            node_assert_1.default.strictEqual(vf1, vf2);
+        });
+    });
+    (0, node_test_1.describe)('estimate', () => {
+        (0, node_test_1.it)('should return value in range [-1, 1]', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState();
+            const estimate = vf.estimate(state);
+            node_assert_1.default.ok(estimate.value >= -1 && estimate.value <= 1, `Value ${estimate.value} out of range`);
+        });
+        (0, node_test_1.it)('should return all components in range [0, 1]', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState();
+            const estimate = vf.estimate(state);
+            for (const [name, value] of Object.entries(estimate.components)) {
+                node_assert_1.default.ok(value >= 0 && value <= 1, `Component ${name} = ${value} out of range`);
+            }
+        });
+        (0, node_test_1.it)('should apply discount factor for future horizons', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)({ gamma: 0.9 });
+            const state = createTestState({ energy: 0.8 });
+            const now = vf.estimate(state, 0);
+            const future = vf.estimate(state, 5);
+            node_assert_1.default.ok(future.discount < now.discount, 'Future should have smaller discount');
+            node_assert_1.default.ok(Math.abs(future.discount - Math.pow(0.9, 5)) < 0.001);
+        });
+        (0, node_test_1.it)('should return valueUncertainty estimate', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState({ confidence: 0.5 });
+            const estimate = vf.estimate(state);
+            node_assert_1.default.ok(estimate.valueUncertainty >= 0 && estimate.valueUncertainty <= 1);
+            node_assert_1.default.ok(estimate.valueConfidence >= 0 && estimate.valueConfidence <= 1);
+        });
+        (0, node_test_1.it)('should give higher survival value to high-energy states', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const lowEnergy = createTestState({ energy: 0.1 });
+            const highEnergy = createTestState({ energy: 0.9 });
+            const lowEstimate = vf.estimate(lowEnergy);
+            const highEstimate = vf.estimate(highEnergy);
+            // Higher energy should give higher survival component
+            node_assert_1.default.ok(highEstimate.components.survival >= lowEstimate.components.survival, `High energy survival ${highEstimate.components.survival} should >= low ${lowEstimate.components.survival}`);
+        });
+    });
+    (0, node_test_1.describe)('Q-value computation', () => {
+        (0, node_test_1.it)('should compute Q-value for state-action pair', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState();
+            const action = createTestAction();
+            const nextState = createTestState({ energy: 0.8 });
+            const predicted = createTestPrediction(nextState, action);
+            const qValue = vf.computeQValue(state, action, predicted);
+            node_assert_1.default.ok('qValue' in qValue);
+            node_assert_1.default.ok('advantage' in qValue);
+            node_assert_1.default.ok('qUncertainty' in qValue);
+            node_assert_1.default.strictEqual(qValue.action, action);
+        });
+        (0, node_test_1.it)('should compute positive advantage for improving actions', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState({ energy: 0.3 });
+            const action = createTestAction('transform');
+            const nextState = createTestState({ energy: 0.8 });
+            const predicted = createTestPrediction(nextState, action);
+            const qValue = vf.computeQValue(state, action, predicted);
+            // Action that increases energy should have reasonable Q-value
+            node_assert_1.default.ok(qValue.qValue > vf.estimate(state).value * 0.9 || qValue.advantage > -0.5, 'Improving action should have reasonable Q-value');
+        });
+    });
+    (0, node_test_1.describe)('action ranking', () => {
+        (0, node_test_1.it)('should rank actions by Q-value', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)({ riskAversion: 0 });
+            // Create actions with different predicted outcomes
+            const actionValues = [
+                {
+                    action: createTestAction('observe'),
+                    qValue: 0.2,
+                    advantage: -0.3,
+                    qUncertainty: 0.1,
+                    predictedState: createTestPrediction(createTestState({ energy: 0.2 })),
+                },
+                {
+                    action: createTestAction('execute'),
+                    qValue: 0.8,
+                    advantage: 0.3,
+                    qUncertainty: 0.1,
+                    predictedState: createTestPrediction(createTestState({ energy: 0.8 })),
+                },
+                {
+                    action: createTestAction('query'),
+                    qValue: 0.5,
+                    advantage: 0,
+                    qUncertainty: 0.1,
+                    predictedState: createTestPrediction(createTestState({ energy: 0.5 })),
+                },
+            ];
+            const ranked = vf.rankActions(actionValues);
+            node_assert_1.default.strictEqual(ranked[0].action.type, 'execute');
+            node_assert_1.default.strictEqual(ranked[1].action.type, 'query');
+            node_assert_1.default.strictEqual(ranked[2].action.type, 'observe');
+        });
+        (0, node_test_1.it)('should prefer lower uncertainty when risk-averse', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)({ riskAversion: 1.0 });
+            const actionValues = [
+                {
+                    action: createTestAction('execute'),
+                    qValue: 0.6,
+                    advantage: 0.1,
+                    qUncertainty: 0.5, // High uncertainty
+                    predictedState: createTestPrediction(createTestState()),
+                },
+                {
+                    action: createTestAction('observe'),
+                    qValue: 0.5,
+                    advantage: 0,
+                    qUncertainty: 0.1, // Low uncertainty
+                    predictedState: createTestPrediction(createTestState()),
+                },
+            ];
+            const ranked = vf.rankActions(actionValues);
+            // Safe (observe) should be preferred despite lower Q-value
+            node_assert_1.default.strictEqual(ranked[0].action.type, 'observe');
+        });
+    });
+    (0, node_test_1.describe)('trajectory evaluation', () => {
+        (0, node_test_1.it)('should evaluate trajectory with cumulative value', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const states = [
+                createTestState({ energy: 0.5 }),
+                createTestState({ energy: 0.6 }),
+                createTestState({ energy: 0.7 }),
+            ];
+            const trajectory = createTestTrajectory(states);
+            const valued = vf.evaluateTrajectory(trajectory);
+            node_assert_1.default.ok('totalValue' in valued);
+            node_assert_1.default.ok('expectedValue' in valued);
+            node_assert_1.default.ok('stepValues' in valued);
+            node_assert_1.default.strictEqual(valued.stepValues.length, 3);
+            node_assert_1.default.ok(valued.minValue <= valued.maxValue);
+        });
+        (0, node_test_1.it)('should select best trajectory from candidates', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const goodStates = [
+                createTestState({ energy: 0.5 }),
+                createTestState({ energy: 0.8 }),
+            ];
+            const goodTrajectory = createTestTrajectory(goodStates);
+            const badStates = [
+                createTestState({ energy: 0.5 }),
+                createTestState({ energy: 0.2 }),
+            ];
+            const badTrajectory = createTestTrajectory(badStates);
+            const best = vf.selectBestTrajectory([badTrajectory, goodTrajectory]);
+            // Best should have higher expected value
+            const goodValued = vf.evaluateTrajectory(goodTrajectory);
+            node_assert_1.default.ok(best.expectedValue >= goodValued.expectedValue - 0.1, 'Selected trajectory should have high expected value');
+        });
+    });
+    (0, node_test_1.describe)('Expected Free Energy', () => {
+        (0, node_test_1.it)('should compute EFE components', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const currentState = createTestState();
+            const policy = [createTestAction()];
+            const predictedStates = [
+                createTestPrediction(createTestState({ energy: 0.7 })),
+            ];
+            const preferences = createTestState({ energy: 1.0 });
+            const efe = vf.computeExpectedFreeEnergy(currentState, policy, predictedStates, preferences);
+            node_assert_1.default.ok('expectedFreeEnergy' in efe);
+            node_assert_1.default.ok('ambiguity' in efe);
+            node_assert_1.default.ok('risk' in efe);
+            node_assert_1.default.ok('pragmaticValue' in efe);
+            node_assert_1.default.ok('epistemicValue' in efe);
+            node_assert_1.default.ok('instrumentalValue' in efe);
+        });
+        (0, node_test_1.it)('should have lower EFE for states closer to preferences', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const currentState = createTestState({ energy: 0.5 });
+            const preferences = createTestState({ energy: 1.0 });
+            // Close to preferences
+            const closeState = createTestPrediction(createTestState({ energy: 0.9 }));
+            // Far from preferences
+            const farState = createTestPrediction(createTestState({ energy: 0.2 }));
+            const closeFE = vf.computeExpectedFreeEnergy(currentState, [createTestAction()], [closeState], preferences);
+            const farFE = vf.computeExpectedFreeEnergy(currentState, [createTestAction()], [farState], preferences);
+            // Close to preferences should have lower risk
+            node_assert_1.default.ok(closeFE.risk <= farFE.risk, `Close risk ${closeFE.risk} should <= far risk ${farFE.risk}`);
+        });
+    });
+    (0, node_test_1.describe)('learning', () => {
+        (0, node_test_1.it)('should update value function from observed returns', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState({ energy: 0.5 });
+            // Update with high observed return
+            vf.update(state, 0.9, 0.1);
+            vf.update(state, 0.9, 0.1);
+            vf.update(state, 0.9, 0.1);
+            const stats = vf.getStats();
+            node_assert_1.default.ok(stats.count === 3);
+        });
+        (0, node_test_1.it)('should track value statistics', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            const state = createTestState();
+            vf.update(state, 0.5, 0.01);
+            vf.update(state, 0.6, 0.01);
+            vf.update(state, 0.7, 0.01);
+            const stats = vf.getStats();
+            node_assert_1.default.strictEqual(stats.count, 3);
+            node_assert_1.default.ok(stats.mean > 0.4 && stats.mean < 0.8);
+            node_assert_1.default.ok(stats.min <= 0.5);
+            node_assert_1.default.ok(stats.max >= 0.7);
+        });
+    });
+    (0, node_test_1.describe)('weight configuration', () => {
+        (0, node_test_1.it)('should allow setting preference weights', () => {
+            const vf = (0, value_jepa_js_1.createValueFunction)();
+            vf.setWeights({ survival: 0.9, novelty: 0.1 });
+            const config = vf.getConfig();
+            // Weights should be normalized
+            const sum = Object.values(config.weights).reduce((a, b) => a + b, 0);
+            node_assert_1.default.ok(Math.abs(sum - 1) < 0.001, 'Weights should sum to 1');
+            // Survival should be highest
+            node_assert_1.default.ok(config.weights.survival > config.weights.novelty);
+        });
+    });
+});
+// ============================================================================
+// Action Sampling Tests
+// ============================================================================
+(0, node_test_1.describe)('Action Sampling', () => {
+    (0, node_test_1.it)('should sample action from distribution', () => {
+        const vf = (0, value_jepa_js_1.createValueFunction)({ temperature: 1.0 });
+        const actionValues = [
+            {
+                action: createTestAction('observe'),
+                qValue: 0.1,
+                advantage: 0,
+                qUncertainty: 0.1,
+                predictedState: createTestPrediction(createTestState()),
+            },
+            {
+                action: createTestAction('execute'),
+                qValue: 0.9,
+                advantage: 0.5,
+                qUncertainty: 0.1,
+                predictedState: createTestPrediction(createTestState()),
+            },
+        ];
+        // Sample many times
+        const counts = { observe: 0, execute: 0 };
+        for (let i = 0; i < 100; i++) {
+            const sampled = vf.sampleAction(actionValues);
+            counts[sampled.action.type]++;
+        }
+        // Higher Q-value should be sampled more often
+        node_assert_1.default.ok(counts['execute'] > counts['observe'], `execute (${counts['execute']}) should be sampled more than observe (${counts['observe']})`);
+    });
+    (0, node_test_1.it)('should respect temperature parameter', () => {
+        // Low temperature = more deterministic
+        const vfLow = (0, value_jepa_js_1.createValueFunction)({ temperature: 0.1 });
+        // High temperature = more exploratory
+        const vfHigh = (0, value_jepa_js_1.createValueFunction)({ temperature: 5.0 });
+        const actionValues = [
+            {
+                action: createTestAction('observe'),
+                qValue: 0.4,
+                advantage: 0,
+                qUncertainty: 0.1,
+                predictedState: createTestPrediction(createTestState()),
+            },
+            {
+                action: createTestAction('execute'),
+                qValue: 0.6,
+                advantage: 0.1,
+                qUncertainty: 0.1,
+                predictedState: createTestPrediction(createTestState()),
+            },
+        ];
+        // Count selections at low temperature
+        let lowTempExecuteCount = 0;
+        for (let i = 0; i < 50; i++) {
+            if (vfLow.sampleAction(actionValues).action.type === 'execute') {
+                lowTempExecuteCount++;
+            }
+        }
+        // Count selections at high temperature
+        let highTempExecuteCount = 0;
+        for (let i = 0; i < 50; i++) {
+            if (vfHigh.sampleAction(actionValues).action.type === 'execute') {
+                highTempExecuteCount++;
+            }
+        }
+        // Low temperature should select 'execute' more consistently
+        node_assert_1.default.ok(lowTempExecuteCount >= highTempExecuteCount * 0.8, `Low temp selections (${lowTempExecuteCount}) should be more deterministic than high temp (${highTempExecuteCount})`);
+    });
+});
+// ============================================================================
+// Integration Tests
+// ============================================================================
+(0, node_test_1.describe)('Value-Guided JEPA Integration', () => {
+    (0, node_test_1.it)('should work end-to-end: estimate -> rank -> select', () => {
+        const vf = (0, value_jepa_js_1.createValueFunction)();
+        const state = createTestState({ energy: 0.5 });
+        // Create candidate actions with predictions
+        const candidates = ['observe', 'query', 'execute'];
+        // Compute Q-values for each action
+        const actionValues = candidates.map((type, i) => {
+            const action = createTestAction(type);
+            const nextEnergy = 0.3 + i * 0.2; // observe=0.3, query=0.5, execute=0.7
+            const nextState = createTestState({ energy: nextEnergy });
+            return vf.computeQValue(state, action, createTestPrediction(nextState, action));
+        });
+        // Rank actions
+        const ranked = vf.rankActions(actionValues);
+        // Select best
+        const best = ranked[0];
+        node_assert_1.default.ok(best.action.type);
+        node_assert_1.default.ok(best.qValue !== undefined);
+    });
+    (0, node_test_1.it)('should compute trajectory value consistently', () => {
+        const vf = (0, value_jepa_js_1.createValueFunction)();
+        // Create increasing value trajectory
+        const states = [0.4, 0.5, 0.6, 0.7, 0.8].map(e => createTestState({ energy: e }));
+        const trajectory = createTestTrajectory(states);
+        const valued = vf.evaluateTrajectory(trajectory);
+        // Values should generally have decreasing discount
+        for (let i = 1; i < valued.stepValues.length; i++) {
+            const prev = valued.stepValues[i - 1];
+            const curr = valued.stepValues[i];
+            node_assert_1.default.ok(curr.discount <= prev.discount, `Discount should decrease over time: ${prev.discount} -> ${curr.discount}`);
+        }
+    });
+});
+console.log('Value-Guided JEPA Tests - Ready to run');
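For orientation, the test file above drives the package's value-jepa module through Node's built-in node:test runner. The short sketch below mirrors only calls the tests themselves make (createValueFunction, estimate) and the state shape built by createTestState; the relative require path and the run command are taken from this file and the file listing, not verified against the package's documented entry points, so treat it as an assumption-laden example rather than official usage.

// Minimal sketch, assuming it sits alongside value-jepa.js in dist/src/world-model/.
// The full suite in this package can be run with: node --test dist/src/world-model/test-value-jepa.js
const { createValueFunction } = require('./value-jepa.js');
const vf = createValueFunction({ gamma: 0.9, riskAversion: 0.8 });
const state = {
    vector: Array(64).fill(0.5),
    dimensions: 64,
    sourceModality: 'state',
    sourceId: 'example',
    timestamp: new Date(),
    confidence: 0.7,
    entropy: 0.3,
};
const estimate = vf.estimate(state);
console.log(estimate.value, estimate.components); // value in [-1, 1], components in [0, 1]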