genesis-ai-cli 7.4.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +78 -0
- package/README.md +282 -0
- package/dist/src/active-inference/actions.d.ts +75 -0
- package/dist/src/active-inference/actions.js +250 -0
- package/dist/src/active-inference/autonomous-loop.d.ts +103 -0
- package/dist/src/active-inference/autonomous-loop.js +289 -0
- package/dist/src/active-inference/core.d.ts +85 -0
- package/dist/src/active-inference/core.js +555 -0
- package/dist/src/active-inference/demo-autonomous-loop.d.ts +8 -0
- package/dist/src/active-inference/demo-autonomous-loop.js +338 -0
- package/dist/src/active-inference/demo-value-integration.d.ts +8 -0
- package/dist/src/active-inference/demo-value-integration.js +174 -0
- package/dist/src/active-inference/index.d.ts +32 -0
- package/dist/src/active-inference/index.js +88 -0
- package/dist/src/active-inference/integration.d.ts +114 -0
- package/dist/src/active-inference/integration.js +698 -0
- package/dist/src/active-inference/memory-integration.d.ts +51 -0
- package/dist/src/active-inference/memory-integration.js +232 -0
- package/dist/src/active-inference/observations.d.ts +67 -0
- package/dist/src/active-inference/observations.js +147 -0
- package/dist/src/active-inference/test-active-inference.d.ts +8 -0
- package/dist/src/active-inference/test-active-inference.js +320 -0
- package/dist/src/active-inference/test-value-integration.d.ts +6 -0
- package/dist/src/active-inference/test-value-integration.js +168 -0
- package/dist/src/active-inference/types.d.ts +150 -0
- package/dist/src/active-inference/types.js +59 -0
- package/dist/src/active-inference/value-integration.d.ts +164 -0
- package/dist/src/active-inference/value-integration.js +459 -0
- package/dist/src/agents/base-agent.d.ts +53 -0
- package/dist/src/agents/base-agent.js +178 -0
- package/dist/src/agents/builder.d.ts +67 -0
- package/dist/src/agents/builder.js +537 -0
- package/dist/src/agents/critic.d.ts +35 -0
- package/dist/src/agents/critic.js +322 -0
- package/dist/src/agents/ethicist.d.ts +54 -0
- package/dist/src/agents/ethicist.js +393 -0
- package/dist/src/agents/explorer.d.ts +26 -0
- package/dist/src/agents/explorer.js +216 -0
- package/dist/src/agents/feeling.d.ts +41 -0
- package/dist/src/agents/feeling.js +320 -0
- package/dist/src/agents/index.d.ts +111 -0
- package/dist/src/agents/index.js +222 -0
- package/dist/src/agents/memory.d.ts +69 -0
- package/dist/src/agents/memory.js +404 -0
- package/dist/src/agents/message-bus.d.ts +88 -0
- package/dist/src/agents/message-bus.js +267 -0
- package/dist/src/agents/narrator.d.ts +90 -0
- package/dist/src/agents/narrator.js +473 -0
- package/dist/src/agents/planner.d.ts +38 -0
- package/dist/src/agents/planner.js +341 -0
- package/dist/src/agents/predictor.d.ts +73 -0
- package/dist/src/agents/predictor.js +506 -0
- package/dist/src/agents/sensor.d.ts +88 -0
- package/dist/src/agents/sensor.js +377 -0
- package/dist/src/agents/test-agents.d.ts +6 -0
- package/dist/src/agents/test-agents.js +73 -0
- package/dist/src/agents/types.d.ts +194 -0
- package/dist/src/agents/types.js +7 -0
- package/dist/src/brain/index.d.ts +185 -0
- package/dist/src/brain/index.js +843 -0
- package/dist/src/brain/trace.d.ts +91 -0
- package/dist/src/brain/trace.js +327 -0
- package/dist/src/brain/types.d.ts +165 -0
- package/dist/src/brain/types.js +51 -0
- package/dist/src/cli/chat.d.ts +237 -0
- package/dist/src/cli/chat.js +1959 -0
- package/dist/src/cli/dispatcher.d.ts +182 -0
- package/dist/src/cli/dispatcher.js +718 -0
- package/dist/src/cli/human-loop.d.ts +170 -0
- package/dist/src/cli/human-loop.js +543 -0
- package/dist/src/cli/index.d.ts +12 -0
- package/dist/src/cli/index.js +28 -0
- package/dist/src/cli/interactive.d.ts +141 -0
- package/dist/src/cli/interactive.js +757 -0
- package/dist/src/cli/ui.d.ts +205 -0
- package/dist/src/cli/ui.js +632 -0
- package/dist/src/consciousness/attention-schema.d.ts +154 -0
- package/dist/src/consciousness/attention-schema.js +432 -0
- package/dist/src/consciousness/global-workspace.d.ts +149 -0
- package/dist/src/consciousness/global-workspace.js +422 -0
- package/dist/src/consciousness/index.d.ts +186 -0
- package/dist/src/consciousness/index.js +476 -0
- package/dist/src/consciousness/phi-calculator.d.ts +119 -0
- package/dist/src/consciousness/phi-calculator.js +445 -0
- package/dist/src/consciousness/phi-decisions.d.ts +169 -0
- package/dist/src/consciousness/phi-decisions.js +383 -0
- package/dist/src/consciousness/phi-monitor.d.ts +153 -0
- package/dist/src/consciousness/phi-monitor.js +465 -0
- package/dist/src/consciousness/types.d.ts +260 -0
- package/dist/src/consciousness/types.js +44 -0
- package/dist/src/daemon/dream-mode.d.ts +115 -0
- package/dist/src/daemon/dream-mode.js +470 -0
- package/dist/src/daemon/index.d.ts +162 -0
- package/dist/src/daemon/index.js +542 -0
- package/dist/src/daemon/maintenance.d.ts +139 -0
- package/dist/src/daemon/maintenance.js +549 -0
- package/dist/src/daemon/process.d.ts +82 -0
- package/dist/src/daemon/process.js +442 -0
- package/dist/src/daemon/scheduler.d.ts +90 -0
- package/dist/src/daemon/scheduler.js +494 -0
- package/dist/src/daemon/types.d.ts +213 -0
- package/dist/src/daemon/types.js +50 -0
- package/dist/src/epistemic/index.d.ts +74 -0
- package/dist/src/epistemic/index.js +225 -0
- package/dist/src/grounding/epistemic-stack.d.ts +100 -0
- package/dist/src/grounding/epistemic-stack.js +408 -0
- package/dist/src/grounding/feedback.d.ts +98 -0
- package/dist/src/grounding/feedback.js +276 -0
- package/dist/src/grounding/index.d.ts +123 -0
- package/dist/src/grounding/index.js +224 -0
- package/dist/src/grounding/verifier.d.ts +149 -0
- package/dist/src/grounding/verifier.js +484 -0
- package/dist/src/healing/detector.d.ts +110 -0
- package/dist/src/healing/detector.js +436 -0
- package/dist/src/healing/fixer.d.ts +138 -0
- package/dist/src/healing/fixer.js +572 -0
- package/dist/src/healing/index.d.ts +23 -0
- package/dist/src/healing/index.js +43 -0
- package/dist/src/hooks/index.d.ts +135 -0
- package/dist/src/hooks/index.js +317 -0
- package/dist/src/index.d.ts +23 -0
- package/dist/src/index.js +1266 -0
- package/dist/src/kernel/index.d.ts +155 -0
- package/dist/src/kernel/index.js +795 -0
- package/dist/src/kernel/invariants.d.ts +153 -0
- package/dist/src/kernel/invariants.js +355 -0
- package/dist/src/kernel/test-kernel.d.ts +6 -0
- package/dist/src/kernel/test-kernel.js +108 -0
- package/dist/src/kernel/test-real-mcp.d.ts +10 -0
- package/dist/src/kernel/test-real-mcp.js +295 -0
- package/dist/src/llm/index.d.ts +146 -0
- package/dist/src/llm/index.js +428 -0
- package/dist/src/llm/router.d.ts +136 -0
- package/dist/src/llm/router.js +510 -0
- package/dist/src/mcp/index.d.ts +85 -0
- package/dist/src/mcp/index.js +657 -0
- package/dist/src/mcp/resilient.d.ts +139 -0
- package/dist/src/mcp/resilient.js +417 -0
- package/dist/src/memory/cache.d.ts +118 -0
- package/dist/src/memory/cache.js +356 -0
- package/dist/src/memory/cognitive-workspace.d.ts +231 -0
- package/dist/src/memory/cognitive-workspace.js +521 -0
- package/dist/src/memory/consolidation.d.ts +99 -0
- package/dist/src/memory/consolidation.js +443 -0
- package/dist/src/memory/episodic.d.ts +114 -0
- package/dist/src/memory/episodic.js +394 -0
- package/dist/src/memory/forgetting.d.ts +134 -0
- package/dist/src/memory/forgetting.js +324 -0
- package/dist/src/memory/index.d.ts +211 -0
- package/dist/src/memory/index.js +367 -0
- package/dist/src/memory/indexer.d.ts +123 -0
- package/dist/src/memory/indexer.js +479 -0
- package/dist/src/memory/procedural.d.ts +136 -0
- package/dist/src/memory/procedural.js +479 -0
- package/dist/src/memory/semantic.d.ts +132 -0
- package/dist/src/memory/semantic.js +497 -0
- package/dist/src/memory/types.d.ts +193 -0
- package/dist/src/memory/types.js +15 -0
- package/dist/src/orchestrator.d.ts +65 -0
- package/dist/src/orchestrator.js +317 -0
- package/dist/src/persistence/index.d.ts +257 -0
- package/dist/src/persistence/index.js +763 -0
- package/dist/src/pipeline/executor.d.ts +51 -0
- package/dist/src/pipeline/executor.js +695 -0
- package/dist/src/pipeline/index.d.ts +7 -0
- package/dist/src/pipeline/index.js +11 -0
- package/dist/src/self-production.d.ts +67 -0
- package/dist/src/self-production.js +205 -0
- package/dist/src/subagents/executor.d.ts +58 -0
- package/dist/src/subagents/executor.js +283 -0
- package/dist/src/subagents/index.d.ts +37 -0
- package/dist/src/subagents/index.js +53 -0
- package/dist/src/subagents/registry.d.ts +23 -0
- package/dist/src/subagents/registry.js +167 -0
- package/dist/src/subagents/types.d.ts +79 -0
- package/dist/src/subagents/types.js +14 -0
- package/dist/src/tools/bash.d.ts +139 -0
- package/dist/src/tools/bash.js +583 -0
- package/dist/src/tools/edit.d.ts +125 -0
- package/dist/src/tools/edit.js +424 -0
- package/dist/src/tools/git.d.ts +179 -0
- package/dist/src/tools/git.js +504 -0
- package/dist/src/tools/index.d.ts +21 -0
- package/dist/src/tools/index.js +163 -0
- package/dist/src/types.d.ts +145 -0
- package/dist/src/types.js +7 -0
- package/dist/src/world-model/decoder.d.ts +163 -0
- package/dist/src/world-model/decoder.js +517 -0
- package/dist/src/world-model/digital-twin.d.ts +219 -0
- package/dist/src/world-model/digital-twin.js +695 -0
- package/dist/src/world-model/encoder.d.ts +141 -0
- package/dist/src/world-model/encoder.js +564 -0
- package/dist/src/world-model/index.d.ts +221 -0
- package/dist/src/world-model/index.js +772 -0
- package/dist/src/world-model/predictor.d.ts +161 -0
- package/dist/src/world-model/predictor.js +681 -0
- package/dist/src/world-model/test-value-jepa.d.ts +8 -0
- package/dist/src/world-model/test-value-jepa.js +430 -0
- package/dist/src/world-model/types.d.ts +341 -0
- package/dist/src/world-model/types.js +69 -0
- package/dist/src/world-model/value-jepa.d.ts +247 -0
- package/dist/src/world-model/value-jepa.js +622 -0
- package/dist/test/brain.test.d.ts +11 -0
- package/dist/test/brain.test.js +358 -0
- package/dist/test/cli/dispatcher.test.d.ts +4 -0
- package/dist/test/cli/dispatcher.test.js +332 -0
- package/dist/test/cli/human-loop.test.d.ts +4 -0
- package/dist/test/cli/human-loop.test.js +270 -0
- package/dist/test/grounding/feedback.test.d.ts +4 -0
- package/dist/test/grounding/feedback.test.js +462 -0
- package/dist/test/grounding/verifier.test.d.ts +4 -0
- package/dist/test/grounding/verifier.test.js +442 -0
- package/dist/test/grounding.test.d.ts +6 -0
- package/dist/test/grounding.test.js +246 -0
- package/dist/test/healing/detector.test.d.ts +4 -0
- package/dist/test/healing/detector.test.js +266 -0
- package/dist/test/healing/fixer.test.d.ts +4 -0
- package/dist/test/healing/fixer.test.js +369 -0
- package/dist/test/integration.test.d.ts +5 -0
- package/dist/test/integration.test.js +290 -0
- package/dist/test/tools/bash.test.d.ts +4 -0
- package/dist/test/tools/bash.test.js +348 -0
- package/dist/test/tools/edit.test.d.ts +4 -0
- package/dist/test/tools/edit.test.js +350 -0
- package/dist/test/tools/git.test.d.ts +4 -0
- package/dist/test/tools/git.test.js +350 -0
- package/package.json +60 -0
package/dist/src/world-model/encoder.d.ts
@@ -0,0 +1,141 @@
+/**
+ * Genesis 6.0 - Latent State Encoder
+ *
+ * JEPA-style encoder that maps multimodal inputs to a unified latent space.
+ *
+ * Key concept: All modalities (text, image, code, state) are encoded to
+ * the same latent representation, enabling cross-modal prediction and
+ * simulation.
+ *
+ * Architecture:
+ * - Modality-specific encoders (text, image, code, state, sensor)
+ * - Fusion layer for multimodal inputs
+ * - Compression and normalization
+ *
+ * References:
+ * - LeCun (2022). A Path Towards Autonomous Machine Intelligence
+ * - CLIP (Radford et al., 2021) - Cross-modal embeddings
+ * - JEPA - Joint Embedding Predictive Architecture
+ *
+ * Usage:
+ * ```typescript
+ * import { createLatentEncoder } from './world-model/encoder.js';
+ *
+ * const encoder = createLatentEncoder({ latentDim: 512 });
+ *
+ * // Encode text
+ * const textState = encoder.encode({ modality: 'text', data: 'Hello world' });
+ *
+ * // Encode image
+ * const imageState = encoder.encodeImage(imageData);
+ *
+ * // Fuse multiple modalities
+ * const fusedState = encoder.fuse([textState, imageState]);
+ * ```
+ */
+import { Modality, MultimodalInput, LatentState, EncoderConfig } from './types.js';
+export interface ModalityEncoder {
+    modality: Modality;
+    encode(input: MultimodalInput): number[];
+    dimensionality: number;
+}
+export type EncoderEventType = 'encoded' | 'fused' | 'compressed' | 'error';
+export type EncoderEventHandler = (event: {
+    type: EncoderEventType;
+    data?: unknown;
+}) => void;
+export declare class LatentEncoder {
+    private config;
+    private modalityEncoders;
+    private eventHandlers;
+    private encodingCount;
+    private fusionCount;
+    private totalEncodingTime;
+    constructor(config?: Partial<EncoderConfig>);
+    private initializeEncoders;
+    /**
+     * Encode any modality to latent space
+     */
+    encode(input: MultimodalInput): LatentState;
+    /**
+     * Fuse multiple latent states into one
+     */
+    fuse(states: LatentState[]): LatentState;
+    /**
+     * Encode text to latent vector
+     * Uses multi-scale hashing for semantic representation
+     */
+    private encodeText;
+    /**
+     * Encode image to latent vector
+     * Placeholder - would integrate with Stability-AI MCP
+     */
+    private encodeImage;
+    /**
+     * Encode code to latent vector
+     * AST-aware encoding
+     */
+    private encodeCode;
+    /**
+     * Encode state to latent vector
+     */
+    private encodeState;
+    /**
+     * Encode sensor data to latent vector
+     */
+    private encodeSensor;
+    /**
+     * Compress vector using simple pooling
+     */
+    private compress;
+    /**
+     * L2 normalize vector
+     */
+    private normalize;
+    /**
+     * Calculate entropy of vector
+     */
+    private calculateEntropy;
+    /**
+     * Calculate confidence based on vector properties
+     */
+    private calculateConfidence;
+    /**
+     * Extract named features from vector
+     */
+    private extractFeatures;
+    /**
+     * Simple string hash
+     */
+    private simpleHash;
+    /**
+     * Flatten object to key-value pairs
+     */
+    private flattenObject;
+    /**
+     * Generate source ID from input
+     */
+    private generateSourceId;
+    /**
+     * Cosine similarity between two latent states
+     */
+    similarity(a: LatentState, b: LatentState): number;
+    /**
+     * Euclidean distance between two latent states
+     */
+    distance(a: LatentState, b: LatentState): number;
+    on(handler: EncoderEventHandler): () => void;
+    private emit;
+    stats(): {
+        encodingCount: number;
+        fusionCount: number;
+        avgEncodingTime: number;
+        latentDim: number;
+        modalities: Modality[];
+    };
+    /**
+     * Get configuration
+     */
+    getConfig(): EncoderConfig;
+}
+export declare function createLatentEncoder(config?: Partial<EncoderConfig>): LatentEncoder;
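
The declaration file above defines the encoder's public surface: `encode`, `fuse`, `similarity`, `distance`, `on`, `stats`, and `getConfig`, plus the `createLatentEncoder` factory. The sketch below is a minimal, hypothetical usage example assembled from those declarations and from the implementation that follows; the deep import path and the `MultimodalInput` field names (`timestamp`, `language`) are assumptions inferred from the compiled dist layout and from how the encoder reads its inputs, not a documented API.

```ts
// Hypothetical usage sketch; the subpath import and input field names are assumptions.
import { createLatentEncoder } from 'genesis-ai-cli/dist/src/world-model/encoder.js';

const encoder = createLatentEncoder({ latentDim: 512 });

// `timestamp` is assumed required: calculateConfidence() reads input.timestamp.getTime().
const textState = encoder.encode({
  modality: 'text',
  data: 'refactor the parser module',
  timestamp: new Date(),
});

// `language` is read by the code encoder for its language feature.
const codeState = encoder.encode({
  modality: 'code',
  data: 'export const parse = (src: string) => src.trim();',
  language: 'typescript',
  timestamp: new Date(),
});

// Confidence-weighted fusion, then cosine similarity in [-1, 1].
const fused = encoder.fuse([textState, codeState]);
console.log(encoder.similarity(fused, textState));
console.log(encoder.stats()); // { encodingCount: 2, fusionCount: 1, ... }
```
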
package/dist/src/world-model/encoder.js
@@ -0,0 +1,564 @@
+"use strict";
+/**
+ * Genesis 6.0 - Latent State Encoder
+ *
+ * JEPA-style encoder that maps multimodal inputs to a unified latent space.
+ *
+ * Key concept: All modalities (text, image, code, state) are encoded to
+ * the same latent representation, enabling cross-modal prediction and
+ * simulation.
+ *
+ * Architecture:
+ * - Modality-specific encoders (text, image, code, state, sensor)
+ * - Fusion layer for multimodal inputs
+ * - Compression and normalization
+ *
+ * References:
+ * - LeCun (2022). A Path Towards Autonomous Machine Intelligence
+ * - CLIP (Radford et al., 2021) - Cross-modal embeddings
+ * - JEPA - Joint Embedding Predictive Architecture
+ *
+ * Usage:
+ * ```typescript
+ * import { createLatentEncoder } from './world-model/encoder.js';
+ *
+ * const encoder = createLatentEncoder({ latentDim: 512 });
+ *
+ * // Encode text
+ * const textState = encoder.encode({ modality: 'text', data: 'Hello world' });
+ *
+ * // Encode image
+ * const imageState = encoder.encodeImage(imageData);
+ *
+ * // Fuse multiple modalities
+ * const fusedState = encoder.fuse([textState, imageState]);
+ * ```
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LatentEncoder = void 0;
+exports.createLatentEncoder = createLatentEncoder;
+const crypto_1 = require("crypto");
+const types_js_1 = require("./types.js");
+class LatentEncoder {
+    config;
+    modalityEncoders = new Map();
+    eventHandlers = new Set();
+    // Statistics
+    encodingCount = 0;
+    fusionCount = 0;
+    totalEncodingTime = 0;
+    constructor(config = {}) {
+        this.config = { ...types_js_1.DEFAULT_ENCODER_CONFIG, ...config };
+        this.initializeEncoders();
+    }
+    // ============================================================================
+    // Initialization
+    // ============================================================================
+    initializeEncoders() {
+        // Text encoder - uses character-level + word-level hashing
+        this.modalityEncoders.set('text', {
+            modality: 'text',
+            dimensionality: this.config.latentDim,
+            encode: (input) => this.encodeText(input),
+        });
+        // Image encoder - placeholder for Stability-AI MCP integration
+        this.modalityEncoders.set('image', {
+            modality: 'image',
+            dimensionality: this.config.latentDim,
+            encode: (input) => this.encodeImage(input),
+        });
+        // Code encoder - AST-aware encoding
+        this.modalityEncoders.set('code', {
+            modality: 'code',
+            dimensionality: this.config.latentDim,
+            encode: (input) => this.encodeCode(input),
+        });
+        // State encoder - structured state encoding
+        this.modalityEncoders.set('state', {
+            modality: 'state',
+            dimensionality: this.config.latentDim,
+            encode: (input) => this.encodeState(input),
+        });
+        // Sensor encoder - MCP sensor data
+        this.modalityEncoders.set('sensor', {
+            modality: 'sensor',
+            dimensionality: this.config.latentDim,
+            encode: (input) => this.encodeSensor(input),
+        });
+    }
+    // ============================================================================
+    // Main Encoding
+    // ============================================================================
+    /**
+     * Encode any modality to latent space
+     */
+    encode(input) {
+        const startTime = Date.now();
+        const encoder = this.modalityEncoders.get(input.modality);
+        if (!encoder) {
+            throw new Error(`No encoder for modality: ${input.modality}`);
+        }
+        // Encode to raw vector
+        let vector = encoder.encode(input);
+        // Apply compression if enabled
+        if (this.config.useCompression) {
+            vector = this.compress(vector);
+        }
+        // Normalize if enabled
+        if (this.config.normalizeOutput) {
+            vector = this.normalize(vector);
+        }
+        // Apply modality weight
+        const weight = this.config.modalityWeights[input.modality];
+        vector = vector.map((v) => v * weight);
+        // Calculate metadata
+        const entropy = this.calculateEntropy(vector);
+        const features = this.extractFeatures(vector, input.modality);
+        const state = {
+            vector,
+            dimensions: vector.length,
+            sourceModality: input.modality,
+            sourceId: this.generateSourceId(input),
+            timestamp: new Date(),
+            confidence: this.calculateConfidence(vector, input),
+            entropy,
+            features,
+        };
+        // Update stats
+        this.encodingCount++;
+        this.totalEncodingTime += Date.now() - startTime;
+        this.emit({ type: 'encoded', data: { modality: input.modality, dimensions: vector.length } });
+        return state;
+    }
+    /**
+     * Fuse multiple latent states into one
+     */
+    fuse(states) {
+        if (states.length === 0) {
+            throw new Error('Cannot fuse empty state array');
+        }
+        if (states.length === 1) {
+            return states[0];
+        }
+        // Weighted average based on confidence
+        const totalConfidence = states.reduce((sum, s) => sum + s.confidence, 0);
+        const fusedVector = new Array(this.config.latentDim).fill(0);
+        for (const state of states) {
+            const weight = state.confidence / totalConfidence;
+            for (let i = 0; i < Math.min(state.vector.length, fusedVector.length); i++) {
+                fusedVector[i] += state.vector[i] * weight;
+            }
+        }
+        // Normalize fused result
+        const normalized = this.normalize(fusedVector);
+        // Combine features
+        const allFeatures = [];
+        for (const state of states) {
+            if (state.features) {
+                allFeatures.push(...state.features);
+            }
+        }
+        const fusedState = {
+            vector: normalized,
+            dimensions: normalized.length,
+            sourceModality: 'state', // Fused states are multimodal
+            sourceId: `fused-${states.map((s) => s.sourceId).join('-')}`,
+            timestamp: new Date(),
+            confidence: totalConfidence / states.length,
+            entropy: this.calculateEntropy(normalized),
+            features: allFeatures.slice(0, 10), // Limit features
+        };
+        this.fusionCount++;
+        this.emit({ type: 'fused', data: { inputCount: states.length } });
+        return fusedState;
+    }
+    // ============================================================================
+    // Modality-Specific Encoders
+    // ============================================================================
+    /**
+     * Encode text to latent vector
+     * Uses multi-scale hashing for semantic representation
+     */
+    encodeText(input) {
+        const text = input.data;
+        const vector = new Array(this.config.latentDim).fill(0);
+        // Character-level features (first 1/3)
+        const charSection = Math.floor(this.config.latentDim / 3);
+        for (let i = 0; i < text.length && i < charSection; i++) {
+            const charCode = text.charCodeAt(i);
+            vector[i % charSection] += Math.sin(charCode * 0.1) * 0.5;
+        }
+        // Word-level features (middle 1/3)
+        const words = text.split(/\s+/);
+        const wordSection = charSection;
+        for (let i = 0; i < words.length; i++) {
+            const hash = this.simpleHash(words[i]);
+            const idx = charSection + (hash % wordSection);
+            vector[idx] += 1.0 / (i + 1); // Decay by position
+        }
+        // N-gram features (last 1/3)
+        const ngramSection = this.config.latentDim - 2 * charSection;
+        for (let n = 2; n <= 4; n++) {
+            for (let i = 0; i <= text.length - n; i++) {
+                const ngram = text.substring(i, i + n);
+                const hash = this.simpleHash(ngram);
+                const idx = 2 * charSection + (hash % ngramSection);
+                vector[idx] += 0.3 / n;
+            }
+        }
+        return vector;
+    }
+    /**
+     * Encode image to latent vector
+     * Placeholder - would integrate with Stability-AI MCP
+     */
+    encodeImage(input) {
+        const vector = new Array(this.config.latentDim).fill(0);
+        // Simple encoding based on image metadata
+        // In production, would use actual image encoder
+        const data = input.data;
+        const hash = (0, crypto_1.createHash)('sha256').update(data).digest();
+        // Spread hash across vector
+        for (let i = 0; i < hash.length; i++) {
+            const idx = (i * 17) % this.config.latentDim;
+            vector[idx] = (hash[i] - 128) / 128;
+        }
+        // Add size features if available
+        if (input.width && input.height) {
+            vector[0] = Math.log(input.width) / 10;
+            vector[1] = Math.log(input.height) / 10;
+            vector[2] = input.width / input.height; // Aspect ratio
+        }
+        return vector;
+    }
+    /**
+     * Encode code to latent vector
+     * AST-aware encoding
+     */
+    encodeCode(input) {
+        const code = input.data;
+        const language = input.language;
+        const vector = new Array(this.config.latentDim).fill(0);
+        // Language feature
+        const langHash = this.simpleHash(language);
+        vector[0] = (langHash % 100) / 100;
+        // Structural features
+        const lines = code.split('\n');
+        vector[1] = Math.log(lines.length + 1) / 10;
+        // Indentation pattern (structure indicator)
+        let totalIndent = 0;
+        for (const line of lines) {
+            const indent = line.match(/^\s*/)?.[0].length || 0;
+            totalIndent += indent;
+        }
+        vector[2] = totalIndent / (lines.length * 10);
+        // Keyword features
+        const keywords = ['function', 'class', 'if', 'for', 'while', 'return', 'import', 'export', 'const', 'let', 'var'];
+        for (let i = 0; i < keywords.length; i++) {
+            const count = (code.match(new RegExp(`\\b${keywords[i]}\\b`, 'g')) || []).length;
+            vector[10 + i] = Math.min(count / 10, 1);
+        }
+        // Token-level encoding
+        const tokens = code.split(/\s+|[{}()\[\];,]/);
+        const tokenSection = Math.floor(this.config.latentDim / 2);
+        for (let i = 0; i < tokens.length; i++) {
+            if (tokens[i]) {
+                const hash = this.simpleHash(tokens[i]);
+                const idx = 50 + (hash % tokenSection);
+                vector[idx] += 0.1 / Math.log(i + 2);
+            }
+        }
+        return vector;
+    }
+    /**
+     * Encode state to latent vector
+     */
+    encodeState(input) {
+        const state = input.data;
+        const vector = new Array(this.config.latentDim).fill(0);
+        // Flatten state to key-value pairs
+        const pairs = this.flattenObject(state);
+        const numericValues = [];
+        const stringValues = [];
+        for (const [key, value] of pairs) {
+            if (typeof value === 'number') {
+                numericValues.push(value);
+                // Hash key to index
+                const idx = this.simpleHash(key) % this.config.latentDim;
+                vector[idx] = value / (Math.abs(value) + 1); // Normalize
+            }
+            else if (typeof value === 'string') {
+                stringValues.push(value);
+                const idx = this.simpleHash(key + value) % this.config.latentDim;
+                vector[idx] = 0.5;
+            }
+            else if (typeof value === 'boolean') {
+                const idx = this.simpleHash(key) % this.config.latentDim;
+                vector[idx] = value ? 1 : -1;
+            }
+        }
+        // Statistics of numeric values
+        if (numericValues.length > 0) {
+            const mean = numericValues.reduce((a, b) => a + b, 0) / numericValues.length;
+            const variance = numericValues.reduce((a, b) => a + (b - mean) ** 2, 0) / numericValues.length;
+            vector[0] = mean / (Math.abs(mean) + 1);
+            vector[1] = Math.sqrt(variance) / (Math.sqrt(variance) + 1);
+        }
+        return vector;
+    }
+    /**
+     * Encode sensor data to latent vector
+     */
+    encodeSensor(input) {
+        const vector = new Array(this.config.latentDim).fill(0);
+        // Sensor type feature
+        const typeHash = this.simpleHash(input.sensorType);
+        vector[0] = (typeHash % 256) / 256;
+        // Source feature
+        const sourceHash = this.simpleHash(input.source);
+        vector[1] = (sourceHash % 256) / 256;
+        // Encode data based on type
+        const data = input.data;
+        if (typeof data === 'number') {
+            vector[2] = data / (Math.abs(data) + 1);
+        }
+        else if (typeof data === 'string') {
+            const textEncoding = this.encodeText({
+                modality: 'text',
+                data,
+                timestamp: input.timestamp,
+            });
+            // Blend text encoding into sensor vector
+            for (let i = 0; i < textEncoding.length; i++) {
+                vector[10 + (i % (this.config.latentDim - 10))] = textEncoding[i] * 0.5;
+            }
+        }
+        else if (Array.isArray(data)) {
+            for (let i = 0; i < data.length && i < this.config.latentDim - 10; i++) {
+                if (typeof data[i] === 'number') {
+                    vector[10 + i] = data[i] / (Math.abs(data[i]) + 1);
+                }
+            }
+        }
+        else if (typeof data === 'object' && data !== null) {
+            const stateEncoding = this.encodeState({
+                modality: 'state',
+                data: data,
+                timestamp: input.timestamp,
+            });
+            for (let i = 0; i < stateEncoding.length; i++) {
+                vector[i] = (vector[i] + stateEncoding[i]) / 2;
+            }
+        }
+        return vector;
+    }
+    // ============================================================================
+    // Vector Operations
+    // ============================================================================
+    /**
+     * Compress vector using simple pooling
+     */
+    compress(vector) {
+        const targetDim = Math.floor(vector.length * this.config.compressionRatio);
+        if (targetDim >= vector.length)
+            return vector;
+        const compressed = new Array(this.config.latentDim).fill(0);
+        const poolSize = Math.ceil(vector.length / this.config.latentDim);
+        for (let i = 0; i < this.config.latentDim; i++) {
+            const start = i * poolSize;
+            const end = Math.min(start + poolSize, vector.length);
+            let sum = 0;
+            let count = 0;
+            for (let j = start; j < end; j++) {
+                sum += vector[j];
+                count++;
+            }
+            compressed[i] = count > 0 ? sum / count : 0;
+        }
+        this.emit({ type: 'compressed', data: { from: vector.length, to: compressed.length } });
+        return compressed;
+    }
+    /**
+     * L2 normalize vector
+     */
+    normalize(vector) {
+        const magnitude = Math.sqrt(vector.reduce((sum, v) => sum + v * v, 0));
+        if (magnitude === 0)
+            return vector;
+        return vector.map((v) => v / magnitude);
+    }
+    /**
+     * Calculate entropy of vector
+     */
+    calculateEntropy(vector) {
+        // Normalize to probability-like values
+        const absSum = vector.reduce((sum, v) => sum + Math.abs(v), 0);
+        if (absSum === 0)
+            return 0;
+        const probs = vector.map((v) => Math.abs(v) / absSum);
+        let entropy = 0;
+        for (const p of probs) {
+            if (p > 0) {
+                entropy -= p * Math.log2(p);
+            }
+        }
+        // Normalize by max entropy
+        const maxEntropy = Math.log2(vector.length);
+        return entropy / maxEntropy;
+    }
+    /**
+     * Calculate confidence based on vector properties
+     */
+    calculateConfidence(vector, input) {
+        // Base confidence from vector magnitude
+        const magnitude = Math.sqrt(vector.reduce((sum, v) => sum + v * v, 0));
+        let confidence = Math.min(magnitude, 1);
+        // Reduce confidence for sparse vectors
+        const nonZeroCount = vector.filter((v) => Math.abs(v) > 0.01).length;
+        const sparsity = nonZeroCount / vector.length;
+        confidence *= Math.sqrt(sparsity);
+        // Reduce confidence for old data
+        const age = Date.now() - input.timestamp.getTime();
+        const ageFactor = Math.exp(-age / 3600000); // Decay over 1 hour
+        confidence *= ageFactor;
+        return Math.min(1, Math.max(0, confidence));
+    }
+    /**
+     * Extract named features from vector
+     */
+    extractFeatures(vector, modality) {
+        const features = [];
+        // Find top activations
+        const indexed = vector.map((v, i) => ({ value: Math.abs(v), index: i }));
+        indexed.sort((a, b) => b.value - a.value);
+        const topK = 5;
+        for (let i = 0; i < Math.min(topK, indexed.length); i++) {
+            if (indexed[i].value > 0.1) {
+                features.push({
+                    name: `${modality}_feature_${i}`,
+                    indices: [indexed[i].index],
+                    activation: indexed[i].value,
+                });
+            }
+        }
+        return features;
+    }
+    // ============================================================================
+    // Utilities
+    // ============================================================================
+    /**
+     * Simple string hash
+     */
+    simpleHash(str) {
+        let hash = 0;
+        for (let i = 0; i < str.length; i++) {
+            const char = str.charCodeAt(i);
+            hash = ((hash << 5) - hash) + char;
+            hash = hash & hash; // Convert to 32-bit integer
+        }
+        return Math.abs(hash);
+    }
+    /**
+     * Flatten object to key-value pairs
+     */
+    flattenObject(obj, prefix = '') {
+        const pairs = [];
+        for (const [key, value] of Object.entries(obj)) {
+            const fullKey = prefix ? `${prefix}.${key}` : key;
+            if (value !== null && typeof value === 'object' && !Array.isArray(value)) {
+                pairs.push(...this.flattenObject(value, fullKey));
+            }
+            else {
+                pairs.push([fullKey, value]);
+            }
+        }
+        return pairs;
+    }
+    /**
+     * Generate source ID from input
+     */
+    generateSourceId(input) {
+        const hash = (0, crypto_1.createHash)('md5')
+            .update(JSON.stringify(input.data).slice(0, 1000))
+            .digest('hex')
+            .slice(0, 8);
+        return `${input.modality}-${hash}`;
+    }
+    // ============================================================================
+    // Distance / Similarity
+    // ============================================================================
+    /**
+     * Cosine similarity between two latent states
+     */
+    similarity(a, b) {
+        const minLen = Math.min(a.vector.length, b.vector.length);
+        let dotProduct = 0;
+        let magA = 0;
+        let magB = 0;
+        for (let i = 0; i < minLen; i++) {
+            dotProduct += a.vector[i] * b.vector[i];
+            magA += a.vector[i] * a.vector[i];
+            magB += b.vector[i] * b.vector[i];
+        }
+        const magnitude = Math.sqrt(magA) * Math.sqrt(magB);
+        if (magnitude === 0)
+            return 0;
+        return dotProduct / magnitude;
+    }
+    /**
+     * Euclidean distance between two latent states
+     */
+    distance(a, b) {
+        const minLen = Math.min(a.vector.length, b.vector.length);
+        let sumSq = 0;
+        for (let i = 0; i < minLen; i++) {
+            const diff = a.vector[i] - b.vector[i];
+            sumSq += diff * diff;
+        }
+        return Math.sqrt(sumSq);
+    }
+    // ============================================================================
+    // Events
+    // ============================================================================
+    on(handler) {
+        this.eventHandlers.add(handler);
+        return () => this.eventHandlers.delete(handler);
+    }
+    emit(event) {
+        for (const handler of this.eventHandlers) {
+            try {
+                handler(event);
+            }
+            catch (err) {
+                console.error('Encoder event handler error:', err);
+            }
+        }
+    }
+    // ============================================================================
+    // Stats
+    // ============================================================================
+    stats() {
+        return {
+            encodingCount: this.encodingCount,
+            fusionCount: this.fusionCount,
+            avgEncodingTime: this.encodingCount > 0
+                ? this.totalEncodingTime / this.encodingCount
+                : 0,
+            latentDim: this.config.latentDim,
+            modalities: Array.from(this.modalityEncoders.keys()),
+        };
+    }
+    /**
+     * Get configuration
+     */
+    getConfig() {
+        return { ...this.config };
+    }
+}
+exports.LatentEncoder = LatentEncoder;
+// ============================================================================
+// Factory
+// ============================================================================
+function createLatentEncoder(config) {
+    return new LatentEncoder(config);
+}
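
Two pieces of arithmetic in encoder.js are worth calling out: fusion weights each state by its confidence before L2-normalizing the result, and confidence itself decays exponentially with input age on a one-hour time constant. The snippet below is a standalone illustration of that arithmetic, not code from the package.

```ts
// Standalone illustration of the arithmetic used in encoder.js (not package code).

// Freshness decay from calculateConfidence(): exp(-age / 3_600_000),
// so an input one hour old keeps about 37% of its base confidence.
const HOUR_MS = 3_600_000;
const ageFactor = (ageMs: number): number => Math.exp(-ageMs / HOUR_MS);
console.log(ageFactor(0));       // 1
console.log(ageFactor(HOUR_MS)); // ~0.368

// Fusion weights from fuse(): each state contributes confidence / totalConfidence,
// so higher-confidence modalities dominate the fused vector before normalization.
const fusionWeights = (confidences: number[]): number[] => {
  const total = confidences.reduce((sum, c) => sum + c, 0);
  return confidences.map((c) => c / total);
};
console.log(fusionWeights([0.9, 0.3])); // [0.75, 0.25]
```
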