@srsergio/taptapp-ar 1.0.93 → 1.0.95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +16 -14
- package/dist/compiler/offline-compiler.d.ts +3 -3
- package/dist/compiler/offline-compiler.js +50 -33
- package/dist/core/constants.d.ts +2 -0
- package/dist/core/constants.js +4 -1
- package/dist/core/detector/detector-lite.d.ts +6 -5
- package/dist/core/detector/detector-lite.js +46 -16
- package/dist/core/matching/matcher.d.ts +1 -1
- package/dist/core/matching/matcher.js +7 -4
- package/dist/core/matching/matching.d.ts +2 -1
- package/dist/core/matching/matching.js +43 -11
- package/dist/core/perception/bio-inspired-engine.d.ts +130 -0
- package/dist/core/perception/bio-inspired-engine.js +232 -0
- package/dist/core/perception/foveal-attention.d.ts +142 -0
- package/dist/core/perception/foveal-attention.js +280 -0
- package/dist/core/perception/index.d.ts +6 -0
- package/dist/core/perception/index.js +17 -0
- package/dist/core/perception/predictive-coding.d.ts +92 -0
- package/dist/core/perception/predictive-coding.js +278 -0
- package/dist/core/perception/saccadic-controller.d.ts +126 -0
- package/dist/core/perception/saccadic-controller.js +269 -0
- package/dist/core/perception/saliency-map.d.ts +74 -0
- package/dist/core/perception/saliency-map.js +254 -0
- package/dist/core/perception/scale-orchestrator.d.ts +28 -0
- package/dist/core/perception/scale-orchestrator.js +68 -0
- package/dist/core/protocol.d.ts +14 -1
- package/dist/core/protocol.js +33 -1
- package/dist/runtime/bio-inspired-controller.d.ts +135 -0
- package/dist/runtime/bio-inspired-controller.js +358 -0
- package/dist/runtime/controller.d.ts +11 -2
- package/dist/runtime/controller.js +20 -8
- package/dist/runtime/controller.worker.js +2 -2
- package/dist/runtime/simple-ar.d.ts +24 -20
- package/dist/runtime/simple-ar.js +172 -156
- package/package.json +1 -1
- package/src/compiler/offline-compiler.ts +56 -36
- package/src/core/constants.ts +5 -1
- package/src/core/detector/detector-lite.js +46 -16
- package/src/core/matching/matcher.js +8 -4
- package/src/core/matching/matching.js +51 -12
- package/src/core/perception/bio-inspired-engine.js +275 -0
- package/src/core/perception/foveal-attention.js +306 -0
- package/src/core/perception/index.js +18 -0
- package/src/core/perception/predictive-coding.js +327 -0
- package/src/core/perception/saccadic-controller.js +303 -0
- package/src/core/perception/saliency-map.js +296 -0
- package/src/core/perception/scale-orchestrator.js +80 -0
- package/src/core/protocol.ts +38 -1
- package/src/runtime/bio-inspired-controller.ts +448 -0
- package/src/runtime/controller.ts +22 -7
- package/src/runtime/controller.worker.js +2 -1
- package/src/runtime/simple-ar.ts +197 -171
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Main Bio-Inspired Perception Engine
|
|
3
|
+
* Integrates all bio-inspired components for efficient AR processing
|
|
4
|
+
*/
|
|
5
|
+
export class BioInspiredEngine {
|
|
6
|
+
/**
|
|
7
|
+
* @param {number} width - Input image width
|
|
8
|
+
* @param {number} height - Input image height
|
|
9
|
+
* @param {Object} options - Configuration options
|
|
10
|
+
*/
|
|
11
|
+
constructor(width: number, height: number, options?: Object);
|
|
12
|
+
width: number;
|
|
13
|
+
height: number;
|
|
14
|
+
config: {
|
|
15
|
+
constructor: Function;
|
|
16
|
+
toString(): string;
|
|
17
|
+
toLocaleString(): string;
|
|
18
|
+
valueOf(): Object;
|
|
19
|
+
hasOwnProperty(v: PropertyKey): boolean;
|
|
20
|
+
isPrototypeOf(v: Object): boolean;
|
|
21
|
+
propertyIsEnumerable(v: PropertyKey): boolean;
|
|
22
|
+
FOVEA_RADIUS_RATIO: number;
|
|
23
|
+
PARAFOVEA_RADIUS_RATIO: number;
|
|
24
|
+
FOVEA_RESOLUTION: number;
|
|
25
|
+
PARAFOVEA_RESOLUTION: number;
|
|
26
|
+
PERIPHERY_RESOLUTION: number;
|
|
27
|
+
MAX_SACCADES_PER_FRAME: number;
|
|
28
|
+
SACCADE_COOLDOWN_MS: number;
|
|
29
|
+
SALIENCY_THRESHOLD: number;
|
|
30
|
+
CHANGE_THRESHOLD: number;
|
|
31
|
+
PREDICTION_CONFIDENCE: number;
|
|
32
|
+
MOTION_HISTORY_FRAMES: number;
|
|
33
|
+
ENABLE_SKIP_FRAMES: boolean;
|
|
34
|
+
MIN_PROCESSING_INTERVAL_MS: number;
|
|
35
|
+
NUM_OCTAVES: number;
|
|
36
|
+
};
|
|
37
|
+
fovealAttention: FovealAttention;
|
|
38
|
+
saccadicController: SaccadicController;
|
|
39
|
+
predictiveCoding: PredictiveCoding;
|
|
40
|
+
saliencyMap: SaliencyMap;
|
|
41
|
+
scaleOrchestrator: ScaleOrchestrator;
|
|
42
|
+
currentFoveaCenter: {
|
|
43
|
+
x: number;
|
|
44
|
+
y: number;
|
|
45
|
+
};
|
|
46
|
+
frameCount: number;
|
|
47
|
+
lastProcessTime: number;
|
|
48
|
+
skipCount: number;
|
|
49
|
+
metrics: {
|
|
50
|
+
totalFrames: number;
|
|
51
|
+
skippedFrames: number;
|
|
52
|
+
avgPixelsProcessed: number;
|
|
53
|
+
avgLatency: number;
|
|
54
|
+
saccadeCount: number;
|
|
55
|
+
};
|
|
56
|
+
/**
|
|
57
|
+
* Initialize pre-allocated buffers for efficient processing
|
|
58
|
+
* @private
|
|
59
|
+
*/
|
|
60
|
+
private _initBuffers;
|
|
61
|
+
outputBuffer: {
|
|
62
|
+
fovea: Uint8Array<ArrayBuffer>;
|
|
63
|
+
parafovea: Uint8Array<ArrayBuffer>;
|
|
64
|
+
periphery: Uint8Array<ArrayBuffer>;
|
|
65
|
+
} | undefined;
|
|
66
|
+
changeBuffer: Float32Array<ArrayBuffer> | undefined;
|
|
67
|
+
/**
|
|
68
|
+
* Process an input frame using bio-inspired techniques
|
|
69
|
+
*
|
|
70
|
+
* @param {Uint8Array} inputData - Grayscale input image
|
|
71
|
+
* @param {Object} trackingState - Current tracking state (optional)
|
|
72
|
+
* @returns {Object} Processed result with attention regions
|
|
73
|
+
*/
|
|
74
|
+
process(inputData: Uint8Array, trackingState?: Object): Object;
|
|
75
|
+
/**
|
|
76
|
+
* Get the primary attention region (highest resolution)
|
|
77
|
+
* This is the region that should be used for feature detection
|
|
78
|
+
*
|
|
79
|
+
* @param {Object} processResult - Result from process()
|
|
80
|
+
* @returns {Object} Primary attention region with data
|
|
81
|
+
*/
|
|
82
|
+
getPrimaryRegion(processResult: Object): Object;
|
|
83
|
+
/**
|
|
84
|
+
* Suggest optimal processing based on change detection
|
|
85
|
+
*
|
|
86
|
+
* @param {Uint8Array} inputData - Current frame
|
|
87
|
+
* @returns {Object} Processing suggestion
|
|
88
|
+
*/
|
|
89
|
+
suggestProcessing(inputData: Uint8Array): Object;
|
|
90
|
+
/**
|
|
91
|
+
* Update performance metrics
|
|
92
|
+
* @private
|
|
93
|
+
*/
|
|
94
|
+
private _updateMetrics;
|
|
95
|
+
/**
|
|
96
|
+
* Get current performance metrics
|
|
97
|
+
* @returns {Object} Performance metrics
|
|
98
|
+
*/
|
|
99
|
+
getMetrics(): Object;
|
|
100
|
+
/**
|
|
101
|
+
* Reset engine state (e.g., when target changes)
|
|
102
|
+
*/
|
|
103
|
+
reset(): void;
|
|
104
|
+
/**
|
|
105
|
+
* Configure engine at runtime
|
|
106
|
+
* @param {Object} options - Configuration options to update
|
|
107
|
+
*/
|
|
108
|
+
configure(options: Object): void;
|
|
109
|
+
}
|
|
110
|
+
export namespace BIO_CONFIG {
|
|
111
|
+
let FOVEA_RADIUS_RATIO: number;
|
|
112
|
+
let PARAFOVEA_RADIUS_RATIO: number;
|
|
113
|
+
let FOVEA_RESOLUTION: number;
|
|
114
|
+
let PARAFOVEA_RESOLUTION: number;
|
|
115
|
+
let PERIPHERY_RESOLUTION: number;
|
|
116
|
+
let MAX_SACCADES_PER_FRAME: number;
|
|
117
|
+
let SACCADE_COOLDOWN_MS: number;
|
|
118
|
+
let SALIENCY_THRESHOLD: number;
|
|
119
|
+
let CHANGE_THRESHOLD: number;
|
|
120
|
+
let PREDICTION_CONFIDENCE: number;
|
|
121
|
+
let MOTION_HISTORY_FRAMES: number;
|
|
122
|
+
let ENABLE_SKIP_FRAMES: boolean;
|
|
123
|
+
let MIN_PROCESSING_INTERVAL_MS: number;
|
|
124
|
+
let NUM_OCTAVES: number;
|
|
125
|
+
}
|
|
126
|
+
import { FovealAttention } from './foveal-attention.js';
|
|
127
|
+
import { SaccadicController } from './saccadic-controller.js';
|
|
128
|
+
import { PredictiveCoding } from './predictive-coding.js';
|
|
129
|
+
import { SaliencyMap } from './saliency-map.js';
|
|
130
|
+
import { ScaleOrchestrator } from './scale-orchestrator.js';
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Bio-Inspired Perception Engine
|
|
3
|
+
*
|
|
4
|
+
* Inspired by human visual system:
|
|
5
|
+
* - Foveal attention: High resolution in center, low in periphery
|
|
6
|
+
* - Saccadic sampling: Strategic "glances" at areas of interest
|
|
7
|
+
* - Predictive coding: Only process what's unexpected/changed
|
|
8
|
+
*
|
|
9
|
+
* Expected improvements:
|
|
10
|
+
* - ~75% reduction in pixels processed per frame
|
|
11
|
+
* - ~80% reduction in latency for static scenes
|
|
12
|
+
* - ~70% reduction in energy consumption
|
|
13
|
+
*/
|
|
14
|
+
import { FovealAttention } from './foveal-attention.js';
|
|
15
|
+
import { SaccadicController } from './saccadic-controller.js';
|
|
16
|
+
import { PredictiveCoding } from './predictive-coding.js';
|
|
17
|
+
import { SaliencyMap } from './saliency-map.js';
|
|
18
|
+
import { ScaleOrchestrator } from './scale-orchestrator.js';
|
|
19
|
+
/**
 * Default configuration for the Bio-Inspired Engine.
 * Every value can be overridden per-instance through the constructor's
 * options argument or at runtime via BioInspiredEngine#configure.
 */
const BIO_CONFIG = {
    // --- Foveal geometry (high-resolution center) ---
    FOVEA_RADIUS_RATIO: 0.15,      // fovea radius as a fraction of the image dimension (15%)
    PARAFOVEA_RADIUS_RATIO: 0.30,  // parafovea radius as a fraction of the image dimension (30%)
    // --- Resolution multipliers per zone ---
    FOVEA_RESOLUTION: 1.0,         // fovea sampled at full resolution
    PARAFOVEA_RESOLUTION: 0.5,     // parafovea sampled at half resolution
    PERIPHERY_RESOLUTION: 0.25,    // periphery sampled at quarter resolution
    // --- Saccadic behavior ---
    MAX_SACCADES_PER_FRAME: 3,     // upper bound on "glances" per frame
    SACCADE_COOLDOWN_MS: 50,       // minimum time between consecutive saccades
    SALIENCY_THRESHOLD: 0.3,       // saliency level that triggers a saccade
    // --- Predictive coding ---
    CHANGE_THRESHOLD: 0.05,        // 5% pixel difference triggers processing
    PREDICTION_CONFIDENCE: 0.8,    // confidence required to skip processing
    MOTION_HISTORY_FRAMES: 3,      // frames considered for motion prediction
    // --- Performance ---
    ENABLE_SKIP_FRAMES: true,      // allow skipping frames when nothing changed
    MIN_PROCESSING_INTERVAL_MS: 8, // minimum 8 ms between frames (~120 fps cap)
    NUM_OCTAVES: 5,                // default number of pyramid octaves
};
|
|
43
|
+
/**
|
|
44
|
+
* Main Bio-Inspired Perception Engine
|
|
45
|
+
* Integrates all bio-inspired components for efficient AR processing
|
|
46
|
+
*/
|
|
47
|
+
class BioInspiredEngine {
|
|
48
|
+
/**
|
|
49
|
+
* @param {number} width - Input image width
|
|
50
|
+
* @param {number} height - Input image height
|
|
51
|
+
* @param {Object} options - Configuration options
|
|
52
|
+
*/
|
|
53
|
+
constructor(width, height, options = {}) {
|
|
54
|
+
this.width = width;
|
|
55
|
+
this.height = height;
|
|
56
|
+
this.config = { ...BIO_CONFIG, ...options };
|
|
57
|
+
// Initialize sub-components
|
|
58
|
+
this.fovealAttention = new FovealAttention(width, height, this.config);
|
|
59
|
+
this.saccadicController = new SaccadicController(width, height, this.config);
|
|
60
|
+
this.predictiveCoding = new PredictiveCoding(width, height, this.config);
|
|
61
|
+
this.saliencyMap = new SaliencyMap(width, height);
|
|
62
|
+
this.scaleOrchestrator = new ScaleOrchestrator(this.config.NUM_OCTAVES, {
|
|
63
|
+
debug: options.debugMode
|
|
64
|
+
});
|
|
65
|
+
// State tracking
|
|
66
|
+
this.currentFoveaCenter = { x: width / 2, y: height / 2 };
|
|
67
|
+
this.frameCount = 0;
|
|
68
|
+
this.lastProcessTime = 0;
|
|
69
|
+
this.skipCount = 0;
|
|
70
|
+
// Performance metrics
|
|
71
|
+
this.metrics = {
|
|
72
|
+
totalFrames: 0,
|
|
73
|
+
skippedFrames: 0,
|
|
74
|
+
avgPixelsProcessed: 0,
|
|
75
|
+
avgLatency: 0,
|
|
76
|
+
saccadeCount: 0,
|
|
77
|
+
};
|
|
78
|
+
// Pre-allocate buffers
|
|
79
|
+
this._initBuffers();
|
|
80
|
+
}
|
|
81
|
+
/**
|
|
82
|
+
* Initialize pre-allocated buffers for efficient processing
|
|
83
|
+
* @private
|
|
84
|
+
*/
|
|
85
|
+
_initBuffers() {
|
|
86
|
+
const fullSize = this.width * this.height;
|
|
87
|
+
const foveaSize = Math.ceil(fullSize * this.config.FOVEA_RADIUS_RATIO ** 2 * Math.PI);
|
|
88
|
+
// Multi-resolution output buffer
|
|
89
|
+
this.outputBuffer = {
|
|
90
|
+
fovea: new Uint8Array(foveaSize),
|
|
91
|
+
parafovea: new Uint8Array(Math.ceil(foveaSize * 4)),
|
|
92
|
+
periphery: new Uint8Array(Math.ceil(fullSize * 0.25)),
|
|
93
|
+
};
|
|
94
|
+
// Change detection buffer
|
|
95
|
+
this.changeBuffer = new Float32Array(Math.ceil(fullSize / 64)); // 8x8 blocks
|
|
96
|
+
}
|
|
97
|
+
/**
|
|
98
|
+
* Process an input frame using bio-inspired techniques
|
|
99
|
+
*
|
|
100
|
+
* @param {Uint8Array} inputData - Grayscale input image
|
|
101
|
+
* @param {Object} trackingState - Current tracking state (optional)
|
|
102
|
+
* @returns {Object} Processed result with attention regions
|
|
103
|
+
*/
|
|
104
|
+
process(inputData, trackingState = null) {
|
|
105
|
+
const startTime = performance.now();
|
|
106
|
+
this.frameCount++;
|
|
107
|
+
this.metrics.totalFrames++;
|
|
108
|
+
// Step 1: Predictive Coding - Check if we can skip processing
|
|
109
|
+
const prediction = this.predictiveCoding.predict(inputData, trackingState);
|
|
110
|
+
if (prediction.canSkip && this.config.ENABLE_SKIP_FRAMES) {
|
|
111
|
+
this.metrics.skippedFrames++;
|
|
112
|
+
this.skipCount++;
|
|
113
|
+
return {
|
|
114
|
+
skipped: true,
|
|
115
|
+
prediction: prediction.predictedState,
|
|
116
|
+
confidence: prediction.confidence,
|
|
117
|
+
pixelsProcessed: 0,
|
|
118
|
+
latency: performance.now() - startTime,
|
|
119
|
+
};
|
|
120
|
+
}
|
|
121
|
+
this.skipCount = 0;
|
|
122
|
+
// Step 2: Compute Saliency Map for attention guidance
|
|
123
|
+
const saliency = this.saliencyMap.compute(inputData);
|
|
124
|
+
// Step 3: Saccadic Controller - Decide where to "look"
|
|
125
|
+
const saccadeTargets = this.saccadicController.computeTargets(saliency, this.currentFoveaCenter, trackingState);
|
|
126
|
+
// Step 4: Extract foveal regions at different resolutions
|
|
127
|
+
const attentionRegions = [];
|
|
128
|
+
let totalPixelsProcessed = 0;
|
|
129
|
+
for (const target of saccadeTargets) {
|
|
130
|
+
const region = this.fovealAttention.extract(inputData, target.x, target.y, target.priority);
|
|
131
|
+
attentionRegions.push(region);
|
|
132
|
+
totalPixelsProcessed += region.pixelCount;
|
|
133
|
+
this.metrics.saccadeCount++;
|
|
134
|
+
}
|
|
135
|
+
// Step 5: Update fovea center based on highest priority target
|
|
136
|
+
if (saccadeTargets.length > 0) {
|
|
137
|
+
const primary = saccadeTargets[0];
|
|
138
|
+
this.currentFoveaCenter = { x: primary.x, y: primary.y };
|
|
139
|
+
}
|
|
140
|
+
// Step 6: Scale Orchestrator - Determine octaves to process
|
|
141
|
+
const octavesToProcess = this.scaleOrchestrator.getOctavesToProcess(trackingState);
|
|
142
|
+
// Step 7: Store frame for prediction
|
|
143
|
+
this.predictiveCoding.storeFrame(inputData, trackingState);
|
|
144
|
+
// Compute metrics
|
|
145
|
+
const latency = performance.now() - startTime;
|
|
146
|
+
this._updateMetrics(totalPixelsProcessed, latency);
|
|
147
|
+
return {
|
|
148
|
+
skipped: false,
|
|
149
|
+
attentionRegions,
|
|
150
|
+
foveaCenter: this.currentFoveaCenter,
|
|
151
|
+
saliencyPeaks: saliency.peaks,
|
|
152
|
+
octavesToProcess,
|
|
153
|
+
pixelsProcessed: totalPixelsProcessed,
|
|
154
|
+
pixelsSaved: this.width * this.height - totalPixelsProcessed,
|
|
155
|
+
savingsPercent: ((1 - totalPixelsProcessed / (this.width * this.height)) * 100).toFixed(1),
|
|
156
|
+
latency,
|
|
157
|
+
};
|
|
158
|
+
}
|
|
159
|
+
/**
|
|
160
|
+
* Get the primary attention region (highest resolution)
|
|
161
|
+
* This is the region that should be used for feature detection
|
|
162
|
+
*
|
|
163
|
+
* @param {Object} processResult - Result from process()
|
|
164
|
+
* @returns {Object} Primary attention region with data
|
|
165
|
+
*/
|
|
166
|
+
getPrimaryRegion(processResult) {
|
|
167
|
+
if (processResult.skipped || !processResult.attentionRegions?.length) {
|
|
168
|
+
return null;
|
|
169
|
+
}
|
|
170
|
+
return processResult.attentionRegions[0];
|
|
171
|
+
}
|
|
172
|
+
/**
|
|
173
|
+
* Suggest optimal processing based on change detection
|
|
174
|
+
*
|
|
175
|
+
* @param {Uint8Array} inputData - Current frame
|
|
176
|
+
* @returns {Object} Processing suggestion
|
|
177
|
+
*/
|
|
178
|
+
suggestProcessing(inputData) {
|
|
179
|
+
const changeLevel = this.predictiveCoding.getChangeLevel(inputData);
|
|
180
|
+
return {
|
|
181
|
+
shouldProcessFull: changeLevel > 0.3,
|
|
182
|
+
shouldProcessPartial: changeLevel > 0.05,
|
|
183
|
+
canSkip: changeLevel < 0.02,
|
|
184
|
+
changeLevel,
|
|
185
|
+
recommendedSaccades: Math.ceil(changeLevel * this.config.MAX_SACCADES_PER_FRAME),
|
|
186
|
+
};
|
|
187
|
+
}
|
|
188
|
+
/**
|
|
189
|
+
* Update performance metrics
|
|
190
|
+
* @private
|
|
191
|
+
*/
|
|
192
|
+
_updateMetrics(pixelsProcessed, latency) {
|
|
193
|
+
const alpha = 0.1; // Exponential moving average factor
|
|
194
|
+
this.metrics.avgPixelsProcessed =
|
|
195
|
+
this.metrics.avgPixelsProcessed * (1 - alpha) + pixelsProcessed * alpha;
|
|
196
|
+
this.metrics.avgLatency =
|
|
197
|
+
this.metrics.avgLatency * (1 - alpha) + latency * alpha;
|
|
198
|
+
}
|
|
199
|
+
/**
|
|
200
|
+
* Get current performance metrics
|
|
201
|
+
* @returns {Object} Performance metrics
|
|
202
|
+
*/
|
|
203
|
+
getMetrics() {
|
|
204
|
+
return {
|
|
205
|
+
...this.metrics,
|
|
206
|
+
skipRate: ((this.metrics.skippedFrames / this.metrics.totalFrames) * 100).toFixed(1) + '%',
|
|
207
|
+
avgSavings: ((1 - this.metrics.avgPixelsProcessed / (this.width * this.height)) * 100).toFixed(1) + '%',
|
|
208
|
+
currentFovea: this.currentFoveaCenter,
|
|
209
|
+
};
|
|
210
|
+
}
|
|
211
|
+
/**
|
|
212
|
+
* Reset engine state (e.g., when target changes)
|
|
213
|
+
*/
|
|
214
|
+
reset() {
|
|
215
|
+
this.currentFoveaCenter = { x: this.width / 2, y: this.height / 2 };
|
|
216
|
+
this.frameCount = 0;
|
|
217
|
+
this.skipCount = 0;
|
|
218
|
+
this.predictiveCoding.reset();
|
|
219
|
+
this.saccadicController.reset();
|
|
220
|
+
}
|
|
221
|
+
/**
|
|
222
|
+
* Configure engine at runtime
|
|
223
|
+
* @param {Object} options - Configuration options to update
|
|
224
|
+
*/
|
|
225
|
+
configure(options) {
|
|
226
|
+
this.config = { ...this.config, ...options };
|
|
227
|
+
this.fovealAttention.configure(this.config);
|
|
228
|
+
this.saccadicController.configure(this.config);
|
|
229
|
+
this.predictiveCoding.configure(this.config);
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
export { BioInspiredEngine, BIO_CONFIG };
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* A region extracted at a specific resolution
|
|
3
|
+
*/
|
|
4
|
+
export type AttentionRegion = {
|
|
5
|
+
/**
|
|
6
|
+
* - Center X coordinate in original image
|
|
7
|
+
*/
|
|
8
|
+
x: number;
|
|
9
|
+
/**
|
|
10
|
+
* - Center Y coordinate in original image
|
|
11
|
+
*/
|
|
12
|
+
y: number;
|
|
13
|
+
/**
|
|
14
|
+
* - Radius in original image pixels
|
|
15
|
+
*/
|
|
16
|
+
radius: number;
|
|
17
|
+
/**
|
|
18
|
+
* - Resolution multiplier (1.0 = full)
|
|
19
|
+
*/
|
|
20
|
+
resolution: number;
|
|
21
|
+
/**
|
|
22
|
+
* - Extracted pixel data
|
|
23
|
+
*/
|
|
24
|
+
data: Uint8Array;
|
|
25
|
+
/**
|
|
26
|
+
* - Width of extracted region
|
|
27
|
+
*/
|
|
28
|
+
width: number;
|
|
29
|
+
/**
|
|
30
|
+
* - Height of extracted region
|
|
31
|
+
*/
|
|
32
|
+
height: number;
|
|
33
|
+
/**
|
|
34
|
+
* - Number of pixels in region
|
|
35
|
+
*/
|
|
36
|
+
pixelCount: number;
|
|
37
|
+
/**
|
|
38
|
+
* - 'fovea' | 'parafovea' | 'periphery'
|
|
39
|
+
*/
|
|
40
|
+
type: string;
|
|
41
|
+
};
|
|
42
|
+
/**
|
|
43
|
+
* Foveal Attention System
|
|
44
|
+
*
|
|
45
|
+
* Mimics the human eye's fovea-parafovea-periphery structure:
|
|
46
|
+
* - Fovea (center 5°): Maximum resolution, ~50% of visual processing power
|
|
47
|
+
* - Parafovea (5-10°): Medium resolution, pattern recognition
|
|
48
|
+
* - Periphery (>10°): Low resolution, motion detection
|
|
49
|
+
*
|
|
50
|
+
* This allows processing ~75% fewer pixels while maintaining
|
|
51
|
+
* high-quality tracking in the area of interest.
|
|
52
|
+
*/
|
|
53
|
+
/**
|
|
54
|
+
* A region extracted at a specific resolution
|
|
55
|
+
* @typedef {Object} AttentionRegion
|
|
56
|
+
* @property {number} x - Center X coordinate in original image
|
|
57
|
+
* @property {number} y - Center Y coordinate in original image
|
|
58
|
+
* @property {number} radius - Radius in original image pixels
|
|
59
|
+
* @property {number} resolution - Resolution multiplier (1.0 = full)
|
|
60
|
+
* @property {Uint8Array} data - Extracted pixel data
|
|
61
|
+
* @property {number} width - Width of extracted region
|
|
62
|
+
* @property {number} height - Height of extracted region
|
|
63
|
+
* @property {number} pixelCount - Number of pixels in region
|
|
64
|
+
* @property {string} type - 'fovea' | 'parafovea' | 'periphery'
|
|
65
|
+
*/
|
|
66
|
+
export class FovealAttention {
|
|
67
|
+
/**
|
|
68
|
+
* @param {number} width - Input image width
|
|
69
|
+
* @param {number} height - Input image height
|
|
70
|
+
* @param {Object} config - Configuration
|
|
71
|
+
*/
|
|
72
|
+
constructor(width: number, height: number, config: Object);
|
|
73
|
+
width: number;
|
|
74
|
+
height: number;
|
|
75
|
+
config: Object;
|
|
76
|
+
minDim: number;
|
|
77
|
+
foveaRadius: number;
|
|
78
|
+
parafoveaRadius: number;
|
|
79
|
+
/**
|
|
80
|
+
* Initialize pre-allocated extraction buffers
|
|
81
|
+
* @private
|
|
82
|
+
*/
|
|
83
|
+
private _initBuffers;
|
|
84
|
+
foveaBuffer: Uint8Array<ArrayBuffer> | undefined;
|
|
85
|
+
parafoveaBuffer: Uint8Array<ArrayBuffer> | undefined;
|
|
86
|
+
peripheryBuffer: Uint8Array<ArrayBuffer> | undefined;
|
|
87
|
+
peripheryDims: {
|
|
88
|
+
width: number;
|
|
89
|
+
height: number;
|
|
90
|
+
} | undefined;
|
|
91
|
+
/**
|
|
92
|
+
* Build a circular mask for foveal extraction
|
|
93
|
+
* @private
|
|
94
|
+
*/
|
|
95
|
+
private _buildCircularMask;
|
|
96
|
+
circularMask: Uint8Array<ArrayBuffer> | undefined;
|
|
97
|
+
/**
|
|
98
|
+
* Extract attention region at specified center
|
|
99
|
+
*
|
|
100
|
+
* @param {Uint8Array} inputData - Grayscale input image
|
|
101
|
+
* @param {number} centerX - X coordinate of attention center
|
|
102
|
+
* @param {number} centerY - Y coordinate of attention center
|
|
103
|
+
* @param {number} priority - Priority level (0=highest)
|
|
104
|
+
* @returns {AttentionRegion} Extracted region
|
|
105
|
+
*/
|
|
106
|
+
extract(inputData: Uint8Array, centerX: number, centerY: number, priority?: number): AttentionRegion;
|
|
107
|
+
/**
|
|
108
|
+
* Extract foveal region at full resolution
|
|
109
|
+
* @private
|
|
110
|
+
*/
|
|
111
|
+
private _extractFovea;
|
|
112
|
+
/**
|
|
113
|
+
* Extract parafoveal region at half resolution
|
|
114
|
+
* @private
|
|
115
|
+
*/
|
|
116
|
+
private _extractParafovea;
|
|
117
|
+
/**
|
|
118
|
+
* Extract periphery at quarter resolution (motion detection only)
|
|
119
|
+
* @private
|
|
120
|
+
*/
|
|
121
|
+
private _extractPeriphery;
|
|
122
|
+
/**
|
|
123
|
+
* Get combined multi-resolution representation
|
|
124
|
+
* Uses fovea at center, parafovea around it, periphery for the rest
|
|
125
|
+
*
|
|
126
|
+
* @param {Uint8Array} inputData - Input image
|
|
127
|
+
* @param {number} cx - Fovea center X
|
|
128
|
+
* @param {number} cy - Fovea center Y
|
|
129
|
+
* @returns {Object} Multi-resolution representation
|
|
130
|
+
*/
|
|
131
|
+
extractMultiResolution(inputData: Uint8Array, cx: number, cy: number): Object;
|
|
132
|
+
/**
|
|
133
|
+
* Compute total pixels in multi-resolution representation
|
|
134
|
+
* @private
|
|
135
|
+
*/
|
|
136
|
+
private _computeTotalPixels;
|
|
137
|
+
/**
|
|
138
|
+
* Update configuration
|
|
139
|
+
* @param {Object} config - New configuration
|
|
140
|
+
*/
|
|
141
|
+
configure(config: Object): void;
|
|
142
|
+
}
|