@srsergio/taptapp-ar 1.0.92 → 1.0.94

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +16 -14
  2. package/dist/compiler/offline-compiler.d.ts +3 -3
  3. package/dist/compiler/offline-compiler.js +50 -33
  4. package/dist/core/constants.d.ts +2 -0
  5. package/dist/core/constants.js +4 -1
  6. package/dist/core/detector/detector-lite.d.ts +6 -5
  7. package/dist/core/detector/detector-lite.js +46 -16
  8. package/dist/core/image-list.d.ts +24 -6
  9. package/dist/core/image-list.js +4 -4
  10. package/dist/core/matching/matcher.d.ts +1 -1
  11. package/dist/core/matching/matcher.js +7 -4
  12. package/dist/core/matching/matching.d.ts +2 -1
  13. package/dist/core/matching/matching.js +43 -11
  14. package/dist/core/perception/bio-inspired-engine.d.ts +130 -0
  15. package/dist/core/perception/bio-inspired-engine.js +232 -0
  16. package/dist/core/perception/foveal-attention.d.ts +142 -0
  17. package/dist/core/perception/foveal-attention.js +280 -0
  18. package/dist/core/perception/index.d.ts +6 -0
  19. package/dist/core/perception/index.js +17 -0
  20. package/dist/core/perception/predictive-coding.d.ts +92 -0
  21. package/dist/core/perception/predictive-coding.js +278 -0
  22. package/dist/core/perception/saccadic-controller.d.ts +126 -0
  23. package/dist/core/perception/saccadic-controller.js +269 -0
  24. package/dist/core/perception/saliency-map.d.ts +74 -0
  25. package/dist/core/perception/saliency-map.js +254 -0
  26. package/dist/core/perception/scale-orchestrator.d.ts +28 -0
  27. package/dist/core/perception/scale-orchestrator.js +68 -0
  28. package/dist/core/protocol.d.ts +14 -1
  29. package/dist/core/protocol.js +33 -1
  30. package/dist/runtime/bio-inspired-controller.d.ts +135 -0
  31. package/dist/runtime/bio-inspired-controller.js +358 -0
  32. package/dist/runtime/controller.d.ts +11 -2
  33. package/dist/runtime/controller.js +20 -8
  34. package/dist/runtime/controller.worker.js +2 -2
  35. package/package.json +1 -1
  36. package/src/compiler/offline-compiler.ts +56 -36
  37. package/src/core/constants.ts +5 -1
  38. package/src/core/detector/detector-lite.js +46 -16
  39. package/src/core/image-list.js +4 -4
  40. package/src/core/matching/matcher.js +8 -4
  41. package/src/core/matching/matching.js +51 -12
  42. package/src/core/perception/bio-inspired-engine.js +275 -0
  43. package/src/core/perception/foveal-attention.js +306 -0
  44. package/src/core/perception/index.js +18 -0
  45. package/src/core/perception/predictive-coding.js +327 -0
  46. package/src/core/perception/saccadic-controller.js +303 -0
  47. package/src/core/perception/saliency-map.js +296 -0
  48. package/src/core/perception/scale-orchestrator.js +80 -0
  49. package/src/core/protocol.ts +38 -1
  50. package/src/runtime/bio-inspired-controller.ts +448 -0
  51. package/src/runtime/controller.ts +22 -7
  52. package/src/runtime/controller.worker.js +2 -1
@@ -0,0 +1,254 @@
/**
 * Saliency Map Computation
 *
 * Computes visual saliency - regions that "pop out" and attract attention.
 * Used to guide saccadic attention to visually important areas.
 *
 * Implements a simplified Itti-Koch saliency model with two channels:
 * - Intensity contrast (center-surround)
 * - Edge strength (Sobel gradient magnitude)
 * The combined map is peak-normalized to [0, 1] and its dominant local
 * maxima are reported (after non-maximum suppression) in original-image
 * pixel coordinates.
 *
 * For AR tracking, high-saliency regions often contain corners, edges and
 * texture-rich areas that are good candidates for feature detection.
 */
class SaliencyMap {
    /**
     * @param {number} width - Image width
     * @param {number} height - Image height
     */
    constructor(width, height) {
        this.width = width;
        this.height = height;
        // All feature maps are computed at 1/8 resolution for efficiency.
        this.scale = 8;
        this.scaledW = Math.ceil(width / this.scale);
        this.scaledH = Math.ceil(height / this.scale);
        // Buffers are allocated once and reused across frames.
        const cellCount = this.scaledW * this.scaledH;
        this.intensityMap = new Float32Array(cellCount);
        this.contrastMap = new Float32Array(cellCount);
        this.edgeMap = new Float32Array(cellCount);
        this.saliencyBuffer = new Float32Array(cellCount);
        // Peak detection parameters: at most maxPeaks peaks per frame,
        // separated by suppressionRadius (in downsampled-grid cells).
        this.maxPeaks = 5;
        this.suppressionRadius = Math.max(this.scaledW, this.scaledH) * 0.15;
    }
    /**
     * Compute saliency map for input image
     *
     * @param {Uint8Array} inputData - Grayscale input image (width * height)
     * @returns {Object} { map, width, height, peaks, maxSaliency }
     */
    compute(inputData) {
        // Step 1: Downsample and compute intensity
        this._downsample(inputData);
        // Step 2: Compute feature channels
        this._computeContrast();
        this._computeEdges();
        // Step 3: Combine into normalized saliency map
        this._combineSaliency();
        // Step 4: Find peaks (NMS-filtered attention targets)
        const peaks = this._findPeaks();
        return {
            map: this.saliencyBuffer,
            width: this.scaledW,
            height: this.scaledH,
            peaks,
            maxSaliency: peaks.length > 0 ? peaks[0].value : 0,
        };
    }
    /**
     * Box-average the full-resolution frame down to the working grid,
     * normalizing 0-255 intensities to [0, 1].
     * @private
     */
    _downsample(inputData) {
        const s = this.scale;
        const w = this.width;
        for (let sy = 0; sy < this.scaledH; sy++) {
            const yStart = sy * s;
            const yEnd = Math.min(yStart + s, this.height);
            for (let sx = 0; sx < this.scaledW; sx++) {
                const xStart = sx * s;
                const xEnd = Math.min(xStart + s, this.width);
                let sum = 0;
                let count = 0;
                for (let y = yStart; y < yEnd; y++) {
                    const rowOffset = y * w;
                    for (let x = xStart; x < xEnd; x++) {
                        sum += inputData[rowOffset + x];
                        count++;
                    }
                }
                // Average of the (possibly clipped) block, scaled to [0, 1].
                this.intensityMap[sy * this.scaledW + sx] = sum / count / 255;
            }
        }
    }
    /**
     * Center-surround contrast: |center - mean of 8 neighbors| per cell.
     * Border cells are zeroed because their neighborhood is incomplete.
     * @private
     */
    _computeContrast() {
        const w = this.scaledW;
        const h = this.scaledH;
        const intensity = this.intensityMap;
        const contrast = this.contrastMap;
        // 8-neighbor offsets in reading order (keeps float accumulation
        // order identical to a hand-unrolled 3x3 visit).
        const ring = [-w - 1, -w, -w + 1, -1, 1, w - 1, w, w + 1];
        for (let y = 1; y < h - 1; y++) {
            for (let x = 1; x < w - 1; x++) {
                const idx = y * w + x;
                let surround = 0;
                for (const off of ring) {
                    surround += intensity[idx + off];
                }
                surround /= 8;
                // Contrast is absolute center/surround difference
                contrast[idx] = Math.abs(intensity[idx] - surround);
            }
        }
        // Zero the border frame
        for (let y = 0; y < h; y++) {
            contrast[y * w] = 0;
            contrast[y * w + w - 1] = 0;
        }
        for (let x = 0; x < w; x++) {
            contrast[x] = 0;
            contrast[(h - 1) * w + x] = 0;
        }
    }
    /**
     * Edge strength per cell: Sobel gradient magnitude, normalized by 4.
     * Border cells are zeroed.
     * @private
     */
    _computeEdges() {
        const w = this.scaledW;
        const h = this.scaledH;
        const intensity = this.intensityMap;
        const edges = this.edgeMap;
        for (let y = 1; y < h - 1; y++) {
            for (let x = 1; x < w - 1; x++) {
                const tl = intensity[(y - 1) * w + (x - 1)];
                const tc = intensity[(y - 1) * w + x];
                const tr = intensity[(y - 1) * w + (x + 1)];
                const ml = intensity[y * w + (x - 1)];
                const mr = intensity[y * w + (x + 1)];
                const bl = intensity[(y + 1) * w + (x - 1)];
                const bc = intensity[(y + 1) * w + x];
                const br = intensity[(y + 1) * w + (x + 1)];
                // Horizontal / vertical Sobel responses (accumulation order
                // matches the unrolled form for float reproducibility).
                const gx = -tl + tr + -2 * ml + 2 * mr + -bl + br;
                const gy = -tl - 2 * tc - tr + bl + 2 * bc + br;
                edges[y * w + x] = Math.sqrt(gx * gx + gy * gy) / 4; // Normalize
            }
        }
        // Zero the border frame
        for (let y = 0; y < h; y++) {
            edges[y * w] = 0;
            edges[y * w + w - 1] = 0;
        }
        for (let x = 0; x < w; x++) {
            edges[x] = 0;
            edges[(h - 1) * w + x] = 0;
        }
    }
    /**
     * Blend contrast (60%) and edges (40%), then peak-normalize so the
     * strongest cell equals 1. An all-zero map is left untouched.
     * @private
     */
    _combineSaliency() {
        const n = this.saliencyBuffer.length;
        const contrast = this.contrastMap;
        const edges = this.edgeMap;
        const saliency = this.saliencyBuffer;
        // Weight: 60% contrast, 40% edges
        for (let i = 0; i < n; i++) {
            saliency[i] = contrast[i] * 0.6 + edges[i] * 0.4;
        }
        // Normalize to [0, 1] using the stored (float32-rounded) values so
        // the maximum cell divides to exactly 1.
        let max = 0;
        for (let i = 0; i < n; i++) {
            max = Math.max(max, saliency[i]);
        }
        if (max > 0) {
            for (let i = 0; i < n; i++) {
                saliency[i] /= max;
            }
        }
    }
    /**
     * Find peaks in the saliency map using non-maximum suppression.
     *
     * BUGFIX: suppression distances were previously computed between a
     * candidate in downsampled-grid units and already-accepted peaks in
     * original-image pixel units, so the suppression radius (which is in
     * grid units) was effectively never applied correctly and nearby peaks
     * could cluster. NMS is now performed entirely in grid space; peaks are
     * converted to image coordinates only on output.
     * @private
     */
    _findPeaks() {
        const w = this.scaledW;
        const h = this.scaledH;
        const saliency = this.saliencyBuffer;
        const r = this.suppressionRadius;
        const r2 = r * r;
        const ring = [-w - 1, -w, -w + 1, -1, 1, w - 1, w, w + 1];
        // Collect strict local maxima (8-connected)
        const candidates = [];
        for (let y = 1; y < h - 1; y++) {
            for (let x = 1; x < w - 1; x++) {
                const idx = y * w + x;
                const val = saliency[idx];
                let isMax = true;
                for (const off of ring) {
                    if (val <= saliency[idx + off]) {
                        isMax = false;
                        break;
                    }
                }
                if (isMax) {
                    candidates.push({ x, y, value: val });
                }
            }
        }
        // Sort by value descending (Array.prototype.sort is stable, so
        // equal-valued candidates keep scan order)
        candidates.sort((a, b) => b.value - a.value);
        // Greedy non-maximum suppression in GRID coordinates
        const accepted = []; // grid-space coords of accepted peaks
        const peaks = [];
        for (const cand of candidates) {
            if (peaks.length >= this.maxPeaks) {
                break;
            }
            let suppress = false;
            for (const prev of accepted) {
                const dx = cand.x - prev.x;
                const dy = cand.y - prev.y;
                if (dx * dx + dy * dy < r2) {
                    suppress = true;
                    break;
                }
            }
            if (!suppress) {
                accepted.push(cand);
                // Convert cell center to original image coordinates
                peaks.push({
                    x: (cand.x + 0.5) * this.scale,
                    y: (cand.y + 0.5) * this.scale,
                    value: cand.value,
                });
            }
        }
        return peaks;
    }
    /**
     * Get saliency value at a specific location
     *
     * @param {number} x - X coordinate in original image
     * @param {number} y - Y coordinate in original image
     * @returns {number} Saliency value (0-1); 0 when out of bounds
     */
    getSaliencyAt(x, y) {
        const sx = Math.floor(x / this.scale);
        const sy = Math.floor(y / this.scale);
        if (sx < 0 || sx >= this.scaledW || sy < 0 || sy >= this.scaledH) {
            return 0;
        }
        return this.saliencyBuffer[sy * this.scaledW + sx];
    }
}
export { SaliencyMap };
@@ -0,0 +1,28 @@
1
+ /**
2
+ * Scale Orchestrator
3
+ *
4
+ * Manages which octaves should be processed based on the current tracking state.
5
+ * Implements temporal consistency and interleave strategies to optimize performance.
6
+ */
7
+ export class ScaleOrchestrator {
8
+ constructor(numOctaves: any, options?: {});
9
+ numOctaves: any;
10
+ options: {
11
+ interleaveInterval: number;
12
+ hysteresis: number;
13
+ };
14
+ frameCount: number;
15
+ lastActiveOctave: number;
16
+ interleaveOctave: number;
17
+ /**
18
+ * Determine which octaves should be processed in the current frame
19
+ *
20
+ * @param {Object} trackingState - Current state of tracking
21
+ * @returns {number[]} Array of octave indices to process
22
+ */
23
+ getOctavesToProcess(trackingState?: Object): number[];
24
+ /**
25
+ * Reset orchestrator state
26
+ */
27
+ reset(): void;
28
+ }
@@ -0,0 +1,68 @@
/**
 * Scale Orchestrator
 *
 * Manages which octaves should be processed based on the current tracking state.
 * Implements temporal consistency and interleave strategies to optimize performance.
 */
export class ScaleOrchestrator {
    /**
     * @param {number} numOctaves - Total number of octaves in the pyramid
     * @param {Object} [options]
     * @param {number} [options.interleaveInterval=10] - Frames between drift-check octaves
     * @param {number} [options.hysteresis=1] - Adjacent octaves kept around the active one
     * @param {boolean} [options.debug] - Log scheduling decisions
     */
    constructor(numOctaves, options = {}) {
        this.numOctaves = numOctaves;
        this.options = {
            interleaveInterval: 10,
            hysteresis: 1, // Number of adjacent octaves to keep
            ...options
        };
        this.frameCount = 0;
        this.lastActiveOctave = -1;
        this.interleaveOctave = 0;
    }
    /**
     * Determine which octaves should be processed in the current frame
     *
     * @param {Object} trackingState - Current state of tracking
     * @returns {number[]} Sorted array of octave indices to process
     */
    getOctavesToProcess(trackingState = null) {
        this.frameCount++;
        // Case 1: No tracking or lost tracking -> Process all octaves
        if (!trackingState || !trackingState.isTracking || trackingState.activeOctave === undefined) {
            this.lastActiveOctave = -1;
            return Array.from({ length: this.numOctaves }, (_, i) => i);
        }
        const activeScale = trackingState.activeOctave;
        this.lastActiveOctave = activeScale;
        // Case 2: Active tracking -> Focus on current scale and neighbors
        const octaves = new Set();
        // Add current and adjacent scales (Hysteresis)
        for (let i = -this.options.hysteresis; i <= this.options.hysteresis; i++) {
            const octave = activeScale + i;
            if (octave >= 0 && octave < this.numOctaves) {
                octaves.add(octave);
            }
        }
        // Case 3: Interleave - Periodically check a distant octave to ensure we don't "drift"
        if (this.frameCount % this.options.interleaveInterval === 0) {
            // BUGFIX: advance past ALL octaves already scheduled. The previous
            // single-step retry could land on another scheduled octave (e.g.
            // when hysteresis covers consecutive indices), turning the drift
            // check into a no-op. The bounded scan guarantees termination even
            // when every octave is already scheduled (then add() is a no-op).
            let next = (this.interleaveOctave + 1) % this.numOctaves;
            let attempts = 1;
            while (octaves.has(next) && attempts < this.numOctaves) {
                next = (next + 1) % this.numOctaves;
                attempts++;
            }
            this.interleaveOctave = next;
            octaves.add(next);
            if (this.options.debug) {
                console.log(`[ScaleOrchestrator] Interleave check on octave ${this.interleaveOctave}`);
            }
        }
        const result = Array.from(octaves).sort((a, b) => a - b);
        if (this.options.debug) {
            console.log(`[ScaleOrchestrator] Active: ${activeScale}, Processing: [${result.join(', ')}]`);
        }
        return result;
    }
    /**
     * Reset orchestrator state
     */
    reset() {
        this.frameCount = 0;
        this.lastActiveOctave = -1;
    }
}
@@ -1,4 +1,4 @@
1
- export declare const CURRENT_VERSION = 9;
1
+ export declare const CURRENT_VERSION = 11;
2
2
  export declare const HDC_SEED = 322420463;
3
3
  /**
4
4
  * Morton Order calculation for spatial sorting
@@ -24,6 +24,19 @@ export declare function columnarize(points: any[], tree: any, width: number, hei
24
24
  hdc: number;
25
25
  t: any;
26
26
  };
27
+ /**
28
+ * Columnarizes point data with COMPACT 32-bit descriptors (XOR folding)
29
+ * Reduces descriptor storage by 50% with minimal accuracy loss
30
+ */
31
+ export declare function columnarizeCompact(points: any[], tree: any, width: number, height: number): {
32
+ x: Uint16Array<ArrayBuffer>;
33
+ y: Uint16Array<ArrayBuffer>;
34
+ a: Int16Array<ArrayBuffer>;
35
+ s: Uint8Array<ArrayBuffer>;
36
+ d: Uint32Array<ArrayBuffer>;
37
+ compact: number;
38
+ t: any;
39
+ };
27
40
  /**
28
41
  * Compacts hierarchical clustering tree into a minimal array structure
29
42
  */
@@ -1,5 +1,5 @@
1
1
  import * as msgpack from "@msgpack/msgpack";
2
- export const CURRENT_VERSION = 9; // Bumped for HDC support
2
+ export const CURRENT_VERSION = 11; // Bumped for Nanite virtualized features support
3
3
  export const HDC_SEED = 0x1337BEEF; // Default system seed
4
4
  /**
5
5
  * Morton Order calculation for spatial sorting
@@ -91,6 +91,38 @@ export function columnarize(points, tree, width, height, useHDC = false) {
91
91
  t: compactTree(tree.rootNode),
92
92
  };
93
93
  }
/**
 * Columnarizes point data with COMPACT 32-bit descriptors (XOR folding)
 * Reduces descriptor storage by 50% with minimal accuracy loss
 */
export function columnarizeCompact(points, tree, width, height) {
    const total = points.length;
    // Quantized columnar layout: positions normalized to the full uint16
    // range, angle mapped from [-PI, PI] onto int16, scale stored as log2.
    const qx = new Uint16Array(total);
    const qy = new Uint16Array(total);
    const qa = new Int16Array(total);
    const qs = new Uint8Array(total);
    const folded = new Uint32Array(total); // 32-bit compact descriptors
    for (let i = 0; i < total; i++) {
        const p = points[i];
        qx[i] = Math.round((p.x / width) * 65535);
        qy[i] = Math.round((p.y / height) * 65535);
        qa[i] = Math.round((p.angle / Math.PI) * 32767);
        qs[i] = Math.round(Math.log2(p.scale || 1));
        const desc = p.descriptors;
        if (desc && desc.length >= 2) {
            // XOR folding: Combine two 32-bit values into one 32-bit value.
            // This preserves discriminative power while halving storage.
            folded[i] = (desc[0] ^ desc[1]) >>> 0;
        }
    }
    return {
        x: qx,
        y: qy,
        a: qa,
        s: qs,
        d: folded,
        compact: 1, // Flag to indicate compact 32-bit descriptors
        t: compactTree(tree.rootNode),
    };
}
94
126
  /**
95
127
  * Compacts hierarchical clustering tree into a minimal array structure
96
128
  */
@@ -0,0 +1,135 @@
1
+ /**
2
+ * Bio-Inspired Controller Adapter
3
+ *
4
+ * Wraps the standard Controller with Bio-Inspired Perception capabilities.
5
+ * Provides significant performance improvements while maintaining API compatibility.
6
+ *
7
+ * Key features:
8
+ * - Foveal attention: Processes only regions of interest at full resolution
9
+ * - Predictive coding: Skips processing when scene is static
10
+ * - Saccadic sampling: Strategic "glances" at high-saliency regions
11
+ *
12
+ * Usage:
13
+ * ```javascript
14
+ * import { BioInspiredController } from './bio-inspired-controller.js';
15
+ *
16
+ * const controller = new BioInspiredController({
17
+ * inputWidth: 640,
18
+ * inputHeight: 480,
19
+ * onUpdate: (data) => console.log(data),
20
+ * bioInspired: {
21
+ * enabled: true,
22
+ * aggressiveSkipping: true,
23
+ * }
24
+ * });
25
+ * ```
26
+ */
27
+ import { Controller, ControllerOptions } from './controller.js';
28
+ import { BIO_CONFIG } from '../core/perception/index.js';
29
+ /**
30
+ * Extended options for Bio-Inspired Controller
31
+ */
32
+ export interface BioInspiredControllerOptions extends ControllerOptions {
33
+ bioInspired?: {
34
+ enabled?: boolean;
35
+ aggressiveSkipping?: boolean;
36
+ foveaRadiusRatio?: number;
37
+ maxSaccades?: number;
38
+ };
39
+ }
40
+ /**
41
+ * Bio-Inspired Controller
42
+ *
43
+ * Extends the standard Controller with bio-inspired perception capabilities.
44
+ */
45
+ declare class BioInspiredController extends Controller {
46
+ private bioEngine;
47
+ private bioEnabled;
48
+ private bioMetricsInterval;
49
+ private lastBioResult;
50
+ constructor(options: BioInspiredControllerOptions);
51
+ /**
52
+ * Override processVideo to add bio-inspired perception
53
+ */
54
+ processVideo(input: any): void;
55
+ /**
56
+ * Handle a skipped frame using prediction
57
+ * @private
58
+ */
59
+ private _handleSkippedFrame;
60
+ /**
61
+ * Process frame using bio-inspired attention regions
62
+ * @private
63
+ */
64
+ private _processWithAttention;
65
+ /**
66
+ * Detect and match features, optionally limited to specific octaves
67
+ */
68
+ _detectAndMatch(inputData: any, targetIndexes: number[], octavesToProcess?: number[] | null): Promise<{
69
+ targetIndex: any;
70
+ modelViewTransform: any;
71
+ screenCoords: any;
72
+ worldCoords: any;
73
+ featurePoints: any;
74
+ }>;
75
+ /**
76
+ * Communicate with worker for matching phase
77
+ */
78
+ _workerMatch(featurePoints: any, targetIndexes: number[], inputData?: any, expectedScale?: number, octavesToProcess?: number[] | null): Promise<any>;
79
+ /**
80
+ * Override _trackAndUpdate to capture active octave for the next frame's orchestration
81
+ */
82
+ _trackAndUpdate(inputData: any, lastModelViewTransform: number[][], targetIndex: number): Promise<{
83
+ modelViewTransform: null;
84
+ screenCoords: never[];
85
+ reliabilities: never[];
86
+ stabilities: never[];
87
+ deformedMesh: null;
88
+ octaveIndex?: undefined;
89
+ } | {
90
+ modelViewTransform: null;
91
+ screenCoords: any[];
92
+ reliabilities: number[];
93
+ stabilities: number[];
94
+ deformedMesh?: undefined;
95
+ octaveIndex?: undefined;
96
+ } | {
97
+ modelViewTransform: any;
98
+ screenCoords: any[];
99
+ reliabilities: number[];
100
+ stabilities: number[];
101
+ deformedMesh: any;
102
+ octaveIndex: any;
103
+ }>;
104
+ /**
105
+ * Flatten a 3x4 matrix to Float32Array
106
+ * @private
107
+ */
108
+ private _flattenMatrix;
109
+ /**
110
+ * Unflatten Float32Array to 3x4 matrix
111
+ * @private
112
+ */
113
+ private _unflattenMatrix;
114
+ /**
115
+ * Get bio-inspired engine metrics
116
+ */
117
+ getBioMetrics(): Object | null;
118
+ /**
119
+ * Get last bio processing result
120
+ */
121
+ getLastBioResult(): any;
122
+ /**
123
+ * Enable/disable bio-inspired processing dynamically
124
+ */
125
+ setBioEnabled(enabled: boolean): void;
126
+ /**
127
+ * Configure bio-inspired engine at runtime
128
+ */
129
+ configureBio(options: Partial<typeof BIO_CONFIG>): void;
130
+ /**
131
+ * Override dispose to clean up bio engine
132
+ */
133
+ dispose(): void;
134
+ }
135
+ export { BioInspiredController };