@srsergio/taptapp-ar 1.0.60 → 1.0.62

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -8,7 +8,7 @@ class CropDetector {
8
8
  let minDimension = Math.min(width, height) / 2;
9
9
  let cropSize = Math.pow(2, Math.round(Math.log(minDimension) / Math.log(2)));
10
10
  this.cropSize = cropSize;
11
- this.detector = new DetectorLite(cropSize, cropSize, { useLSH: true });
11
+ this.detector = new DetectorLite(cropSize, cropSize, { useLSH: true, maxOctaves: 1 });
12
12
  this.lastRandomIndex = 4;
13
13
  }
14
14
  detect(input) {
@@ -9,6 +9,7 @@ export class DetectorLite {
9
9
  useGPU: any;
10
10
  useLSH: any;
11
11
  numOctaves: number;
12
+ maxFeaturesPerBucket: any;
12
13
  /**
13
14
  * Detecta características en una imagen en escala de grises
14
15
  * @param {Float32Array|Uint8Array} imageData - Datos de imagen (width * height)
@@ -15,7 +15,6 @@ import { computeLSH64, computeFullFREAK, packLSHIntoDescriptor } from "../utils/
15
15
  const PYRAMID_MIN_SIZE = 4; // Restored to 4 for better small-scale detection
16
16
  // PYRAMID_MAX_OCTAVE ya no es necesario, el límite lo da PYRAMID_MIN_SIZE
17
17
  const NUM_BUCKETS_PER_DIMENSION = 10;
18
- const MAX_FEATURES_PER_BUCKET = 30; // Maximized to ensure robustness in Moonshot mode
19
18
  const ORIENTATION_NUM_BINS = 36;
20
19
  const FREAK_EXPANSION_FACTOR = 7.0;
21
20
  // Global GPU mode flag
@@ -48,6 +47,9 @@ export class DetectorLite {
48
47
  break;
49
48
  }
50
49
  this.numOctaves = options.maxOctaves !== undefined ? Math.min(numOctaves, options.maxOctaves) : numOctaves;
50
+ // 🚀 SMART BITRATE (VBR): Internal logic to decide feature density based on scale
51
+ const scale = options.scale !== undefined ? options.scale : 1.0;
52
+ this.maxFeaturesPerBucket = options.maxFeaturesPerBucket || Math.max(4, Math.floor(12 * Math.sqrt(scale)));
51
53
  }
52
54
  /**
53
55
  * Detecta características en una imagen en escala de grises
@@ -305,7 +307,7 @@ export class DetectorLite {
305
307
  */
306
308
  _applyPrune(extremas) {
307
309
  const nBuckets = NUM_BUCKETS_PER_DIMENSION;
308
- const nFeatures = MAX_FEATURES_PER_BUCKET;
310
+ const nFeatures = this.maxFeaturesPerBucket;
309
311
  // Agrupar por buckets
310
312
  const buckets = [];
311
313
  for (let i = 0; i < nBuckets * nBuckets; i++) {
@@ -4,7 +4,7 @@ import { resize } from "./utils/images.js";
4
4
  * Un valor más bajo permite detectar imágenes más pequeñas pero aumenta el tiempo de procesamiento
5
5
  * @constant {number}
6
6
  */
7
- const MIN_IMAGE_PIXEL_SIZE = 32;
7
+ const MIN_IMAGE_PIXEL_SIZE = 40; // Increased to 40 to skip extremely small, noisy layers and reduce size
8
8
  /**
9
9
  * Construye una lista de imágenes con diferentes escalas para detección de características
10
10
  * @param {Object} inputImage - Imagen de entrada con propiedades width, height y data
@@ -16,9 +16,8 @@ const buildImageList = (inputImage) => {
16
16
  let c = minScale;
17
17
  while (true) {
18
18
  scaleList.push(c);
19
- // Optimization: Paso balanceado (aprox 1.5)
20
- // Mejor cobertura que 2.0, pero mucho más ligero que 1.41 o 1.26
21
- c *= Math.pow(2.0, 0.6);
19
+ // Optimization: More aggressive step (pow(2, 0.75) approx 1.68) for smaller exports
20
+ c *= Math.pow(2.0, 0.75);
22
21
  if (c >= 0.95) {
23
22
  c = 1;
24
23
  break;
@@ -64,40 +64,19 @@ parentPort.on('message', async (msg) => {
64
64
  else if (msg.type === 'match') {
65
65
  const { targetImage, percentPerImage, basePercent } = msg;
66
66
  try {
67
- // 🚀 MOONSHOT: Only run detector ONCE on full-res image.
68
- // DetectorLite internally builds a pyramid (octaves 1.0, 0.5, 0.25, etc.)
69
- const detector = new DetectorLite(targetImage.width, targetImage.height, {
70
- useLSH: true
71
- });
72
- parentPort.postMessage({ type: 'progress', percent: basePercent + percentPerImage * 0.1 });
73
- const { featurePoints: allPoints } = detector.detect(targetImage.data);
74
- parentPort.postMessage({ type: 'progress', percent: basePercent + percentPerImage * 0.5 });
75
- // Group points by their scale (octave)
76
- const scalesMap = new Map();
77
- for (const p of allPoints) {
78
- const octaveScale = p.scale;
79
- let list = scalesMap.get(octaveScale);
80
- if (!list) {
81
- list = [];
82
- scalesMap.set(octaveScale, list);
83
- }
84
- // Coordinates in p are already full-res.
85
- // We need them relative to the scaled image for the keyframe.
86
- list.push({
87
- ...p,
88
- x: p.x / octaveScale,
89
- y: p.y / octaveScale,
90
- scale: 1.0 // Keypoint scale is always 1.0 relative to its own keyframe image
91
- });
92
- }
93
- // Optional: Run another detector pass at an intermediate scale to improve coverage
94
- // (e.g. at 1/1.41 ratio) if tracking robustness suffers.
95
- // For now, let's stick to octaves for MAXIMUM speed.
67
+ const { buildImageList } = await import('./image-list.js');
68
+ const imageList = buildImageList(targetImage);
69
+ const percentPerScale = percentPerImage / imageList.length;
96
70
  const keyframes = [];
97
- const sortedScales = Array.from(scalesMap.keys()).sort((a, b) => a - b);
98
- const percentPerScale = (percentPerImage * 0.4) / sortedScales.length;
99
- for (const s of sortedScales) {
100
- const ps = scalesMap.get(s);
71
+ for (let i = 0; i < imageList.length; i++) {
72
+ const image = imageList[i];
73
+ // 🚀 SMART BITRATE (VBR): Now handled internally by DetectorLite via 'scale'
74
+ const detector = new DetectorLite(image.width, image.height, {
75
+ useLSH: true,
76
+ maxOctaves: 1,
77
+ scale: image.scale
78
+ });
79
+ const { featurePoints: ps } = detector.detect(image.data);
101
80
  const sortedPs = sortPoints(ps);
102
81
  const maximaPoints = sortedPs.filter((p) => p.maxima);
103
82
  const minimaPoints = sortedPs.filter((p) => !p.maxima);
@@ -108,13 +87,13 @@ parentPort.on('message', async (msg) => {
108
87
  minimaPoints,
109
88
  maximaPointsCluster,
110
89
  minimaPointsCluster,
111
- width: Math.round(targetImage.width / s),
112
- height: Math.round(targetImage.height / s),
113
- scale: 1.0 / s, // keyframe.scale is relative to full target image
90
+ width: image.width,
91
+ height: image.height,
92
+ scale: image.scale,
114
93
  });
115
94
  parentPort.postMessage({
116
95
  type: 'progress',
117
- percent: basePercent + percentPerImage * 0.6 + keyframes.length * percentPerScale
96
+ percent: basePercent + (i + 1) * percentPerScale
118
97
  });
119
98
  }
120
99
  parentPort.postMessage({
@@ -12,7 +12,10 @@ export declare class OfflineCompiler {
12
12
  constructor();
13
13
  _initNodeWorkers(): Promise<void>;
14
14
  compileImageTargets(images: any[], progressCallback: (p: number) => void): Promise<any>;
15
- _compileTarget(targetImages: any[], progressCallback: (p: number) => void): Promise<any[]>;
15
+ _compileTarget(targetImages: any[], progressCallback: (p: number) => void): Promise<{
16
+ matchingData: any;
17
+ trackingData: any;
18
+ }[]>;
16
19
  _compileMatch(targetImages: any[], progressCallback: (p: number) => void): Promise<any[]>;
17
20
  _compileTrack(targetImages: any[], progressCallback: (p: number) => void): Promise<any[]>;
18
21
  compileTrack({ progressCallback, targetImages, basePercent }: {
@@ -88,91 +88,14 @@ export class OfflineCompiler {
88
88
  async _compileTarget(targetImages, progressCallback) {
89
89
  if (isNode)
90
90
  await this._initNodeWorkers();
91
- if (this.workerPool) {
92
- const progressMap = new Float32Array(targetImages.length);
93
- const wrappedPromises = targetImages.map((targetImage, index) => {
94
- return this.workerPool.runTask({
95
- type: 'compile-all', // 🚀 MOONSHOT: Combined task
96
- targetImage,
97
- onProgress: (p) => {
98
- progressMap[index] = p;
99
- const sum = progressMap.reduce((a, b) => a + b, 0);
100
- progressCallback(sum / targetImages.length);
101
- }
102
- });
103
- });
104
- return Promise.all(wrappedPromises);
105
- }
106
- // 🚀 MOONSHOT BROWSER FALLBACK:
107
- // Combined detection to avoid redundant pyramid processing
108
- const results = [];
109
- for (let i = 0; i < targetImages.length; i++) {
110
- const targetImage = targetImages[i];
111
- // 1. Single Pass Detection + Pyramid Generation
112
- const detector = new DetectorLite(targetImage.width, targetImage.height, { useLSH: true });
113
- progressCallback((i / targetImages.length) * 100 + 10);
114
- const { featurePoints, pyramid } = detector.detect(targetImage.data);
115
- progressCallback((i / targetImages.length) * 100 + 40);
116
- // 2. Extract Tracking Data using the ALREADY BLURRED pyramid
117
- const trackingImageList = [];
118
- const targetSizes = [256, 128];
119
- for (const targetSize of targetSizes) {
120
- let bestLevel = 0;
121
- let minDiff = Math.abs(Math.min(targetImage.width, targetImage.height) - targetSize);
122
- for (let l = 1; l < pyramid.length; l++) {
123
- const img = pyramid[l][0];
124
- const diff = Math.abs(Math.min(img.width, img.height) - targetSize);
125
- if (diff < minDiff) {
126
- minDiff = diff;
127
- bestLevel = l;
128
- }
129
- }
130
- const levelImg = pyramid[bestLevel][0];
131
- trackingImageList.push({
132
- data: levelImg.data,
133
- width: levelImg.width,
134
- height: levelImg.height,
135
- scale: levelImg.width / targetImage.width
136
- });
137
- }
138
- const trackingData = extractTrackingFeatures(trackingImageList, () => { });
139
- progressCallback((i / targetImages.length) * 100 + 60);
140
- // 3. Build Keyframes for Matching (Group by scale)
141
- const scalesMap = new Map();
142
- for (const p of featurePoints) {
143
- const s = p.scale;
144
- let list = scalesMap.get(s);
145
- if (!list) {
146
- list = [];
147
- scalesMap.set(s, list);
148
- }
149
- list.push({ ...p, x: p.x / s, y: p.y / s, scale: 1.0 });
150
- }
151
- const keyframes = [];
152
- const sortedScales = Array.from(scalesMap.keys()).sort((a, b) => a - b);
153
- for (const s of sortedScales) {
154
- const ps = scalesMap.get(s);
155
- const maximaPoints = ps.filter((p) => p.maxima);
156
- const minimaPoints = ps.filter((p) => !p.maxima);
157
- const maximaPointsCluster = hierarchicalClusteringBuild({ points: maximaPoints });
158
- const minimaPointsCluster = hierarchicalClusteringBuild({ points: minimaPoints });
159
- keyframes.push({
160
- maximaPoints,
161
- minimaPoints,
162
- maximaPointsCluster,
163
- minimaPointsCluster,
164
- width: Math.round(targetImage.width / s),
165
- height: Math.round(targetImage.height / s),
166
- scale: 1.0 / s,
167
- });
168
- }
169
- results.push({
170
- matchingData: keyframes,
171
- trackingData: trackingData
172
- });
173
- progressCallback(((i + 1) / targetImages.length) * 100);
174
- }
175
- return results;
91
+ // Reverted: 'compile-all' combined task was causing issues with pyramid processing
92
+ // We go back to sequential match and track for reliability
93
+ const matchingResults = await this._compileMatch(targetImages, (p) => progressCallback(p * 0.5));
94
+ const trackingResults = await this._compileTrack(targetImages, (p) => progressCallback(50 + p * 0.5));
95
+ return targetImages.map((_, i) => ({
96
+ matchingData: matchingResults[i],
97
+ trackingData: trackingResults[i]
98
+ }));
176
99
  }
177
100
  async _compileMatch(targetImages, progressCallback) {
178
101
  const percentPerImage = 100 / targetImages.length;
@@ -203,7 +126,12 @@ export class OfflineCompiler {
203
126
  const percentPerImageScale = percentPerImage / imageList.length;
204
127
  const keyframes = [];
205
128
  for (const image of imageList) {
206
- const detector = new DetectorLite(image.width, image.height, { useLSH: true });
129
+ // 🚀 SMART BITRATE (VBR): Internalized in DetectorLite
130
+ const detector = new DetectorLite(image.width, image.height, {
131
+ useLSH: true,
132
+ maxOctaves: 1,
133
+ scale: image.scale
134
+ });
207
135
  const { featurePoints: ps } = detector.detect(image.data);
208
136
  const maximaPoints = ps.filter((p) => p.maxima);
209
137
  const minimaPoints = ps.filter((p) => !p.maxima);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@srsergio/taptapp-ar",
3
- "version": "1.0.60",
3
+ "version": "1.0.62",
4
4
  "author": "Sergio Lazaro <srsergiolazaro@gmail.com>",
5
5
  "license": "GPL-3.0",
6
6
  "description": "Ultra-fast, lightweight Augmented Reality Image Tracking SDK for the web. Features an optimized offline compiler, React components, and compatibility with Three.js/A-Frame. No heavy ML frameworks required.",
@@ -11,7 +11,7 @@ class CropDetector {
11
11
  let cropSize = Math.pow(2, Math.round(Math.log(minDimension) / Math.log(2)));
12
12
  this.cropSize = cropSize;
13
13
 
14
- this.detector = new DetectorLite(cropSize, cropSize, { useLSH: true });
14
+ this.detector = new DetectorLite(cropSize, cropSize, { useLSH: true, maxOctaves: 1 });
15
15
 
16
16
  this.lastRandomIndex = 4;
17
17
  }
@@ -19,7 +19,6 @@ const PYRAMID_MIN_SIZE = 4; // Restored to 4 for better small-scale detection
19
19
 
20
20
 
21
21
  const NUM_BUCKETS_PER_DIMENSION = 10;
22
- const MAX_FEATURES_PER_BUCKET = 30; // Maximized to ensure robustness in Moonshot mode
23
22
 
24
23
 
25
24
  const ORIENTATION_NUM_BINS = 36;
@@ -58,6 +57,10 @@ export class DetectorLite {
58
57
  }
59
58
 
60
59
  this.numOctaves = options.maxOctaves !== undefined ? Math.min(numOctaves, options.maxOctaves) : numOctaves;
60
+
61
+ // 🚀 SMART BITRATE (VBR): Internal logic to decide feature density based on scale
62
+ const scale = options.scale !== undefined ? options.scale : 1.0;
63
+ this.maxFeaturesPerBucket = options.maxFeaturesPerBucket || Math.max(4, Math.floor(12 * Math.sqrt(scale)));
61
64
  }
62
65
 
63
66
  /**
@@ -350,7 +353,7 @@ export class DetectorLite {
350
353
  */
351
354
  _applyPrune(extremas) {
352
355
  const nBuckets = NUM_BUCKETS_PER_DIMENSION;
353
- const nFeatures = MAX_FEATURES_PER_BUCKET;
356
+ const nFeatures = this.maxFeaturesPerBucket;
354
357
 
355
358
  // Agrupar por buckets
356
359
  const buckets = [];
@@ -5,7 +5,7 @@ import { resize } from "./utils/images.js";
5
5
  * Un valor más bajo permite detectar imágenes más pequeñas pero aumenta el tiempo de procesamiento
6
6
  * @constant {number}
7
7
  */
8
- const MIN_IMAGE_PIXEL_SIZE = 32;
8
+ const MIN_IMAGE_PIXEL_SIZE = 40; // Increased to 40 to skip extremely small, noisy layers and reduce size
9
9
 
10
10
 
11
11
 
@@ -21,9 +21,8 @@ const buildImageList = (inputImage) => {
21
21
  let c = minScale;
22
22
  while (true) {
23
23
  scaleList.push(c);
24
- // Optimization: Paso balanceado (aprox 1.5)
25
- // Mejor cobertura que 2.0, pero mucho más ligero que 1.41 o 1.26
26
- c *= Math.pow(2.0, 0.6);
24
+ // Optimization: More aggressive step (pow(2, 0.75) approx 1.68) for smaller exports
25
+ c *= Math.pow(2.0, 0.75);
27
26
  if (c >= 0.95) {
28
27
  c = 1;
29
28
  break;
@@ -71,49 +71,22 @@ parentPort.on('message', async (msg) => {
71
71
  const { targetImage, percentPerImage, basePercent } = msg;
72
72
 
73
73
  try {
74
- // 🚀 MOONSHOT: Only run detector ONCE on full-res image.
75
- // DetectorLite internally builds a pyramid (octaves 1.0, 0.5, 0.25, etc.)
76
- const detector = new DetectorLite(targetImage.width, targetImage.height, {
77
- useLSH: true
78
- });
79
-
80
- parentPort.postMessage({ type: 'progress', percent: basePercent + percentPerImage * 0.1 });
81
-
82
- const { featurePoints: allPoints } = detector.detect(targetImage.data);
83
-
84
- parentPort.postMessage({ type: 'progress', percent: basePercent + percentPerImage * 0.5 });
74
+ const { buildImageList } = await import('./image-list.js');
75
+ const imageList = buildImageList(targetImage);
76
+ const percentPerScale = percentPerImage / imageList.length;
77
+ const keyframes = [];
85
78
 
86
- // Group points by their scale (octave)
87
- const scalesMap = new Map();
88
- for (const p of allPoints) {
89
- const octaveScale = p.scale;
90
- let list = scalesMap.get(octaveScale);
91
- if (!list) {
92
- list = [];
93
- scalesMap.set(octaveScale, list);
94
- }
79
+ for (let i = 0; i < imageList.length; i++) {
80
+ const image = imageList[i];
95
81
 
96
- // Coordinates in p are already full-res.
97
- // We need them relative to the scaled image for the keyframe.
98
- list.push({
99
- ...p,
100
- x: p.x / octaveScale,
101
- y: p.y / octaveScale,
102
- scale: 1.0 // Keypoint scale is always 1.0 relative to its own keyframe image
82
+ // 🚀 SMART BITRATE (VBR): Now handled internally by DetectorLite via 'scale'
83
+ const detector = new DetectorLite(image.width, image.height, {
84
+ useLSH: true,
85
+ maxOctaves: 1,
86
+ scale: image.scale
103
87
  });
104
- }
88
+ const { featurePoints: ps } = detector.detect(image.data);
105
89
 
106
- // Optional: Run another detector pass at an intermediate scale to improve coverage
107
- // (e.g. at 1/1.41 ratio) if tracking robustness suffers.
108
- // For now, let's stick to octaves for MAXIMUM speed.
109
-
110
- const keyframes = [];
111
- const sortedScales = Array.from(scalesMap.keys()).sort((a, b) => a - b);
112
-
113
- const percentPerScale = (percentPerImage * 0.4) / sortedScales.length;
114
-
115
- for (const s of sortedScales) {
116
- const ps = scalesMap.get(s);
117
90
  const sortedPs = sortPoints(ps);
118
91
  const maximaPoints = sortedPs.filter((p) => p.maxima);
119
92
  const minimaPoints = sortedPs.filter((p) => !p.maxima);
@@ -126,14 +99,14 @@ parentPort.on('message', async (msg) => {
126
99
  minimaPoints,
127
100
  maximaPointsCluster,
128
101
  minimaPointsCluster,
129
- width: Math.round(targetImage.width / s),
130
- height: Math.round(targetImage.height / s),
131
- scale: 1.0 / s, // keyframe.scale is relative to full target image
102
+ width: image.width,
103
+ height: image.height,
104
+ scale: image.scale,
132
105
  });
133
106
 
134
107
  parentPort.postMessage({
135
108
  type: 'progress',
136
- percent: basePercent + percentPerImage * 0.6 + keyframes.length * percentPerScale
109
+ percent: basePercent + (i + 1) * percentPerScale
137
110
  });
138
111
  }
139
112
 
@@ -110,103 +110,15 @@ export class OfflineCompiler {
110
110
  async _compileTarget(targetImages: any[], progressCallback: (p: number) => void) {
111
111
  if (isNode) await this._initNodeWorkers();
112
112
 
113
- if (this.workerPool) {
114
- const progressMap = new Float32Array(targetImages.length);
115
- const wrappedPromises = targetImages.map((targetImage: any, index: number) => {
116
- return this.workerPool!.runTask({
117
- type: 'compile-all', // 🚀 MOONSHOT: Combined task
118
- targetImage,
119
- onProgress: (p: number) => {
120
- progressMap[index] = p;
121
- const sum = progressMap.reduce((a, b) => a + b, 0);
122
- progressCallback(sum / targetImages.length);
123
- }
124
- });
125
- });
126
- return Promise.all(wrappedPromises);
127
- }
128
-
129
- // 🚀 MOONSHOT BROWSER FALLBACK:
130
- // Combined detection to avoid redundant pyramid processing
131
- const results = [];
132
- for (let i = 0; i < targetImages.length; i++) {
133
- const targetImage = targetImages[i];
134
-
135
- // 1. Single Pass Detection + Pyramid Generation
136
- const detector = new DetectorLite(targetImage.width, targetImage.height, { useLSH: true });
137
- progressCallback((i / targetImages.length) * 100 + 10);
138
-
139
- const { featurePoints, pyramid }: any = detector.detect(targetImage.data);
140
- progressCallback((i / targetImages.length) * 100 + 40);
141
-
142
- // 2. Extract Tracking Data using the ALREADY BLURRED pyramid
143
- const trackingImageList: any[] = [];
144
- const targetSizes = [256, 128];
145
- for (const targetSize of targetSizes) {
146
- let bestLevel = 0;
147
- let minDiff = Math.abs(Math.min(targetImage.width, targetImage.height) - targetSize);
148
-
149
- for (let l = 1; l < pyramid.length; l++) {
150
- const img = pyramid[l][0];
151
- const diff = Math.abs(Math.min(img.width, img.height) - targetSize);
152
- if (diff < minDiff) {
153
- minDiff = diff;
154
- bestLevel = l;
155
- }
156
- }
157
-
158
- const levelImg = pyramid[bestLevel][0];
159
- trackingImageList.push({
160
- data: levelImg.data,
161
- width: levelImg.width,
162
- height: levelImg.height,
163
- scale: levelImg.width / targetImage.width
164
- });
165
- }
166
-
167
- const trackingData = extractTrackingFeatures(trackingImageList, () => { });
168
- progressCallback((i / targetImages.length) * 100 + 60);
169
-
170
- // 3. Build Keyframes for Matching (Group by scale)
171
- const scalesMap = new Map();
172
- for (const p of featurePoints) {
173
- const s = p.scale;
174
- let list = scalesMap.get(s);
175
- if (!list) {
176
- list = [];
177
- scalesMap.set(s, list);
178
- }
179
- list.push({ ...p, x: p.x / s, y: p.y / s, scale: 1.0 });
180
- }
181
-
182
- const keyframes = [];
183
- const sortedScales = Array.from(scalesMap.keys()).sort((a, b) => a - b);
184
- for (const s of sortedScales) {
185
- const ps = scalesMap.get(s);
186
- const maximaPoints = ps.filter((p: any) => p.maxima);
187
- const minimaPoints = ps.filter((p: any) => !p.maxima);
188
- const maximaPointsCluster = hierarchicalClusteringBuild({ points: maximaPoints });
189
- const minimaPointsCluster = hierarchicalClusteringBuild({ points: minimaPoints });
190
-
191
- keyframes.push({
192
- maximaPoints,
193
- minimaPoints,
194
- maximaPointsCluster,
195
- minimaPointsCluster,
196
- width: Math.round(targetImage.width / s),
197
- height: Math.round(targetImage.height / s),
198
- scale: 1.0 / s,
199
- });
200
- }
201
-
202
- results.push({
203
- matchingData: keyframes,
204
- trackingData: trackingData
205
- });
206
- progressCallback(((i + 1) / targetImages.length) * 100);
207
- }
208
-
209
- return results;
113
+ // Reverted: 'compile-all' combined task was causing issues with pyramid processing
114
+ // We go back to sequential match and track for reliability
115
+ const matchingResults = await this._compileMatch(targetImages, (p) => progressCallback(p * 0.5));
116
+ const trackingResults = await this._compileTrack(targetImages, (p) => progressCallback(50 + p * 0.5));
117
+
118
+ return targetImages.map((_, i) => ({
119
+ matchingData: matchingResults[i],
120
+ trackingData: trackingResults[i]
121
+ }));
210
122
  }
211
123
 
212
124
  async _compileMatch(targetImages: any[], progressCallback: (p: number) => void) {
@@ -243,7 +155,12 @@ export class OfflineCompiler {
243
155
  const keyframes = [];
244
156
 
245
157
  for (const image of imageList as any[]) {
246
- const detector = new DetectorLite(image.width, image.height, { useLSH: true });
158
+ // 🚀 SMART BITRATE (VBR): Internalized in DetectorLite
159
+ const detector = new DetectorLite(image.width, image.height, {
160
+ useLSH: true,
161
+ maxOctaves: 1,
162
+ scale: image.scale
163
+ });
247
164
  const { featurePoints: ps } = detector.detect(image.data);
248
165
 
249
166
  const maximaPoints = ps.filter((p: any) => p.maxima);