@srsergio/taptapp-ar 1.0.2 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +47 -45
- package/dist/compiler/aframe.js +0 -3
- package/dist/compiler/compiler-base.d.ts +3 -7
- package/dist/compiler/compiler-base.js +28 -14
- package/dist/compiler/compiler.js +1 -1
- package/dist/compiler/compiler.worker.js +1 -1
- package/dist/compiler/controller.js +4 -5
- package/dist/compiler/controller.worker.js +0 -2
- package/dist/compiler/detector/crop-detector.js +0 -2
- package/dist/compiler/detector/detector-lite.d.ts +73 -0
- package/dist/compiler/detector/detector-lite.js +430 -0
- package/dist/compiler/detector/detector.js +236 -243
- package/dist/compiler/detector/kernels/cpu/binomialFilter.js +0 -1
- package/dist/compiler/detector/kernels/cpu/computeLocalization.js +0 -4
- package/dist/compiler/detector/kernels/cpu/computeOrientationHistograms.js +0 -18
- package/dist/compiler/detector/kernels/cpu/fakeShader.js +1 -1
- package/dist/compiler/detector/kernels/cpu/prune.d.ts +7 -1
- package/dist/compiler/detector/kernels/cpu/prune.js +1 -42
- package/dist/compiler/detector/kernels/webgl/upsampleBilinear.js +2 -2
- package/dist/compiler/estimation/refine-estimate.js +0 -1
- package/dist/compiler/estimation/utils.d.ts +1 -1
- package/dist/compiler/estimation/utils.js +1 -14
- package/dist/compiler/image-list.js +4 -4
- package/dist/compiler/input-loader.js +2 -2
- package/dist/compiler/matching/hamming-distance.js +13 -13
- package/dist/compiler/matching/hierarchical-clustering.js +1 -1
- package/dist/compiler/matching/matching.d.ts +20 -4
- package/dist/compiler/matching/matching.js +67 -41
- package/dist/compiler/matching/ransacHomography.js +1 -2
- package/dist/compiler/node-worker.d.ts +1 -0
- package/dist/compiler/node-worker.js +84 -0
- package/dist/compiler/offline-compiler.d.ts +171 -6
- package/dist/compiler/offline-compiler.js +303 -421
- package/dist/compiler/tensorflow-setup.js +27 -1
- package/dist/compiler/three.js +3 -5
- package/dist/compiler/tracker/extract.d.ts +1 -0
- package/dist/compiler/tracker/extract.js +200 -244
- package/dist/compiler/tracker/tracker.d.ts +1 -1
- package/dist/compiler/tracker/tracker.js +13 -18
- package/dist/compiler/utils/cumsum.d.ts +4 -2
- package/dist/compiler/utils/cumsum.js +17 -19
- package/dist/compiler/utils/gpu-compute.d.ts +57 -0
- package/dist/compiler/utils/gpu-compute.js +262 -0
- package/dist/compiler/utils/images.d.ts +4 -4
- package/dist/compiler/utils/images.js +67 -53
- package/dist/compiler/utils/worker-pool.d.ts +14 -0
- package/dist/compiler/utils/worker-pool.js +84 -0
- package/package.json +11 -13
- package/src/compiler/aframe.js +2 -4
- package/src/compiler/compiler-base.js +29 -14
- package/src/compiler/compiler.js +1 -1
- package/src/compiler/compiler.worker.js +1 -1
- package/src/compiler/controller.js +4 -5
- package/src/compiler/controller.worker.js +0 -2
- package/src/compiler/detector/crop-detector.js +0 -2
- package/src/compiler/detector/detector-lite.js +494 -0
- package/src/compiler/detector/detector.js +1052 -1063
- package/src/compiler/detector/kernels/cpu/binomialFilter.js +0 -1
- package/src/compiler/detector/kernels/cpu/computeLocalization.js +0 -4
- package/src/compiler/detector/kernels/cpu/computeOrientationHistograms.js +0 -17
- package/src/compiler/detector/kernels/cpu/fakeShader.js +1 -1
- package/src/compiler/detector/kernels/cpu/prune.js +1 -37
- package/src/compiler/detector/kernels/webgl/upsampleBilinear.js +2 -2
- package/src/compiler/estimation/refine-estimate.js +0 -1
- package/src/compiler/estimation/utils.js +9 -24
- package/src/compiler/image-list.js +4 -4
- package/src/compiler/input-loader.js +2 -2
- package/src/compiler/matching/hamming-distance.js +11 -15
- package/src/compiler/matching/hierarchical-clustering.js +1 -1
- package/src/compiler/matching/matching.js +72 -42
- package/src/compiler/matching/ransacHomography.js +0 -2
- package/src/compiler/node-worker.js +93 -0
- package/src/compiler/offline-compiler.js +339 -504
- package/src/compiler/tensorflow-setup.js +29 -1
- package/src/compiler/three.js +3 -5
- package/src/compiler/tracker/extract.js +211 -267
- package/src/compiler/tracker/tracker.js +13 -22
- package/src/compiler/utils/cumsum.js +17 -19
- package/src/compiler/utils/gpu-compute.js +303 -0
- package/src/compiler/utils/images.js +84 -53
- package/src/compiler/utils/worker-pool.js +89 -0
- package/src/compiler/estimation/esimate-experiment.js +0 -316
- package/src/compiler/estimation/refine-estimate-experiment.js +0 -512
```diff
package/dist/compiler/offline-compiler.js
@@ -1,450 +1,332 @@
 /**
-* @fileoverview Compilador Offline
+* @fileoverview Compilador Offline Optimizado - Sin TensorFlow para máxima velocidad
 *
-* Este módulo implementa un
-*
+* Este módulo implementa un compilador de imágenes AR ultrarrápido
+* que NO depende de TensorFlow, eliminando todos los problemas de
+* inicialización, bloqueos y compatibilidad.
 *
-*
+* Usa JavaScript puro para:
+* - Extracción de features de tracking (extract.js)
+* - Detección de features para matching (DetectorLite)
+* - Clustering jerárquico para features
 *
-*
-*
-*
-* - Detección automática de entorno (serverless/navegador/backend dedicado)
-* - Precalentamiento agresivo para reducir cold starts
-*
-* 2. Gestión de Memoria:
-* - Sistema de liberación ultra-agresiva de memoria con umbrales dinámicos
-* - Monitoreo continuo del uso de tensores con cleanup automático
-* - Estrategias de scope anidados para control preciso de recursos
-* - Liberación proactiva entre operaciones intensivas
-*
-* 3. Optimizaciones de Rendimiento:
-* - Precalentamiento estratégico del backend para eliminar latencia inicial
-* - Ajustes específicos por backend con configuraciones óptimas por plataforma
-* - Configuraciones especializadas para entornos backend de alto rendimiento
-* - Reducción de precisión selectiva para operaciones no críticas
-*
-* 4. Procesamiento por Lotes:
-* - Sistema adaptativo de tamaño de lotes basado en capacidad de hardware
-* - Paralelización multinivel con control de concurrencia
-* - Control de progreso granular con retroalimentación en tiempo real
-* - Estrategias de división de trabajo para CPUs multi-núcleo
-*
-* 5. Gestión de Recursos:
-* - Timeouts inteligentes con recuperación automática
-* - Liberación proactiva de recursos con GC forzado estratégico
-* - Manejo de errores robusto con recuperación de fallos
-* - Monitoreo de rendimiento en tiempo real
-*
-* @requires tensorflow/tfjs
-* @requires ./compiler-base.js
-* @requires ./image-list.js
-* @requires ./tracker/extract-utils.js
-* @requires ./tensorflow-setup.js
+* Funciona en:
+* - Node.js (con workers opcionales)
+* - Browser (sin workers)
 */
-import {
-import { buildTrackingImageList } from "./image-list.js";
+import { buildTrackingImageList, buildImageList } from "./image-list.js";
 import { extractTrackingFeatures } from "./tracker/extract-utils.js";
-import {
-import {
-import
-//
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-return setupPromise;
-// Iniciar configuración y guardar la promesa
-setupPromise = (async () => {
-try {
-console.time("⏱️ Configuración de TensorFlow");
-const backend = await setupTensorFlow();
-tensorflowBackend = backend;
-console.timeEnd("⏱️ Configuración de TensorFlow");
-return backend;
-}
-catch (error) {
-console.error("Error crítico al configurar TensorFlow:", error);
-return null;
+import { DetectorLite } from "./detector/detector-lite.js";
+import { build as hierarchicalClusteringBuild } from "./matching/hierarchical-clustering.js";
+import * as msgpack from "@msgpack/msgpack";
+// Detect environment
+const isNode = typeof process !== "undefined" &&
+process.versions != null &&
+process.versions.node != null;
+const CURRENT_VERSION = 3; // Protocol v3: High-performance Columnar Binary Format
+/**
+* Compilador offline optimizado sin TensorFlow
+*/
+export class OfflineCompiler {
+constructor() {
+this.data = null;
+this.workerPool = null;
+// Workers solo en Node.js (no en browser)
+if (isNode) {
+this._initNodeWorkers();
 }
-
-
-};
-// Iniciar la configuración inmediatamente al importar el módulo
-const tensorflowSetupPromise = setupTensorFlowAsync();
-// Registrar los kernels necesarios para CPU (carga temprana)
-import "./detector/kernels/cpu/index.js";
-// Registrar los backends básicos
-import "@tensorflow/tfjs-backend-cpu";
-// Configuraciones avanzadas para maximizar rendimiento en backend
-const enablePerformanceOptimizations = async () => {
-try {
-// Esperar a que TensorFlow esté configurado
-await tensorflowSetupPromise;
-// Optimizaciones específicas según el backend
-const backend = tf.getBackend();
-console.log(`⚙️ Optimizando agresivamente para backend: ${backend}`);
-// Entorno serverless necesita configuraciones especiales
-const isServerless = isServerlessEnvironment();
-const isBackendDedicated = !isServerless && process.env.NODE_ENV === "production";
-if (isBackendDedicated) {
-console.log("🚀🚀 Entorno backend dedicado detectado, aplicando configuraciones de alto rendimiento");
-// Configuraciones agresivas para backend dedicado
-tf.ENV.set("CPU_HANDOFF_SIZE_THRESHOLD", 1024 * 1024 * 16); // 16MB - más memoria disponible
-tf.ENV.set("WEBGL_SIZE_UPLOAD_UNIFORM", 16); // Mayor capacidad de transferencia
-tf.ENV.set("WEBGL_DELETE_TEXTURE_THRESHOLD", 64); // Más texturas en memoria
-// Configuraciones para maximizar throughput
-tf.ENV.set("WEBGL_FLUSH_THRESHOLD", 10); // Menos flushes para mejor rendimiento
-tf.ENV.set("KEEP_INTERMEDIATE_TENSORS", false); // Liberar intermedios agresivamente
-tf.ENV.set("WEBGL_PACK_BINARY_OPERATIONS", true); // Empaquetar operaciones binarias
+else {
+console.log("🌐 OfflineCompiler: Browser mode (no workers)");
 }
-
-
-
-
-
-
-
-
+}
+async _initNodeWorkers() {
+try {
+const [os, path, url, workerModule] = await Promise.all([
+import("os"),
+import("path"),
+import("url"),
+import("./utils/worker-pool.js")
+]);
+const __filename = url.fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+const workerPath = path.join(__dirname, "node-worker.js");
+const numWorkers = Math.min(os.cpus().length, 4);
+this.workerPool = new workerModule.WorkerPool(workerPath, numWorkers);
+console.log(`🚀 OfflineCompiler: Node.js mode with ${numWorkers} workers`);
 }
-
-
-tf.ENV.set("DEBUG", false);
-tf.ENV.set("CHECK_COMPUTATION_FOR_ERRORS", false); // Deshabilitar verificaciones para mayor velocidad
-// Optimizar el uso de memoria con límites más altos
-if (backend === "node") {
-console.log("🔧 Aplicando optimizaciones avanzadas para Node.js backend");
+catch (e) {
+console.log("⚡ OfflineCompiler: Running without workers");
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+}
+/**
+* Compila una lista de imágenes objetivo
+* @param {Array} images - Lista de imágenes {width, height, data}
+* @param {Function} progressCallback - Callback de progreso (0-100)
+* @returns {Promise<Array>} Datos compilados
+*/
+async compileImageTargets(images, progressCallback) {
+console.time("⏱️ Compilación total");
+const targetImages = [];
+// Preparar imágenes
+for (let i = 0; i < images.length; i++) {
+const img = images[i];
+if (!img || !img.width || !img.height || !img.data) {
+throw new Error(`Imagen inválida en posición ${i}. Debe tener propiedades width, height y data.`);
+}
+// Convertir a escala de grises
+const greyImageData = new Uint8Array(img.width * img.height);
+if (img.data.length === img.width * img.height) {
+greyImageData.set(img.data);
 }
-else if (
-
-
-
+else if (img.data.length === img.width * img.height * 4) {
+for (let j = 0; j < greyImageData.length; j++) {
+const offset = j * 4;
+greyImageData[j] = Math.floor((img.data[offset] + img.data[offset + 1] + img.data[offset + 2]) / 3);
+}
 }
 else {
-
-tf.ENV.set("WEBGL_MAX_TEXTURE_SIZE", 4096);
-tf.ENV.set("WEBGL_MAX_TEXTURES_IN_SHADER", 12);
+throw new Error(`Formato de datos de imagen no soportado en posición ${i}`);
 }
+targetImages.push({
+data: greyImageData,
+height: img.height,
+width: img.width,
+});
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-
-
+// Fase 1: Matching (50%)
+console.time("⏱️ Fase Matching");
+const matchingDataList = await this._compileMatch(targetImages, (percent) => {
+progressCallback(percent * 0.5);
+});
+console.timeEnd("⏱️ Fase Matching");
+// Fase 2: Tracking (50%)
+console.time("⏱️ Fase Tracking");
+const trackingDataList = await this._compileTrack(targetImages, (percent) => {
+progressCallback(50 + percent * 0.5);
+});
+console.timeEnd("⏱️ Fase Tracking");
+// Compilar resultado
+this.data = targetImages.map((targetImage, i) => ({
+targetImage: { width: targetImage.width, height: targetImage.height },
+trackingData: trackingDataList[i],
+matchingData: matchingDataList[i],
+}));
+console.timeEnd("⏱️ Compilación total");
+return this.data;
+}
+/**
+* Compila datos de matching usando DetectorLite (JS puro)
+*/
+async _compileMatch(targetImages, progressCallback) {
+const percentPerImage = 100 / targetImages.length;
+let currentPercent = 0;
+const results = [];
+// Procesar secuencialmente para evitar overhead de workers
+// (los workers son útiles para muchas imágenes, pero añaden latencia)
+for (let i = 0; i < targetImages.length; i++) {
+const targetImage = targetImages[i];
+const imageList = buildImageList(targetImage);
+const percentPerScale = percentPerImage / imageList.length;
+const keyframes = [];
+for (const image of imageList) {
+const detector = new DetectorLite(image.width, image.height);
+const { featurePoints: ps } = detector.detect(image.data);
+const maximaPoints = ps.filter((p) => p.maxima);
+const minimaPoints = ps.filter((p) => !p.maxima);
+const maximaPointsCluster = hierarchicalClusteringBuild({ points: maximaPoints });
+const minimaPointsCluster = hierarchicalClusteringBuild({ points: minimaPoints });
+keyframes.push({
+maximaPoints,
+minimaPoints,
+maximaPointsCluster,
+minimaPointsCluster,
+width: image.width,
+height: image.height,
+scale: image.scale,
+});
+currentPercent += percentPerScale;
+progressCallback(currentPercent);
 }
+results.push(keyframes);
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+return results;
+}
+/**
+* Compila datos de tracking usando extractTrackingFeatures (JS puro)
+*/
+async _compileTrack(targetImages, progressCallback) {
+const percentPerImage = 100 / targetImages.length;
+let currentPercent = 0;
+const results = [];
+for (let i = 0; i < targetImages.length; i++) {
+const targetImage = targetImages[i];
+const imageList = buildTrackingImageList(targetImage);
+const percentPerScale = percentPerImage / imageList.length;
+const trackingData = extractTrackingFeatures(imageList, () => {
+currentPercent += percentPerScale;
+progressCallback(currentPercent);
+});
+results.push(trackingData);
+}
+return results;
+}
+/**
+* Método público para compilar tracking (compatibilidad con API anterior)
+* @param {Object} options - Opciones de compilación
+* @param {Function} options.progressCallback - Callback de progreso
+* @param {Array} options.targetImages - Lista de imágenes objetivo
+* @param {number} options.basePercent - Porcentaje base
+* @returns {Promise<Array>} Datos de tracking
+*/
+async compileTrack({ progressCallback, targetImages, basePercent = 0 }) {
+return this._compileTrack(targetImages, (percent) => {
+progressCallback(basePercent + percent * (100 - basePercent) / 100);
 });
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+}
+/**
+* Método público para compilar matching (compatibilidad con API anterior)
+*/
+async compileMatch({ progressCallback, targetImages, basePercent = 0 }) {
+return this._compileMatch(targetImages, (percent) => {
+progressCallback(basePercent + percent * (50 - basePercent) / 100);
+});
+}
+/**
+* Exporta datos compilados en formato binario columnar optimizado
+*/
+exportData() {
+if (!this.data) {
+throw new Error("No hay datos compilados para exportar");
+}
+const dataList = this.data.map((item) => {
+// Optimizamos MatchingData convirtiéndolo a formato columnar
+const matchingData = item.matchingData.map((kf) => this._packKeyframe(kf));
+// Optimizamos TrackingData (Zero-copy layout)
+const trackingData = item.trackingData.map((td) => {
+const count = td.points.length;
+const px = new Float32Array(count);
+const py = new Float32Array(count);
+for (let i = 0; i < count; i++) {
+px[i] = td.points[i].x;
+py[i] = td.points[i].y;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-], [3, 3, 1, 1]), 1, "same");
-const edges = tf.sqrt(tf.add(tf.square(sobelX), tf.square(sobelY)));
-await executeAndWait([sobelX, sobelY, edges]);
-}
+return {
+w: td.width,
+h: td.height,
+s: td.scale,
+px,
+py,
+d: td.data, // Grayscale pixel data (Uint8Array)
+};
+});
+return {
+targetImage: {
+width: item.targetImage.width,
+height: item.targetImage.height,
+},
+trackingData,
+matchingData,
+};
 });
-
-
-
-
-
-
-
-
-
+return msgpack.encode({
+v: CURRENT_VERSION,
+dataList,
+});
+}
+_packKeyframe(kf) {
+return {
+w: kf.width,
+h: kf.height,
+s: kf.scale,
+max: this._columnarize(kf.maximaPoints, kf.maximaPointsCluster),
+min: this._columnarize(kf.minimaPoints, kf.minimaPointsCluster),
+};
+}
+_columnarize(points, tree) {
+const count = points.length;
+const x = new Float32Array(count);
+const y = new Float32Array(count);
+const angle = new Float32Array(count);
+const descriptors = new Uint8Array(count * 84); // 84 bytes per point (FREAK)
+for (let i = 0; i < count; i++) {
+x[i] = points[i].x;
+y[i] = points[i].y;
+angle[i] = points[i].angle;
+descriptors.set(points[i].descriptors, i * 84);
 }
-
-
-
-
+return {
+x,
+y,
+a: angle,
+d: descriptors,
+t: this._compactTree(tree.rootNode),
+};
 }
-
-
+_compactTree(node) {
+if (node.leaf) {
+return [1, node.centerPointIndex || 0, node.pointIndexes];
+}
+return [0, node.centerPointIndex || 0, node.children.map((c) => this._compactTree(c))];
 }
-
-
-
-
-
-
-
-
-this.isServerless = isServerlessEnvironment();
-if (this.isServerless) {
-console.log("🚀 Compilador optimizado para entorno serverless");
+/**
+* Importa datos - Mantiene el formato columnar para máximo rendimiento (Zero-copy)
+*/
+importData(buffer) {
+const content = msgpack.decode(new Uint8Array(buffer));
+if (!content.v || content.v !== CURRENT_VERSION) {
+console.error("Incompatible .mind version. Required: " + CURRENT_VERSION);
+return [];
 }
-//
-
+// Ya no de-columnarizamos aquí. Los motores (Tracker/Matcher)
+// ahora están optimizados para leer directamente de los buffers.
+this.data = content.dataList;
+return this.data;
 }
-
-
-
-
+_unpackKeyframe(kf) {
+return {
+width: kf.w,
+height: kf.h,
+scale: kf.s,
+maximaPoints: this._decolumnarize(kf.max),
+minimaPoints: this._decolumnarize(kf.min),
+maximaPointsCluster: { rootNode: this._expandTree(kf.max.t) },
+minimaPointsCluster: { rootNode: this._expandTree(kf.min.t) },
+};
+}
+_decolumnarize(col) {
+const points = [];
+const count = col.x.length;
+for (let i = 0; i < count; i++) {
+points.push({
+x: col.x[i],
+y: col.y[i],
+angle: col.a[i],
+descriptors: col.d.slice(i * 84, (i + 1) * 84),
+});
 }
-return
+return points;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-// En serverless, siempre usar lotes más pequeños
-// Estrategia adaptativa para tamaño de lote (CPU/GPU)
-let batchSize = 1;
-if (backend === "node") {
-// Calcular tamaño óptimo basado en recursos
-try {
-const cpus = os.cpus().length;
-const freeMem = os.freemem() / 1024 / 1024 / 1024; // GB libres
-// Lógica de batch dinámico:
-// - 1 núcleo: batch 1 (evitar sobrecarga)
-// - 2-4 núcleos: batch 2-4 (balance carga/paralelismo)
-// - >4 núcleos: batch escalable con memoria
-batchSize =
-cpus > 4
-? Math.min(Math.floor(freeMem * 0.5), 8) // 0.5GB por batch
-: Math.min(cpus, 4);
-console.log(`🧠 Batch size calculado: ${batchSize} (${cpus} cores, ${freeMem.toFixed(1)}GB libres)`);
-}
-catch (e) {
-console.warn("⚠️ Error cálculo batch size:", e);
-batchSize = 2; // Fallback: equilibrio seguridad/performance
-}
-}
-else if (this.isServerless) {
-batchSize = 1; // Priorizar seguridad sobre performance
-}
-// Garantizar límites operativos seguros:
-// - Mínimo: Evitar underflow en procesamiento
-// - Máximo: Prevenir OOM (Out Of Memory)
-batchSize = Math.max(1, Math.min(batchSize, 8));
-console.log(`📊 Procesando imágenes en lotes de ${batchSize}`);
-// Solicitar memoria mínima antes de empezar procesamiento intensivo
-if (global.gc) {
-try {
-global.gc();
-}
-catch (e) {
-// Ignorar errores
-}
-}
-// Paralelismo para el procesamiento en lotes
-for (let i = 0; i < targetImages.length; i += batchSize) {
-// Procesar un lote de imágenes
-const batch = targetImages.slice(i, Math.min(i + batchSize, targetImages.length));
-// Imprimir información sobre el procesamiento por lotes
-if (batch.length > 1) {
-console.log(`🔄 Procesando lote ${Math.floor(i / batchSize) + 1}: ${batch.length} imágenes`);
-}
-// Usar tf.engine().startScope() para mejor control de memoria por lote
-tf.engine().startScope();
-try {
-// Procesamiento paralelo de imágenes en el lote
-const batchResults = await Promise.all(batch.map(async (targetImage) => {
-const imageList = buildTrackingImageList(targetImage);
-const percentPerAction = percentPerImage / imageList.length;
-// Usar tf.tidy para liberar memoria automáticamente en cada imagen
-return await tf.tidy(() => {
-// Extraer características con monitoreo de progreso
-const trackingData = extractTrackingFeatures(imageList, (index) => {
-percent += percentPerAction;
-progressCallback(basePercent + percent);
-});
-return trackingData;
-});
-}));
-// Agregar resultados a la lista final
-list.push(...batchResults);
-}
-finally {
-// Asegurar que siempre se cierre el scope para evitar fugas de memoria
-tf.engine().endScope();
-}
-// Liberar memoria entre lotes grandes o al final
-// En serverless, liberar más agresivamente
-if (i % (this.isServerless ? 2 : 5) === 0 || i === targetImages.length - 1) {
-await tf.nextFrame(); // Permitir que el recolector de basura libere memoria
-// Cálculo de presión de memoria adaptativa
-const memoryInfo = tf.memory();
-const totalMem = os.totalmem();
-const freeMem = os.freemem();
-const memPressure = 1 - freeMem / totalMem;
-// Umbrales dinámicos basados en:
-// 1. Tipo de backend (mayor tolerancia en GPU)
-// 2. Presión de memoria actual
-// 3. Entorno de ejecución (serverless vs dedicado)
-const baseThreshold = backend === "webgl" ? 50 : 30;
-const adaptiveThreshold = Math.floor(baseThreshold *
-(1 - Math.min(memPressure, 0.5)) *
-(this.isServerless ? 0.6 : 1) *
-(this.isBackendDedicated ? 1.2 : 1));
-console.log(`🧠 Memoria: ${(freeMem / 1024 / 1024).toFixed(1)}MB libres | ` +
-`Presión: ${(memPressure * 100).toFixed(1)}% | ` +
-`Umbral: ${adaptiveThreshold} tensores`);
-if (memoryInfo.numTensors > adaptiveThreshold) {
-// Estrategia de limpieza diferenciada
-console.log(`🧹 Limpieza ${this.isServerless ? "conservadora" : "agresiva"}: ` +
-`${memoryInfo.numTensors} tensores, ${(memoryInfo.numBytes / 1024 / 1024).toFixed(2)}MB`);
-// Estrategia de limpieza diferenciada:
-// - Serverless: Liberación temprana preventiva
-// - Dedicado: Postergar GC para mejor throughput
-tf.disposeVariables();
-tf.dispose();
-// Forzar recolección de basura en Node.js si está disponible
-if (global.gc) {
-try {
-global.gc();
-}
-catch (e) {
-// Ignorar errores si no está disponible
-}
-}
-}
-}
-}
-// Terminar medición de tiempo
-console.timeEnd("⏱️ Tiempo de compilación de tracking");
-// Liberar toda la memoria restante antes de finalizar
-tf.dispose();
-// Limpiar timeout si existía
-if (compilationTimeout) {
-clearTimeout(compilationTimeout);
-}
-resolve(list);
-}
-catch (error) {
-// Limpiar timeout si existía
-if (compilationTimeout) {
-clearTimeout(compilationTimeout);
-}
-console.error("❌ Error en compilación:", error);
-reject(error);
-}
-});
+_expandTree(node) {
+const isLeaf = node[0] === 1;
+if (isLeaf) {
+return {
+leaf: true,
+centerPointIndex: node[1],
+pointIndexes: node[2],
+};
+}
+return {
+leaf: false,
+centerPointIndex: node[1],
+children: node[2].map((c) => this._expandTree(c)),
+};
+}
+/**
+* Destruye el pool de workers
+*/
+async destroy() {
+if (this.workerPool) {
+await this.workerPool.destroy();
+}
 }
 }
```
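
The new `OfflineCompiler` shown above compiles grayscale targets in two phases (matching via `DetectorLite` plus hierarchical clustering, tracking via `extractTrackingFeatures`) and serializes the result with msgpack in the columnar v3 layout. A minimal usage sketch follows; the import specifier and the synthetic test image are assumptions for illustration, not part of this diff:

```js
// Sketch only: exercising the OfflineCompiler API added in 1.0.3.
// Assumption: the dist file is importable at this specifier (check the
// package's actual exports/entry point). Run as an ES module in Node.js.
import { OfflineCompiler } from "@srsergio/taptapp-ar/dist/compiler/offline-compiler.js";
import { writeFile } from "node:fs/promises";

// Synthetic 256x256 grayscale target: data.length === width * height,
// one of the two layouts compileImageTargets accepts (the other is RGBA).
const width = 256;
const height = 256;
const data = new Uint8Array(width * height).map((_, i) => (i * 7) % 256);

const compiler = new OfflineCompiler();
await compiler.compileImageTargets([{ width, height, data }], (percent) => {
  console.log(`progress: ${percent.toFixed(1)}%`);
});

// exportData() returns a msgpack-encoded Uint8Array (protocol v3, columnar layout).
const buffer = compiler.exportData();
await writeFile("targets.mind", buffer);

// importData() keeps the columnar buffers as-is; the tracker/matcher read them directly.
compiler.importData(buffer);

// Shut down the Node.js worker pool, if one was created.
await compiler.destroy();
```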