@srsergio/taptapp-ar 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +47 -45
- package/dist/compiler/aframe.js +0 -3
- package/dist/compiler/compiler-base.d.ts +3 -7
- package/dist/compiler/compiler-base.js +28 -14
- package/dist/compiler/compiler.js +1 -1
- package/dist/compiler/compiler.worker.js +1 -1
- package/dist/compiler/controller.d.ts +4 -4
- package/dist/compiler/controller.js +4 -5
- package/dist/compiler/controller.worker.js +0 -2
- package/dist/compiler/detector/crop-detector.d.ts +12 -12
- package/dist/compiler/detector/crop-detector.js +0 -2
- package/dist/compiler/detector/detector-lite.d.ts +73 -0
- package/dist/compiler/detector/detector-lite.js +430 -0
- package/dist/compiler/detector/detector.d.ts +20 -21
- package/dist/compiler/detector/detector.js +236 -243
- package/dist/compiler/detector/kernels/cpu/binomialFilter.js +0 -1
- package/dist/compiler/detector/kernels/cpu/computeExtremaAngles.d.ts +1 -1
- package/dist/compiler/detector/kernels/cpu/computeLocalization.js +0 -4
- package/dist/compiler/detector/kernels/cpu/computeOrientationHistograms.js +0 -18
- package/dist/compiler/detector/kernels/cpu/fakeShader.js +1 -1
- package/dist/compiler/detector/kernels/cpu/prune.d.ts +7 -1
- package/dist/compiler/detector/kernels/cpu/prune.js +1 -42
- package/dist/compiler/detector/kernels/webgl/upsampleBilinear.d.ts +1 -1
- package/dist/compiler/detector/kernels/webgl/upsampleBilinear.js +2 -2
- package/dist/compiler/estimation/refine-estimate.js +0 -1
- package/dist/compiler/estimation/utils.d.ts +1 -1
- package/dist/compiler/estimation/utils.js +1 -14
- package/dist/compiler/image-list.js +4 -4
- package/dist/compiler/input-loader.d.ts +4 -5
- package/dist/compiler/input-loader.js +2 -2
- package/dist/compiler/matching/hamming-distance.js +13 -13
- package/dist/compiler/matching/hierarchical-clustering.js +1 -1
- package/dist/compiler/matching/matching.d.ts +20 -4
- package/dist/compiler/matching/matching.js +67 -41
- package/dist/compiler/matching/ransacHomography.js +1 -2
- package/dist/compiler/node-worker.d.ts +1 -0
- package/dist/compiler/node-worker.js +84 -0
- package/dist/compiler/offline-compiler.d.ts +171 -6
- package/dist/compiler/offline-compiler.js +303 -421
- package/dist/compiler/tensorflow-setup.d.ts +0 -1
- package/dist/compiler/tensorflow-setup.js +27 -1
- package/dist/compiler/three.d.ts +7 -12
- package/dist/compiler/three.js +3 -5
- package/dist/compiler/tracker/extract.d.ts +1 -0
- package/dist/compiler/tracker/extract.js +200 -244
- package/dist/compiler/tracker/tracker.d.ts +9 -17
- package/dist/compiler/tracker/tracker.js +13 -18
- package/dist/compiler/utils/cumsum.d.ts +4 -2
- package/dist/compiler/utils/cumsum.js +17 -19
- package/dist/compiler/utils/gpu-compute.d.ts +57 -0
- package/dist/compiler/utils/gpu-compute.js +262 -0
- package/dist/compiler/utils/images.d.ts +4 -4
- package/dist/compiler/utils/images.js +67 -53
- package/dist/compiler/utils/worker-pool.d.ts +13 -0
- package/dist/compiler/utils/worker-pool.js +84 -0
- package/package.json +12 -14
- package/src/compiler/aframe.js +2 -4
- package/src/compiler/compiler-base.js +29 -14
- package/src/compiler/compiler.js +1 -1
- package/src/compiler/compiler.worker.js +1 -1
- package/src/compiler/controller.js +4 -5
- package/src/compiler/controller.worker.js +0 -2
- package/src/compiler/detector/crop-detector.js +0 -2
- package/src/compiler/detector/detector-lite.js +494 -0
- package/src/compiler/detector/detector.js +1052 -1063
- package/src/compiler/detector/kernels/cpu/binomialFilter.js +0 -1
- package/src/compiler/detector/kernels/cpu/computeLocalization.js +0 -4
- package/src/compiler/detector/kernels/cpu/computeOrientationHistograms.js +0 -17
- package/src/compiler/detector/kernels/cpu/fakeShader.js +1 -1
- package/src/compiler/detector/kernels/cpu/prune.js +1 -37
- package/src/compiler/detector/kernels/webgl/upsampleBilinear.js +2 -2
- package/src/compiler/estimation/refine-estimate.js +0 -1
- package/src/compiler/estimation/utils.js +9 -24
- package/src/compiler/image-list.js +4 -4
- package/src/compiler/input-loader.js +2 -2
- package/src/compiler/matching/hamming-distance.js +11 -15
- package/src/compiler/matching/hierarchical-clustering.js +1 -1
- package/src/compiler/matching/matching.js +72 -42
- package/src/compiler/matching/ransacHomography.js +0 -2
- package/src/compiler/node-worker.js +93 -0
- package/src/compiler/offline-compiler.js +339 -504
- package/src/compiler/tensorflow-setup.js +29 -1
- package/src/compiler/three.js +3 -5
- package/src/compiler/tracker/extract.js +211 -267
- package/src/compiler/tracker/tracker.js +13 -22
- package/src/compiler/utils/cumsum.js +17 -19
- package/src/compiler/utils/gpu-compute.js +303 -0
- package/src/compiler/utils/images.js +84 -53
- package/src/compiler/utils/worker-pool.js +89 -0
- package/dist/compiler/estimation/esimate-experiment.d.ts +0 -5
- package/dist/compiler/estimation/esimate-experiment.js +0 -267
- package/dist/compiler/estimation/refine-estimate-experiment.d.ts +0 -6
- package/dist/compiler/estimation/refine-estimate-experiment.js +0 -429
- package/dist/react/AREditor.d.ts +0 -5
- package/dist/react/AREditor.js +0 -159
- package/dist/react/ProgressDialog.d.ts +0 -13
- package/dist/react/ProgressDialog.js +0 -57
- package/src/compiler/estimation/esimate-experiment.js +0 -316
- package/src/compiler/estimation/refine-estimate-experiment.js +0 -512
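
The bulk of the release is a rewrite of the offline compiler: judging by its `+339 -504` line counts, the hunk reproduced below is `package/src/compiler/offline-compiler.js`. The TensorFlow-based pipeline (backend setup, warm-up, batching, and tensor memory management) is replaced by a pure-JavaScript implementation built on `DetectorLite`, `extractTrackingFeatures`, and hierarchical clustering, with an msgpack-encoded columnar export format (protocol v3) and an optional Node.js worker pool. As a rough orientation, the sketch below exercises the public API visible in that hunk; the import path and the sample image setup are assumptions, and the package may expose a different entry point.

```js
// Minimal usage sketch based on the API visible in the hunk below.
// The import path is an assumption; adjust it to the package's real entry point.
import { OfflineCompiler } from "@srsergio/taptapp-ar/src/compiler/offline-compiler.js";

// Each target is a plain object: grayscale (width * height) or RGBA
// (width * height * 4) pixel data plus its dimensions.
const target = { width: 512, height: 512, data: new Uint8Array(512 * 512) };

const compiler = new OfflineCompiler();               // Node worker pool or browser mode
await compiler.compileImageTargets([target], (p) => { // progress reported as 0-100
  console.log(`progress: ${p.toFixed(1)}%`);
});

const buffer = compiler.exportData();  // msgpack-encoded columnar payload (protocol v3)
compiler.importData(buffer);           // round-trips without de-columnarizing
await compiler.destroy();              // tears down the worker pool in Node.js
```
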
```diff
@@ -1,553 +1,388 @@
 /**
- * @fileoverview Compilador Offline
- *
- * Este módulo implementa un
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- * - Sistema de liberación ultra-agresiva de memoria con umbrales dinámicos
- * - Monitoreo continuo del uso de tensores con cleanup automático
- * - Estrategias de scope anidados para control preciso de recursos
- * - Liberación proactiva entre operaciones intensivas
- *
- * 3. Optimizaciones de Rendimiento:
- * - Precalentamiento estratégico del backend para eliminar latencia inicial
- * - Ajustes específicos por backend con configuraciones óptimas por plataforma
- * - Configuraciones especializadas para entornos backend de alto rendimiento
- * - Reducción de precisión selectiva para operaciones no críticas
- *
- * 4. Procesamiento por Lotes:
- * - Sistema adaptativo de tamaño de lotes basado en capacidad de hardware
- * - Paralelización multinivel con control de concurrencia
- * - Control de progreso granular con retroalimentación en tiempo real
- * - Estrategias de división de trabajo para CPUs multi-núcleo
- *
- * 5. Gestión de Recursos:
- * - Timeouts inteligentes con recuperación automática
- * - Liberación proactiva de recursos con GC forzado estratégico
- * - Manejo de errores robusto con recuperación de fallos
- * - Monitoreo de rendimiento en tiempo real
- *
- * @requires tensorflow/tfjs
- * @requires ./compiler-base.js
- * @requires ./image-list.js
- * @requires ./tracker/extract-utils.js
- * @requires ./tensorflow-setup.js
+ * @fileoverview Compilador Offline Optimizado - Sin TensorFlow para máxima velocidad
+ *
+ * Este módulo implementa un compilador de imágenes AR ultrarrápido
+ * que NO depende de TensorFlow, eliminando todos los problemas de
+ * inicialización, bloqueos y compatibilidad.
+ *
+ * Usa JavaScript puro para:
+ * - Extracción de features de tracking (extract.js)
+ * - Detección de features para matching (DetectorLite)
+ * - Clustering jerárquico para features
+ *
+ * Funciona en:
+ * - Node.js (con workers opcionales)
+ * - Browser (sin workers)
  */

-import {
-import { buildTrackingImageList } from "./image-list.js";
+import { buildTrackingImageList, buildImageList } from "./image-list.js";
 import { extractTrackingFeatures } from "./tracker/extract-utils.js";
-import {
-import {
-import
-
-//
-
-
-
-
-//
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-try {
-  console.time("⏱️ Configuración de TensorFlow");
-  const backend = await setupTensorFlow();
-  tensorflowBackend = backend;
-  console.timeEnd("⏱️ Configuración de TensorFlow");
-  return backend;
-} catch (error) {
-  console.error("Error crítico al configurar TensorFlow:", error);
-  return null;
+import { DetectorLite } from "./detector/detector-lite.js";
+import { build as hierarchicalClusteringBuild } from "./matching/hierarchical-clustering.js";
+import * as msgpack from "@msgpack/msgpack";
+
+// Detect environment
+const isNode = typeof process !== "undefined" &&
+  process.versions != null &&
+  process.versions.node != null;
+
+const CURRENT_VERSION = 3; // Protocol v3: High-performance Columnar Binary Format
+
+/**
+ * Compilador offline optimizado sin TensorFlow
+ */
+export class OfflineCompiler {
+  constructor() {
+    this.data = null;
+    this.workerPool = null;
+
+    // Workers solo en Node.js (no en browser)
+    if (isNode) {
+      this._initNodeWorkers();
+    } else {
+      console.log("🌐 OfflineCompiler: Browser mode (no workers)");
     }
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-const
-
-
-
-
-// Optimizaciones específicas según el backend
-const backend = tf.getBackend();
-console.log(`⚙️ Optimizando agresivamente para backend: ${backend}`);
-
-// Entorno serverless necesita configuraciones especiales
-const isServerless = isServerlessEnvironment();
-const isBackendDedicated = !isServerless && process.env.NODE_ENV === "production";
-
-if (isBackendDedicated) {
-  console.log(
-    "🚀🚀 Entorno backend dedicado detectado, aplicando configuraciones de alto rendimiento",
-  );
-
-  // Configuraciones agresivas para backend dedicado
-  tf.ENV.set("CPU_HANDOFF_SIZE_THRESHOLD", 1024 * 1024 * 16); // 16MB - más memoria disponible
-  tf.ENV.set("WEBGL_SIZE_UPLOAD_UNIFORM", 16); // Mayor capacidad de transferencia
-  tf.ENV.set("WEBGL_DELETE_TEXTURE_THRESHOLD", 64); // Más texturas en memoria
-
-  // Configuraciones para maximizar throughput
-  tf.ENV.set("WEBGL_FLUSH_THRESHOLD", 10); // Menos flushes para mejor rendimiento
-  tf.ENV.set("KEEP_INTERMEDIATE_TENSORS", false); // Liberar intermedios agresivamente
-  tf.ENV.set("WEBGL_PACK_BINARY_OPERATIONS", true); // Empaquetar operaciones binarias
-} else if (isServerless) {
-  console.log(
-    "🚀 Entorno serverless detectado, aplicando configuraciones de memoria restrictivas",
-  );
-
-  // En serverless aplicamos configuraciones más conservadoras para memoria
-  tf.ENV.set("CPU_HANDOFF_SIZE_THRESHOLD", 1024 * 1024 * 4); // 4MB
-  tf.ENV.set("WEBGL_SIZE_UPLOAD_UNIFORM", 4);
-  tf.ENV.set("WEBGL_DELETE_TEXTURE_THRESHOLD", 10);
-
-  // Menor precisión para mejor rendimiento
-  tf.ENV.set("WEBGL_RENDER_FLOAT32_ENABLED", false);
+  }
+
+  async _initNodeWorkers() {
+    try {
+      const [os, path, url, workerModule] = await Promise.all([
+        import("os"),
+        import("path"),
+        import("url"),
+        import("./utils/worker-pool.js")
+      ]);
+
+      const __filename = url.fileURLToPath(import.meta.url);
+      const __dirname = path.dirname(__filename);
+      const workerPath = path.join(__dirname, "node-worker.js");
+
+      const numWorkers = Math.min(os.cpus().length, 4);
+      this.workerPool = new workerModule.WorkerPool(workerPath, numWorkers);
+      console.log(`🚀 OfflineCompiler: Node.js mode with ${numWorkers} workers`);
+    } catch (e) {
+      console.log("⚡ OfflineCompiler: Running without workers");
     }
+  }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-// Configuraciones de textura según entorno
-if (isBackendDedicated) {
-  // En backend dedicado, usar valores más agresivos
-  tf.ENV.set("WEBGL_MAX_TEXTURE_SIZE", 8192); // Texturas más grandes
-  tf.ENV.set("WEBGL_MAX_TEXTURES_IN_SHADER", 16); // Más texturas por shader
-} else if (isServerless) {
-  // En serverless, usamos valores más conservadores
-  tf.ENV.set("WEBGL_MAX_TEXTURE_SIZE", 2048);
-  tf.ENV.set("WEBGL_MAX_TEXTURES_IN_SHADER", 8);
-} else {
-  // Entorno normal
-  tf.ENV.set("WEBGL_MAX_TEXTURE_SIZE", 4096);
-  tf.ENV.set("WEBGL_MAX_TEXTURES_IN_SHADER", 12);
+  /**
+   * Compila una lista de imágenes objetivo
+   * @param {Array} images - Lista de imágenes {width, height, data}
+   * @param {Function} progressCallback - Callback de progreso (0-100)
+   * @returns {Promise<Array>} Datos compilados
+   */
+  async compileImageTargets(images, progressCallback) {
+    console.time("⏱️ Compilación total");
+
+    const targetImages = [];
+
+    // Preparar imágenes
+    for (let i = 0; i < images.length; i++) {
+      const img = images[i];
+
+      if (!img || !img.width || !img.height || !img.data) {
+        throw new Error(
+          `Imagen inválida en posición ${i}. Debe tener propiedades width, height y data.`
+        );
       }
-
-//
-
-
-
-
-
-
-
-
-
-
-  ]),
-  )
-) {
-  console.log("🚀 SIMD disponible, habilitando aceleración vectorial");
-  tf.ENV.set("WASM_HAS_SIMD_SUPPORT", true);
-  tf.ENV.set("WASM_HAS_MULTITHREAD_SUPPORT", true);
+
+      // Convertir a escala de grises
+      const greyImageData = new Uint8Array(img.width * img.height);
+
+      if (img.data.length === img.width * img.height) {
+        greyImageData.set(img.data);
+      } else if (img.data.length === img.width * img.height * 4) {
+        for (let j = 0; j < greyImageData.length; j++) {
+          const offset = j * 4;
+          greyImageData[j] = Math.floor(
+            (img.data[offset] + img.data[offset + 1] + img.data[offset + 2]) / 3
+          );
         }
-}
-
+      } else {
+        throw new Error(`Formato de datos de imagen no soportado en posición ${i}`);
       }
+
+      targetImages.push({
+        data: greyImageData,
+        height: img.height,
+        width: img.width,
+      });
     }

-//
-
-
-
-
-
-
-// Detectar si estamos en un entorno de backend dedicado
-
-// Estrategia de precalentamiento adaptativa según entorno
-const warmupStrategy = isBackendDedicated
-  ? "aggressive"
-  : isServerless
-    ? "minimal"
-    : "balanced";
-console.log(`🔥 Aplicando estrategia de precalentamiento: ${warmupStrategy}`);
-
-// Función para ejecutar y esperar operaciones tensores
-const executeAndWait = async (tensors) => {
-  // Esperar a que todas las operaciones se completen
-  await Promise.all(tensors.map((t) => t.data()));
-  // Liberar memoria inmediatamente
-  tf.dispose(tensors);
-};
+    // Fase 1: Matching (50%)
+    console.time("⏱️ Fase Matching");
+    const matchingDataList = await this._compileMatch(targetImages, (percent) => {
+      progressCallback(percent * 0.5);
+    });
+    console.timeEnd("⏱️ Fase Matching");

-//
-
-
-
-const b = tf.tensor([2, 2, 2, 2]);
-const result = a.add(b);
-const mult = a.mul(b);
-const div = a.div(b);
-await executeAndWait([result, mult, div]);
+    // Fase 2: Tracking (50%)
+    console.time("⏱️ Fase Tracking");
+    const trackingDataList = await this._compileTrack(targetImages, (percent) => {
+      progressCallback(50 + percent * 0.5);
     });
+    console.timeEnd("⏱️ Fase Tracking");

-//
-
-
+    // Compilar resultado
+    this.data = targetImages.map((targetImage, i) => ({
+      targetImage: { width: targetImage.width, height: targetImage.height },
+      trackingData: trackingDataList[i],
+      matchingData: matchingDataList[i],
+    }));

-
-await tf.tidy(async () => {
-  // Crear imagen sintética para precalentamiento
-  const image = tf.ones([size, size, 3]);
+    console.timeEnd("⏱️ Compilación total");

-
-
-  const grayscale = image.mean(2, true); // Reducción de canal para escala de grises
-  await executeAndWait([normalized, grayscale]);
+    return this.data;
+  }

-
-
-
-
+  /**
+   * Compila datos de matching usando DetectorLite (JS puro)
+   */
+  async _compileMatch(targetImages, progressCallback) {
+    const percentPerImage = 100 / targetImages.length;
+    let currentPercent = 0;
+
+    const results = [];
+
+    // Procesar secuencialmente para evitar overhead de workers
+    // (los workers son útiles para muchas imágenes, pero añaden latencia)
+    for (let i = 0; i < targetImages.length; i++) {
+      const targetImage = targetImages[i];
+      const imageList = buildImageList(targetImage);
+      const percentPerScale = percentPerImage / imageList.length;
+
+      const keyframes = [];
+
+      for (const image of imageList) {
+        const detector = new DetectorLite(image.width, image.height);
+        const { featurePoints: ps } = detector.detect(image.data);
+
+        const maximaPoints = ps.filter((p) => p.maxima);
+        const minimaPoints = ps.filter((p) => !p.maxima);
+        const maximaPointsCluster = hierarchicalClusteringBuild({ points: maximaPoints });
+        const minimaPointsCluster = hierarchicalClusteringBuild({ points: minimaPoints });
+
+        keyframes.push({
+          maximaPoints,
+          minimaPoints,
+          maximaPointsCluster,
+          minimaPointsCluster,
+          width: image.width,
+          height: image.height,
+          scale: image.scale,
+        });
+
+        currentPercent += percentPerScale;
+        progressCallback(currentPercent);
+      }

-
-
-  // Crear kernel aleatorio para convolución
-  const kernel = tf.randomNormal([kernelSize, kernelSize, 3, filters]);
+      results.push(keyframes);
+    }

-
-
+    return results;
+  }

-
-
-
+  /**
+   * Compila datos de tracking usando extractTrackingFeatures (JS puro)
+   */
+  async _compileTrack(targetImages, progressCallback) {
+    const percentPerImage = 100 / targetImages.length;
+    let currentPercent = 0;

-
-  const activated = tf.relu(convResult);
+    const results = [];

-
-
-
+    for (let i = 0; i < targetImages.length; i++) {
+      const targetImage = targetImages[i];
+      const imageList = buildTrackingImageList(targetImage);
+      const percentPerScale = percentPerImage / imageList.length;

-
-
-
-
-  const cropped = tf.slice(image, [0, 0, 0], [size / 2, size / 2, 3]);
-  await executeAndWait([resized, cropped]);
-
-  // Operaciones de detección de bordes (aproximación)
-  const sobelX = tf.conv2d(
-    grayscale,
-    tf.tensor4d(
-      [
-        [-1, 0, 1],
-        [-2, 0, 2],
-        [-1, 0, 1],
-      ],
-      [3, 3, 1, 1],
-    ),
-    1,
-    "same",
-  );
-  const sobelY = tf.conv2d(
-    grayscale,
-    tf.tensor4d(
-      [
-        [-1, -2, -1],
-        [0, 0, 0],
-        [1, 2, 1],
-      ],
-      [3, 3, 1, 1],
-    ),
-    1,
-    "same",
-  );
-  const edges = tf.sqrt(tf.add(tf.square(sobelX), tf.square(sobelY)));
-  await executeAndWait([sobelX, sobelY, edges]);
-}
-});
+      const trackingData = extractTrackingFeatures(imageList, () => {
+        currentPercent += percentPerScale;
+        progressCallback(currentPercent);
+      });

-
-if (global.gc) {
-  try {
-    global.gc();
-    console.log("♻️ Recolección de basura manual ejecutada");
-  } catch (e) {
-    // Ignorar errores si no está disponible
-  }
+      results.push(trackingData);
     }

-
-const memInfo = tf.memory();
-console.log(
-  `📊 Estado de memoria post-precalentamiento: ${memInfo.numTensors} tensores, ${(memInfo.numBytes / (1024 * 1024)).toFixed(2)}MB`,
-);
-
-console.timeEnd("🔥 Precalentamiento estratégico");
-} catch (error) {
-console.warn("⚠️ No se pudieron aplicar todas las optimizaciones:", error);
+    return results;
   }
-};

-
-
+  /**
+   * Método público para compilar tracking (compatibilidad con API anterior)
+   * @param {Object} options - Opciones de compilación
+   * @param {Function} options.progressCallback - Callback de progreso
+   * @param {Array} options.targetImages - Lista de imágenes objetivo
+   * @param {number} options.basePercent - Porcentaje base
+   * @returns {Promise<Array>} Datos de tracking
+   */
+  async compileTrack({ progressCallback, targetImages, basePercent = 0 }) {
+    return this._compileTrack(targetImages, (percent) => {
+      progressCallback(basePercent + percent * (100 - basePercent) / 100);
+    });
+  }

-
-
-
-
+  /**
+   * Método público para compilar matching (compatibilidad con API anterior)
+   */
+  async compileMatch({ progressCallback, targetImages, basePercent = 0 }) {
+    return this._compileMatch(targetImages, (percent) => {
+      progressCallback(basePercent + percent * (50 - basePercent) / 100);
+    });
+  }

-
-
-
-
+  /**
+   * Exporta datos compilados en formato binario columnar optimizado
+   */
+  exportData() {
+    if (!this.data) {
+      throw new Error("No hay datos compilados para exportar");
     }

-
-
+    const dataList = this.data.map((item) => {
+      // Optimizamos MatchingData convirtiéndolo a formato columnar
+      const matchingData = item.matchingData.map((kf) => this._packKeyframe(kf));
+
+      // Optimizamos TrackingData (Zero-copy layout)
+      const trackingData = item.trackingData.map((td) => {
+        const count = td.points.length;
+        const px = new Float32Array(count);
+        const py = new Float32Array(count);
+        for (let i = 0; i < count; i++) {
+          px[i] = td.points[i].x;
+          py[i] = td.points[i].y;
+        }
+        return {
+          w: td.width,
+          h: td.height,
+          s: td.scale,
+          px,
+          py,
+          d: td.data, // Grayscale pixel data (Uint8Array)
+        };
+      });
+
+      return {
+        targetImage: {
+          width: item.targetImage.width,
+          height: item.targetImage.height,
+        },
+        trackingData,
+        matchingData,
+      };
+    });
+
+    return msgpack.encode({
+      v: CURRENT_VERSION,
+      dataList,
+    });
   }

-
-
-
-
-
-
+  _packKeyframe(kf) {
+    return {
+      w: kf.width,
+      h: kf.height,
+      s: kf.scale,
+      max: this._columnarize(kf.maximaPoints, kf.maximaPointsCluster),
+      min: this._columnarize(kf.minimaPoints, kf.minimaPointsCluster),
+    };
   }

-
-
-
-
-
-
-
-
-
-
-
-
-
-  ),
-);
-}, timeoutSeconds * 1000);
-}
+  _columnarize(points, tree) {
+    const count = points.length;
+    const x = new Float32Array(count);
+    const y = new Float32Array(count);
+    const angle = new Float32Array(count);
+    const descriptors = new Uint8Array(count * 84); // 84 bytes per point (FREAK)
+
+    for (let i = 0; i < count; i++) {
+      x[i] = points[i].x;
+      y[i] = points[i].y;
+      angle[i] = points[i].angle;
+      descriptors.set(points[i].descriptors, i * 84);
+    }

-
-
-
-
-
-
-
-
-let percent = 0;
-const list = [];
-
-console.log(`🧮 Compilando con backend: ${backend}`);
-
-// Optimizar el tamaño de lote según el backend disponible
-// En serverless, siempre usar lotes más pequeños
-// Estrategia adaptativa para tamaño de lote (CPU/GPU)
-let batchSize = 1;
-if (backend === "node") {
-  // Calcular tamaño óptimo basado en recursos
-  try {
-    const cpus = os.cpus().length;
-    const freeMem = os.freemem() / 1024 / 1024 / 1024; // GB libres
-
-    // Lógica de batch dinámico:
-    // - 1 núcleo: batch 1 (evitar sobrecarga)
-    // - 2-4 núcleos: batch 2-4 (balance carga/paralelismo)
-    // - >4 núcleos: batch escalable con memoria
-    batchSize =
-      cpus > 4
-        ? Math.min(Math.floor(freeMem * 0.5), 8) // 0.5GB por batch
-        : Math.min(cpus, 4);
-
-    console.log(
-      `🧠 Batch size calculado: ${batchSize} (${cpus} cores, ${freeMem.toFixed(1)}GB libres)`,
-    );
-  } catch (e) {
-    console.warn("⚠️ Error cálculo batch size:", e);
-    batchSize = 2; // Fallback: equilibrio seguridad/performance
-  }
-} else if (this.isServerless) {
-  batchSize = 1; // Priorizar seguridad sobre performance
-}
+    return {
+      x,
+      y,
+      a: angle,
+      d: descriptors,
+      t: this._compactTree(tree.rootNode),
+    };
+  }

-
-
-
-
+  _compactTree(node) {
+    if (node.leaf) {
+      return [1, node.centerPointIndex || 0, node.pointIndexes];
+    }
+    return [0, node.centerPointIndex || 0, node.children.map((c) => this._compactTree(c))];
+  }

-
+  /**
+   * Importa datos - Mantiene el formato columnar para máximo rendimiento (Zero-copy)
+   */
+  importData(buffer) {
+    const content = msgpack.decode(new Uint8Array(buffer));

-
-
-
-
-} catch (e) {
-  // Ignorar errores
-}
-}
+    if (!content.v || content.v !== CURRENT_VERSION) {
+      console.error("Incompatible .mind version. Required: " + CURRENT_VERSION);
+      return [];
+    }

-
-
-
-  const batch = targetImages.slice(i, Math.min(i + batchSize, targetImages.length));
-
-  // Imprimir información sobre el procesamiento por lotes
-  if (batch.length > 1) {
-    console.log(
-      `🔄 Procesando lote ${Math.floor(i / batchSize) + 1}: ${batch.length} imágenes`,
-    );
-  }
-
-  // Usar tf.engine().startScope() para mejor control de memoria por lote
-  tf.engine().startScope();
-
-  try {
-    // Procesamiento paralelo de imágenes en el lote
-    const batchResults = await Promise.all(
-      batch.map(async (targetImage) => {
-        const imageList = buildTrackingImageList(targetImage);
-        const percentPerAction = percentPerImage / imageList.length;
-
-        // Usar tf.tidy para liberar memoria automáticamente en cada imagen
-        return await tf.tidy(() => {
-          // Extraer características con monitoreo de progreso
-          const trackingData = extractTrackingFeatures(imageList, (index) => {
-            percent += percentPerAction;
-            progressCallback(basePercent + percent);
-          });
-
-          return trackingData;
-        });
-      }),
-    );
-
-    // Agregar resultados a la lista final
-    list.push(...batchResults);
-  } finally {
-    // Asegurar que siempre se cierre el scope para evitar fugas de memoria
-    tf.engine().endScope();
-  }
-
-  // Liberar memoria entre lotes grandes o al final
-  // En serverless, liberar más agresivamente
-  if (i % (this.isServerless ? 2 : 5) === 0 || i === targetImages.length - 1) {
-    await tf.nextFrame(); // Permitir que el recolector de basura libere memoria
-
-    // Cálculo de presión de memoria adaptativa
-    const memoryInfo = tf.memory();
-    const totalMem = os.totalmem();
-    const freeMem = os.freemem();
-    const memPressure = 1 - freeMem / totalMem;
-
-    // Umbrales dinámicos basados en:
-    // 1. Tipo de backend (mayor tolerancia en GPU)
-    // 2. Presión de memoria actual
-    // 3. Entorno de ejecución (serverless vs dedicado)
-    const baseThreshold = backend === "webgl" ? 50 : 30;
-    const adaptiveThreshold = Math.floor(
-      baseThreshold *
-        (1 - Math.min(memPressure, 0.5)) *
-        (this.isServerless ? 0.6 : 1) *
-        (this.isBackendDedicated ? 1.2 : 1),
-    );
-
-    console.log(
-      `🧠 Memoria: ${(freeMem / 1024 / 1024).toFixed(1)}MB libres | ` +
-        `Presión: ${(memPressure * 100).toFixed(1)}% | ` +
-        `Umbral: ${adaptiveThreshold} tensores`,
-    );
-
-    if (memoryInfo.numTensors > adaptiveThreshold) {
-      // Estrategia de limpieza diferenciada
-      console.log(
-        `🧹 Limpieza ${this.isServerless ? "conservadora" : "agresiva"}: ` +
-          `${memoryInfo.numTensors} tensores, ${(memoryInfo.numBytes / 1024 / 1024).toFixed(2)}MB`,
-      );
-
-      // Estrategia de limpieza diferenciada:
-      // - Serverless: Liberación temprana preventiva
-      // - Dedicado: Postergar GC para mejor throughput
-      tf.disposeVariables();
-      tf.dispose();
-
-      // Forzar recolección de basura en Node.js si está disponible
-      if (global.gc) {
-        try {
-          global.gc();
-        } catch (e) {
-          // Ignorar errores si no está disponible
-        }
-      }
-    }
-  }
-}
+    // Ya no de-columnarizamos aquí. Los motores (Tracker/Matcher)
+    // ahora están optimizados para leer directamente de los buffers.
+    this.data = content.dataList;

-
-
+    return this.data;
+  }

-
-
+  _unpackKeyframe(kf) {
+    return {
+      width: kf.w,
+      height: kf.h,
+      scale: kf.s,
+      maximaPoints: this._decolumnarize(kf.max),
+      minimaPoints: this._decolumnarize(kf.min),
+      maximaPointsCluster: { rootNode: this._expandTree(kf.max.t) },
+      minimaPointsCluster: { rootNode: this._expandTree(kf.min.t) },
+    };
+  }

-
-
-
-
+  _decolumnarize(col) {
+    const points = [];
+    const count = col.x.length;
+    for (let i = 0; i < count; i++) {
+      points.push({
+        x: col.x[i],
+        y: col.y[i],
+        angle: col.a[i],
+        descriptors: col.d.slice(i * 84, (i + 1) * 84),
+      });
+    }
+    return points;
+  }

-
-
-
-
-
-
+  _expandTree(node) {
+    const isLeaf = node[0] === 1;
+    if (isLeaf) {
+      return {
+        leaf: true,
+        centerPointIndex: node[1],
+        pointIndexes: node[2],
+      };
+    }
+    return {
+      leaf: false,
+      centerPointIndex: node[1],
+      children: node[2].map((c) => this._expandTree(c)),
+    };
+  }

-
-
-
-
+  /**
+   * Destruye el pool de workers
+   */
+  async destroy() {
+    if (this.workerPool) {
+      await this.workerPool.destroy();
+    }
   }
 }
```