@srsergio/taptapp-ar 1.1.1 → 1.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/compiler/node-worker.js +1 -197
- package/dist/compiler/offline-compiler.js +1 -207
- package/dist/core/constants.js +1 -38
- package/dist/core/detector/crop-detector.js +1 -88
- package/dist/core/detector/detector-lite.js +1 -455
- package/dist/core/detector/freak.js +1 -89
- package/dist/core/estimation/estimate.js +1 -16
- package/dist/core/estimation/estimator.js +1 -30
- package/dist/core/estimation/morph-refinement.js +1 -116
- package/dist/core/estimation/non-rigid-refine.js +1 -70
- package/dist/core/estimation/pnp-solver.js +1 -109
- package/dist/core/estimation/refine-estimate.js +1 -311
- package/dist/core/estimation/utils.js +1 -67
- package/dist/core/features/auto-rotation-feature.js +1 -30
- package/dist/core/features/crop-detection-feature.js +1 -26
- package/dist/core/features/feature-base.js +1 -1
- package/dist/core/features/feature-manager.js +1 -55
- package/dist/core/features/one-euro-filter-feature.js +1 -44
- package/dist/core/features/temporal-filter-feature.js +1 -57
- package/dist/core/image-list.js +1 -54
- package/dist/core/input-loader.js +1 -87
- package/dist/core/matching/hamming-distance.js +1 -66
- package/dist/core/matching/hdc.js +1 -102
- package/dist/core/matching/hierarchical-clustering.js +1 -130
- package/dist/core/matching/hough.js +1 -170
- package/dist/core/matching/matcher.js +1 -66
- package/dist/core/matching/matching.js +1 -401
- package/dist/core/matching/ransacHomography.js +1 -132
- package/dist/core/perception/bio-inspired-engine.js +1 -232
- package/dist/core/perception/foveal-attention.js +1 -280
- package/dist/core/perception/index.js +1 -17
- package/dist/core/perception/predictive-coding.js +1 -278
- package/dist/core/perception/saccadic-controller.js +1 -269
- package/dist/core/perception/saliency-map.js +1 -254
- package/dist/core/perception/scale-orchestrator.js +1 -68
- package/dist/core/protocol.js +1 -254
- package/dist/core/tracker/extract-utils.js +1 -29
- package/dist/core/tracker/extract.js +1 -306
- package/dist/core/tracker/tracker.js +1 -352
- package/dist/core/utils/cumsum.js +1 -37
- package/dist/core/utils/delaunay.js +1 -125
- package/dist/core/utils/geometry.js +1 -101
- package/dist/core/utils/gpu-compute.js +1 -231
- package/dist/core/utils/homography.js +1 -138
- package/dist/core/utils/images.js +1 -108
- package/dist/core/utils/lsh-binarizer.js +1 -37
- package/dist/core/utils/lsh-direct.js +1 -76
- package/dist/core/utils/projection.js +1 -51
- package/dist/core/utils/randomizer.js +1 -25
- package/dist/core/utils/worker-pool.js +1 -89
- package/dist/index.js +1 -7
- package/dist/libs/one-euro-filter.js +1 -70
- package/dist/react/TaptappAR.js +1 -151
- package/dist/react/types.js +1 -16
- package/dist/react/use-ar.js +1 -118
- package/dist/runtime/aframe.js +1 -272
- package/dist/runtime/bio-inspired-controller.js +1 -358
- package/dist/runtime/controller.js +1 -592
- package/dist/runtime/controller.worker.js +1 -93
- package/dist/runtime/index.js +1 -5
- package/dist/runtime/three.js +1 -304
- package/dist/runtime/track.js +1 -381
- package/package.json +10 -4
|
@@ -1,67 +1 @@
|
|
|
1
|
-
const buildModelViewProjectionTransform
|
|
2
|
-
// assume the projectTransform has the following format:
|
|
3
|
-
// [[fx, 0, cx],
|
|
4
|
-
// [0, fy, cy]
|
|
5
|
-
// [0, 0, 1]]
|
|
6
|
-
const modelViewProjectionTransform = [
|
|
7
|
-
[
|
|
8
|
-
projectionTransform[0][0] * modelViewTransform[0][0] +
|
|
9
|
-
projectionTransform[0][2] * modelViewTransform[2][0],
|
|
10
|
-
projectionTransform[0][0] * modelViewTransform[0][1] +
|
|
11
|
-
projectionTransform[0][2] * modelViewTransform[2][1],
|
|
12
|
-
projectionTransform[0][0] * modelViewTransform[0][2] +
|
|
13
|
-
projectionTransform[0][2] * modelViewTransform[2][2],
|
|
14
|
-
projectionTransform[0][0] * modelViewTransform[0][3] +
|
|
15
|
-
projectionTransform[0][2] * modelViewTransform[2][3],
|
|
16
|
-
],
|
|
17
|
-
[
|
|
18
|
-
projectionTransform[1][1] * modelViewTransform[1][0] +
|
|
19
|
-
projectionTransform[1][2] * modelViewTransform[2][0],
|
|
20
|
-
projectionTransform[1][1] * modelViewTransform[1][1] +
|
|
21
|
-
projectionTransform[1][2] * modelViewTransform[2][1],
|
|
22
|
-
projectionTransform[1][1] * modelViewTransform[1][2] +
|
|
23
|
-
projectionTransform[1][2] * modelViewTransform[2][2],
|
|
24
|
-
projectionTransform[1][1] * modelViewTransform[1][3] +
|
|
25
|
-
projectionTransform[1][2] * modelViewTransform[2][3],
|
|
26
|
-
],
|
|
27
|
-
[
|
|
28
|
-
modelViewTransform[2][0],
|
|
29
|
-
modelViewTransform[2][1],
|
|
30
|
-
modelViewTransform[2][2],
|
|
31
|
-
modelViewTransform[2][3],
|
|
32
|
-
],
|
|
33
|
-
];
|
|
34
|
-
return modelViewProjectionTransform;
|
|
35
|
-
/*
|
|
36
|
-
// this is the full computation if the projectTransform does not look like the expected format, but more computations
|
|
37
|
-
//
|
|
38
|
-
const modelViewProjectionTransform = [[],[],[]];
|
|
39
|
-
for (let j = 0; j < 3; j++ ) {
|
|
40
|
-
for (let i = 0; i < 4; i++) {
|
|
41
|
-
modelViewProjectionTransform[j][i] = projectionTransform[j][0] * modelViewTransform[0][i]
|
|
42
|
-
+ projectionTransform[j][1] * modelViewTransform[1][i]
|
|
43
|
-
+ projectionTransform[j][2] * modelViewTransform[2][i];
|
|
44
|
-
}
|
|
45
|
-
}
|
|
46
|
-
return modelViewProjectionTransform;
|
|
47
|
-
*/
|
|
48
|
-
};
|
|
49
|
-
const applyModelViewProjectionTransform = (modelViewProjectionTransform, x, y, _z) => {
|
|
50
|
-
// assume z is zero
|
|
51
|
-
const ux = modelViewProjectionTransform[0][0] * x +
|
|
52
|
-
modelViewProjectionTransform[0][1] * y +
|
|
53
|
-
modelViewProjectionTransform[0][3];
|
|
54
|
-
const uy = modelViewProjectionTransform[1][0] * x +
|
|
55
|
-
modelViewProjectionTransform[1][1] * y +
|
|
56
|
-
modelViewProjectionTransform[1][3];
|
|
57
|
-
const uz = modelViewProjectionTransform[2][0] * x +
|
|
58
|
-
modelViewProjectionTransform[2][1] * y +
|
|
59
|
-
modelViewProjectionTransform[2][3];
|
|
60
|
-
return { x: ux, y: uy, z: uz };
|
|
61
|
-
};
|
|
62
|
-
const computeScreenCoordiate = (modelViewProjectionTransform, x, y, z) => {
|
|
63
|
-
const { x: ux, y: uy, z: uz, } = applyModelViewProjectionTransform(modelViewProjectionTransform, x, y, z);
|
|
64
|
-
//if( Math.abs(uz) < 0.000001 ) return null;
|
|
65
|
-
return { x: ux / uz, y: uy / uz };
|
|
66
|
-
};
|
|
67
|
-
export { buildModelViewProjectionTransform, applyModelViewProjectionTransform, computeScreenCoordiate, };
|
|
1
|
+
// Combines a camera projection matrix with a 3x4 model-view transform into a
// 3x4 model-view-projection matrix. Assumes the projection matrix has the
// pinhole form [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] (zero entries are skipped).
const buildModelViewProjectionTransform = (projectionTransform, modelViewTransform) => {
  const p = projectionTransform;
  const m = modelViewTransform;
  const row0 = [];
  const row1 = [];
  for (let col = 0; col < 4; col++) {
    // fx * row0 + cx * row2  /  fy * row1 + cy * row2
    row0.push(p[0][0] * m[0][col] + p[0][2] * m[2][col]);
    row1.push(p[1][1] * m[1][col] + p[1][2] * m[2][col]);
  }
  // Third row of the projection is [0, 0, 1], so the MVP third row is the
  // model-view third row unchanged.
  return [row0, row1, [m[2][0], m[2][1], m[2][2], m[2][3]]];
};

// Applies the MVP transform to a planar point; z is assumed to be zero, so the
// matrix's z column never contributes (the fourth parameter is ignored).
const applyModelViewProjectionTransform = (modelViewProjectionTransform, x, y, _z) => {
  const t = modelViewProjectionTransform;
  return {
    x: t[0][0] * x + t[0][1] * y + t[0][3],
    y: t[1][0] * x + t[1][1] * y + t[1][3],
    z: t[2][0] * x + t[2][1] * y + t[2][3],
  };
};

// Projects a planar point to screen coordinates via the perspective divide.
// (Exported name keeps the historical spelling "Coordiate" for compatibility.)
const computeScreenCoordiate = (modelViewProjectionTransform, x, y, z) => {
  const { x: ux, y: uy, z: uz } = applyModelViewProjectionTransform(modelViewProjectionTransform, x, y, z);
  return { x: ux / uz, y: uy / uz };
};

export { buildModelViewProjectionTransform, applyModelViewProjectionTransform, computeScreenCoordiate };
|
|
@@ -1,30 +1 @@
|
|
|
1
|
-
export class AutoRotationFeature {
|
|
2
|
-
id = "auto-rotation";
|
|
3
|
-
name = "Auto Rotation Matrix";
|
|
4
|
-
description = "Automatically adjusts the world matrix if the input video is rotated (e.g. portrait mode).";
|
|
5
|
-
enabled = true;
|
|
6
|
-
inputWidth = 0;
|
|
7
|
-
inputHeight = 0;
|
|
8
|
-
init(context) {
|
|
9
|
-
this.inputWidth = context.inputWidth;
|
|
10
|
-
this.inputHeight = context.inputHeight;
|
|
11
|
-
}
|
|
12
|
-
filterWorldMatrix(targetIndex, worldMatrix) {
|
|
13
|
-
if (!this.enabled)
|
|
14
|
-
return worldMatrix;
|
|
15
|
-
// Check if input is rotated (this logic might need the actual current input dimensions)
|
|
16
|
-
// For now, we'll assume the controller passes the 'isRotated' info or we detect it
|
|
17
|
-
// But since this is a matrix post-process, we can just apply it if needed.
|
|
18
|
-
return worldMatrix;
|
|
19
|
-
}
|
|
20
|
-
// We might need a way to pass the 'currentInput' to the feature.
|
|
21
|
-
// Actually, the controller can just call this if it detects rotation.
|
|
22
|
-
rotate(m) {
|
|
23
|
-
return [
|
|
24
|
-
-m[1], m[0], m[2], m[3],
|
|
25
|
-
-m[5], m[4], m[6], m[7],
|
|
26
|
-
-m[9], m[8], m[10], m[11],
|
|
27
|
-
-m[13], m[12], m[14], m[15],
|
|
28
|
-
];
|
|
29
|
-
}
|
|
30
|
-
}
|
|
1
|
+
/**
 * Feature that can compensate the world matrix when the input video is
 * rotated (e.g. portrait mode). The matrix filter is currently a pass-through;
 * rotation is applied explicitly via rotate() by the controller.
 */
export class AutoRotationFeature {
  id = "auto-rotation";
  name = "Auto Rotation Matrix";
  description = "Automatically adjusts the world matrix if the input video is rotated (e.g. portrait mode).";
  enabled = true;
  inputWidth = 0;
  inputHeight = 0;

  /** Captures the input dimensions from the controller context. */
  init(context) {
    this.inputWidth = context.inputWidth;
    this.inputHeight = context.inputHeight;
  }

  /** Currently returns the matrix unchanged regardless of the enabled flag. */
  filterWorldMatrix(targetIndex, worldMatrix) {
    return worldMatrix;
  }

  /**
   * Rotates a flat 4x4 matrix 90 degrees by swapping each row's first two
   * components and negating the new first component.
   */
  rotate(matrix) {
    const rotated = [];
    for (let row = 0; row < 4; row++) {
      const base = row * 4;
      rotated.push(-matrix[base + 1], matrix[base], matrix[base + 2], matrix[base + 3]);
    }
    return rotated;
  }
}
|
|
@@ -1,26 +1 @@
|
|
|
1
|
-
import
|
|
2
|
-
export class CropDetectionFeature {
|
|
3
|
-
id = "crop-detection";
|
|
4
|
-
name = "Crop Detection";
|
|
5
|
-
description = "Optimizes detection by focusing on areas with motion, reducing CPU usage.";
|
|
6
|
-
enabled = true;
|
|
7
|
-
cropDetector = null;
|
|
8
|
-
debugMode = false;
|
|
9
|
-
init(context) {
|
|
10
|
-
this.debugMode = context.debugMode;
|
|
11
|
-
this.cropDetector = new CropDetector(context.inputWidth, context.inputHeight, this.debugMode);
|
|
12
|
-
}
|
|
13
|
-
detect(inputData, isMoving = true) {
|
|
14
|
-
if (!this.enabled || !this.cropDetector) {
|
|
15
|
-
// Fallback to full detection if disabled?
|
|
16
|
-
// Actually CropDetector.detect is just full detection.
|
|
17
|
-
// We'll expose the methods here.
|
|
18
|
-
}
|
|
19
|
-
if (isMoving && this.enabled) {
|
|
20
|
-
return this.cropDetector.detectMoving(inputData);
|
|
21
|
-
}
|
|
22
|
-
else {
|
|
23
|
-
return this.cropDetector.detect(inputData);
|
|
24
|
-
}
|
|
25
|
-
}
|
|
26
|
-
}
|
|
1
|
+
import { CropDetector } from "../detector/crop-detector.js";

/**
 * Feature wrapper around CropDetector: focuses detection on regions with
 * motion to reduce CPU usage.
 */
export class CropDetectionFeature {
  id = "crop-detection";
  name = "Crop Detection";
  description = "Optimizes detection by focusing on areas with motion, reducing CPU usage.";
  enabled = true;
  cropDetector = null;
  debugMode = false;

  /** Creates the underlying CropDetector sized to the input stream. */
  init(context) {
    this.debugMode = context.debugMode;
    this.cropDetector = new CropDetector(context.inputWidth, context.inputHeight, this.debugMode);
  }

  /**
   * Runs detection. When enabled and the scene is moving, uses the cheaper
   * motion-focused path; otherwise falls back to full-frame detection.
   * NOTE(review): assumes init() has run — cropDetector is dereferenced
   * unconditionally, matching the original behavior.
   */
  detect(inputData, isMoving = true) {
    if (isMoving && this.enabled) {
      return this.cropDetector.detectMoving(inputData);
    }
    return this.cropDetector.detect(inputData);
  }
}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
export
|
|
1
|
+
// Intentionally empty runtime module: the export marks this file as an ES module.
// NOTE(review): the diff shows the build reduced this file to a bare export —
// presumably the original feature-base source held only type/interface
// declarations erased at compile time; confirm against the source.
export{};
|
|
@@ -1,55 +1 @@
|
|
|
1
|
-
export class FeatureManager {
|
|
2
|
-
features = [];
|
|
3
|
-
addFeature(feature) {
|
|
4
|
-
this.features.push(feature);
|
|
5
|
-
}
|
|
6
|
-
getFeature(id) {
|
|
7
|
-
return this.features.find(f => f.id === id);
|
|
8
|
-
}
|
|
9
|
-
init(context) {
|
|
10
|
-
for (const feature of this.features) {
|
|
11
|
-
if (feature.enabled && feature.init) {
|
|
12
|
-
feature.init(context);
|
|
13
|
-
}
|
|
14
|
-
}
|
|
15
|
-
}
|
|
16
|
-
beforeProcess(inputData) {
|
|
17
|
-
for (const feature of this.features) {
|
|
18
|
-
if (feature.enabled && feature.beforeProcess) {
|
|
19
|
-
feature.beforeProcess(inputData);
|
|
20
|
-
}
|
|
21
|
-
}
|
|
22
|
-
}
|
|
23
|
-
applyWorldMatrixFilters(targetIndex, worldMatrix, context) {
|
|
24
|
-
let result = worldMatrix;
|
|
25
|
-
for (const feature of this.features) {
|
|
26
|
-
if (feature.enabled && feature.filterWorldMatrix) {
|
|
27
|
-
result = feature.filterWorldMatrix(targetIndex, result, context);
|
|
28
|
-
}
|
|
29
|
-
}
|
|
30
|
-
return result;
|
|
31
|
-
}
|
|
32
|
-
shouldShow(targetIndex, isTracking) {
|
|
33
|
-
let show = isTracking;
|
|
34
|
-
for (const feature of this.features) {
|
|
35
|
-
if (feature.enabled && feature.shouldShow) {
|
|
36
|
-
show = feature.shouldShow(targetIndex, isTracking);
|
|
37
|
-
}
|
|
38
|
-
}
|
|
39
|
-
return show;
|
|
40
|
-
}
|
|
41
|
-
notifyUpdate(data) {
|
|
42
|
-
for (const feature of this.features) {
|
|
43
|
-
if (feature.enabled && feature.onUpdate) {
|
|
44
|
-
feature.onUpdate(data);
|
|
45
|
-
}
|
|
46
|
-
}
|
|
47
|
-
}
|
|
48
|
-
dispose() {
|
|
49
|
-
for (const feature of this.features) {
|
|
50
|
-
if (feature.dispose) {
|
|
51
|
-
feature.dispose();
|
|
52
|
-
}
|
|
53
|
-
}
|
|
54
|
-
}
|
|
55
|
-
}
|
|
1
|
+
/**
 * Central registry for pluggable features. Lifecycle hooks are fanned out to
 * every registered feature that is enabled and implements the hook.
 */
export class FeatureManager {
  features = [];

  /** Registers a feature plug-in. */
  addFeature(feature) {
    this.features.push(feature);
  }

  /** Returns the feature with the given id, or undefined if not registered. */
  getFeature(id) {
    return this.features.find((candidate) => candidate.id === id);
  }

  /** Initializes every enabled feature that implements init(). */
  init(context) {
    for (const feature of this.features) {
      if (!feature.enabled || !feature.init) continue;
      feature.init(context);
    }
  }

  /** Gives enabled features a look at the raw input before processing. */
  beforeProcess(inputData) {
    for (const feature of this.features) {
      if (!feature.enabled || !feature.beforeProcess) continue;
      feature.beforeProcess(inputData);
    }
  }

  /** Pipes the world matrix through each enabled filter in registration order. */
  applyWorldMatrixFilters(targetIndex, worldMatrix, context) {
    let filtered = worldMatrix;
    for (const feature of this.features) {
      if (!feature.enabled || !feature.filterWorldMatrix) continue;
      filtered = feature.filterWorldMatrix(targetIndex, filtered, context);
    }
    return filtered;
  }

  /**
   * Asks each enabled feature whether the target should be shown.
   * Note: every feature receives the raw isTracking flag (not the previous
   * feature's answer); the last enabled feature's verdict wins.
   */
  shouldShow(targetIndex, isTracking) {
    let visible = isTracking;
    for (const feature of this.features) {
      if (!feature.enabled || !feature.shouldShow) continue;
      visible = feature.shouldShow(targetIndex, isTracking);
    }
    return visible;
  }

  /** Broadcasts an event to every enabled feature implementing onUpdate(). */
  notifyUpdate(data) {
    for (const feature of this.features) {
      if (!feature.enabled || !feature.onUpdate) continue;
      feature.onUpdate(data);
    }
  }

  /** Disposes every feature that implements dispose(), enabled or not. */
  dispose() {
    for (const feature of this.features) {
      if (feature.dispose) {
        feature.dispose();
      }
    }
  }
}
|
|
@@ -1,44 +1 @@
|
|
|
1
|
-
import
|
|
2
|
-
export class OneEuroFilterFeature {
|
|
3
|
-
id = "one-euro-filter";
|
|
4
|
-
name = "One Euro Filter";
|
|
5
|
-
description = "Smooths the tracking matrix to reduce jitter using a One Euro Filter.";
|
|
6
|
-
enabled = true;
|
|
7
|
-
filters = [];
|
|
8
|
-
minCutOff;
|
|
9
|
-
beta;
|
|
10
|
-
constructor(minCutOff = 0.5, beta = 0.1) {
|
|
11
|
-
this.minCutOff = minCutOff;
|
|
12
|
-
this.beta = beta;
|
|
13
|
-
}
|
|
14
|
-
init(context) {
|
|
15
|
-
// We'll initialize filters lazily or based on target count if known
|
|
16
|
-
}
|
|
17
|
-
getFilter(targetIndex) {
|
|
18
|
-
if (!this.filters[targetIndex]) {
|
|
19
|
-
this.filters[targetIndex] = new OneEuroFilter({
|
|
20
|
-
minCutOff: this.minCutOff,
|
|
21
|
-
beta: this.beta
|
|
22
|
-
});
|
|
23
|
-
}
|
|
24
|
-
return this.filters[targetIndex];
|
|
25
|
-
}
|
|
26
|
-
filterWorldMatrix(targetIndex, worldMatrix, context) {
|
|
27
|
-
if (!this.enabled)
|
|
28
|
-
return worldMatrix;
|
|
29
|
-
const filter = this.getFilter(targetIndex);
|
|
30
|
-
const stability = context?.stability ?? 1.0;
|
|
31
|
-
// Dynamic Cutoff: If points are very stable (1.0), use higher cutoff (less responsiveness loss).
|
|
32
|
-
// If points are unstable (0.3), use much lower cutoff (heavy smoothing).
|
|
33
|
-
// We use a squared curve for even more aggressive suppression of jitter on unstable points.
|
|
34
|
-
const dynamicMinCutOff = this.minCutOff * (0.05 + Math.pow(stability, 2) * 0.95);
|
|
35
|
-
filter.minCutOff = dynamicMinCutOff;
|
|
36
|
-
filter.beta = this.beta;
|
|
37
|
-
return filter.filter(Date.now(), worldMatrix);
|
|
38
|
-
}
|
|
39
|
-
onUpdate(data) {
|
|
40
|
-
if (data.type === "reset" && data.targetIndex !== undefined) {
|
|
41
|
-
this.filters[data.targetIndex]?.reset();
|
|
42
|
-
}
|
|
43
|
-
}
|
|
44
|
-
}
|
|
1
|
+
import { OneEuroFilter } from "../../libs/one-euro-filter.js";

/**
 * Feature that smooths each target's world matrix with a One Euro Filter.
 * A per-target filter is created lazily; its cutoff adapts to point stability.
 */
export class OneEuroFilterFeature {
  id = "one-euro-filter";
  name = "One Euro Filter";
  description = "Smooths the tracking matrix to reduce jitter using a One Euro Filter.";
  enabled = true;
  filters = [];
  minCutOff;
  beta;

  constructor(minCutOff = 0.5, beta = 0.1) {
    this.minCutOff = minCutOff;
    this.beta = beta;
  }

  init(_context) {
    // Filters are created lazily per target in getFilter(); nothing to do here.
  }

  /** Returns (creating on first use) the filter for the given target. */
  getFilter(targetIndex) {
    if (!this.filters[targetIndex]) {
      this.filters[targetIndex] = new OneEuroFilter({
        minCutOff: this.minCutOff,
        beta: this.beta,
      });
    }
    return this.filters[targetIndex];
  }

  /**
   * Smooths the world matrix for a target. The effective cutoff follows a
   * squared stability curve: fully stable points (stability 1) keep nearly the
   * configured cutoff, unstable ones get a much lower cutoff (heavy smoothing).
   */
  filterWorldMatrix(targetIndex, worldMatrix, context) {
    if (!this.enabled) {
      return worldMatrix;
    }
    const filter = this.getFilter(targetIndex);
    const stability = context?.stability ?? 1;
    filter.minCutOff = this.minCutOff * (0.05 + 0.95 * stability ** 2);
    filter.beta = this.beta;
    return filter.filter(Date.now(), worldMatrix);
  }

  /** Resets a target's filter when the controller signals a tracking reset. */
  onUpdate(data) {
    if (data.type === "reset" && data.targetIndex !== undefined) {
      this.filters[data.targetIndex]?.reset();
    }
  }
}
|
|
@@ -1,57 +1 @@
|
|
|
1
|
-
export class TemporalFilterFeature {
|
|
2
|
-
id = "temporal-filter";
|
|
3
|
-
name = "Temporal Filter";
|
|
4
|
-
description = "Provides warmup tolerance (to avoid false positives) and miss tolerance (to maintain tracking during brief occlusions).";
|
|
5
|
-
enabled = true;
|
|
6
|
-
states = [];
|
|
7
|
-
warmupTolerance;
|
|
8
|
-
missTolerance;
|
|
9
|
-
onToggleShowing;
|
|
10
|
-
constructor(warmup = 2, miss = 5, onToggleShowing) {
|
|
11
|
-
this.warmupTolerance = warmup;
|
|
12
|
-
this.missTolerance = miss;
|
|
13
|
-
this.onToggleShowing = onToggleShowing;
|
|
14
|
-
}
|
|
15
|
-
getState(targetIndex) {
|
|
16
|
-
if (!this.states[targetIndex]) {
|
|
17
|
-
this.states[targetIndex] = {
|
|
18
|
-
showing: false,
|
|
19
|
-
trackCount: 0,
|
|
20
|
-
trackMiss: 0,
|
|
21
|
-
};
|
|
22
|
-
}
|
|
23
|
-
return this.states[targetIndex];
|
|
24
|
-
}
|
|
25
|
-
shouldShow(targetIndex, isTracking) {
|
|
26
|
-
if (!this.enabled)
|
|
27
|
-
return isTracking;
|
|
28
|
-
const state = this.getState(targetIndex);
|
|
29
|
-
if (!state.showing) {
|
|
30
|
-
if (isTracking) {
|
|
31
|
-
state.trackMiss = 0;
|
|
32
|
-
state.trackCount += 1;
|
|
33
|
-
if (state.trackCount > this.warmupTolerance) {
|
|
34
|
-
state.showing = true;
|
|
35
|
-
this.onToggleShowing?.(targetIndex, true);
|
|
36
|
-
}
|
|
37
|
-
}
|
|
38
|
-
else {
|
|
39
|
-
state.trackCount = 0;
|
|
40
|
-
}
|
|
41
|
-
}
|
|
42
|
-
else {
|
|
43
|
-
if (!isTracking) {
|
|
44
|
-
state.trackCount = 0;
|
|
45
|
-
state.trackMiss += 1;
|
|
46
|
-
if (state.trackMiss > this.missTolerance) {
|
|
47
|
-
state.showing = false;
|
|
48
|
-
this.onToggleShowing?.(targetIndex, false);
|
|
49
|
-
}
|
|
50
|
-
}
|
|
51
|
-
else {
|
|
52
|
-
state.trackMiss = 0;
|
|
53
|
-
}
|
|
54
|
-
}
|
|
55
|
-
return state.showing;
|
|
56
|
-
}
|
|
57
|
-
}
|
|
1
|
+
/**
 * Per-target visibility state machine. Requires warmupTolerance consecutive
 * detections before showing a target (suppresses false positives) and allows
 * missTolerance consecutive misses before hiding it (survives brief occlusion).
 */
export class TemporalFilterFeature {
  id = "temporal-filter";
  name = "Temporal Filter";
  description = "Provides warmup tolerance (to avoid false positives) and miss tolerance (to maintain tracking during brief occlusions).";
  enabled = true;
  states = [];
  warmupTolerance;
  missTolerance;
  onToggleShowing;

  constructor(warmup = 2, miss = 5, onToggleShowing) {
    this.warmupTolerance = warmup;
    this.missTolerance = miss;
    this.onToggleShowing = onToggleShowing;
  }

  /** Returns (creating on first use) the per-target visibility state. */
  getState(targetIndex) {
    if (!this.states[targetIndex]) {
      this.states[targetIndex] = { showing: false, trackCount: 0, trackMiss: 0 };
    }
    return this.states[targetIndex];
  }

  /**
   * Advances the state machine with the latest raw tracking result and
   * returns whether the target should currently be visible. Fires
   * onToggleShowing on every visibility transition.
   */
  shouldShow(targetIndex, isTracking) {
    if (!this.enabled) {
      return isTracking;
    }
    const state = this.getState(targetIndex);
    if (state.showing) {
      if (isTracking) {
        state.trackMiss = 0;
      } else {
        state.trackCount = 0;
        state.trackMiss += 1;
        if (state.trackMiss > this.missTolerance) {
          state.showing = false;
          this.onToggleShowing?.(targetIndex, false);
        }
      }
    } else if (isTracking) {
      state.trackMiss = 0;
      state.trackCount += 1;
      if (state.trackCount > this.warmupTolerance) {
        state.showing = true;
        this.onToggleShowing?.(targetIndex, true);
      }
    } else {
      state.trackCount = 0;
    }
    return state.showing;
  }
}
|
package/dist/core/image-list.js
CHANGED
|
@@ -1,54 +1 @@
|
|
|
1
|
-
import
|
|
2
|
-
import { AR_CONFIG } from "./constants.js";
|
|
3
|
-
/**
|
|
4
|
-
* Tamaño mínimo de píxeles para el procesamiento de imágenes
|
|
5
|
-
* Un valor más bajo permite detectar imágenes más pequeñas pero aumenta el tiempo de procesamiento
|
|
6
|
-
* @constant {number}
|
|
7
|
-
*/
|
|
8
|
-
const MIN_IMAGE_PIXEL_SIZE = AR_CONFIG.MIN_IMAGE_PIXEL_SIZE;
|
|
9
|
-
/**
|
|
10
|
-
* Construye una lista de imágenes con diferentes escalas para detección de características
|
|
11
|
-
* @param {{width: number, height: number, data: any}} inputImage - Imagen de entrada con propiedades width, height y data
|
|
12
|
-
* @returns {Array<{data: Uint8Array, width: number, height: number, scale: number}>} Lista de imágenes escaladas con propiedades data, width, height y scale
|
|
13
|
-
*/
|
|
14
|
-
const buildImageList = (inputImage) => {
|
|
15
|
-
const minScale = MIN_IMAGE_PIXEL_SIZE / Math.min(inputImage.width, inputImage.height);
|
|
16
|
-
const scaleList = [];
|
|
17
|
-
let c = minScale;
|
|
18
|
-
while (true) {
|
|
19
|
-
scaleList.push(c);
|
|
20
|
-
// Optimization: Paso balanceado (aprox 1.5)
|
|
21
|
-
// Mejor cobertura que 2.0, pero mucho más ligero que 1.41 o 1.26
|
|
22
|
-
c *= Math.pow(2.0, AR_CONFIG.SCALE_STEP_EXPONENT);
|
|
23
|
-
if (c >= 0.95) {
|
|
24
|
-
c = 1;
|
|
25
|
-
break;
|
|
26
|
-
}
|
|
27
|
-
}
|
|
28
|
-
scaleList.push(c);
|
|
29
|
-
scaleList.reverse();
|
|
30
|
-
const imageList = [];
|
|
31
|
-
for (let i = 0; i < scaleList.length; i++) {
|
|
32
|
-
imageList.push(Object.assign(resize({ image: inputImage, ratio: scaleList[i] }), { scale: scaleList[i] }));
|
|
33
|
-
}
|
|
34
|
-
return imageList;
|
|
35
|
-
};
|
|
36
|
-
/**
|
|
37
|
-
* Construye una lista optimizada de imágenes para tracking
|
|
38
|
-
* Genera dos versiones escaladas (256px y 128px) para tracking eficiente
|
|
39
|
-
* @param {{width: number, height: number, data: any}} inputImage - Imagen de entrada con propiedades width, height y data
|
|
40
|
-
* @returns {Array<{data: Uint8Array, width: number, height: number, scale: number}>} Lista de imágenes escaladas para tracking
|
|
41
|
-
*/
|
|
42
|
-
const buildTrackingImageList = (inputImage) => {
|
|
43
|
-
const minDimension = Math.min(inputImage.width, inputImage.height);
|
|
44
|
-
const scaleList = [];
|
|
45
|
-
const imageList = [];
|
|
46
|
-
// Generamos versiones de 256px y 128px para tracking robusto a diferentes distancias
|
|
47
|
-
scaleList.push(AR_CONFIG.TRACKING_DOWNSCALE_LEVEL_1 / minDimension);
|
|
48
|
-
scaleList.push(AR_CONFIG.TRACKING_DOWNSCALE_LEVEL_2 / minDimension);
|
|
49
|
-
for (let i = 0; i < scaleList.length; i++) {
|
|
50
|
-
imageList.push(Object.assign(resize({ image: inputImage, ratio: scaleList[i] }), { scale: scaleList[i] }));
|
|
51
|
-
}
|
|
52
|
-
return imageList;
|
|
53
|
-
};
|
|
54
|
-
export { buildImageList, buildTrackingImageList };
|
|
1
|
+
import { resize } from "./utils/images.js";
import { AR_CONFIG } from "./constants.js";

// Minimum pixel size for the smallest pyramid level; lower values detect
// smaller images at the cost of more processing.
const MIN_IMAGE_PIXEL_SIZE = AR_CONFIG.MIN_IMAGE_PIXEL_SIZE;

/**
 * Builds a multi-scale image pyramid for feature detection, from the minimum
 * size up to scale 1, stepped by 2^SCALE_STEP_EXPONENT per level.
 * @param {{width: number, height: number, data: any}} inputImage
 * @returns {Array<{data: Uint8Array, width: number, height: number, scale: number}>}
 */
const buildImageList = (inputImage) => {
  const scaleList = [];
  let scale = MIN_IMAGE_PIXEL_SIZE / Math.min(inputImage.width, inputImage.height);
  for (;;) {
    scaleList.push(scale);
    scale *= Math.pow(2, AR_CONFIG.SCALE_STEP_EXPONENT);
    if (scale >= 0.95) {
      // Snap the final level to full resolution.
      scale = 1;
      break;
    }
  }
  scaleList.push(scale);
  scaleList.reverse(); // largest scale first
  return scaleList.map((ratio) =>
    Object.assign(resize({ image: inputImage, ratio }), { scale: ratio })
  );
};

/**
 * Builds the two downscaled versions used for tracking (robust to different
 * camera distances).
 * @param {{width: number, height: number, data: any}} inputImage
 * @returns {Array<{data: Uint8Array, width: number, height: number, scale: number}>}
 */
const buildTrackingImageList = (inputImage) => {
  const minDimension = Math.min(inputImage.width, inputImage.height);
  const scaleList = [
    AR_CONFIG.TRACKING_DOWNSCALE_LEVEL_1 / minDimension,
    AR_CONFIG.TRACKING_DOWNSCALE_LEVEL_2 / minDimension,
  ];
  return scaleList.map((ratio) =>
    Object.assign(resize({ image: inputImage, ratio }), { scale: ratio })
  );
};

export { buildImageList, buildTrackingImageList };
|
|
@@ -1,87 +1 @@
|
|
|
1
|
-
|
|
2
|
-
* InputLoader - Maneja la carga de imágenes y video sin TensorFlow
|
|
3
|
-
*/
|
|
4
|
-
class InputLoader {
|
|
5
|
-
constructor(width, height) {
|
|
6
|
-
this.width = width;
|
|
7
|
-
this.height = height;
|
|
8
|
-
this.grayscaleBuffer = new Uint8Array(width * height);
|
|
9
|
-
if (typeof document !== "undefined") {
|
|
10
|
-
const canvas = document.createElement("canvas");
|
|
11
|
-
canvas.width = width;
|
|
12
|
-
canvas.height = height;
|
|
13
|
-
this.context = canvas.getContext("2d", { willReadFrequently: true, alpha: false });
|
|
14
|
-
}
|
|
15
|
-
}
|
|
16
|
-
/**
|
|
17
|
-
* Carga una imagen o video y devuelve los datos en escala de grises
|
|
18
|
-
* @param {HTMLVideoElement|HTMLImageElement|ImageData|Uint8Array} input - La fuente de entrada
|
|
19
|
-
* @returns {Uint8Array} Datos de imagen en escala de grises (width * height)
|
|
20
|
-
*/
|
|
21
|
-
loadInput(input) {
|
|
22
|
-
// Si ya es un Uint8Array de escala de grises, lo devolvemos
|
|
23
|
-
if (input instanceof Uint8Array && input.length === this.width * this.height) {
|
|
24
|
-
return input;
|
|
25
|
-
}
|
|
26
|
-
// Si es ImageData, convertimos a escala de grises directamente
|
|
27
|
-
if (typeof ImageData !== "undefined" && input instanceof ImageData) {
|
|
28
|
-
this._convertToGrayscale(input.data, input.width, input.height);
|
|
29
|
-
return this.grayscaleBuffer;
|
|
30
|
-
}
|
|
31
|
-
// En el navegador, usamos canvas para procesar video/imágenes
|
|
32
|
-
if (this.context) {
|
|
33
|
-
this.context.clearRect(0, 0, this.width, this.height);
|
|
34
|
-
const isInputRotated = input.width === this.height && input.height === this.width;
|
|
35
|
-
const inputW = isInputRotated ? input.height : input.width;
|
|
36
|
-
const inputH = isInputRotated ? input.width : input.height;
|
|
37
|
-
const inputAspect = inputW / inputH;
|
|
38
|
-
const canvasAspect = this.width / this.height;
|
|
39
|
-
let sx = 0, sy = 0, sw = inputW, sh = inputH;
|
|
40
|
-
if (inputAspect > canvasAspect) {
|
|
41
|
-
// Input is wider than canvas - crop sides
|
|
42
|
-
sw = inputH * canvasAspect;
|
|
43
|
-
sx = (inputW - sw) / 2;
|
|
44
|
-
}
|
|
45
|
-
else if (inputAspect < canvasAspect) {
|
|
46
|
-
// Input is taller than canvas - crop top/bottom
|
|
47
|
-
sh = inputW / canvasAspect;
|
|
48
|
-
sy = (inputH - sh) / 2;
|
|
49
|
-
}
|
|
50
|
-
if (isInputRotated) {
|
|
51
|
-
this.context.save();
|
|
52
|
-
this.context.translate(this.width / 2, this.height / 2);
|
|
53
|
-
this.context.rotate(Math.PI / 2);
|
|
54
|
-
// Map source crop (relative to rotated input)
|
|
55
|
-
// Since input is already rotated, we crop based on the rotated dimensions
|
|
56
|
-
this.context.drawImage(input, sx, sy, sw, sh, -this.height / 2, -this.width / 2, this.height, this.width);
|
|
57
|
-
this.context.restore();
|
|
58
|
-
}
|
|
59
|
-
else {
|
|
60
|
-
this.context.drawImage(input, sx, sy, sw, sh, 0, 0, this.width, this.height);
|
|
61
|
-
}
|
|
62
|
-
const imageData = this.context.getImageData(0, 0, this.width, this.height);
|
|
63
|
-
this._convertToGrayscale(imageData.data, this.width, this.height);
|
|
64
|
-
return this.grayscaleBuffer;
|
|
65
|
-
}
|
|
66
|
-
// Fallback para Node.js o entornos sin DOM
|
|
67
|
-
if (input.data && input.data instanceof Uint8Array) {
|
|
68
|
-
this._convertToGrayscale(input.data, input.width || this.width, input.height || this.height);
|
|
69
|
-
return this.grayscaleBuffer;
|
|
70
|
-
}
|
|
71
|
-
throw new Error("Input no soportado o entorno sin Canvas");
|
|
72
|
-
}
|
|
73
|
-
/**
|
|
74
|
-
* Convierte datos RGBA a escala de grises optimizada (reutilizando buffer)
|
|
75
|
-
*/
|
|
76
|
-
_convertToGrayscale(rgbaData, width, height) {
|
|
77
|
-
const grayscale = this.grayscaleBuffer;
|
|
78
|
-
const len = (width * height);
|
|
79
|
-
// Optimized loop with bitwise ops
|
|
80
|
-
for (let i = 0; i < len; i++) {
|
|
81
|
-
const offset = i << 2;
|
|
82
|
-
// Formula de luminosidad estándar: 0.299R + 0.587G + 0.114B (scaled by 256)
|
|
83
|
-
grayscale[i] = (rgbaData[offset] * 77 + rgbaData[offset + 1] * 150 + rgbaData[offset + 2] * 29) >> 8;
|
|
84
|
-
}
|
|
85
|
-
}
|
|
86
|
-
}
|
|
87
|
-
export { InputLoader };
|
|
1
|
+
/**
 * InputLoader — converts video frames, images, ImageData or raw buffers into
 * a reusable grayscale Uint8Array of size width*height, without TensorFlow.
 */
class InputLoader {
  constructor(width, height) {
    this.width = width;
    this.height = height;
    // Single reusable output buffer: one luminance byte per pixel.
    this.grayscaleBuffer = new Uint8Array(width * height);
    if (typeof document !== "undefined") {
      const canvas = document.createElement("canvas");
      canvas.width = width;
      canvas.height = height;
      this.context = canvas.getContext("2d", { willReadFrequently: true, alpha: false });
    }
  }

  /**
   * Loads any supported input and returns grayscale pixel data.
   * @param {HTMLVideoElement|HTMLImageElement|ImageData|Uint8Array|{data: Uint8Array, width?: number, height?: number}} input
   * @returns {Uint8Array} grayscale data (width * height)
   * @throws {Error} when the input type is unsupported and no canvas exists
   */
  loadInput(input) {
    // Already grayscale at the expected size: return as-is.
    if (input instanceof Uint8Array && input.length === this.width * this.height) {
      return input;
    }
    // ImageData: convert its RGBA bytes directly.
    if (typeof ImageData !== "undefined" && input instanceof ImageData) {
      this._convertToGrayscale(input.data, input.width, input.height);
      return this.grayscaleBuffer;
    }
    // Browser path: draw onto the canvas (center-cropped, optionally rotated).
    if (this.context) {
      this.context.clearRect(0, 0, this.width, this.height);
      // Input whose dimensions are swapped relative to the canvas is treated
      // as rotated 90 degrees (e.g. portrait video on a landscape canvas).
      const rotated = input.width === this.height && input.height === this.width;
      const srcW = rotated ? input.height : input.width;
      const srcH = rotated ? input.width : input.height;
      const srcAspect = srcW / srcH;
      const dstAspect = this.width / this.height;
      let sx = 0;
      let sy = 0;
      let sw = srcW;
      let sh = srcH;
      if (srcAspect > dstAspect) {
        // Source wider than canvas: center-crop the sides.
        sw = srcH * dstAspect;
        sx = (srcW - sw) / 2;
      } else if (srcAspect < dstAspect) {
        // Source taller than canvas: center-crop top/bottom.
        sh = srcW / dstAspect;
        sy = (srcH - sh) / 2;
      }
      if (rotated) {
        this.context.save();
        this.context.translate(this.width / 2, this.height / 2);
        this.context.rotate(Math.PI / 2);
        // Crop is expressed in the rotated input's own coordinates.
        this.context.drawImage(input, sx, sy, sw, sh, -this.height / 2, -this.width / 2, this.height, this.width);
        this.context.restore();
      } else {
        this.context.drawImage(input, sx, sy, sw, sh, 0, 0, this.width, this.height);
      }
      const frame = this.context.getImageData(0, 0, this.width, this.height);
      this._convertToGrayscale(frame.data, this.width, this.height);
      return this.grayscaleBuffer;
    }
    // Node.js fallback: a plain object carrying raw RGBA bytes.
    if (input.data && input.data instanceof Uint8Array) {
      this._convertToGrayscale(input.data, input.width || this.width, input.height || this.height);
      return this.grayscaleBuffer;
    }
    throw new Error("Input no soportado o entorno sin Canvas");
  }

  /** Converts RGBA bytes into the shared grayscale buffer. */
  _convertToGrayscale(rgbaData, width, height) {
    const out = this.grayscaleBuffer;
    const pixelCount = width * height;
    for (let p = 0; p < pixelCount; p++) {
      const offset = p << 2;
      // Integer approximation of 0.299R + 0.587G + 0.114B, scaled by 256.
      out[p] = (rgbaData[offset] * 77 + rgbaData[offset + 1] * 150 + rgbaData[offset + 2] * 29) >> 8;
    }
  }
}
export { InputLoader };
|
|
@@ -1,66 +1 @@
|
|
|
1
|
-
|
|
2
|
-
const BIT_COUNT_8 = new Uint8Array(256);
|
|
3
|
-
for (let i = 0; i < 256; i++) {
|
|
4
|
-
let c = 0, n = i;
|
|
5
|
-
while (n > 0) {
|
|
6
|
-
n &= (n - 1);
|
|
7
|
-
c++;
|
|
8
|
-
}
|
|
9
|
-
BIT_COUNT_8[i] = c;
|
|
10
|
-
}
|
|
11
|
-
/**
|
|
12
|
-
* 🚀 Moonshot Optimized Popcount
|
|
13
|
-
* Uses a slightly faster bitwise sequence for 32-bit integers
|
|
14
|
-
*/
|
|
15
|
-
function popcount32(n) {
|
|
16
|
-
n = n >>> 0; // Force unsigned
|
|
17
|
-
n -= (n >>> 1) & 0x55555555;
|
|
18
|
-
n = (n & 0x33333333) + ((n >>> 2) & 0x33333333);
|
|
19
|
-
return (((n + (n >>> 4)) & 0x0F0F0F0F) * 0x01010101) >>> 24;
|
|
20
|
-
}
|
|
21
|
-
/**
|
|
22
|
-
* Super-optimized Hamming distance for 64-bit LSH (2x Uint32)
|
|
23
|
-
* NO OBJECTS, NO OPTIONS, JUST PURE SPEED.
|
|
24
|
-
*/
|
|
25
|
-
const compute64 = (v1, v1Idx, v2, v2Idx) => {
|
|
26
|
-
// Inline XOR and popcount for maximum speed
|
|
27
|
-
let x1 = (v1[v1Idx] ^ v2[v2Idx]) >>> 0;
|
|
28
|
-
let x2 = (v1[v1Idx + 1] ^ v2[v2Idx + 1]) >>> 0;
|
|
29
|
-
// Popcount 1
|
|
30
|
-
x1 -= (x1 >>> 1) & 0x55555555;
|
|
31
|
-
x1 = (x1 & 0x33333333) + ((x1 >>> 2) & 0x33333333);
|
|
32
|
-
const count1 = (((x1 + (x1 >>> 4)) & 0x0F0F0F0F) * 0x01010101) >>> 24;
|
|
33
|
-
// Popcount 2
|
|
34
|
-
x2 -= (x2 >>> 1) & 0x55555555;
|
|
35
|
-
x2 = (x2 & 0x33333333) + ((x2 >>> 2) & 0x33333333);
|
|
36
|
-
const count2 = (((x2 + (x2 >>> 4)) & 0x0F0F0F0F) * 0x01010101) >>> 24;
|
|
37
|
-
return count1 + count2;
|
|
38
|
-
};
|
|
39
|
-
/**
|
|
40
|
-
* Generic compute for backward compatibility
|
|
41
|
-
*/
|
|
42
|
-
const compute = (options) => {
|
|
43
|
-
const { v1, v2, v1Offset = 0, v2Offset = 0 } = options;
|
|
44
|
-
const v2Len = v2.length - v2Offset;
|
|
45
|
-
if (v2Len === 2) {
|
|
46
|
-
return compute64(v1, v1Offset, v2, v2Offset);
|
|
47
|
-
}
|
|
48
|
-
// Protocol V4: 84-byte descriptors (Uint8Array)
|
|
49
|
-
if (v2Len === 84) {
|
|
50
|
-
let d = 0;
|
|
51
|
-
for (let i = 0; i < 84; i++) {
|
|
52
|
-
d += BIT_COUNT_8[v1[v1Offset + i] ^ v2[v2Offset + i]];
|
|
53
|
-
}
|
|
54
|
-
return d;
|
|
55
|
-
}
|
|
56
|
-
// Protocol V5.1: 128-bit LSH (4 x Uint32)
|
|
57
|
-
if (v2Len === 4) {
|
|
58
|
-
return popcount32(v1[v1Offset] ^ v2[v2Offset]) +
|
|
59
|
-
popcount32(v1[v1Offset + 1] ^ v2[v2Offset + 1]) +
|
|
60
|
-
popcount32(v1[v1Offset + 2] ^ v2[v2Offset + 2]) +
|
|
61
|
-
popcount32(v1[v1Offset + 3] ^ v2[v2Offset + 3]);
|
|
62
|
-
}
|
|
63
|
-
return popcount32(v1[v1Offset] ^ v2[v2Offset]) +
|
|
64
|
-
popcount32(v1[v1Offset + 1] ^ v2[v2Offset + 1]);
|
|
65
|
-
};
|
|
66
|
-
export { compute, compute64 };
|
|
1
|
+
// Lookup table: number of set bits for every byte value (0-255), built with
// Kernighan's clear-lowest-bit trick.
const BIT_COUNT_8 = new Uint8Array(256);
for (let value = 0; value < 256; value++) {
  let bits = 0;
  let remaining = value;
  while (remaining > 0) {
    remaining &= remaining - 1; // clear the lowest set bit
    bits++;
  }
  BIT_COUNT_8[value] = bits;
}

// SWAR population count for a single 32-bit word.
function popcount32(word) {
  word = word >>> 0; // force unsigned
  word -= (word >>> 1) & 0x55555555;
  word = (word & 0x33333333) + ((word >>> 2) & 0x33333333);
  return (((word + (word >>> 4)) & 0x0f0f0f0f) * 0x01010101) >>> 24;
}

/**
 * Hamming distance between two 64-bit descriptors stored as 2x Uint32,
 * with the XOR + popcount fully inlined for the hot path.
 */
const compute64 = (v1, v1Idx, v2, v2Idx) => {
  let lo = (v1[v1Idx] ^ v2[v2Idx]) >>> 0;
  let hi = (v1[v1Idx + 1] ^ v2[v2Idx + 1]) >>> 0;
  lo -= (lo >>> 1) & 0x55555555;
  lo = (lo & 0x33333333) + ((lo >>> 2) & 0x33333333);
  hi -= (hi >>> 1) & 0x55555555;
  hi = (hi & 0x33333333) + ((hi >>> 2) & 0x33333333);
  return (
    ((((lo + (lo >>> 4)) & 0x0f0f0f0f) * 0x01010101) >>> 24) +
    ((((hi + (hi >>> 4)) & 0x0f0f0f0f) * 0x01010101) >>> 24)
  );
};

/**
 * Generic Hamming distance, dispatching on the descriptor length:
 * 2 words = 64-bit LSH, 84 bytes = protocol V4, 4 words = 128-bit LSH (V5.1),
 * anything else falls back to the two-word path.
 */
const compute = (options) => {
  const { v1, v2, v1Offset = 0, v2Offset = 0 } = options;
  const v2Len = v2.length - v2Offset;
  if (v2Len === 2) {
    return compute64(v1, v1Offset, v2, v2Offset);
  }
  if (v2Len === 84) {
    // Byte-wise table lookup over the 84-byte descriptor.
    let distance = 0;
    for (let i = 0; i < 84; i++) {
      distance += BIT_COUNT_8[v1[v1Offset + i] ^ v2[v2Offset + i]];
    }
    return distance;
  }
  if (v2Len === 4) {
    return (
      popcount32(v1[v1Offset] ^ v2[v2Offset]) +
      popcount32(v1[v1Offset + 1] ^ v2[v2Offset + 1]) +
      popcount32(v1[v1Offset + 2] ^ v2[v2Offset + 2]) +
      popcount32(v1[v1Offset + 3] ^ v2[v2Offset + 3])
    );
  }
  return (
    popcount32(v1[v1Offset] ^ v2[v2Offset]) +
    popcount32(v1[v1Offset + 1] ^ v2[v2Offset + 1])
  );
};

export { compute, compute64 };
|
|
@@ -1,102 +1 @@
|
|
|
1
|
-
|
|
2
|
-
* Hyperdimensional Computing (HDC) Core for AR
|
|
3
|
-
*
|
|
4
|
-
* Provides ultra-fast, ultra-compressed feature matching using
|
|
5
|
-
* High-Dimensional Random Vectors.
|
|
6
|
-
*/
|
|
7
|
-
export const HDC_DIMENSION = 1024; // bits
|
|
8
|
-
export const HDC_WORDS = HDC_DIMENSION / 32;
|
|
9
|
-
/**
|
|
10
|
-
* Deterministic Random Number Generator (PCG-like)
|
|
11
|
-
*/
|
|
12
|
-
class PRNG {
|
|
13
|
-
state;
|
|
14
|
-
constructor(seed) {
|
|
15
|
-
this.state = seed;
|
|
16
|
-
}
|
|
17
|
-
next() {
|
|
18
|
-
this.state = (this.state * 1664525 + 1013904223) >>> 0;
|
|
19
|
-
return this.state / 0xFFFFFFFF;
|
|
20
|
-
}
|
|
21
|
-
}
|
|
22
|
-
/**
|
|
23
|
-
* Generates a deterministic basis of Hypervectors
|
|
24
|
-
*/
|
|
25
|
-
export function generateBasis(seed, count) {
|
|
26
|
-
const prng = new PRNG(seed);
|
|
27
|
-
const basis = [];
|
|
28
|
-
for (let i = 0; i < count; i++) {
|
|
29
|
-
const hv = new Uint32Array(HDC_WORDS);
|
|
30
|
-
for (let j = 0; j < HDC_WORDS; j++) {
|
|
31
|
-
hv[j] = (prng.next() * 0xFFFFFFFF) >>> 0;
|
|
32
|
-
}
|
|
33
|
-
basis.push(hv);
|
|
34
|
-
}
|
|
35
|
-
return basis;
|
|
36
|
-
}
|
|
37
|
-
/**
|
|
38
|
-
* Projects a 64-bit descriptor into the Hyperdimensional Space
|
|
39
|
-
* Uses "Random Projection" logic (Locality Sensitive Hashing in HDC)
|
|
40
|
-
*/
|
|
41
|
-
export function projectDescriptor(desc, basis) {
|
|
42
|
-
const result = new Uint32Array(HDC_WORDS);
|
|
43
|
-
// For each bit in the HDC space
|
|
44
|
-
for (let i = 0; i < HDC_DIMENSION; i++) {
|
|
45
|
-
const wordIdx = i >>> 5;
|
|
46
|
-
const bitIdx = i & 31;
|
|
47
|
-
// This is a simplified random projection
|
|
48
|
-
// In a real HDC system, we'd use more complex binding
|
|
49
|
-
// But for Vanilla JS performance, we use bitwise voting
|
|
50
|
-
let sum = 0;
|
|
51
|
-
const b = basis[i % basis.length];
|
|
52
|
-
// Dot product between descriptor and basis vector (subset)
|
|
53
|
-
// desc[0] and desc[1] are the 64 bits
|
|
54
|
-
for (let j = 0; j < 2; j++) {
|
|
55
|
-
sum += popcount(desc[j] & b[j]);
|
|
56
|
-
}
|
|
57
|
-
if (sum > 16) { // Threshold for "firing"
|
|
58
|
-
result[wordIdx] |= (1 << bitIdx);
|
|
59
|
-
}
|
|
60
|
-
}
|
|
61
|
-
return result;
|
|
62
|
-
}
|
|
63
|
-
/**
|
|
64
|
-
* Compresses an HDC vector into an "Ultra-Short Signature" (32 bits)
|
|
65
|
-
* This allows storing 1000 points in just 4KB of descriptors.
|
|
66
|
-
*/
|
|
67
|
-
export function compressToSignature(hv) {
|
|
68
|
-
// FNV-1a Hash for robust 32-bit compression
|
|
69
|
-
let h1 = 0x811c9dc5;
|
|
70
|
-
for (let i = 0; i < hv.length; i++) {
|
|
71
|
-
h1 ^= hv[i];
|
|
72
|
-
h1 = Math.imul(h1, 0x01000193);
|
|
73
|
-
}
|
|
74
|
-
return h1 >>> 0;
|
|
75
|
-
}
|
|
76
|
-
function popcount(n) {
|
|
77
|
-
n = n - ((n >> 1) & 0x55555555);
|
|
78
|
-
n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
|
|
79
|
-
return (((n + (n >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
|
|
80
|
-
}
|
|
81
|
-
/**
|
|
82
|
-
* Bundles multiple points into a single Global Hypervector (The "Image DNA")
|
|
83
|
-
* This allows checking if an image is present with ONE vector comparison.
|
|
84
|
-
*/
|
|
85
|
-
export function bundle(hvs) {
|
|
86
|
-
const global = new Uint32Array(HDC_WORDS);
|
|
87
|
-
const threshold = hvs.length / 2;
|
|
88
|
-
const counters = new Uint16Array(HDC_DIMENSION);
|
|
89
|
-
for (const hv of hvs) {
|
|
90
|
-
for (let i = 0; i < HDC_DIMENSION; i++) {
|
|
91
|
-
if (hv[i >>> 5] & (1 << (i & 31))) {
|
|
92
|
-
counters[i]++;
|
|
93
|
-
}
|
|
94
|
-
}
|
|
95
|
-
}
|
|
96
|
-
for (let i = 0; i < HDC_DIMENSION; i++) {
|
|
97
|
-
if (counters[i] > threshold) {
|
|
98
|
-
global[i >>> 5] |= (1 << (i & 31));
|
|
99
|
-
}
|
|
100
|
-
}
|
|
101
|
-
return global;
|
|
102
|
-
}
|
|
1
|
+
export const HDC_DIMENSION=1024;export const HDC_WORDS=32;class t{state;constructor(t){this.state=t}next(){return this.state=1664525*this.state+1013904223>>>0,this.state/4294967295}}export function generateBasis(e,n){const r=new t(e),o=[];for(let t=0;t<n;t++){const t=new Uint32Array(32);for(let e=0;e<32;e++)t[e]=4294967295*r.next()>>>0;o.push(t)}return o}export function projectDescriptor(t,n){const r=new Uint32Array(32);for(let o=0;o<1024;o++){const s=o>>>5,c=31&o;let i=0;const u=n[o%n.length];for(let n=0;n<2;n++)i+=e(t[n]&u[n]);i>16&&(r[s]|=1<<c)}return r}export function compressToSignature(t){let e=2166136261;for(let n=0;n<t.length;n++)e^=t[n],e=Math.imul(e,16777619);return e>>>0}function e(t){return 16843009*((t=(858993459&(t-=t>>1&1431655765))+(t>>2&858993459))+(t>>4)&252645135)>>24}export function bundle(t){const e=new Uint32Array(32),n=t.length/2,r=new Uint16Array(1024);for(const e of t)for(let t=0;t<1024;t++)e[t>>>5]&1<<(31&t)&&r[t]++;for(let t=0;t<1024;t++)r[t]>n&&(e[t>>>5]|=1<<(31&t));return e}
|