@srsergio/taptapp-ar 1.0.3 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/compiler/compiler-base.d.ts +1 -1
- package/dist/compiler/controller.d.ts +4 -4
- package/dist/compiler/detector/crop-detector.d.ts +12 -12
- package/dist/compiler/detector/detector.d.ts +20 -21
- package/dist/compiler/detector/kernels/cpu/computeExtremaAngles.d.ts +1 -1
- package/dist/compiler/detector/kernels/webgl/upsampleBilinear.d.ts +1 -1
- package/dist/compiler/input-loader.d.ts +4 -5
- package/dist/compiler/offline-compiler.d.ts +1 -1
- package/dist/compiler/tensorflow-setup.d.ts +0 -1
- package/dist/compiler/three.d.ts +7 -12
- package/dist/compiler/tracker/tracker.d.ts +8 -16
- package/dist/compiler/utils/worker-pool.d.ts +3 -4
- package/package.json +2 -2
- package/dist/compiler/estimation/esimate-experiment.d.ts +0 -5
- package/dist/compiler/estimation/esimate-experiment.js +0 -267
- package/dist/compiler/estimation/refine-estimate-experiment.d.ts +0 -6
- package/dist/compiler/estimation/refine-estimate-experiment.js +0 -429
- package/dist/react/AREditor.d.ts +0 -5
- package/dist/react/AREditor.js +0 -159
- package/dist/react/ProgressDialog.d.ts +0 -13
- package/dist/react/ProgressDialog.js +0 -57
package/dist/compiler/compiler-base.d.ts
CHANGED
@@ -1,7 +1,7 @@
 export class CompilerBase {
 data: any[] | null;
 compileImageTargets(images: any, progressCallback: any): Promise<any>;
-exportData():
+exportData(): any;
 importData(buffer: any): any[];
 createProcessCanvas(): void;
 compileTrack(): Promise<never[]>;
package/dist/compiler/controller.d.ts
CHANGED
@@ -60,12 +60,12 @@ export class Controller {
 descriptors: any[];
 }[];
 debugExtra: {
-pyramidImages:
-dogPyramidImages:
-extremasResults:
+pyramidImages: any[][];
+dogPyramidImages: any[];
+extremasResults: any[];
 extremaAngles: any;
 prunedExtremas: number[][];
-localizedExtremas:
+localizedExtremas: any;
 } | null;
 }>;
 match(featurePoints: any, targetIndex: any): Promise<{
package/dist/compiler/detector/crop-detector.d.ts
CHANGED
@@ -17,12 +17,12 @@ export class CropDetector {
 descriptors: any[];
 }[];
 debugExtra: {
-pyramidImages:
-dogPyramidImages:
-extremasResults:
+pyramidImages: any[][];
+dogPyramidImages: any[];
+extremasResults: any[];
 extremaAngles: any;
 prunedExtremas: number[][];
-localizedExtremas:
+localizedExtremas: any;
 } | null;
 };
 detectMoving(inputImageT: any): {
@@ -35,12 +35,12 @@ export class CropDetector {
 descriptors: any[];
 }[];
 debugExtra: {
-pyramidImages:
-dogPyramidImages:
-extremasResults:
+pyramidImages: any[][];
+dogPyramidImages: any[];
+extremasResults: any[];
 extremaAngles: any;
 prunedExtremas: number[][];
-localizedExtremas:
+localizedExtremas: any;
 } | null;
 };
 _detect(inputImageT: any, startX: any, startY: any): {
@@ -53,12 +53,12 @@ export class CropDetector {
 descriptors: any[];
 }[];
 debugExtra: {
-pyramidImages:
-dogPyramidImages:
-extremasResults:
+pyramidImages: any[][];
+dogPyramidImages: any[];
+extremasResults: any[];
 extremaAngles: any;
 prunedExtremas: number[][];
-localizedExtremas:
+localizedExtremas: any;
 } | null;
 };
 }
package/dist/compiler/detector/detector.d.ts
CHANGED
@@ -16,12 +16,12 @@ export class Detector {
 descriptors: any[];
 }[];
 debugExtra: {
-pyramidImages:
-dogPyramidImages:
-extremasResults:
+pyramidImages: any[][];
+dogPyramidImages: any[];
+extremasResults: any[];
 extremaAngles: any;
 prunedExtremas: number[][];
-localizedExtremas:
+localizedExtremas: any;
 } | null;
 };
 /**
@@ -39,60 +39,59 @@ export class Detector {
 descriptors: any[];
 }[];
 debugExtra: {
-pyramidImages:
-dogPyramidImages:
-extremasResults:
+pyramidImages: any[][];
+dogPyramidImages: any[];
+extremasResults: any[];
 extremaAngles: any;
 prunedExtremas: number[][];
-localizedExtremas:
+localizedExtremas: any;
 } | null;
 };
-_computeFreakDescriptors(extremaFreaks: any):
-_computeExtremaFreak(pyramidImagesT: any, prunedExtremas: any, prunedExtremasAngles: any):
+_computeFreakDescriptors(extremaFreaks: any): any;
+_computeExtremaFreak(pyramidImagesT: any, prunedExtremas: any, prunedExtremasAngles: any): any;
 /**
 *
 * @param {tf.Tensor<tf.Rank>} histograms
 * @returns
 */
-_computeExtremaAngles(histograms: tf.Tensor<tf.Rank>):
+_computeExtremaAngles(histograms: tf.Tensor<tf.Rank>): any;
 /**
 *
 * @param {tf.Tensor<tf.Rank>} prunedExtremasT
 * @param {tf.Tensor<tf.Rank>[]} pyramidImagesT
 * @returns
 */
-_computeOrientationHistograms(prunedExtremasT: tf.Tensor<tf.Rank>, pyramidImagesT: tf.Tensor<tf.Rank>[]):
-_smoothHistograms(histograms: any):
+_computeOrientationHistograms(prunedExtremasT: tf.Tensor<tf.Rank>, pyramidImagesT: tf.Tensor<tf.Rank>[]): any;
+_smoothHistograms(histograms: any): any;
 /**
 *
 * @param {number[][]} prunedExtremasList
 * @param {tf.Tensor<tf.Rank>[]} dogPyramidImagesT
 * @returns
 */
-_computeLocalization(prunedExtremasList: number[][], dogPyramidImagesT: tf.Tensor<tf.Rank>[]):
+_computeLocalization(prunedExtremasList: number[][], dogPyramidImagesT: tf.Tensor<tf.Rank>[]): any;
 /**
 *
 * @param {tf.Tensor<tf.Rank>[]} extremasResultsT
 * @returns
 */
 _applyPrune(extremasResultsT: tf.Tensor<tf.Rank>[]): number[][];
-_buildExtremas(image0: any, image1: any, image2: any):
+_buildExtremas(image0: any, image1: any, image2: any): any;
 /**
 *
 * @param {tf.Tensor<tf.Rank>} image1
 * @param {tf.Tensor<tf.Rank>} image2
 * @returns
 */
-_differenceImageBinomial(image1: tf.Tensor<tf.Rank>, image2: tf.Tensor<tf.Rank>):
-_applyFilter(image: any):
-_downsampleBilinear(image: any):
+_differenceImageBinomial(image1: tf.Tensor<tf.Rank>, image2: tf.Tensor<tf.Rank>): any;
+_applyFilter(image: any): any;
+_downsampleBilinear(image: any): any;
 /**
 *
 * @param {tf.MathBackendWebGL.GPGPUProgram} program
 * @param {*} inputs
 * @returns
 */
-_compileAndRun(program: tf.MathBackendWebGL.GPGPUProgram, inputs: any):
-_runWebGLProgram(program: any, inputs: any, outputType: any):
+_compileAndRun(program: tf.MathBackendWebGL.GPGPUProgram, inputs: any): any;
+_runWebGLProgram(program: any, inputs: any, outputType: any): any;
 }
-import * as tf from "@tensorflow/tfjs";
package/dist/compiler/detector/kernels/cpu/computeExtremaAngles.d.ts
CHANGED
@@ -1,5 +1,5 @@
 export function computeExtremaAnglesImpl(histogram: any): Float32Array<any>;
-export function computeExtremaAngles(args: any):
+export function computeExtremaAngles(args: any): any;
 export namespace computeExtremaAnglesConfig {
 export let kernelName: string;
 export let backendName: string;
package/dist/compiler/input-loader.d.ts
CHANGED
@@ -10,14 +10,13 @@ export class InputLoader {
 userCode: string;
 };
 tempPixelHandle: any;
-_loadInput(input: any):
-loadInput(input: any):
+_loadInput(input: any): any;
+loadInput(input: any): any;
 buildProgram(width: any, height: any): {
 variableNames: string[];
 outputShape: any[];
 userCode: string;
 };
-_compileAndRun(program: any, inputs: any):
-_runWebGLProgram(program: any, inputs: any, outputType: any):
+_compileAndRun(program: any, inputs: any): any;
+_runWebGLProgram(program: any, inputs: any, outputType: any): any;
 }
-import * as tf from "@tensorflow/tfjs";
package/dist/compiler/three.d.ts
CHANGED
@@ -24,17 +24,17 @@ export class MindARThree {
 userDeviceId: any;
 environmentDeviceId: any;
 shouldFaceUser: boolean;
-scene:
-cssScene:
-renderer:
-cssRenderer:
-camera:
+scene: any;
+cssScene: any;
+renderer: any;
+cssRenderer: any;
+camera: any;
 anchors: any[];
 start(): Promise<void>;
 stop(): void;
 switchCamera(): void;
 addAnchor(targetIndex: any): {
-group:
+group: any;
 targetIndex: any;
 onTargetFound: null;
 onTargetLost: null;
@@ -43,7 +43,7 @@ export class MindARThree {
 visible: boolean;
 };
 addCSSAnchor(targetIndex: any): {
-group:
+group: any;
 targetIndex: any;
 onTargetFound: null;
 onTargetLost: null;
@@ -58,9 +58,4 @@ export class MindARThree {
 postMatrixs: any[] | undefined;
 resize(): void;
 }
-import { Scene } from "three";
-import { WebGLRenderer } from "three";
-import { CSS3DRenderer } from "three/addons/renderers/CSS3DRenderer.js";
-import { PerspectiveCamera } from "three";
-import { Group } from "three";
 import { Controller } from "./controller.js";
package/dist/compiler/tracker/tracker.d.ts
CHANGED
@@ -5,9 +5,9 @@ export class Tracker {
 projectionTransform: any;
 debugMode: boolean;
 trackingKeyframeList: any[];
-featurePointsListT:
-imagePixelsListT:
-imagePropertiesListT:
+featurePointsListT: any[];
+imagePixelsListT: any[];
+imagePropertiesListT: any[];
 kernelCaches: {};
 dummyRun(inputT: any): void;
 track(inputImageT: any, lastModelViewTransform: any, targetIndex: any): {
@@ -22,17 +22,9 @@ export class Tracker {
 }[];
 debugExtra: {};
 };
-_computeMatching(featurePointsT: any, imagePixelsT: any, imagePropertiesT: any, projectedImageT: any):
-
-
-
-
-_buildAdjustedModelViewTransform(modelViewProjectionTransform: any): tf.Tensor<tf.Rank>;
-_prebuild(trackingFrame: any, maxCount: any): {
-featurePoints: tf.Tensor<tf.Rank>;
-imagePixels: tf.Tensor<tf.Rank>;
-imageProperties: tf.Tensor<tf.Rank>;
-};
-_compileAndRun(program: any, inputs: any): tf.Tensor<tf.Rank>;
+_computeMatching(featurePointsT: any, imagePixelsT: any, imagePropertiesT: any, projectedImageT: any): any;
+_computeProjection(modelViewProjectionTransformT: any, inputImageT: any, targetIndex: any): any;
+_buildAdjustedModelViewTransform(modelViewProjectionTransform: any): any;
+_prebuild(trackingFrame: any, maxCount: any): any;
+_compileAndRun(program: any, inputs: any): any;
 }
-import * as tf from "@tensorflow/tfjs";
package/dist/compiler/utils/worker-pool.d.ts
CHANGED
@@ -1,14 +1,13 @@
 export class WorkerPool {
-constructor(workerPath: any, poolSize?:
+constructor(workerPath: any, poolSize?: any);
 workerPath: any;
-poolSize:
+poolSize: any;
 workers: any[];
 queue: any[];
 activeWorkers: number;
 runTask(taskData: any): Promise<any>;
-_createWorker():
+_createWorker(): any;
 _executeTask(worker: any, task: any): void;
 _finishTask(worker: any, callback: any, result: any): void;
 destroy(): Promise<void>;
 }
-import { Worker } from 'node:worker_threads';
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@srsergio/taptapp-ar",
-"version": "1.0.3",
+"version": "1.0.4",
 "description": "AR Visualizer and Compiler for Astro and React",
 "repository": {
 "type": "git",
@@ -56,4 +56,4 @@
 "publishConfig": {
 "access": "public"
 }
-}
+}
package/dist/compiler/estimation/esimate-experiment.js
DELETED
package/dist/compiler/estimation/refine-estimate-experiment.js
DELETED
package/dist/react/AREditor.d.ts
DELETED
package/dist/react/AREditor.js
DELETED
package/dist/react/ProgressDialog.d.ts
DELETED
@@ -1,13 +0,0 @@
-interface ProgressDialogProps {
-open: boolean;
-imageStatus: "pending" | "processing" | "completed" | "error";
-videoStatus: "pending" | "processing" | "completed" | "error";
-arProcessingStatus: "pending" | "processing" | "completed" | "error";
-arUploadStatus: "pending" | "processing" | "completed" | "error";
-imageProgress?: number;
-videoProgress?: number;
-arProcessingProgress?: number;
-arUploadProgress?: number;
-}
-export declare function ProgressDialog({ open, imageStatus, videoStatus, arProcessingStatus, arUploadStatus, imageProgress, videoProgress, arProcessingProgress, arUploadProgress, }: ProgressDialogProps): import("react/jsx-runtime").JSX.Element | null;
-export {};
package/dist/react/ProgressDialog.js
DELETED