@srsergio/taptapp-ar 1.0.2 → 1.0.4
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- package/README.md +47 -45
- package/dist/compiler/aframe.js +0 -3
- package/dist/compiler/compiler-base.d.ts +3 -7
- package/dist/compiler/compiler-base.js +28 -14
- package/dist/compiler/compiler.js +1 -1
- package/dist/compiler/compiler.worker.js +1 -1
- package/dist/compiler/controller.d.ts +4 -4
- package/dist/compiler/controller.js +4 -5
- package/dist/compiler/controller.worker.js +0 -2
- package/dist/compiler/detector/crop-detector.d.ts +12 -12
- package/dist/compiler/detector/crop-detector.js +0 -2
- package/dist/compiler/detector/detector-lite.d.ts +73 -0
- package/dist/compiler/detector/detector-lite.js +430 -0
- package/dist/compiler/detector/detector.d.ts +20 -21
- package/dist/compiler/detector/detector.js +236 -243
- package/dist/compiler/detector/kernels/cpu/binomialFilter.js +0 -1
- package/dist/compiler/detector/kernels/cpu/computeExtremaAngles.d.ts +1 -1
- package/dist/compiler/detector/kernels/cpu/computeLocalization.js +0 -4
- package/dist/compiler/detector/kernels/cpu/computeOrientationHistograms.js +0 -18
- package/dist/compiler/detector/kernels/cpu/fakeShader.js +1 -1
- package/dist/compiler/detector/kernels/cpu/prune.d.ts +7 -1
- package/dist/compiler/detector/kernels/cpu/prune.js +1 -42
- package/dist/compiler/detector/kernels/webgl/upsampleBilinear.d.ts +1 -1
- package/dist/compiler/detector/kernels/webgl/upsampleBilinear.js +2 -2
- package/dist/compiler/estimation/refine-estimate.js +0 -1
- package/dist/compiler/estimation/utils.d.ts +1 -1
- package/dist/compiler/estimation/utils.js +1 -14
- package/dist/compiler/image-list.js +4 -4
- package/dist/compiler/input-loader.d.ts +4 -5
- package/dist/compiler/input-loader.js +2 -2
- package/dist/compiler/matching/hamming-distance.js +13 -13
- package/dist/compiler/matching/hierarchical-clustering.js +1 -1
- package/dist/compiler/matching/matching.d.ts +20 -4
- package/dist/compiler/matching/matching.js +67 -41
- package/dist/compiler/matching/ransacHomography.js +1 -2
- package/dist/compiler/node-worker.d.ts +1 -0
- package/dist/compiler/node-worker.js +84 -0
- package/dist/compiler/offline-compiler.d.ts +171 -6
- package/dist/compiler/offline-compiler.js +303 -421
- package/dist/compiler/tensorflow-setup.d.ts +0 -1
- package/dist/compiler/tensorflow-setup.js +27 -1
- package/dist/compiler/three.d.ts +7 -12
- package/dist/compiler/three.js +3 -5
- package/dist/compiler/tracker/extract.d.ts +1 -0
- package/dist/compiler/tracker/extract.js +200 -244
- package/dist/compiler/tracker/tracker.d.ts +9 -17
- package/dist/compiler/tracker/tracker.js +13 -18
- package/dist/compiler/utils/cumsum.d.ts +4 -2
- package/dist/compiler/utils/cumsum.js +17 -19
- package/dist/compiler/utils/gpu-compute.d.ts +57 -0
- package/dist/compiler/utils/gpu-compute.js +262 -0
- package/dist/compiler/utils/images.d.ts +4 -4
- package/dist/compiler/utils/images.js +67 -53
- package/dist/compiler/utils/worker-pool.d.ts +13 -0
- package/dist/compiler/utils/worker-pool.js +84 -0
- package/package.json +12 -14
- package/src/compiler/aframe.js +2 -4
- package/src/compiler/compiler-base.js +29 -14
- package/src/compiler/compiler.js +1 -1
- package/src/compiler/compiler.worker.js +1 -1
- package/src/compiler/controller.js +4 -5
- package/src/compiler/controller.worker.js +0 -2
- package/src/compiler/detector/crop-detector.js +0 -2
- package/src/compiler/detector/detector-lite.js +494 -0
- package/src/compiler/detector/detector.js +1052 -1063
- package/src/compiler/detector/kernels/cpu/binomialFilter.js +0 -1
- package/src/compiler/detector/kernels/cpu/computeLocalization.js +0 -4
- package/src/compiler/detector/kernels/cpu/computeOrientationHistograms.js +0 -17
- package/src/compiler/detector/kernels/cpu/fakeShader.js +1 -1
- package/src/compiler/detector/kernels/cpu/prune.js +1 -37
- package/src/compiler/detector/kernels/webgl/upsampleBilinear.js +2 -2
- package/src/compiler/estimation/refine-estimate.js +0 -1
- package/src/compiler/estimation/utils.js +9 -24
- package/src/compiler/image-list.js +4 -4
- package/src/compiler/input-loader.js +2 -2
- package/src/compiler/matching/hamming-distance.js +11 -15
- package/src/compiler/matching/hierarchical-clustering.js +1 -1
- package/src/compiler/matching/matching.js +72 -42
- package/src/compiler/matching/ransacHomography.js +0 -2
- package/src/compiler/node-worker.js +93 -0
- package/src/compiler/offline-compiler.js +339 -504
- package/src/compiler/tensorflow-setup.js +29 -1
- package/src/compiler/three.js +3 -5
- package/src/compiler/tracker/extract.js +211 -267
- package/src/compiler/tracker/tracker.js +13 -22
- package/src/compiler/utils/cumsum.js +17 -19
- package/src/compiler/utils/gpu-compute.js +303 -0
- package/src/compiler/utils/images.js +84 -53
- package/src/compiler/utils/worker-pool.js +89 -0
- package/dist/compiler/estimation/esimate-experiment.d.ts +0 -5
- package/dist/compiler/estimation/esimate-experiment.js +0 -267
- package/dist/compiler/estimation/refine-estimate-experiment.d.ts +0 -6
- package/dist/compiler/estimation/refine-estimate-experiment.js +0 -429
- package/dist/react/AREditor.d.ts +0 -5
- package/dist/react/AREditor.js +0 -159
- package/dist/react/ProgressDialog.d.ts +0 -13
- package/dist/react/ProgressDialog.js +0 -57
- package/src/compiler/estimation/esimate-experiment.js +0 -316
- package/src/compiler/estimation/refine-estimate-experiment.js +0 -512
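A listing like this can be reproduced locally with npm's built-in diff subcommand (available since npm 7):

npm diff --diff=@srsergio/taptapp-ar@1.0.2 --diff=@srsergio/taptapp-ar@1.0.4

Below, the contents of several of the removed files are shown as deletion hunks.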
package/dist/compiler/estimation/refine-estimate-experiment.js
DELETED

@@ -1,429 +0,0 @@
/**
 * Trying to do normalization before running ICP,
 * i.e. make the coordinates' centroid the origin and the avg distance from the origin sqrt(2).
 *
 * Can we get rid of projectionTransform, and just do ICP on modelViewTransform?
 *
 * But couldn't make it work yet. Can someone with theoretical knowledge of ICP reach out to help? Particularly Multiview Levenberg-Marquardt ICP;
 * I have trouble understanding the Jacobian and things like that.
 */
import { Matrix, inverse } from "ml-matrix";
import { applyModelViewProjectionTransform, buildModelViewProjectionTransform, computeScreenCoordiate, } from "./utils.js";
const TRACKING_THRESH = 5.0; // default
const K2_FACTOR = 4.0; // Question: should it be relative to the size of the screen instead of hardcoded?
const ICP_MAX_LOOP = 10;
const ICP_BREAK_LOOP_ERROR_THRESH = 0.1;
const ICP_BREAK_LOOP_ERROR_RATIO_THRESH = 0.99;
const ICP_BREAK_LOOP_ERROR_THRESH2 = 4.0;
// some temporary/intermediate variables used later. Declare them beforehand to reduce new object allocations
let mat = [[], [], []];
let J_U_Xc = [[], []]; // 2x3
let J_Xc_S = [[], [], []]; // 3x6
const refineEstimate = ({ initialModelViewTransform, projectionTransform, worldCoords: inWorldCoords, screenCoords: inScreenCoords, }) => {
    const { normalizedCoords: worldCoords, param: worldParam } = normalizePoints(inWorldCoords);
    const { normalizedCoords: screenCoords, param: screenParam } = normalizePoints(inScreenCoords);
    const modelViewProjectionTransform = buildModelViewProjectionTransform(projectionTransform, initialModelViewTransform);
    const normModelViewProjectionTransform = _getNormalizedModelViewTransform(modelViewProjectionTransform, worldParam, screenParam);
    /*
     * projection matrix
     *     [k00,   0, k02]
     * K = [  0, k11, k12]
     *     [  0,   0,   1]
     *
     *          [1/k00,     0, -k02/k00]
     * inv(K) = [    0, 1/k11, -k12/k11]
     *          [    0,     0,        1]
     *
     * denote modelViewProjectionTransform as A;
     * since A = K * M, M = inv(K) * A
     *
     *     [a00 / k00 - a20 * k02/k00, a01 / k00 - k02/k00 * a21, a02 / k00 - k02/k00 * a22, a03 / k00 - k02/k00 * a23]
     * M = [a10 / k11 - a20 * k12/k11, a11 / k11 - k12/k11 * a21, a12 / k11 - k12/k11 * a22, a13 / k11 - k12/k11 * a23]
     *     [                      a20,                       a21,                       a22,                       a23]
     */
    const a = normModelViewProjectionTransform;
    const k = projectionTransform;
    const normModelViewTransform = [
        [
            a[0][0] / k[0][0] - (a[2][0] * k[0][2]) / k[0][0],
            a[0][1] / k[0][0] - (a[2][1] * k[0][2]) / k[0][0],
            a[0][2] / k[0][0] - (a[2][2] * k[0][2]) / k[0][0],
            a[0][3] / k[0][0] - (a[2][3] * k[0][2]) / k[0][0],
        ],
        [
            a[1][0] / k[1][1] - (a[2][0] * k[1][2]) / k[1][1],
            a[1][1] / k[1][1] - (a[2][1] * k[1][2]) / k[1][1],
            a[1][2] / k[1][1] - (a[2][2] * k[1][2]) / k[1][1],
            a[1][3] / k[1][1] - (a[2][3] * k[1][2]) / k[1][1],
        ],
        [a[2][0], a[2][1], a[2][2], a[2][3]],
    ];
    const inlierProbs = [1.0, 0.8, 0.6, 0.4, 0.0];
    let updatedModelViewTransform = normModelViewTransform;
    let finalModelViewTransform = null;
    for (let i = 0; i < inlierProbs.length; i++) {
        const ret = _doICP({
            initialModelViewTransform: updatedModelViewTransform,
            projectionTransform,
            worldCoords,
            screenCoords,
            inlierProb: inlierProbs[i],
        });
        updatedModelViewTransform = ret.modelViewTransform;
        if (ret.err < TRACKING_THRESH) {
            finalModelViewTransform = updatedModelViewTransform;
            break;
        }
    }
    if (finalModelViewTransform === null)
        return null;
    const denormModelViewTransform = _getDenormalizedModelViewTransform(finalModelViewTransform, worldParam, screenParam);
    return denormModelViewTransform;
};
// ICP iteration
// Question: can someone provide a theoretical reference / mathematical proof for the following computations?
// I'm unable to derive the Jacobian.
const _doICP = ({ initialModelViewTransform, projectionTransform, worldCoords, screenCoords, inlierProb, }) => {
    const isRobustMode = inlierProb < 1;
    let modelViewTransform = initialModelViewTransform;
    let err0 = 0.0;
    let err1 = 0.0;
    let E = new Array(worldCoords.length);
    let E2 = new Array(worldCoords.length);
    let dxs = new Array(worldCoords.length);
    let dys = new Array(worldCoords.length);
    for (let l = 0; l <= ICP_MAX_LOOP; l++) {
        const modelViewProjectionTransform = buildModelViewProjectionTransform(projectionTransform, modelViewTransform);
        for (let n = 0; n < worldCoords.length; n++) {
            const u = computeScreenCoordiate(modelViewProjectionTransform, worldCoords[n].x, worldCoords[n].y, worldCoords[n].z);
            const dx = screenCoords[n].x - u.x;
            const dy = screenCoords[n].y - u.y;
            console.log("icp err", worldCoords[n], u, screenCoords[n]);
            dxs[n] = dx;
            dys[n] = dy;
            E[n] = dx * dx + dy * dy;
        }
        let K2; // robust mode only
        err1 = 0.0;
        if (isRobustMode) {
            const inlierNum = Math.max(3, Math.floor(worldCoords.length * inlierProb) - 1);
            for (let n = 0; n < worldCoords.length; n++) {
                E2[n] = E[n];
            }
            E2.sort((a, b) => {
                return a - b;
            });
            K2 = Math.max(E2[inlierNum] * K2_FACTOR, 16.0);
            for (let n = 0; n < worldCoords.length; n++) {
                if (E2[n] > K2)
                    err1 += K2 / 6;
                else
                    err1 += (K2 / 6.0) * (1.0 - (1.0 - E2[n] / K2) * (1.0 - E2[n] / K2) * (1.0 - E2[n] / K2));
            }
        }
        else {
            for (let n = 0; n < worldCoords.length; n++) {
                err1 += E[n];
            }
        }
        err1 /= worldCoords.length;
        console.log("icp loop", inlierProb, l, err1);
        if (err1 < ICP_BREAK_LOOP_ERROR_THRESH)
            break;
        //if (l > 0 && err1 < ICP_BREAK_LOOP_ERROR_THRESH2 && err1/err0 > ICP_BREAK_LOOP_ERROR_RATIO_THRESH) break;
        if (l > 0 && err1 / err0 > ICP_BREAK_LOOP_ERROR_RATIO_THRESH)
            break;
        if (l === ICP_MAX_LOOP)
            break;
        err0 = err1;
        const dU = [];
        const allJ_U_S = [];
        for (let n = 0; n < worldCoords.length; n++) {
            if (isRobustMode && E[n] > K2) {
                continue;
            }
            const J_U_S = _getJ_U_S({
                modelViewProjectionTransform,
                modelViewTransform,
                projectionTransform,
                worldCoord: worldCoords[n],
            });
            if (isRobustMode) {
                const W = (1.0 - E[n] / K2) * (1.0 - E[n] / K2);
                for (let j = 0; j < 2; j++) {
                    for (let i = 0; i < 6; i++) {
                        J_U_S[j][i] *= W;
                    }
                }
                dU.push([dxs[n] * W]);
                dU.push([dys[n] * W]);
            }
            else {
                dU.push([dxs[n]]);
                dU.push([dys[n]]);
            }
            for (let i = 0; i < J_U_S.length; i++) {
                allJ_U_S.push(J_U_S[i]);
            }
        }
        const dS = _getDeltaS({ dU, J_U_S: allJ_U_S });
        if (dS === null)
            break;
        modelViewTransform = _updateModelViewTransform({ modelViewTransform, dS });
    }
    return { modelViewTransform, err: err1 };
};
const _updateModelViewTransform = ({ modelViewTransform, dS }) => {
    let ra = dS[0] * dS[0] + dS[1] * dS[1] + dS[2] * dS[2];
    let q0, q1, q2;
    if (ra < 0.000001) {
        q0 = 1.0;
        q1 = 0.0;
        q2 = 0.0;
        ra = 0.0;
    }
    else {
        ra = Math.sqrt(ra);
        q0 = dS[0] / ra;
        q1 = dS[1] / ra;
        q2 = dS[2] / ra;
    }
    const cra = Math.cos(ra);
    const sra = Math.sin(ra);
    const one_cra = 1.0 - cra;
    mat[0][0] = q0 * q0 * one_cra + cra;
    mat[0][1] = q0 * q1 * one_cra - q2 * sra;
    mat[0][2] = q0 * q2 * one_cra + q1 * sra;
    mat[0][3] = dS[3];
    mat[1][0] = q1 * q0 * one_cra + q2 * sra;
    mat[1][1] = q1 * q1 * one_cra + cra;
    mat[1][2] = q1 * q2 * one_cra - q0 * sra;
    mat[1][3] = dS[4];
    mat[2][0] = q2 * q0 * one_cra - q1 * sra;
    mat[2][1] = q2 * q1 * one_cra + q0 * sra;
    mat[2][2] = q2 * q2 * one_cra + cra;
    mat[2][3] = dS[5];
    const mat2 = [[], [], []];
    for (let j = 0; j < 3; j++) {
        for (let i = 0; i < 4; i++) {
            mat2[j][i] =
                modelViewTransform[j][0] * mat[0][i] +
                    modelViewTransform[j][1] * mat[1][i] +
                    modelViewTransform[j][2] * mat[2][i];
        }
        mat2[j][3] += modelViewTransform[j][3];
    }
    return mat2;
};
const _getDeltaS = ({ dU, J_U_S }) => {
    const J = new Matrix(J_U_S);
    const U = new Matrix(dU);
    const JT = J.transpose();
    const JTJ = JT.mmul(J);
    const JTU = JT.mmul(U);
    let JTJInv;
    try {
        JTJInv = inverse(JTJ);
    }
    catch (e) {
        return null;
    }
    const S = JTJInv.mmul(JTU);
    return S.to1DArray();
};
const _getJ_U_S = ({ modelViewProjectionTransform, modelViewTransform, projectionTransform, worldCoord, }) => {
    const T = modelViewTransform;
    const { x, y, z } = worldCoord;
    const u = applyModelViewProjectionTransform(modelViewProjectionTransform, x, y, z);
    const z2 = u.z * u.z;
    J_U_Xc[0][0] = (projectionTransform[0][0] * u.z - projectionTransform[2][0] * u.x) / z2;
    J_U_Xc[0][1] = (projectionTransform[0][1] * u.z - projectionTransform[2][1] * u.x) / z2;
    J_U_Xc[0][2] = (projectionTransform[0][2] * u.z - projectionTransform[2][2] * u.x) / z2;
    J_U_Xc[1][0] = (projectionTransform[1][0] * u.z - projectionTransform[2][0] * u.y) / z2;
    J_U_Xc[1][1] = (projectionTransform[1][1] * u.z - projectionTransform[2][1] * u.y) / z2;
    J_U_Xc[1][2] = (projectionTransform[1][2] * u.z - projectionTransform[2][2] * u.y) / z2;
    J_Xc_S[0][0] = T[0][2] * y;
    J_Xc_S[0][1] = -T[0][2] * x;
    J_Xc_S[0][2] = T[0][1] * x - T[0][0] * y;
    J_Xc_S[0][3] = T[0][0];
    J_Xc_S[0][4] = T[0][1];
    J_Xc_S[0][5] = T[0][2];
    J_Xc_S[1][0] = T[1][2] * y;
    J_Xc_S[1][1] = -T[1][2] * x;
    J_Xc_S[1][2] = T[1][1] * x - T[1][0] * y;
    J_Xc_S[1][3] = T[1][0];
    J_Xc_S[1][4] = T[1][1];
    J_Xc_S[1][5] = T[1][2];
    J_Xc_S[2][0] = T[2][2] * y;
    J_Xc_S[2][1] = -T[2][2] * x;
    J_Xc_S[2][2] = T[2][1] * x - T[2][0] * y;
    J_Xc_S[2][3] = T[2][0];
    J_Xc_S[2][4] = T[2][1];
    J_Xc_S[2][5] = T[2][2];
    const J_U_S = [[], []];
    for (let j = 0; j < 2; j++) {
        for (let i = 0; i < 6; i++) {
            J_U_S[j][i] = 0.0;
            for (let k = 0; k < 3; k++) {
                J_U_S[j][i] += J_U_Xc[j][k] * J_Xc_S[k][i];
            }
        }
    }
    return J_U_S;
};
const _getNormalizedModelViewTransform = (modelViewTransform, worldParam, screenParam) => {
    /*
     * notation:
     * m: modelViewTransform,
     * [x,y,z,1]: world coordinates
     * [x',y',z',1]: screen coordinates
     *
     * Normalizing coordinates with meanX, meanY and scale s transforms the coordinates to
     * (note that z doesn't scale; otherwise the screen point wouldn't scale, e.g. x' = x / z)
     *
     * [s*(x-meanX)]
     * [s*(y-meanY)]
     * [z          ]
     * [1          ]
     *
     * Let's define a transformation T such that
     * `normalizedP = T * P`
     *
     * [s * (x - meanX)]   [s, 0, 0, -s*meanX]   [x]
     * [s * (y - meanY)] = [0, s, 0, -s*meanY] * [y]
     * [z              ]   [0, 0, 1,        0]   [z]
     * [1              ]   [0, 0, 0,        1]   [1]
     *
     * and `P = inv(T) * normalizedP`
     *
     * [x]   [1/s, 0  , 0, meanX]   [s * (x - meanX)]
     * [y] = [0  , 1/s, 0, meanY] * [s * (y - meanY)]
     * [z]   [0  , 0  , 1,     0]   [z              ]
     * [1]   [0  , 0  , 0,     1]   [1              ]
     *
     * Before normalizing coordinates, the following holds:
     * M * P = P' (P is a world coordinate, and P' is a screen coordinate)
     *
     * -> M * inv(T) * T * P = inv(T') * T' * P'
     * -> T' * M * inv(T) * T * P = T' * P'
     * Here, T * P and T' * P' are the normalized coordinates for world and screen, so the modelViewTransform for normalized coordinates would be:
     *
     * Mnorm = T' * M * inv(T) =
     *
     * [s', 0, 0, -s'*meanX']   [m00, m01, m02, m03]   [1/s,   0, 0, meanX]
     * [ 0, s', 0, -s'*meanY'] * [m10, m11, m12, m13] * [  0, 1/s, 0, meanY]
     * [ 0,  0, 1,          0]   [m20, m21, m22, m23]   [  0,   0, 1,     0]
     * [ 0,  0, 0,          1]   [  0,   0,   0,   1]   [  0,   0, 0,     1]
     *
     * =
     *
     * [m00 * s'/s, m01 * s'/s, m02 * s', m00*s'*meanX + m01*s'*meanY + m03*s' - meanX'*s']
     * [m10 * s'/s, m11 * s'/s, m12 * s', m10*s'*meanX + m11*s'*meanY + m13*s' - meanY'*s']
     * [m20 / s   , m21 / s   , m22     , m20*meanX + m21*meanY + m23                     ]
     * [         0,          0,        0,                                                1]
     */
    const m = modelViewTransform;
    const ss = screenParam.s / worldParam.s;
    const normModelViewTransform = [
        [
            m[0][0] * ss,
            m[0][1] * ss,
            m[0][2] * screenParam.s,
            (m[0][0] * worldParam.meanX + m[0][1] * worldParam.meanY + m[0][3] - screenParam.meanX) *
                screenParam.s,
        ],
        [
            m[1][0] * ss,
            m[1][1] * ss,
            m[1][2] * screenParam.s,
            (m[1][0] * worldParam.meanX + m[1][1] * worldParam.meanY + m[1][3] - screenParam.meanY) *
                screenParam.s,
        ],
        [
            m[2][0] / worldParam.s,
            m[2][1] / worldParam.s,
            m[2][2],
            m[2][0] * worldParam.meanX + m[2][1] * worldParam.meanY + m[2][3],
        ],
    ];
    return normModelViewTransform;
};
const _getDenormalizedModelViewTransform = (modelViewTransform, worldParam, screenParam) => {
    /*
     * Referring to _getNormalizedModelViewTransform, we have
     *
     * Mnorm = T' * M * inv(T)
     *
     * Therefore,
     *
     * M = inv(T') * Mnorm * T
     *
     * [1/s',    0, 0, meanX']   [m00, m01, m02, m03]   [s, 0, 0, -s*meanX]
     * [   0, 1/s', 0, meanY'] * [m10, m11, m12, m13] * [0, s, 0, -s*meanY]
     * [   0,    0, 1,      0]   [m20, m21, m22, m23]   [0, 0, 1,        0]
     * [   0,    0, 0,      1]   [  0,   0,   0,   1]   [0, 0, 0,        1]
     *
     * =
     *
     * [m00*s/s', m01*s/s', m02/s', (-m00*s*meanX - m01*s*meanY + m03)/s' + meanX']
     * [m10*s/s', m11*s/s', m12/s', (-m10*s*meanX - m11*s*meanY + m13)/s' + meanY']
     * [m20*s   , m21*s   , m22   ,  -m20*s*meanX - m21*s*meanY + m23             ]
     * [       0,        0,      0,                                              1]
     */
    const m = modelViewTransform;
    const ss = worldParam.s / screenParam.s;
    const sMeanX = worldParam.s * worldParam.meanX;
    const sMeanY = worldParam.s * worldParam.meanY;
    const denormModelViewTransform = [
        [
            m[0][0] * ss,
            m[0][1] * ss,
            m[0][2] / screenParam.s,
            (-m[0][0] * sMeanX - m[0][1] * sMeanY + m[0][3]) / screenParam.s + screenParam.meanX,
        ],
        [
            m[1][0] * ss,
            m[1][1] * ss,
            m[1][2] / screenParam.s,
            (-m[1][0] * sMeanX - m[1][1] * sMeanY + m[1][3]) / screenParam.s + screenParam.meanY,
        ],
        [
            m[2][0] * worldParam.s,
            m[2][1] * worldParam.s,
            m[2][2],
            -m[2][0] * sMeanX - m[2][1] * sMeanY + m[2][3],
        ],
    ];
    return denormModelViewTransform;
};
// centroid at origin and avg distance from origin is sqrt(2)
const normalizePoints = (coords) => {
    let sumX = 0;
    let sumY = 0;
    for (let i = 0; i < coords.length; i++) {
        sumX += coords[i].x;
        sumY += coords[i].y;
    }
    let meanX = sumX / coords.length;
    let meanY = sumY / coords.length;
    let sumDiff = 0;
    for (let i = 0; i < coords.length; i++) {
        const diffX = coords[i].x - meanX;
        const diffY = coords[i].y - meanY;
        sumDiff += Math.sqrt(diffX * diffX + diffY * diffY);
    }
    let s = (Math.sqrt(2) * coords.length) / sumDiff;
    const normalizedCoords = [];
    for (let i = 0; i < coords.length; i++) {
        normalizedCoords.push({
            x: (coords[i].x - meanX) * s,
            y: (coords[i].y - meanY) * s,
        });
    }
    return { normalizedCoords, param: { meanX, meanY, s } };
};
export { refineEstimate };
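As an aside on what the removed experiment was doing: its normalizePoints step is the classic Hartley-style point normalization (centroid moved to the origin, mean distance from the origin scaled to sqrt(2)). A minimal standalone sketch, not part of the package, that checks this invariant:

// Sketch (not from the package): verify the normalization invariant
// that the removed normalizePoints established.
const normalize = (coords) => {
    const meanX = coords.reduce((sum, c) => sum + c.x, 0) / coords.length;
    const meanY = coords.reduce((sum, c) => sum + c.y, 0) / coords.length;
    const meanDist = coords.reduce((sum, c) => sum + Math.hypot(c.x - meanX, c.y - meanY), 0) / coords.length;
    const s = Math.sqrt(2) / meanDist; // same value as (sqrt(2) * n) / sumDiff above
    return coords.map((c) => ({ x: (c.x - meanX) * s, y: (c.y - meanY) * s }));
};
const pts = normalize([{ x: 10, y: 0 }, { x: 0, y: 10 }, { x: -10, y: -10 }]);
const meanDist = pts.reduce((sum, c) => sum + Math.hypot(c.x, c.y), 0) / pts.length;
console.log(meanDist.toFixed(4)); // 1.4142, i.e. ~Math.sqrt(2)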
package/dist/react/AREditor.d.ts
DELETED
package/dist/react/AREditor.js
DELETED
@@ -1,159 +0,0 @@
import { jsx as _jsx, jsxs as _jsxs } from "react/jsx-runtime";
import { useState, useRef, useCallback } from "react";
import { customAlphabet } from "nanoid";
import { Image, Video, Upload, Camera, LoaderCircle } from "lucide-react";
const ALLOWED_MIME_TYPES = ["image/jpeg", "image/png", "image/webp"];
const ALLOWED_VIDEO_TYPES = ["video/mp4", "video/webm"];
const useFileUpload = (allowedTypes) => {
    const [fileState, setFileState] = useState({ file: null, preview: "" });
    const [dimensions, setDimensions] = useState({});
    const fileInputRef = useRef(null);
    const handleFileChange = useCallback((file) => {
        if (fileState.preview) {
            URL.revokeObjectURL(fileState.preview);
        }
        if (!file) {
            setFileState({ file: null, preview: "" });
            return;
        }
        // For .mind files, validate the extension instead of the MIME type
        if (allowedTypes.includes(".mind")) {
            if (!file.name.toLowerCase().endsWith(".mind")) {
                throw new Error("El archivo debe tener extensión .mind");
            }
        }
        else if (!allowedTypes.includes(file.type)) {
            throw new Error("Tipo de archivo no permitido");
        }
        if (file.type.includes("video")) {
            const video = document.createElement("video");
            video.src = URL.createObjectURL(file);
        }
        console.log("Archivo cargado:", {
            nombre: file.name,
            tamaño: (file.size / 1024).toFixed(2) + " KB",
            tipo: file.type || "application/octet-stream",
        });
        const preview = URL.createObjectURL(file);
        if (file.type.includes("video")) {
            const video = document.createElement("video");
            video.src = URL.createObjectURL(file);
            video.addEventListener("loadedmetadata", () => {
                const width = video.videoWidth;
                const height = video.videoHeight;
                setDimensions({ width, height });
                console.log("Ancho y alto del video:", width, height);
            });
        }
        setFileState({ file, preview });
    }, [allowedTypes, fileState.preview]);
    const reset = useCallback(() => {
        if (fileState.preview) {
            URL.revokeObjectURL(fileState.preview);
        }
        setFileState({ file: null, preview: "" });
        if (fileInputRef.current) {
            fileInputRef.current.value = "";
        }
    }, [fileState.preview]);
    return { fileState, handleFileChange, reset, fileInputRef, dimensions };
};
const useUploadFile = () => {
    const uploadFile = async (file, type) => {
        const customNanoid = customAlphabet("1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ", 21);
        const id = customNanoid();
        const formData = new FormData();
        formData.append("file", file);
        const endpoint = type === "video"
            ? `https://r2-worker.sergiolazaromondargo.workers.dev/video/${id}`
            : type === "mind"
                ? `https://r2-worker.sergiolazaromondargo.workers.dev/mind/${id}`
                : `https://r2-worker.sergiolazaromondargo.workers.dev/${id}`;
        const response = await fetch(endpoint, {
            method: "PUT",
            body: formData,
        });
        if (!response.ok) {
            throw new Error(`Error al subir ${type}: ${response.status} ${response.statusText}`);
        }
        return await response.json();
    };
    return { uploadFile };
};
export const AREditor = ({ adminId }) => {
    const { fileState: imageState, handleFileChange: handleImageChange, reset: resetImage, fileInputRef: imageInputRef, } = useFileUpload(ALLOWED_MIME_TYPES);
    const { fileState: mindState, handleFileChange: handleMindChange, reset: resetMind, fileInputRef: mindInputRef, } = useFileUpload([".mind"]);
    const { fileState: videoState, handleFileChange: handleVideoChange, reset: resetVideo, fileInputRef: videoInputRef, dimensions: videoDimensions, } = useFileUpload(ALLOWED_VIDEO_TYPES);
    const [videoScale, setVideoScale] = useState(1);
    const [loading, setLoading] = useState(false);
    const [error, setError] = useState("");
    const { uploadFile } = useUploadFile();
    const handleSave = async () => {
        try {
            setLoading(true);
            setError("");
            if (!imageState.file || !mindState.file || !videoState.file) {
                throw new Error("Se requieren una imagen, un archivo .mind y un video");
            }
            const [imageResult, mindResult, videoResult] = await Promise.all([
                uploadFile(imageState.file, "image"),
                uploadFile(mindState.file, "mind"),
                uploadFile(videoState.file, "video"),
            ]);
            const data = {
                adminId,
                data: [
                    {
                        id: `photos-${Date.now()}`,
                        type: "photos",
                        images: [{ image: imageResult.url, fileId: imageResult.fileId }],
                    },
                    {
                        id: `videoNative-${Date.now()}`,
                        type: "videoNative",
                        url: videoResult.url,
                        fileId: videoResult.fileId,
                        scale: videoScale,
                        width: videoDimensions.width,
                        height: videoDimensions.height,
                    },
                    {
                        id: `ar-${Date.now()}`,
                        type: "ar",
                        url: mindResult.url,
                        fileId: mindResult.fileId,
                    },
                ],
                type: "ar",
            };
            const response = await fetch("/api/updateadmin.json", {
                method: "POST",
                headers: { "Content-Type": "application/json" },
                body: JSON.stringify(data),
            });
            if (!response.ok) {
                throw new Error(`Error actualizando datos AR: ${response.status}`);
            }
            alert("¡Guardado exitosamente!");
            resetImage();
            resetMind();
            resetVideo();
        }
        catch (error) {
            setError(error.message);
        }
        finally {
            setLoading(false);
        }
    };
    const FileUploadSection = ({ type, icon: Icon, fileState, inputRef, onFileChange, allowedTypes, label, }) => (_jsxs("div", { className: "group relative overflow-hidden rounded-xl shadow-lg bg-white/80 backdrop-blur-sm transition-all duration-300 hover:shadow-xl hover:scale-[1.02] border border-gray-100", children: [_jsx("input", { ref: inputRef, type: "file", accept: allowedTypes.join(","), onChange: (e) => {
            try {
                const file = e.target.files?.[0] || null;
                onFileChange(file);
            }
            catch (error) {
                setError(error.message);
            }
        }, className: "hidden" }), !fileState.file ? (_jsxs("label", { htmlFor: inputRef.current?.id, onClick: () => inputRef.current?.click(), className: "flex cursor-pointer flex-col items-center justify-center p-10 bg-gradient-to-br from-gray-50 to-white transition-colors group-hover:from-blue-50 group-hover:to-purple-50", children: [_jsx("div", { className: "transform transition-transform duration-300 group-hover:scale-110", children: _jsx(Icon, { className: "h-16 w-16 text-gray-400 group-hover:text-blue-500" }) }), _jsx("span", { className: "mt-4 text-lg font-medium bg-gradient-to-r from-gray-600 to-gray-800 bg-clip-text text-transparent group-hover:from-blue-600 group-hover:to-purple-600", children: label }), _jsx("span", { className: "mt-2 text-sm text-gray-400 group-hover:text-gray-500", children: allowedTypes.join(", ") })] })) : (_jsxs("div", { className: "p-6 space-y-4", children: [_jsx("div", { className: "relative aspect-video w-full overflow-hidden rounded-lg ring-1 ring-gray-100", children: type === "video" ? (_jsx("video", { src: fileState.preview, controls: true, className: "h-full w-full object-cover", children: "Tu navegador no soporta la reproducci\u00F3n de videos." })) : type === "image" ? (_jsx("img", { src: fileState.preview, alt: "Preview", className: "h-full w-full object-cover" })) : (_jsxs("div", { className: "flex h-full flex-col items-center justify-center space-y-3 bg-gradient-to-br from-blue-50 to-purple-50 p-4", children: [_jsx("div", { className: "flex items-center justify-center rounded-full bg-gradient-to-r from-blue-400 to-purple-400 p-3", children: _jsx(Upload, { className: "h-6 w-6 text-white" }) }), _jsxs("div", { className: "text-center space-y-2", children: [_jsx("span", { className: "block text-lg font-medium text-gray-600", children: fileState.file.name }), _jsxs("span", { className: "block text-sm text-gray-500", children: ["Tama\u00F1o: ", (fileState.file.size / 1024).toFixed(2), " KB"] }), _jsx("span", { className: "mt-1 block text-sm font-medium text-green-600", children: "\u2713 Archivo AR cargado correctamente" })] })] })) }), _jsxs("div", { className: "flex items-center justify-between", children: [_jsxs("div", { className: "flex items-center space-x-3", children: [_jsx(Icon, { className: "h-5 w-5 text-blue-500" }), _jsx("span", { className: "text-sm font-medium text-gray-600", children: fileState.file.name })] }), _jsx("button", { onClick: () => onFileChange(null), className: "rounded-full bg-gradient-to-r from-blue-500 to-purple-500 px-4 py-2 text-sm font-medium text-white shadow-md transition-all hover:from-blue-600 hover:to-purple-600 hover:shadow-lg active:scale-95", children: "Cambiar" })] })] }))] }));
    return (_jsx("div", { className: "min-h-screen w-full bg-gradient-to-br from-blue-50 via-white to-purple-50 p-4 md:p-8", children: _jsxs("div", { className: "mx-auto max-w-3xl rounded-3xl bg-white/90 backdrop-blur-md p-6 md:p-10 shadow-2xl ring-1 ring-black/10", children: [_jsxs("div", { className: "flex flex-col items-center justify-center space-y-8", children: [_jsx("div", { className: "rounded-2xl bg-gradient-to-br from-blue-500 to-purple-500 p-6 shadow-xl shadow-blue-300/30 hover:scale-105 transition-transform", children: _jsx(Camera, { className: "h-12 w-12 text-white" }) }), _jsx("h1", { className: "bg-gradient-to-r from-blue-600 to-purple-600 bg-clip-text text-5xl font-bold text-transparent text-center", children: "Editor de Experiencia AR" }), _jsx("p", { className: "text-2xl text-gray-600 text-center font-light", children: "Crea una experiencia de realidad aumentada \u00FAnica" })] }), _jsxs("div", { className: "mt-12 space-y-8", children: [_jsx(FileUploadSection, { type: "image", icon: Image, fileState: imageState, inputRef: imageInputRef, onFileChange: handleImageChange, allowedTypes: ALLOWED_MIME_TYPES, label: "Haz clic para seleccionar una imagen" }), _jsx(FileUploadSection, { type: "mind", icon: Upload, fileState: mindState, inputRef: mindInputRef, onFileChange: handleMindChange, allowedTypes: [".mind"], label: "Haz clic para seleccionar archivo .mind" }), _jsx(FileUploadSection, { type: "video", icon: Video, fileState: videoState, inputRef: videoInputRef, onFileChange: handleVideoChange, allowedTypes: ALLOWED_VIDEO_TYPES, label: "Haz clic para seleccionar un video" }), _jsxs("div", { className: "space-y-4 rounded-2xl border border-gray-200/50 bg-white/90 backdrop-blur-md p-8 shadow-lg ring-1 ring-black/10", children: [_jsxs("label", { className: "flex items-center justify-between text-2xl font-semibold text-gray-800", children: [_jsx("span", { children: "Escala del Video" }), _jsxs("span", { className: "bg-gradient-to-r from-blue-600 to-purple-600 bg-clip-text text-transparent font-bold", children: [videoScale, "x"] })] }), _jsxs("div", { className: "relative py-8", children: [_jsx("div", { className: "absolute h-3 w-full rounded-full bg-gradient-to-r from-blue-400 to-purple-400 opacity-20" }), _jsx("div", { className: "absolute h-3 rounded-full bg-gradient-to-r from-blue-400 to-purple-400 shadow-lg", style: { width: `${(videoScale / 2) * 100}%` } }), _jsx("input", { type: "range", min: "0.1", max: "2", step: "0.1", value: videoScale, onChange: (e) => setVideoScale(Number(e.target.value)), className: "relative h-3 w-full cursor-pointer appearance-none rounded-lg bg-transparent focus:outline-none focus:ring-2 focus:ring-blue-400 focus:ring-offset-4", style: { WebkitAppearance: "none" } })] })] })] }), error && (_jsx("div", { className: "mt-6 rounded-xl bg-red-50 p-4 text-red-700 shadow-sm ring-1 ring-red-100", children: error })), _jsx("button", { onClick: handleSave, disabled: loading, className: "mt-10 w-full rounded-xl bg-gradient-to-r from-blue-600 to-purple-600 py-5 text-xl font-semibold text-white shadow-xl transition-all hover:from-blue-700 hover:to-purple-700 disabled:from-gray-400 disabled:to-gray-400 disabled:shadow-none focus:outline-none focus:ring-2 focus:ring-blue-400 focus:ring-offset-4", children: loading ? (_jsxs("div", { className: "flex items-center justify-center space-x-3", children: [_jsx(LoaderCircle, { className: "h-7 w-7 animate-spin" }), _jsx("span", { children: "Guardando..." })] })) : ("Guardar") })] }) }));
};
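The removed editor's upload path is easy to see in isolation. A condensed sketch, not part of the package, of the PUT-based upload that useUploadFile performed (the endpoint comes from the source above; the { url, fileId } response shape is inferred from how handleSave consumes the result):

// Sketch (not from the package): upload one .mind file the way the removed
// useUploadFile hook did, via an HTTP PUT of multipart form data.
const uploadMindFile = async (file, id) => {
    const formData = new FormData();
    formData.append("file", file);
    const response = await fetch(`https://r2-worker.sergiolazaromondargo.workers.dev/mind/${id}`, {
        method: "PUT",
        body: formData,
    });
    if (!response.ok) {
        throw new Error(`Upload failed: ${response.status} ${response.statusText}`);
    }
    return response.json(); // expected shape: { url, fileId }
};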
package/dist/react/ProgressDialog.d.ts
DELETED

@@ -1,13 +0,0 @@
interface ProgressDialogProps {
    open: boolean;
    imageStatus: "pending" | "processing" | "completed" | "error";
    videoStatus: "pending" | "processing" | "completed" | "error";
    arProcessingStatus: "pending" | "processing" | "completed" | "error";
    arUploadStatus: "pending" | "processing" | "completed" | "error";
    imageProgress?: number;
    videoProgress?: number;
    arProcessingProgress?: number;
    arUploadProgress?: number;
}
export declare function ProgressDialog({ open, imageStatus, videoStatus, arProcessingStatus, arUploadStatus, imageProgress, videoProgress, arProcessingProgress, arUploadProgress, }: ProgressDialogProps): import("react/jsx-runtime").JSX.Element | null;
export {};
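For reference, a hypothetical call site matching the declared props (a JSX sketch, not from the package): each stage status takes one of "pending" | "processing" | "completed" | "error", and the *Progress props are optional numbers (presumably percentages).

// Sketch (not from the package): one way a caller could have used
// the removed component's declared API.
<ProgressDialog
    open={true}
    imageStatus="completed"
    videoStatus="processing"
    arProcessingStatus="pending"
    arUploadStatus="pending"
    videoProgress={42}
/>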