xrblocks 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/build/xrblocks.js CHANGED
@@ -14,9 +14,9 @@
  * limitations under the License.
  *
  * @file xrblocks.js
- * @version v0.5.0
- * @commitid c2f4b09
- * @builddate 2025-12-04T15:14:30.184Z
+ * @version v0.6.0
+ * @commitid 64e2279
+ * @builddate 2025-12-19T21:53:04.057Z
  * @description XR Blocks SDK, built from source with the above commit ID.
  * @agent When using with Gemini to create XR apps, use **Gemini Canvas** mode,
  * and follow rules below:
@@ -286,7 +286,7 @@ class GenerateSkyboxTool extends Tool {
  async execute(args) {
  try {
  const image = await this.ai.generate('Generate a 360 equirectangular skybox image for the prompt of:' +
- args.prompt, 'image', 'Generate a 360 equirectangular skybox image for the prompt', 'gemini-2.5-flash-image-preview');
+ args.prompt, 'image', 'Generate a 360 equirectangular skybox image for the prompt');
  if (image) {
  console.log('Applying texture...');
  this.scene.background = new THREE.TextureLoader().load(image);
@@ -1365,7 +1365,7 @@ class Gemini extends BaseAIModel {
  }
  return { text: response.text || null };
  }
- async generate(prompt, type = 'image', systemInstruction = 'Generate an image', model = 'gemini-2.5-flash-image-preview') {
+ async generate(prompt, type = 'image', systemInstruction = 'Generate an image', model = 'gemini-2.5-flash-image') {
  if (!this.isAvailable())
  return;
  let contents;
@@ -1674,7 +1674,7 @@ class AI extends Script {
  * In XR mode, show a 3D UI to instruct users to get an API key.
  */
  triggerKeyPopup() { }
- async generate(prompt, type = 'image', systemInstruction = 'Generate an image', model = 'gemini-2.5-flash-image-preview') {
+ async generate(prompt, type = 'image', systemInstruction = 'Generate an image', model = undefined) {
  return this.model.generate(prompt, type, systemInstruction, model);
  }
  /**
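
The hunks above retire the deprecated `gemini-2.5-flash-image-preview` identifier: the Gemini backend now defaults to `gemini-2.5-flash-image`, and the `AI` facade passes `model = undefined` so each backend's own default wins. A minimal sketch of the resulting call pattern (the prompt is illustrative; `ai` is the exported xrblocks module):

```js
// With the model argument omitted, AI.generate forwards `undefined`
// and the Gemini backend substitutes its new default,
// 'gemini-2.5-flash-image'.
const image = await ai.generate(
    'a 360 equirectangular skybox of a pine forest',  // prompt
    'image',                                          // output type
    'Generate an image');                             // system instruction
```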
@@ -1849,6 +1849,27 @@ function deepMerge(obj1, obj2) {
  }
  }
 
+ /**
+ * Default parameters for rgb to depth projection.
+ * For RGB and depth, 4:3 and 1:1, respectively.
+ */
+ const DEFAULT_RGB_TO_DEPTH_PARAMS = {
+ scale: 1,
+ scaleX: 0.75,
+ scaleY: 0.63,
+ translateU: 0.2,
+ translateV: -0.02,
+ k1: -0.046,
+ k2: 0,
+ k3: 0,
+ p1: 0,
+ p2: 0,
+ xc: 0,
+ yc: 0,
+ };
+ /**
+ * Configuration options for the device camera.
+ */
  class DeviceCameraOptions {
  constructor(options) {
  this.enabled = false;
@@ -1856,10 +1877,14 @@ class DeviceCameraOptions {
  * Hint for performance optimization on frequent captures.
  */
  this.willCaptureFrequently = false;
+ /**
+ * Parameters for RGB to depth UV mapping given different aspect ratios.
+ */
+ this.rgbToDepthParams = { ...DEFAULT_RGB_TO_DEPTH_PARAMS };
  deepMerge(this, options);
  }
  }
- // Base configuration for all common capture settings
+ // Base configuration for all common capture settings.
  const baseCaptureOptions = {
  enabled: true,
  videoConstraints: {
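
The calibration constants that used to live in a module-private `rgbToDepthParams` literal (removed further down) are now exported as `DEFAULT_RGB_TO_DEPTH_PARAMS` and seeded onto every `DeviceCameraOptions`. A minimal override sketch, assuming `deepMerge` recurses into nested objects so unspecified fields keep their defaults:

```js
import {DEFAULT_RGB_TO_DEPTH_PARAMS, DeviceCameraOptions} from 'xrblocks';

// Partial per-app calibration override; the remaining fields fall back
// to the exported defaults.
const cameraOptions = new DeviceCameraOptions({
  enabled: true,
  rgbToDepthParams: {k1: -0.05, translateU: 0.18},
});
console.assert(cameraOptions.rgbToDepthParams.scaleX ===
               DEFAULT_RGB_TO_DEPTH_PARAMS.scaleX);
```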
@@ -3050,6 +3075,9 @@ class Depth {
  // Whether we're counting the number of depth clients.
  this.depthClientsInitialized = false;
  this.depthClients = new Set();
+ this.depthProjectionMatrices = [];
+ this.depthViewMatrices = [];
+ this.depthViewProjectionMatrices = [];
  if (Depth.instance) {
  return Depth.instance;
  }
@@ -3132,16 +3160,34 @@ class Depth {
  vertexPosition.multiplyScalar(-depth / vertexPosition.z);
  return vertexPosition;
  }
- updateCPUDepthData(depthData, view_id = 0) {
- this.cpuDepthData[view_id] = depthData;
+ updateDepthMatrices(depthData, viewId) {
+ // Populate depth view and projection matrices.
+ while (viewId >= this.depthViewMatrices.length) {
+ this.depthViewMatrices.push(new THREE.Matrix4());
+ this.depthViewProjectionMatrices.push(new THREE.Matrix4());
+ this.depthProjectionMatrices.push(new THREE.Matrix4());
+ }
+ if (depthData.projectionMatrix && depthData.transform) {
+ this.depthProjectionMatrices[viewId].fromArray(depthData.projectionMatrix);
+ this.depthViewMatrices[viewId].fromArray(depthData.transform.inverse.matrix);
+ }
+ else {
+ const camera = this.renderer.xr?.getCamera()?.cameras?.[viewId] ?? this.camera;
+ this.depthProjectionMatrices[viewId].copy(camera.projectionMatrix);
+ this.depthViewMatrices[viewId].copy(camera.matrixWorldInverse);
+ }
+ this.depthViewProjectionMatrices[viewId].multiplyMatrices(this.depthProjectionMatrices[viewId], this.depthViewMatrices[viewId]);
+ }
+ updateCPUDepthData(depthData, viewId = 0) {
+ this.cpuDepthData[viewId] = depthData;
  // Workaround for b/382679381.
  this.rawValueToMeters = depthData.rawValueToMeters;
  if (this.options.useFloat32) {
  this.rawValueToMeters = 1.0;
  }
  // Updates Depth Array.
- if (this.depthArray[view_id] == null) {
- this.depthArray[view_id] = this.options.useFloat32
+ if (this.depthArray[viewId] == null) {
+ this.depthArray[viewId] = this.options.useFloat32
  ? new Float32Array(depthData.data)
  : new Uint16Array(depthData.data);
  this.width = depthData.width;
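
The new `updateDepthMatrices` prefers camera matrices bundled with the depth packet (a `projectionMatrix` array plus an `XRRigidTransform`-style `transform`) and only falls back to the per-view XR render camera, so depth now carries its own view/projection state instead of assuming it matches the render camera. A small consumer sketch using the new arrays (names from the diff; `depth` is the `Depth` instance):

```js
// world <- depth-view: invert the stored view matrix for view 0.
const depthModelMatrix = depth.depthViewMatrices[0].clone().invert();
// clip <- world: precomputed on every update as projection * view.
const depthClipFromWorld = depth.depthViewProjectionMatrices[0];
```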
@@ -3149,20 +3195,21 @@ class Depth {
  }
  else {
  // Copies the data from an ArrayBuffer to the existing TypedArray.
- this.depthArray[view_id].set(this.options.useFloat32
+ this.depthArray[viewId].set(this.options.useFloat32
  ? new Float32Array(depthData.data)
  : new Uint16Array(depthData.data));
  }
  // Updates Depth Texture.
  if (this.options.depthTexture.enabled && this.depthTextures) {
- this.depthTextures.updateData(depthData, view_id);
+ this.depthTextures.updateData(depthData, viewId);
  }
- if (this.options.depthMesh.enabled && this.depthMesh && view_id == 0) {
+ if (this.options.depthMesh.enabled && this.depthMesh && viewId == 0) {
  this.depthMesh.updateDepth(depthData);
  }
+ this.updateDepthMatrices(depthData, viewId);
  }
- updateGPUDepthData(depthData, view_id = 0) {
- this.gpuDepthData[view_id] = depthData;
+ updateGPUDepthData(depthData, viewId = 0) {
+ this.gpuDepthData[viewId] = depthData;
  // Workaround for b/382679381.
  this.rawValueToMeters = depthData.rawValueToMeters;
  if (this.options.useFloat32) {
@@ -3175,8 +3222,8 @@ class Depth {
  ? this.depthMesh.convertGPUToGPU(depthData)
  : null;
  if (cpuDepth) {
- if (this.depthArray[view_id] == null) {
- this.depthArray[view_id] = this.options.useFloat32
+ if (this.depthArray[viewId] == null) {
+ this.depthArray[viewId] = this.options.useFloat32
  ? new Float32Array(cpuDepth.data)
  : new Uint16Array(cpuDepth.data);
  this.width = cpuDepth.width;
@@ -3184,16 +3231,16 @@ class Depth {
  }
  else {
  // Copies the data from an ArrayBuffer to the existing TypedArray.
- this.depthArray[view_id].set(this.options.useFloat32
+ this.depthArray[viewId].set(this.options.useFloat32
  ? new Float32Array(cpuDepth.data)
  : new Uint16Array(cpuDepth.data));
  }
  }
  // Updates Depth Texture.
  if (this.options.depthTexture.enabled && this.depthTextures) {
- this.depthTextures.updateNativeTexture(depthData, this.renderer, view_id);
+ this.depthTextures.updateNativeTexture(depthData, this.renderer, viewId);
  }
- if (this.options.depthMesh.enabled && this.depthMesh && view_id == 0) {
+ if (this.options.depthMesh.enabled && this.depthMesh && viewId == 0) {
  if (cpuDepth) {
  this.depthMesh.updateDepth(cpuDepth);
  }
@@ -3201,11 +3248,12 @@ class Depth {
  this.depthMesh.updateGPUDepth(depthData);
  }
  }
+ this.updateDepthMatrices(depthData, viewId);
  }
- getTexture(view_id) {
+ getTexture(viewId) {
  if (!this.options.depthTexture.enabled)
  return undefined;
- return this.depthTextures?.get(view_id);
+ return this.depthTextures?.get(viewId);
  }
  update(frame) {
  if (!this.options.enabled)
@@ -3239,16 +3287,9 @@ class Depth {
  return;
  }
  }
- if (this.xrRefSpace == null) {
- session.requestReferenceSpace('local').then((refSpace) => {
- this.xrRefSpace = refSpace;
- });
- session.addEventListener('end', () => {
- this.xrRefSpace = undefined;
- });
- }
- else {
- const pose = frame.getViewerPose(this.xrRefSpace);
+ const xrRefSpace = this.renderer.xr.getReferenceSpace();
+ if (xrRefSpace) {
+ const pose = frame.getViewerPose(xrRefSpace);
  if (pose) {
  for (let view_id = 0; view_id < pose.views.length; ++view_id) {
  const view = pose.views[view_id];
@@ -3316,24 +3357,6 @@ const aspectRatios = {
  depth: 1.0,
  RGB: 4 / 3,
  };
- /**
- * Parameters for RGB to depth UV mapping (manually calibrated for aspect
- * ratios. For RGB and depth, 4:3 and 1:1, respectively.
- */
- const rgbToDepthParams = {
- scale: 1,
- scaleX: 0.75,
- scaleY: 0.63,
- translateU: 0.2,
- translateV: -0.02,
- k1: -0.046,
- k2: 0,
- k3: 0,
- p1: 0,
- p2: 0,
- xc: 0,
- yc: 0,
- };
  /**
  * Maps a UV coordinate from a RGB space to a destination depth space,
  * applying Brown-Conrady distortion and affine transformations based on
@@ -3341,10 +3364,10 @@ const rgbToDepthParams = {
  *
  * @param rgbUv - The RGB UV coordinate, e.g., \{ u: 0.5, v: 0.5 \}.
  * @param xrDeviceCamera - The device camera instance.
- * @returns The transformed UV coordinate in the depth image space, or null if
+ * @returns The transformed UV coordinate in the render camera clip space, or null if
  * inputs are invalid.
  */
- function transformRgbToDepthUv(rgbUv, xrDeviceCamera) {
+ function transformRgbToRenderCameraClip(rgbUv, xrDeviceCamera) {
  if (xrDeviceCamera?.simulatorCamera) {
  // The simulator camera crops the viewport image to match its aspect ratio,
  // while the depth map covers the entire viewport, so we adjust for this.
@@ -3362,51 +3385,73 @@ function transformRgbToDepthUv(rgbUv, xrDeviceCamera) {
  const relativeHeight = viewportAspect / cameraAspect;
  v = v * relativeHeight + (1.0 - relativeHeight) / 2.0;
  }
- return { u, v: 1.0 - v };
+ return new THREE.Vector2(2 * u - 1, 2 * v - 1);
  }
  if (!aspectRatios || !aspectRatios.depth || !aspectRatios.RGB) {
  console.error('Invalid aspect ratios provided.');
  return null;
  }
- // Determine the relative scaling required to fit the overlay within the base
+ const params = xrDeviceCamera?.rgbToDepthParams ?? DEFAULT_RGB_TO_DEPTH_PARAMS;
+ // Determine the relative scaling required to fit the overlay within the base.
  let relativeScaleX, relativeScaleY;
  if (aspectRatios.depth > aspectRatios.RGB) {
- // Base is wider than overlay ("letterboxing")
+ // Base is wider than overlay ("letterboxing").
  relativeScaleY = 1.0;
  relativeScaleX = aspectRatios.RGB / aspectRatios.depth;
  }
  else {
- // Base is narrower than overlay ("pillarboxing")
+ // Base is narrower than overlay ("pillarboxing").
  relativeScaleX = 1.0;
  relativeScaleY = aspectRatios.depth / aspectRatios.RGB;
  }
- // Convert input source UV [0, 1] to a normalized coordinate space [-0.5, 0.5]
+ // Convert input source UV [0, 1] to normalized coordinates in [-0.5, 0.5].
  const u_norm = rgbUv.u - 0.5;
  const v_norm = rgbUv.v - 0.5;
- // Apply the FORWARD Brown-Conrady distortion model
- const u_centered = u_norm - rgbToDepthParams.xc;
- const v_centered = v_norm - rgbToDepthParams.yc;
+ // Apply the FORWARD Brown-Conrady distortion model.
+ const u_centered = u_norm - params.xc;
+ const v_centered = v_norm - params.yc;
  const r2 = u_centered * u_centered + v_centered * v_centered;
- const radial = 1 +
- rgbToDepthParams.k1 * r2 +
- rgbToDepthParams.k2 * r2 * r2 +
- rgbToDepthParams.k3 * r2 * r2 * r2;
- const tanX = 2 * rgbToDepthParams.p1 * u_centered * v_centered +
- rgbToDepthParams.p2 * (r2 + 2 * u_centered * u_centered);
- const tanY = rgbToDepthParams.p1 * (r2 + 2 * v_centered * v_centered) +
- 2 * rgbToDepthParams.p2 * u_centered * v_centered;
- const u_distorted = u_centered * radial + tanX + rgbToDepthParams.xc;
- const v_distorted = v_centered * radial + tanY + rgbToDepthParams.yc;
- // Apply initial aspect ratio scaling and translation
- const u_fitted = u_distorted * relativeScaleX + rgbToDepthParams.translateU;
- const v_fitted = v_distorted * relativeScaleY + rgbToDepthParams.translateV;
- // Apply the final user-controlled scaling (zoom and stretch)
- const finalNormX = u_fitted * rgbToDepthParams.scale * rgbToDepthParams.scaleX;
- const finalNormY = v_fitted * rgbToDepthParams.scale * rgbToDepthParams.scaleY;
- // Convert the final normalized coordinate back to a UV coordinate [0, 1]
- const finalU = finalNormX + 0.5;
- const finalV = finalNormY + 0.5;
- return { u: finalU, v: 1.0 - finalV };
+ const radial = 1 + params.k1 * r2 + params.k2 * r2 * r2 + params.k3 * r2 * r2 * r2;
+ const tanX = 2 * params.p1 * u_centered * v_centered +
+ params.p2 * (r2 + 2 * u_centered * u_centered);
+ const tanY = params.p1 * (r2 + 2 * v_centered * v_centered) +
+ 2 * params.p2 * u_centered * v_centered;
+ const u_distorted = u_centered * radial + tanX + params.xc;
+ const v_distorted = v_centered * radial + tanY + params.yc;
+ // Apply initial aspect ratio scaling and translation.
+ const u_fitted = u_distorted * relativeScaleX + params.translateU;
+ const v_fitted = v_distorted * relativeScaleY + params.translateV;
+ // Apply the final user-controlled scaling (zoom and stretch).
+ const finalNormX = u_fitted * params.scale * params.scaleX;
+ const finalNormY = v_fitted * params.scale * params.scaleY;
+ return new THREE.Vector2(2 * finalNormX, 2 * finalNormY);
+ }
+ /**
+ * Maps a UV coordinate from a RGB space to a destination depth space,
+ * applying Brown-Conrady distortion and affine transformations based on
+ * aspect ratios. If the simulator camera is used, no transformation is applied.
+ *
+ * @param rgbUv - The RGB UV coordinate, e.g., \{ u: 0.5, v: 0.5 \}.
+ * @param renderCameraWorldFromClip - Render camera world from clip, i.e. inverse of the View Projection matrix.
+ * @param depthCameraClipFromWorld - Depth camera clip from world, i.e.
+ * @param xrDeviceCamera - The device camera instance.
+ * @returns The transformed UV coordinate in the depth image space, or null if
+ * inputs are invalid.
+ */
+ function transformRgbToDepthUv(rgbUv, renderCameraWorldFromClip, depthCameraClipFromWorld, xrDeviceCamera) {
+ // Render camera clip space coordinates.
+ const clipCoords = transformRgbToRenderCameraClip(rgbUv, xrDeviceCamera);
+ if (!clipCoords) {
+ return null;
+ }
+ // Backwards project from the render camera to depth camera.
+ const depthClipCoord = new THREE.Vector4(clipCoords.x, clipCoords.y, 1, 1);
+ depthClipCoord.applyMatrix4(renderCameraWorldFromClip);
+ depthClipCoord.applyMatrix4(depthCameraClipFromWorld);
+ depthClipCoord.multiplyScalar(1 / depthClipCoord.w);
+ const finalU = 0.5 * depthClipCoord.x + 0.5;
+ const finalV = 1.0 - (0.5 * depthClipCoord.y + 0.5);
+ return { u: finalU, v: finalV };
  }
  /**
  * Retrieves the world space position of a given RGB UV coordinate.
@@ -3416,19 +3461,30 @@ function transformRgbToDepthUv(rgbUv, xrDeviceCamera) {
  *
  * @param rgbUv - The RGB UV coordinate, e.g., \{ u: 0.5, v: 0.5 \}.
  * @param depthArray - Array containing depth data.
- * @param viewProjectionMatrix - XRView object with corresponding
+ * @param projectionMatrix - XRView object with corresponding
  * projection matrix.
- * @param matrixWorld - Matrix for view-to-world translation.
+ * @param matrixWorld - Rendering camera's model matrix.
  * @param xrDeviceCamera - The device camera instance.
  * @param xrDepth - The SDK's Depth module.
  * @returns Vertex at (u, v) in world space.
  */
- function transformRgbUvToWorld(rgbUv, depthArray, viewProjectionMatrix, matrixWorld, xrDeviceCamera, xrDepth = Depth.instance) {
- if (!depthArray || !viewProjectionMatrix || !matrixWorld || !xrDepth)
- return null;
- const depthUV = transformRgbToDepthUv(rgbUv, xrDeviceCamera);
+ function transformRgbUvToWorld(rgbUv, depthArray, projectionMatrix, matrixWorld, xrDeviceCamera, xrDepth = Depth.instance) {
+ if (!depthArray || !projectionMatrix || !matrixWorld || !xrDepth) {
+ throw new Error('Missing parameter in transformRgbUvToWorld');
+ }
+ const worldFromClip = matrixWorld
+ .clone()
+ .invert()
+ .premultiply(projectionMatrix)
+ .invert();
+ const depthProjectionMatrixInverse = xrDepth.depthProjectionMatrices[0]
+ .clone()
+ .invert();
+ const depthClipFromWorld = xrDepth.depthViewProjectionMatrices[0];
+ const depthModelMatrix = xrDepth.depthViewMatrices[0].clone().invert();
+ const depthUV = transformRgbToDepthUv(rgbUv, worldFromClip, depthClipFromWorld, xrDeviceCamera);
  if (!depthUV) {
- return null;
+ throw new Error('Failed to get depth UV');
  }
  const { u: depthU, v: depthV } = depthUV;
  const depthX = Math.round(clamp(depthU * xrDepth.width, 0, xrDepth.width - 1));
@@ -3439,12 +3495,13 @@ function transformRgbUvToWorld(rgbUv, depthArray, viewProjectionMatrix, matrixWo
  // Convert UV to normalized device coordinates and create a point on the near
  // plane.
  const viewSpacePosition = new THREE.Vector3(2.0 * (depthU - 0.5), 2.0 * (depthV - 0.5), -1);
- const viewProjectionMatrixInverse = viewProjectionMatrix.clone().invert();
  // Unproject the point from clip space to view space and scale it along the
  // ray from the camera to the correct depth. Camera looks down -Z axis.
- viewSpacePosition.applyMatrix4(viewProjectionMatrixInverse);
+ viewSpacePosition.applyMatrix4(depthProjectionMatrixInverse);
  viewSpacePosition.multiplyScalar(-depthInMeters / viewSpacePosition.z);
- const worldPosition = viewSpacePosition.clone().applyMatrix4(matrixWorld);
+ const worldPosition = viewSpacePosition
+ .clone()
+ .applyMatrix4(depthModelMatrix);
  return worldPosition;
  }
  /**
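
`transformRgbUvToWorld` now takes the render camera's projection matrix (previously a view-projection matrix), unprojects through the depth camera's own matrices, and throws instead of returning null on bad input. A hypothetical call site under the new signature (`camera` and `deviceCamera` are illustrative names):

```js
// Look up the world-space point under the center of the RGB frame.
// Callers that previously checked for null should now catch the throw.
let worldPos;
try {
  worldPos = transformRgbUvToWorld(
      {u: 0.5, v: 0.5},              // RGB UV
      Depth.instance.depthArray[0],  // CPU depth for view 0
      camera.projectionMatrix,       // was: view-projection matrix
      camera.matrixWorld,
      deviceCamera);                 // XRDeviceCamera instance
} catch (e) {
  console.warn('Depth lookup failed:', e);
}
```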
@@ -3667,12 +3724,13 @@ class XRDeviceCamera extends VideoStream {
  /**
  * @param options - The configuration options.
  */
- constructor({ videoConstraints = { facingMode: 'environment' }, willCaptureFrequently = false, } = {}) {
+ constructor({ videoConstraints = { facingMode: 'environment' }, willCaptureFrequently = false, rgbToDepthParams = DEFAULT_RGB_TO_DEPTH_PARAMS, } = {}) {
  super({ willCaptureFrequently });
  this.isInitializing_ = false;
  this.availableDevices_ = [];
  this.currentDeviceIndex_ = -1;
  this.videoConstraints_ = { ...videoConstraints };
+ this.rgbToDepthParams = rgbToDepthParams;
  }
  /**
  * Retrieves the list of available video input devices.
@@ -3724,7 +3782,7 @@ class XRDeviceCamera extends VideoStream {
  return;
  this.isInitializing_ = true;
  this.setState_(StreamState.INITIALIZING);
- // Reset state for the new stream
+ // Reset state for the new stream.
  this.currentTrackSettings_ = undefined;
  this.currentDeviceIndex_ = -1;
  try {
@@ -3758,7 +3816,7 @@ class XRDeviceCamera extends VideoStream {
  if (!videoTracks.length) {
  throw new Error('MediaStream has no video tracks.');
  }
- // After the stream is active, we can get the ID of the track
+ // After the stream is active, we can get the track ID.
  const activeTrack = videoTracks[0];
  this.currentTrackSettings_ = activeTrack.getSettings();
  console.debug('Active track settings:', this.currentTrackSettings_);
@@ -3768,10 +3826,10 @@ class XRDeviceCamera extends VideoStream {
  else {
  console.warn('Stream started without deviceId as it was unavailable');
  }
- this.stop_(); // Stop any previous stream before starting new one
+ this.stop_(); // Stop any previous stream before starting new one.
  this.stream_ = stream;
  this.video_.srcObject = stream;
- this.video_.src = ''; // Required for some browsers to reset the src
+ this.video_.src = ''; // Required for some browsers to reset the src.
  await new Promise((resolve, reject) => {
  this.video_.onloadedmetadata = () => {
  this.handleVideoStreamLoadedMetadata(resolve, reject, true);
@@ -3783,7 +3841,7 @@ class XRDeviceCamera extends VideoStream {
  };
  this.video_.play();
  });
- // Once the stream is loaded and dimensions are known, set the final state
+ // Once stream is loaded and dimensions are known, set the final state.
  const details = {
  width: this.width,
  height: this.height,
@@ -3805,7 +3863,7 @@ class XRDeviceCamera extends VideoStream {
  /**
  * Sets the active camera by its device ID. Removes potentially conflicting
  * constraints such as facingMode.
- * @param deviceId - Device id.
+ * @param deviceId - Device ID
  */
  async setDeviceId(deviceId) {
  const newIndex = this.availableDevices_.findIndex((device) => device.deviceId === deviceId);
@@ -6423,6 +6481,13 @@ function computePinch(context, config) {
  const index = getJoint(context, 'index-finger-tip');
  if (!thumb || !index)
  return undefined;
+ const supportMetrics = ['middle', 'ring', 'pinky']
+ .map((finger) => computeFingerMetric(context, finger))
+ .filter(Boolean);
+ const supportCurl = supportMetrics.length > 0
+ ? average(supportMetrics.map((metrics) => metrics.curlRatio))
+ : 1;
+ const supportPenalty = clamp01((supportCurl - 1.05) / 0.35);
  const handScale = estimateHandScale(context);
  const threshold = config.threshold ?? Math.max(0.018, handScale * 0.35);
  const distance = thumb.distanceTo(index);
@@ -6431,10 +6496,12 @@ function computePinch(context, config) {
  }
  const tightness = clamp01(1 - distance / (threshold * 0.85));
  const loosePenalty = clamp01(1 - distance / (threshold * 1.4));
- const confidence = clamp01(distance <= threshold ? tightness : loosePenalty * 0.4);
+ let confidence = clamp01(distance <= threshold ? tightness : loosePenalty * 0.4);
+ confidence *= 1 - supportPenalty * 0.45;
+ confidence = clamp01(confidence);
  return {
  confidence,
- data: { distance, threshold },
+ data: { distance, threshold, supportPenalty },
  };
  }
  function computeOpenPalm(context, config) {
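
The pinch detector now damps its score when the middle, ring, and pinky fingers are extended (high average `curlRatio`), suppressing false pinches from an open hand whose thumb and index happen to touch. A worked example with illustrative numbers:

```js
const clamp01 = (x) => Math.min(1, Math.max(0, x));
const supportCurl = 1.5;  // middle/ring/pinky clearly extended
const supportPenalty = clamp01((supportCurl - 1.05) / 0.35);  // -> 1
let confidence = clamp01(0.8 * (1 - supportPenalty * 0.45));  // 0.8 -> 0.44
// A curled hand (supportCurl <= 1.05) gets penalty 0: score unchanged.
```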
@@ -6443,21 +6510,29 @@ function computeOpenPalm(context, config) {
  return undefined;
  const handScale = estimateHandScale(context);
  const palmWidth = getPalmWidth(context) ?? handScale * 0.85;
+ const palmUp = getPalmUp(context);
  const extensionScores = fingerMetrics.map(({ tipDistance }) => clamp01((tipDistance - handScale * 0.5) / (handScale * 0.45)));
  const straightnessScores = fingerMetrics.map(({ curlRatio }) => clamp01((curlRatio - 1.1) / 0.5));
+ const orientationScore = palmUp && fingerMetrics.length
+ ? average(fingerMetrics.map((metrics) => fingerAlignmentScore(context, metrics, palmUp)))
+ : 0.5;
  const neighbors = getAdjacentFingerDistances(context);
  const spreadScore = neighbors.average !== Infinity && palmWidth > EPSILON
  ? clamp01((neighbors.average - palmWidth * 0.55) / (palmWidth * 0.35))
  : 0;
  const extensionScore = average(extensionScores);
  const straightScore = average(straightnessScores);
- const confidence = clamp01(extensionScore * 0.5 + straightScore * 0.3 + spreadScore * 0.2);
+ const confidence = clamp01(extensionScore * 0.4 +
+ straightScore * 0.25 +
+ spreadScore * 0.2 +
+ orientationScore * 0.15);
  return {
  confidence,
  data: {
  extensionScore,
  straightScore,
  spreadScore,
+ orientationScore,
  threshold: config.threshold,
  },
  };
@@ -6474,15 +6549,26 @@ function computeFist(context, config) {
  const clusterScore = neighbors.average !== Infinity && palmWidth > EPSILON
  ? clamp01((palmWidth * 0.5 - neighbors.average) / (palmWidth * 0.35))
  : 0;
+ const thumbTip = getJoint(context, 'thumb-tip');
+ const indexBase = getFingerJoint(context, 'index', 'phalanx-proximal') ??
+ getFingerJoint(context, 'index', 'metacarpal');
+ const thumbWrapScore = thumbTip && indexBase && palmWidth > EPSILON
+ ? clamp01((palmWidth * 0.55 - thumbTip.distanceTo(indexBase)) /
+ (palmWidth * 0.35))
+ : 0;
  const tipScore = clamp01((handScale * 0.55 - tipAverage) / (handScale * 0.25));
  const curlScore = clamp01((1.08 - curlAverage) / 0.25);
- const confidence = clamp01(tipScore * 0.5 + curlScore * 0.35 + clusterScore * 0.15);
+ const confidence = clamp01(tipScore * 0.45 +
+ curlScore * 0.3 +
+ clusterScore * 0.1 +
+ thumbWrapScore * 0.15);
  return {
  confidence,
  data: {
  tipAverage,
  curlAverage,
  clusterScore,
+ thumbWrapScore,
  threshold: config.threshold,
  },
  };
@@ -6519,8 +6605,8 @@ function computeThumbsUp(context, config) {
  orientationScore = clamp01((alignment - 0.35) / 0.35);
  }
  }
- const confidence = clamp01(thumbExtendedScore * 0.35 +
- curledScore * 0.3 +
+ const confidence = clamp01(thumbExtendedScore * 0.3 +
+ curledScore * 0.35 +
  orientationScore * 0.2 +
  separationScore * 0.15);
  return {
@@ -6544,17 +6630,33 @@ function computePoint(context, config) {
  if (!otherMetrics.length)
  return undefined;
  const handScale = estimateHandScale(context);
+ const palmWidth = getPalmWidth(context) ?? handScale * 0.85;
+ const palmUp = getPalmUp(context);
  const indexCurlScore = clamp01((indexMetrics.curlRatio - 1.2) / 0.35);
  const indexReachScore = clamp01((indexMetrics.tipDistance - handScale * 0.6) / (handScale * 0.25));
+ const indexDirectionScore = palmUp && indexMetrics
+ ? fingerAlignmentScore(context, indexMetrics, palmUp)
+ : 0.4;
  const othersCurl = average(otherMetrics.map((metrics) => metrics.curlRatio));
  const othersCurledScore = clamp01((1.05 - othersCurl) / 0.25);
- const confidence = clamp01(indexCurlScore * 0.45 + indexReachScore * 0.25 + othersCurledScore * 0.3);
+ const thumbTip = getJoint(context, 'thumb-tip');
+ const thumbTuckedScore = thumbTip && indexMetrics.metacarpal && palmWidth > EPSILON
+ ? clamp01((palmWidth * 0.75 - thumbTip.distanceTo(indexMetrics.metacarpal)) /
+ (palmWidth * 0.4))
+ : 0.5;
+ const confidence = clamp01(indexCurlScore * 0.35 +
+ indexReachScore * 0.25 +
+ othersCurledScore * 0.2 +
+ indexDirectionScore * 0.1 +
+ thumbTuckedScore * 0.1);
  return {
  confidence,
  data: {
  indexCurlScore,
  indexReachScore,
  othersCurledScore,
+ indexDirectionScore,
+ thumbTuckedScore,
  threshold: config.threshold,
  },
  };
@@ -6566,16 +6668,21 @@ function computeSpread(context, config) {
  const handScale = estimateHandScale(context);
  const palmWidth = getPalmWidth(context) ?? handScale * 0.85;
  const neighbors = getAdjacentFingerDistances(context);
+ const palmUp = getPalmUp(context);
  const spreadScore = neighbors.average !== Infinity && palmWidth > EPSILON
  ? clamp01((neighbors.average - palmWidth * 0.6) / (palmWidth * 0.35))
  : 0;
  const extensionScore = clamp01((average(fingerMetrics.map((metrics) => metrics.curlRatio)) - 1.15) / 0.45);
- const confidence = clamp01(spreadScore * 0.6 + extensionScore * 0.4);
+ const orientationScore = palmUp && fingerMetrics.length
+ ? average(fingerMetrics.map((metrics) => fingerAlignmentScore(context, metrics, palmUp)))
+ : 0.5;
+ const confidence = clamp01(spreadScore * 0.55 + extensionScore * 0.3 + orientationScore * 0.15);
  return {
  confidence,
  data: {
  spreadScore,
  extensionScore,
+ orientationScore,
  threshold: config.threshold,
  },
  };
@@ -6703,6 +6810,16 @@ function getFingerJoint(context, finger, suffix) {
  const prefix = FINGER_PREFIX[finger];
  return getJoint(context, `${prefix}-${suffix}`);
  }
+ function fingerAlignmentScore(context, metrics, palmUp) {
+ const base = metrics.metacarpal ?? getJoint(context, 'wrist');
+ if (!base)
+ return 0;
+ const direction = new THREE.Vector3().subVectors(metrics.tip, base);
+ if (direction.lengthSq() === 0)
+ return 0;
+ direction.normalize();
+ return clamp01((direction.dot(palmUp) - 0.35) / 0.5);
+ }
  function clamp01(value) {
  return THREE.MathUtils.clamp(value, 0, 1);
  }
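
`fingerAlignmentScore` is the shared helper behind the new `orientationScore` and `indexDirectionScore` terms: it maps the dot product between a finger's tip direction and the palm-up axis onto [0, 1]. A quick sketch of the resulting curve:

```js
// Score 1 when a finger points along the palm-up axis, ~0.71 at 45
// degrees off, and 0 at perpendicular.
import * as THREE from 'three';

const clamp01 = (x) => THREE.MathUtils.clamp(x, 0, 1);
const palmUp = new THREE.Vector3(0, 1, 0);
for (const deg of [0, 45, 90]) {
  const rad = (deg * Math.PI) / 180;
  const dir = new THREE.Vector3(Math.sin(rad), Math.cos(rad), 0);
  console.log(deg, clamp01((dir.dot(palmUp) - 0.35) / 0.5));
}
```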
@@ -8131,6 +8248,15 @@ class SimulatorDepth {
  this.depthWidth = 160;
  this.depthHeight = 160;
  this.depthBufferSlice = new Float32Array();
+ /**
+ * If true, copies the rendering camera's projection matrix each frame.
+ */
+ this.autoUpdateDepthCameraProjection = true;
+ /**
+ * If true, copies the rendering camera's transform each frame.
+ */
+ this.autoUpdateDepthCameraTransform = true;
+ this.projectionMatrixArray = new Float32Array(16);
  }
  /**
  * Initialize Simulator Depth.
@@ -8139,6 +8265,16 @@ class SimulatorDepth {
  this.renderer = renderer;
  this.camera = camera;
  this.depth = depth;
+ if (this.camera instanceof THREE.PerspectiveCamera) {
+ this.depthCamera = new THREE.PerspectiveCamera();
+ }
+ else if (this.camera instanceof THREE.OrthographicCamera) {
+ this.depthCamera = new THREE.OrthographicCamera();
+ }
+ else {
+ throw new Error('Unknown camera type');
+ }
+ this.depthCamera.copy(this.camera, /*recursive=*/ false);
  this.createRenderTarget();
  this.depthMaterial = new SimulatorDepthMaterial();
  }
@@ -8150,14 +8286,32 @@ class SimulatorDepth {
  this.depthBuffer = new Float32Array(this.depthWidth * this.depthHeight);
  }
  update() {
+ this.updateDepthCamera();
  this.renderDepthScene();
  this.updateDepth();
  }
+ updateDepthCamera() {
+ const renderingCamera = this.camera;
+ const depthCamera = this.depthCamera;
+ if (this.autoUpdateDepthCameraProjection) {
+ depthCamera.projectionMatrix.copy(renderingCamera.projectionMatrix);
+ depthCamera.projectionMatrixInverse.copy(renderingCamera.projectionMatrixInverse);
+ }
+ if (this.autoUpdateDepthCameraTransform) {
+ depthCamera.position.copy(renderingCamera.position);
+ depthCamera.rotation.order = renderingCamera.rotation.order;
+ depthCamera.quaternion.copy(renderingCamera.quaternion);
+ depthCamera.scale.copy(renderingCamera.scale);
+ depthCamera.matrix.copy(renderingCamera.matrix);
+ depthCamera.matrixWorld.copy(renderingCamera.matrixWorld);
+ depthCamera.matrixWorldInverse.copy(renderingCamera.matrixWorldInverse);
+ }
+ }
  renderDepthScene() {
  const originalRenderTarget = this.renderer.getRenderTarget();
  this.renderer.setRenderTarget(this.depthRenderTarget);
  this.simulatorScene.overrideMaterial = this.depthMaterial;
- this.renderer.render(this.simulatorScene, this.camera);
+ this.renderer.render(this.simulatorScene, this.depthCamera);
  this.simulatorScene.overrideMaterial = null;
  this.renderer.setRenderTarget(originalRenderTarget);
  }
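
The simulator now renders depth through a dedicated `depthCamera` that mirrors the render camera by default. A minimal decoupling sketch, assuming access to the `SimulatorDepth` instance: freezing the transform emulates a depth sensor whose pose differs from the render camera, while the projection keeps tracking.

```js
simulatorDepth.autoUpdateDepthCameraTransform = false;
simulatorDepth.depthCamera.position.set(0, 1.6, 0);  // fixed sensor pose
simulatorDepth.depthCamera.lookAt(0, 1.6, -1);
// matrixWorld is refreshed here; three.js recomputes matrixWorldInverse
// when the depth pass renders with this camera.
simulatorDepth.depthCamera.updateMatrixWorld(true);
```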
@@ -8182,11 +8336,14 @@ class SimulatorDepth {
  // Copy the temp slice (original row i) to row j
  this.depthBuffer.set(this.depthBufferSlice, j_offset);
  }
+ this.depthCamera.projectionMatrix.toArray(this.projectionMatrixArray);
  const depthData = {
  width: this.depthWidth,
  height: this.depthHeight,
  data: this.depthBuffer.buffer,
  rawValueToMeters: 1.0,
+ projectionMatrix: this.projectionMatrixArray,
+ transform: new XRRigidTransform(this.depthCamera.position, this.depthCamera.quaternion),
  };
  this.depth.updateCPUDepthData(depthData, 0);
  }
@@ -11796,7 +11953,7 @@ class TextView extends View {
  }
  setTextColor(color) {
  if (Text && this.textObj instanceof Text) {
- this.textObj.color = color;
+ this.textObj.color = getColorHex(color);
  }
  }
  /**
@@ -12186,7 +12343,7 @@ class TextButton extends TextView {
  */
  constructor(options = {}) {
  const geometry = new THREE.PlaneGeometry(1, 1);
- const colorVec4 = getVec4ByColorString(options.backgroundColor ?? '#00000000');
+ const colorVec4 = getVec4ByColorString(options.backgroundColor ?? '#000000');
  const { opacity = 0.0, radius = SquircleShader.uniforms.uRadius.value, boxSize = SquircleShader.uniforms.uBoxSize.value, } = options;
  const uniforms = {
  ...SquircleShader.uniforms,
@@ -12236,6 +12393,9 @@ class TextButton extends TextView {
  // Applies our own overrides to the default values.
  this.fontSize = options.fontSize ?? this.fontSize;
  this.fontColor = options.fontColor ?? this.fontColor;
+ this.hoverColor = options.hoverColor ?? this.hoverColor;
+ this.selectedFontColor =
+ options.selectedFontColor ?? this.selectedFontColor;
  this.width = options.width ?? this.width;
  this.height = options.height ?? this.height;
  }
@@ -12258,20 +12418,19 @@ class TextButton extends TextView {
  if (!this.textObj) {
  return;
  }
- if (this.textObj) {
- this.textObj.renderOrder = this.renderOrder + 1;
- }
+ // Update render order to ensure text appears on top of the button mesh
+ this.textObj.renderOrder = this.renderOrder + 1;
  const ux = this.ux;
  if (ux.isHovered()) {
  if (ux.isSelected()) {
- this.setTextColor(0x666666);
+ this.setTextColor(this.selectedFontColor);
  }
  else {
- this.setTextColor(0xaaaaaa);
+ this.setTextColor(this.hoverColor);
  }
  }
  else {
- this.setTextColor(0xffffff);
+ this.setTextColor(this.fontColor);
  this.uniforms.uOpacity.value = this.defaultOpacity * this.opacity;
  }
  }
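
Button text colors are no longer hard-coded greys: the three interaction states now read `fontColor`, `hoverColor`, and `selectedFontColor`, and `setTextColor` routes through `getColorHex`, so string and numeric colors both work. A hypothetical construction (option names from the hunks above; `text` is illustrative):

```js
const button = new TextButton({
  text: 'Start',
  fontColor: '#ffffff',          // idle (was hard-coded 0xffffff)
  hoverColor: '#aaaaaa',         // hovered (was hard-coded 0xaaaaaa)
  selectedFontColor: '#666666',  // pressed (was hard-coded 0x666666)
});
```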
@@ -13887,6 +14046,7 @@ class ObjectDetector extends Script {
  }
  if (this.options.objects.showDebugVisualizations) {
  this._visualizeBoundingBoxesOnImage(base64Image, parsedResponse);
+ this._visualizeDepthMap(cachedDepthArray);
  }
  const detectionPromises = parsedResponse.map(async (item) => {
  const { ymin, xmin, ymax, xmax, objectName, ...additionalData } = item || {};
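
With debug visualizations enabled, the detector now also downloads a grayscale PNG of the depth map (see `_visualizeDepthMap` further down), where raw depths are min/max-normalized and zero stays black as invalid. A sketch of the flag, assuming the options shape implied by the check above:

```js
// Enabling this now saves depth_debug_*.png alongside the annotated RGB
// frame, e.g. raw depths [0, 1.2, 3.0] map to bytes [0, 0, 255].
const options = {objects: {showDebugVisualizations: true}};
```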
@@ -13935,7 +14095,7 @@ class ObjectDetector extends Script {
  * Retrieves a list of currently detected objects.
  *
  * @param label - The semantic label to filter by (e.g., 'chair'). If null,
- * all objects are returned.
+ * all objects are returned.
  * @returns An array of `Object` instances.
  */
  get(label = null) {
@@ -13972,8 +14132,7 @@ class ObjectDetector extends Script {
  * Draws the detected bounding boxes on the input image and triggers a
  * download for debugging.
  * @param base64Image - The base64 encoded input image.
- * @param detections - The array of detected objects from the
- * AI response.
+ * @param detections - The array of detected objects from the AI response.
  */
  _visualizeBoundingBoxesOnImage(base64Image, detections) {
  const img = new Image();
@@ -14022,6 +14181,71 @@ class ObjectDetector extends Script {
  };
  img.src = base64Image;
  }
+ /**
+ * Generates a visual representation of the depth map, normalized to 0-1 range,
+ * and triggers a download for debugging.
+ * @param depthArray - The raw depth data array.
+ */
+ _visualizeDepthMap(depthArray) {
+ const width = this.depth.width;
+ const height = this.depth.height;
+ if (!width || !height || depthArray.length === 0) {
+ console.warn('Cannot visualize depth map: missing dimensions or data.');
+ return;
+ }
+ // 1. Find Min/Max for normalization (ignoring 0/invalid depth).
+ let min = Infinity;
+ let max = -Infinity;
+ for (let i = 0; i < depthArray.length; ++i) {
+ const val = depthArray[i];
+ if (val > 0) {
+ if (val < min)
+ min = val;
+ if (val > max)
+ max = val;
+ }
+ }
+ // Handle edge case where no valid depth exists.
+ if (min === Infinity) {
+ min = 0;
+ max = 1;
+ }
+ if (min === max) {
+ max = min + 1; // Avoid divide by zero
+ }
+ // 2. Create Canvas.
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ const ctx = canvas.getContext('2d');
+ const imageData = ctx.createImageData(width, height);
+ const data = imageData.data;
+ // 3. Fill Pixels.
+ for (let i = 0; i < depthArray.length; ++i) {
+ const raw = depthArray[i];
+ // Normalize to 0-1.
+ // Typically 0 means invalid/sky in some depth APIs, so we keep it black.
+ // Otherwise, map [min, max] to [0, 1].
+ const normalized = raw === 0 ? 0 : (raw - min) / (max - min);
+ const byteVal = Math.floor(normalized * 255);
+ const stride = i * 4;
+ data[stride] = byteVal; // R
+ data[stride + 1] = byteVal; // G
+ data[stride + 2] = byteVal; // B
+ data[stride + 3] = 255; // Alpha
+ }
+ ctx.putImageData(imageData, 0, 0);
+ // 4. Download.
+ const timestamp = new Date()
+ .toISOString()
+ .slice(0, 19)
+ .replace('T', '_')
+ .replace(/:/g, '-');
+ const link = document.createElement('a');
+ link.download = `depth_debug_${timestamp}.png`;
+ link.href = canvas.toDataURL('image/png');
+ link.click();
+ }
  /**
  * Creates a simple debug visualization for an object based on its position
  * (center of its 2D detection bounding box).
@@ -17086,5 +17310,5 @@ class VideoFileStream extends VideoStream {
  }
  }
 
- export { AI, AIOptions, AVERAGE_IPD_METERS, ActiveControllers, Agent, AnimatableNumber, AudioListener, AudioPlayer, BACK, BackgroundMusic, CategoryVolumes, Col, Core, CoreSound, DEFAULT_DEVICE_CAMERA_HEIGHT, DEFAULT_DEVICE_CAMERA_WIDTH, DOWN, Depth, DepthMesh, DepthMeshOptions, DepthOptions, DepthTextures, DetectedObject, DetectedPlane, DeviceCameraOptions, DragManager, DragMode, ExitButton, FORWARD, FreestandingSlider, GazeController, Gemini, GeminiOptions, GenerateSkyboxTool, GestureRecognition, GestureRecognitionOptions, GetWeatherTool, Grid, HAND_BONE_IDX_CONNECTION_MAP, HAND_JOINT_COUNT, HAND_JOINT_IDX_CONNECTION_MAP, HAND_JOINT_NAMES, Handedness, Hands, HandsOptions, HorizontalPager, IconButton, IconView, ImageView, Input, InputOptions, Keycodes, LEFT, LEFT_VIEW_ONLY_LAYER, LabelView, Lighting, LightingOptions, LoadingSpinnerManager, MaterialSymbolsView, MeshScript, ModelLoader, ModelViewer, MouseController, NEXT_SIMULATOR_MODE, NUM_HANDS, OCCLUDABLE_ITEMS_LAYER, ObjectDetector, ObjectsOptions, OcclusionPass, OcclusionUtils, OpenAI, OpenAIOptions, Options, PageIndicator, Pager, PagerState, Panel, PanelMesh, Physics, PhysicsOptions, PinchOnButtonAction, PlaneDetector, PlanesOptions, RIGHT, RIGHT_VIEW_ONLY_LAYER, Registry, Reticle, ReticleOptions, RotationRaycastMesh, Row, SIMULATOR_HAND_POSE_NAMES, SIMULATOR_HAND_POSE_TO_JOINTS_LEFT, SIMULATOR_HAND_POSE_TO_JOINTS_RIGHT, SOUND_PRESETS, ScreenshotSynthesizer, Script, ScriptMixin, ScriptsManager, ScrollingTroikaTextView, SetSimulatorModeEvent, ShowHandsAction, Simulator, SimulatorCamera, SimulatorControlMode, SimulatorControllerState, SimulatorControls, SimulatorDepth, SimulatorDepthMaterial, SimulatorHandPose, SimulatorHandPoseChangeRequestEvent, SimulatorHands, SimulatorInterface, SimulatorMediaDeviceInfo, SimulatorMode, SimulatorOptions, SimulatorRenderMode, SimulatorScene, SimulatorUser, SimulatorUserAction, SketchPanel, SkyboxAgent, SoundOptions, SoundSynthesizer, SpatialAudio, SpatialPanel, SpeechRecognizer, SpeechRecognizerOptions, SpeechSynthesizer, SpeechSynthesizerOptions, SplatAnchor, StreamState, TextButton, TextScrollerState, TextView, Tool, UI, UI_OVERLAY_LAYER, UP, UX, User, VIEW_DEPTH_GAP, VerticalPager, VideoFileStream, VideoStream, VideoView, View, VolumeCategory, WaitFrame, WalkTowardsPanelAction, World, WorldOptions, XRButton, XRDeviceCamera, XREffects, XRPass, XRTransitionOptions, XR_BLOCKS_ASSETS_PATH, ZERO_VECTOR3, add, ai, aspectRatios, callInitWithDependencyInjection, clamp, clampRotationToAngle, core, cropImage, extractYaw, getColorHex, getDeltaTime, getUrlParamBool, getUrlParamFloat, getUrlParamInt, getUrlParameter, getVec4ByColorString, getXrCameraLeft, getXrCameraRight, init, initScript, lerp, loadStereoImageAsTextures, loadingSpinnerManager, lookAtRotation, objectIsDescendantOf, parseBase64DataURL, placeObjectAtIntersectionFacingTarget, print, rgbToDepthParams, scene, showOnlyInLeftEye, showOnlyInRightEye, showReticleOnDepthMesh, transformRgbToDepthUv, transformRgbUvToWorld, traverseUtil, uninitScript, urlParams, user, world, xrDepthMeshOptions, xrDepthMeshPhysicsOptions, xrDepthMeshVisualizationOptions, xrDeviceCameraEnvironmentContinuousOptions, xrDeviceCameraEnvironmentOptions, xrDeviceCameraUserContinuousOptions, xrDeviceCameraUserOptions };
+ export { AI, AIOptions, AVERAGE_IPD_METERS, ActiveControllers, Agent, AnimatableNumber, AudioListener, AudioPlayer, BACK, BackgroundMusic, CategoryVolumes, Col, Core, CoreSound, DEFAULT_DEVICE_CAMERA_HEIGHT, DEFAULT_DEVICE_CAMERA_WIDTH, DEFAULT_RGB_TO_DEPTH_PARAMS, DOWN, Depth, DepthMesh, DepthMeshOptions, DepthOptions, DepthTextures, DetectedObject, DetectedPlane, DeviceCameraOptions, DragManager, DragMode, ExitButton, FORWARD, FreestandingSlider, GazeController, Gemini, GeminiOptions, GenerateSkyboxTool, GestureRecognition, GestureRecognitionOptions, GetWeatherTool, Grid, HAND_BONE_IDX_CONNECTION_MAP, HAND_JOINT_COUNT, HAND_JOINT_IDX_CONNECTION_MAP, HAND_JOINT_NAMES, Handedness, Hands, HandsOptions, HorizontalPager, IconButton, IconView, ImageView, Input, InputOptions, Keycodes, LEFT, LEFT_VIEW_ONLY_LAYER, LabelView, Lighting, LightingOptions, LoadingSpinnerManager, MaterialSymbolsView, MeshScript, ModelLoader, ModelViewer, MouseController, NEXT_SIMULATOR_MODE, NUM_HANDS, OCCLUDABLE_ITEMS_LAYER, ObjectDetector, ObjectsOptions, OcclusionPass, OcclusionUtils, OpenAI, OpenAIOptions, Options, PageIndicator, Pager, PagerState, Panel, PanelMesh, Physics, PhysicsOptions, PinchOnButtonAction, PlaneDetector, PlanesOptions, RIGHT, RIGHT_VIEW_ONLY_LAYER, Registry, Reticle, ReticleOptions, RotationRaycastMesh, Row, SIMULATOR_HAND_POSE_NAMES, SIMULATOR_HAND_POSE_TO_JOINTS_LEFT, SIMULATOR_HAND_POSE_TO_JOINTS_RIGHT, SOUND_PRESETS, ScreenshotSynthesizer, Script, ScriptMixin, ScriptsManager, ScrollingTroikaTextView, SetSimulatorModeEvent, ShowHandsAction, Simulator, SimulatorCamera, SimulatorControlMode, SimulatorControllerState, SimulatorControls, SimulatorDepth, SimulatorDepthMaterial, SimulatorHandPose, SimulatorHandPoseChangeRequestEvent, SimulatorHands, SimulatorInterface, SimulatorMediaDeviceInfo, SimulatorMode, SimulatorOptions, SimulatorRenderMode, SimulatorScene, SimulatorUser, SimulatorUserAction, SketchPanel, SkyboxAgent, SoundOptions, SoundSynthesizer, SpatialAudio, SpatialPanel, SpeechRecognizer, SpeechRecognizerOptions, SpeechSynthesizer, SpeechSynthesizerOptions, SplatAnchor, StreamState, TextButton, TextScrollerState, TextView, Tool, UI, UI_OVERLAY_LAYER, UP, UX, User, VIEW_DEPTH_GAP, VerticalPager, VideoFileStream, VideoStream, VideoView, View, VolumeCategory, WaitFrame, WalkTowardsPanelAction, World, WorldOptions, XRButton, XRDeviceCamera, XREffects, XRPass, XRTransitionOptions, XR_BLOCKS_ASSETS_PATH, ZERO_VECTOR3, add, ai, aspectRatios, callInitWithDependencyInjection, clamp, clampRotationToAngle, core, cropImage, extractYaw, getColorHex, getDeltaTime, getUrlParamBool, getUrlParamFloat, getUrlParamInt, getUrlParameter, getVec4ByColorString, getXrCameraLeft, getXrCameraRight, init, initScript, lerp, loadStereoImageAsTextures, loadingSpinnerManager, lookAtRotation, objectIsDescendantOf, parseBase64DataURL, placeObjectAtIntersectionFacingTarget, print, scene, showOnlyInLeftEye, showOnlyInRightEye, showReticleOnDepthMesh, transformRgbToDepthUv, transformRgbToRenderCameraClip, transformRgbUvToWorld, traverseUtil, uninitScript, urlParams, user, world, xrDepthMeshOptions, xrDepthMeshPhysicsOptions, xrDepthMeshVisualizationOptions, xrDeviceCameraEnvironmentContinuousOptions, xrDeviceCameraEnvironmentOptions, xrDeviceCameraUserContinuousOptions, xrDeviceCameraUserOptions };
  //# sourceMappingURL=xrblocks.js.map