typegpu 0.6.0 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/chunk-2VTISQYP.cjs +10 -0
- package/chunk-2VTISQYP.cjs.map +1 -0
- package/chunk-CYZBRBPD.cjs +6 -0
- package/chunk-CYZBRBPD.cjs.map +1 -0
- package/chunk-SAMYKEUN.js +6 -0
- package/chunk-SAMYKEUN.js.map +1 -0
- package/chunk-WP6W72RY.js +10 -0
- package/chunk-WP6W72RY.js.map +1 -0
- package/data/index.cjs +1 -1
- package/data/index.cjs.map +1 -1
- package/data/index.d.cts +17 -5
- package/data/index.d.ts +17 -5
- package/data/index.js +1 -1
- package/index.cjs +18 -29
- package/index.cjs.map +1 -1
- package/index.d.cts +38 -3
- package/index.d.ts +38 -3
- package/index.js +18 -29
- package/index.js.map +1 -1
- package/{matrix-S8W4dk8I.d.ts → matrix-4h_aOtIE.d.cts} +9 -9
- package/{matrix-BN6ObiMv.d.cts → matrix-Domrg-ap.d.ts} +9 -9
- package/package.json +1 -1
- package/std/index.cjs +1 -1
- package/std/index.cjs.map +1 -1
- package/std/index.d.cts +274 -112
- package/std/index.d.ts +274 -112
- package/std/index.js +1 -1
- package/std/index.js.map +1 -1
- package/{tgpuComputeFn-DOUjhQua.d.ts → tgpuConstant-BSgcF4zi.d.cts} +3058 -2938
- package/{tgpuComputeFn-DOUjhQua.d.cts → tgpuConstant-BSgcF4zi.d.ts} +3058 -2938
- package/chunk-APTRHY5Y.js +0 -41
- package/chunk-APTRHY5Y.js.map +0 -1
- package/chunk-RC26P3MP.cjs +0 -2
- package/chunk-RC26P3MP.cjs.map +0 -1
- package/chunk-RK6TMFVW.js +0 -2
- package/chunk-RK6TMFVW.js.map +0 -1
- package/chunk-S62QJDM6.js +0 -2
- package/chunk-S62QJDM6.js.map +0 -1
- package/chunk-TG4A6AM3.cjs +0 -2
- package/chunk-TG4A6AM3.cjs.map +0 -1
- package/chunk-UMHCZDPA.cjs +0 -41
- package/chunk-UMHCZDPA.cjs.map +0 -1
package/{matrix-S8W4dk8I.d.ts → matrix-4h_aOtIE.d.cts} RENAMED
@@ -1,50 +1,50 @@
-import {
+import { bu as Mat2x2f, bv as Mat3x3f, bw as Mat4x4f, br as m2x2f, bs as m3x3f, bt as m4x4f, c_ as DualFn, bD as v3f } from './tgpuConstant-BSgcF4zi.cjs';

 /**
  * Returns a 2-by-2 identity matrix.
  * @returns {m2x2f} The result matrix.
  */
-declare const identity2:
+declare const identity2: DualFn<() => m2x2f>;
 /**
  * Returns a 3-by-3 identity matrix.
  * @returns {m3x3f} The result matrix.
  */
-declare const identity3:
+declare const identity3: DualFn<() => m3x3f>;
 /**
  * Returns a 4-by-4 identity matrix.
  * @returns {m4x4f} The result matrix.
  */
-declare const identity4:
+declare const identity4: DualFn<() => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which translates by the given vector v.
  * @param {v3f} vector - The vector by which to translate.
  * @returns {m4x4f} The translation matrix.
  */
-declare const translation4:
+declare const translation4: DualFn<(vector: v3f) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which scales in each dimension by an amount given by the corresponding entry in the given vector.
  * @param {v3f} vector - A vector of three entries specifying the factor by which to scale in each dimension.
  * @returns {m4x4f} The scaling matrix.
  */
-declare const scaling4:
+declare const scaling4: DualFn<(vector: v3f) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which rotates around the x-axis by the given angle.
  * @param {number} angle - The angle by which to rotate (in radians).
  * @returns {m4x4f} The rotation matrix.
  */
-declare const rotationX4:
+declare const rotationX4: DualFn<(a: number) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which rotates around the y-axis by the given angle.
  * @param {number} angle - The angle by which to rotate (in radians).
  * @returns {m4x4f} The rotation matrix.
  */
-declare const rotationY4:
+declare const rotationY4: DualFn<(a: number) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which rotates around the z-axis by the given angle.
  * @param {number} angle - The angle by which to rotate (in radians).
  * @returns {m4x4f} The rotation matrix.
  */
-declare const rotationZ4:
+declare const rotationZ4: DualFn<(a: number) => m4x4f>;
 /**
  * Schema representing mat2x2f - a matrix with 2 rows and 2 columns, with elements of type f32.
  * Also a constructor function for this matrix type.
package/{matrix-BN6ObiMv.d.cts → matrix-Domrg-ap.d.ts} RENAMED
@@ -1,50 +1,50 @@
-import {
+import { bu as Mat2x2f, bv as Mat3x3f, bw as Mat4x4f, br as m2x2f, bs as m3x3f, bt as m4x4f, c_ as DualFn, bD as v3f } from './tgpuConstant-BSgcF4zi.js';

 /**
  * Returns a 2-by-2 identity matrix.
  * @returns {m2x2f} The result matrix.
  */
-declare const identity2:
+declare const identity2: DualFn<() => m2x2f>;
 /**
  * Returns a 3-by-3 identity matrix.
  * @returns {m3x3f} The result matrix.
  */
-declare const identity3:
+declare const identity3: DualFn<() => m3x3f>;
 /**
  * Returns a 4-by-4 identity matrix.
  * @returns {m4x4f} The result matrix.
  */
-declare const identity4:
+declare const identity4: DualFn<() => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which translates by the given vector v.
  * @param {v3f} vector - The vector by which to translate.
  * @returns {m4x4f} The translation matrix.
  */
-declare const translation4:
+declare const translation4: DualFn<(vector: v3f) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which scales in each dimension by an amount given by the corresponding entry in the given vector.
  * @param {v3f} vector - A vector of three entries specifying the factor by which to scale in each dimension.
  * @returns {m4x4f} The scaling matrix.
  */
-declare const scaling4:
+declare const scaling4: DualFn<(vector: v3f) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which rotates around the x-axis by the given angle.
  * @param {number} angle - The angle by which to rotate (in radians).
  * @returns {m4x4f} The rotation matrix.
  */
-declare const rotationX4:
+declare const rotationX4: DualFn<(a: number) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which rotates around the y-axis by the given angle.
  * @param {number} angle - The angle by which to rotate (in radians).
  * @returns {m4x4f} The rotation matrix.
  */
-declare const rotationY4:
+declare const rotationY4: DualFn<(a: number) => m4x4f>;
 /**
  * Creates a 4-by-4 matrix which rotates around the z-axis by the given angle.
  * @param {number} angle - The angle by which to rotate (in radians).
  * @returns {m4x4f} The rotation matrix.
  */
-declare const rotationZ4:
+declare const rotationZ4: DualFn<(a: number) => m4x4f>;
 /**
  * Schema representing mat2x2f - a matrix with 2 rows and 2 columns, with elements of type f32.
  * Also a constructor function for this matrix type.
package/package.json CHANGED
package/std/index.cjs CHANGED
@@ -1,2 +1,2 @@
-
"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { newObj[key] = obj[key]; } } } newObj.default = obj; return newObj; } }var _chunkTG4A6AM3cjs = require('../chunk-TG4A6AM3.cjs');var _chunkUMHCZDPAcjs = require('../chunk-UMHCZDPA.cjs');var Ve=_chunkUMHCZDPAcjs.ia.call(void 0, ()=>{throw new Error("discard() can only be used on the GPU.")},()=>_chunkUMHCZDPAcjs.fa.call(void 0, "discard;",_chunkUMHCZDPAcjs.C),"discard");var Ue=_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.f.call(void 0, _chunkUMHCZDPAcjs.Ra.translation(t),e),(e,t)=>({value:`(${_chunkUMHCZDPAcjs.Ra.translation(t).value} * ${e.value})`,dataType:e.dataType}),"translate4"),Ee= exports.scale4 =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.f.call(void 0, _chunkUMHCZDPAcjs.Ra.scaling(t),e),(e,t)=>({value:`(${_chunkUMHCZDPAcjs.Ra.scaling(t).value} * ${e.value})`,dataType:e.dataType}),"scale4"),Oe= exports.rotateX4 =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.f.call(void 0, _chunkUMHCZDPAcjs.Ra.rotationX(t),e),(e,t)=>({value:`(${_chunkUMHCZDPAcjs.Ra.rotationX(t).value} * ${e.value})`,dataType:e.dataType}),"rotateX4"),Le= exports.rotateY4 =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.f.call(void 0, _chunkUMHCZDPAcjs.Ra.rotationY(t),e),(e,t)=>({value:`(${_chunkUMHCZDPAcjs.Ra.rotationY(t).value} * ${e.value})`,dataType:e.dataType}),"rotateY4"),_e= exports.rotateZ4 =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.f.call(void 0, _chunkUMHCZDPAcjs.Ra.rotationZ(t),e),(e,t)=>({value:`(${_chunkUMHCZDPAcjs.Ra.rotationZ(t).value} * ${e.value})`,dataType:e.dataType}),"rotateZ4");function g(e){return e.dataType.type.includes("2")?_chunkUMHCZDPAcjs.va:e.dataType.type.includes("3")?_chunkUMHCZDPAcjs.Aa:_chunkUMHCZDPAcjs.Fa}var Pe=_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>k(b(e,t)),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `all(${e.value} == ${t.value})`,_chunkUMHCZDPAcjs.la),"allEq"),b= exports.eq =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.a.eq[e.kind](e,t),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} == ${t.value})`,g(e)),"eq"),he= exports.ne =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>m(b(e,t)),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} != ${t.value})`,g(e)),"ne"),w= exports.lt =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.a.lt[e.kind](e,t),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} < ${t.value})`,g(e)),"lt"),Ge= exports.le =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>B(w(e,t),b(e,t)),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} <= ${t.value})`,g(e)),"le"),Ne= exports.gt =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>J(m(w(e,t)),m(b(e,t))),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} > ${t.value})`,g(e)),"gt"),Fe= exports.ge =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>m(w(e,t)),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} >= ${t.value})`,g(e)),"ge"),m= exports.not =_chunkUMHCZDPAcjs.ia.call(void 0, e=>_chunkTG4A6AM3cjs.a.neg[e.kind](e),e=>_chunkUMHCZDPAcjs.fa.call(void 0, `!(${e.value})`,e.dataType),"not"),B= exports.or =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>_chunkTG4A6AM3cjs.a.or[e.kind](e,t),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} | ${t.value})`,e.dataType),"or"),J= exports.and =_chunkUMHCZDPAcjs.ia.call(void 0, 
(e,t)=>m(B(m(e),m(t))),(e,t)=>_chunkUMHCZDPAcjs.fa.call(void 0, `(${e.value} & ${t.value})`,e.dataType),"and"),k= exports.all =_chunkUMHCZDPAcjs.ia.call(void 0, e=>_chunkTG4A6AM3cjs.a.all[e.kind](e),e=>_chunkUMHCZDPAcjs.fa.call(void 0, `all(${e.value})`,_chunkUMHCZDPAcjs.la),"all"),We= exports.any =_chunkUMHCZDPAcjs.ia.call(void 0, e=>!k(m(e)),e=>_chunkUMHCZDPAcjs.fa.call(void 0, `any(${e.value})`,_chunkUMHCZDPAcjs.la),"any"),Xe= exports.isCloseTo =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t,o=.01)=>typeof e=="number"&&typeof t=="number"?Math.abs(e-t)<o:_chunkUMHCZDPAcjs.D.call(void 0, e)&&_chunkUMHCZDPAcjs.D.call(void 0, t)?_chunkTG4A6AM3cjs.a.isCloseToZero[e.kind](_chunkTG4A6AM3cjs.e.call(void 0, e,t),o):!1,(e,t,o=_chunkUMHCZDPAcjs.fa.call(void 0, .01,_chunkUMHCZDPAcjs.pa))=>_chunkTG4A6AM3cjs.b.call(void 0, e)&&_chunkTG4A6AM3cjs.b.call(void 0, t)?_chunkUMHCZDPAcjs.fa.call(void 0, `(abs(f32(${e.value}) - f32(${t.value})) <= ${o.value})`,_chunkUMHCZDPAcjs.la):!_chunkTG4A6AM3cjs.b.call(void 0, e)&&!_chunkTG4A6AM3cjs.b.call(void 0, t)?_chunkUMHCZDPAcjs.fa.call(void 0, `all(abs(${e.value} - ${t.value}) <= (${e.value} - ${e.value}) + ${o.value})`,_chunkUMHCZDPAcjs.la):_chunkUMHCZDPAcjs.fa.call(void 0, "false",_chunkUMHCZDPAcjs.la),"isCloseTo"),Je= exports.select =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t,o)=>typeof o=="boolean"?o?t:e:_chunkTG4A6AM3cjs.a.select[e.kind](e,t,o),(e,t,o)=>_chunkUMHCZDPAcjs.fa.call(void 0, `select(${e.value}, ${t.value}, ${o.value})`,e.dataType),"select");var Me=_chunkUMHCZDPAcjs.ia.call(void 0, ()=>console.warn("workgroupBarrier is a no-op outside of GPU mode."),()=>_chunkUMHCZDPAcjs.fa.call(void 0, "workgroupBarrier()",_chunkUMHCZDPAcjs.C),"workgroupBarrier"),qe= exports.storageBarrier =_chunkUMHCZDPAcjs.ia.call(void 0, ()=>console.warn("storageBarrier is a no-op outside of GPU mode."),()=>_chunkUMHCZDPAcjs.fa.call(void 0, "storageBarrier()",_chunkUMHCZDPAcjs.C),"storageBarrier"),Ce= exports.textureBarrier =_chunkUMHCZDPAcjs.ia.call(void 0, ()=>console.warn("textureBarrier is a no-op outside of GPU mode."),()=>_chunkUMHCZDPAcjs.fa.call(void 0, "textureBarrier()",_chunkUMHCZDPAcjs.C),"textureBarrier"),Ze= exports.atomicLoad =_chunkUMHCZDPAcjs.ia.call(void 0, e=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},e=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicLoad(&${e.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicLoad"),Ye= exports.atomicStore =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(!_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)||e.dataType.type!=="atomic")throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`);return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicStore(&${e.value}, ${t.value})`,_chunkUMHCZDPAcjs.C)},"atomicStore"),f=(e,t)=>e.dataType.type==="atomic"&&e.dataType.inner.type==="i32"?[e.dataType,_chunkUMHCZDPAcjs.oa]:[e.dataType,_chunkUMHCZDPAcjs.ma],je= exports.atomicAdd =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicAdd(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicAdd",f),Re= exports.atomicSub 
=_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicSub(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicSub",f),ze= exports.atomicMax =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicMax(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicMax",f),He= exports.atomicMin =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicMin(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicMin",f),Ke= exports.atomicAnd =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicAnd(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicAnd",f),Qe= exports.atomicOr =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicOr(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicOr",f),et= exports.atomicXor =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of GPU mode.")},(e,t)=>{if(_chunkUMHCZDPAcjs.M.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunkUMHCZDPAcjs.fa.call(void 0, `atomicXor(&${e.value}, ${t.value})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicXor",f);var tt=_chunkUMHCZDPAcjs.ia.call(void 0, e=>e.length,e=>_chunkUMHCZDPAcjs.P.call(void 0, e.dataType)&&_chunkUMHCZDPAcjs.N.call(void 0, e.dataType.inner)&&e.dataType.inner.elementCount>0?_chunkUMHCZDPAcjs.fa.call(void 0, String(e.dataType.inner.elementCount),_chunkUMHCZDPAcjs.ja):_chunkUMHCZDPAcjs.fa.call(void 0, `arrayLength(${e.value})`,_chunkUMHCZDPAcjs.ma),"arrayLength",e=>[_chunkUMHCZDPAcjs.Ta.call(void 0, e.dataType)]);var _typedbinary = require('typed-binary'); var d = _interopRequireWildcard(_typedbinary);var rt=_chunkUMHCZDPAcjs.ia.call(void 0, e=>{let t=new ArrayBuffer(4);new d.BufferWriter(t).writeUint32(e);let n=new d.BufferReader(t);return _chunkUMHCZDPAcjs.ra.call(void 0, n.readFloat16(),n.readFloat16())},e=>_chunkUMHCZDPAcjs.fa.call(void 0, `unpack2x16float(${e.value})`,_chunkUMHCZDPAcjs.ra),"unpack2x16float"),at= exports.pack2x16float =_chunkUMHCZDPAcjs.ia.call(void 0, e=>{let t=new ArrayBuffer(4),o=new d.BufferWriter(t);o.writeFloat16(e.x),o.writeFloat16(e.y);let n=new d.BufferReader(t);return _chunkUMHCZDPAcjs.ma.call(void 0, 
n.readUint32())},e=>_chunkUMHCZDPAcjs.fa.call(void 0, `pack2x16float(${e.value})`,_chunkUMHCZDPAcjs.ma),"pack2x16float"),ot= exports.unpack4x8unorm =_chunkUMHCZDPAcjs.ia.call(void 0, e=>{let t=new ArrayBuffer(4);new d.BufferWriter(t).writeUint32(e);let n=new d.BufferReader(t);return _chunkUMHCZDPAcjs.Ba.call(void 0, n.readUint8()/255,n.readUint8()/255,n.readUint8()/255,n.readUint8()/255)},e=>_chunkUMHCZDPAcjs.fa.call(void 0, `unpack4x8unorm(${e.value})`,_chunkUMHCZDPAcjs.Ba),"unpack4x8unorm"),nt= exports.pack4x8unorm =_chunkUMHCZDPAcjs.ia.call(void 0, e=>{let t=new ArrayBuffer(4),o=new d.BufferWriter(t);o.writeUint8(e.x*255),o.writeUint8(e.y*255),o.writeUint8(e.z*255),o.writeUint8(e.w*255);let n=new d.BufferReader(t);return _chunkUMHCZDPAcjs.ma.call(void 0, n.readUint32())},e=>_chunkUMHCZDPAcjs.fa.call(void 0, `pack4x8unorm(${e.value})`,_chunkUMHCZDPAcjs.ma),"pack4x8unorm");var ut=_chunkUMHCZDPAcjs.ia.call(void 0, (e,t,o,n,u)=>{throw new Error("Texture sampling is not supported outside of GPU mode.")},(e,t,o,n,u)=>{let l=[e,t,o];return n!==void 0&&l.push(n),u!==void 0&&l.push(u),_chunkUMHCZDPAcjs.fa.call(void 0, `textureSample(${l.map($=>$.value).join(", ")})`,_chunkUMHCZDPAcjs.Ba)},"textureSample"),pt= exports.textureSampleLevel =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t,o,n,u)=>{throw new Error("Texture sampling is not supported outside of GPU mode.")},(e,t,o,n,u)=>{let l=[e,t,o,n];return u!==void 0&&l.push(u),_chunkUMHCZDPAcjs.fa.call(void 0, `textureSampleLevel(${l.map($=>$.value).join(", ")})`,_chunkUMHCZDPAcjs.Ba)},"textureSampleLevel"),it={u32:_chunkUMHCZDPAcjs.Ea,i32:_chunkUMHCZDPAcjs.Da,f32:_chunkUMHCZDPAcjs.Ba},Tt= exports.textureLoad =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t,o)=>{throw new Error("Texture loading is not supported outside of GPU mode.")},(e,t,o)=>{let n=[e,t];o!==void 0&&n.push(o);let u=e.dataType;return _chunkUMHCZDPAcjs.fa.call(void 0, `textureLoad(${n.map(l=>l.value).join(", ")})`,"texelDataType"in u?u.texelDataType:it[u.channelDataType.type])},"textureLoad"),dt= exports.textureStore =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t,o,n)=>{throw new Error("Texture storing is not supported outside of GPU mode.")},(e,t,o,n)=>_chunkUMHCZDPAcjs.fa.call(void 0, `textureStore(${[e,t,o,n].filter(u=>u!==void 0).map(u=>u.value).join(", ")})`,_chunkUMHCZDPAcjs.C),"textureStore"),st= exports.textureDimensions =_chunkUMHCZDPAcjs.ia.call(void 0, (e,t)=>{throw new Error("Texture dimensions are not supported outside of GPU mode.")},(e,t)=>{let o=e.dataType.dimension;return _chunkUMHCZDPAcjs.fa.call(void 0, `textureDimensions(${e.value}${t!==void 0?`, ${t.value}`:""})`,o==="1d"?_chunkUMHCZDPAcjs.ma:o==="3d"?_chunkUMHCZDPAcjs.za:_chunkUMHCZDPAcjs.ua)},"textureDimensions");exports.abs = _chunkTG4A6AM3cjs.h; exports.acos = _chunkTG4A6AM3cjs.j; exports.acosh = _chunkTG4A6AM3cjs.k; exports.add = _chunkTG4A6AM3cjs.d; exports.all = k; exports.allEq = Pe; exports.and = J; exports.any = We; exports.arrayLength = tt; exports.asin = _chunkTG4A6AM3cjs.l; exports.atan2 = _chunkTG4A6AM3cjs.i; exports.atomicAdd = je; exports.atomicAnd = Ke; exports.atomicLoad = Ze; exports.atomicMax = ze; exports.atomicMin = He; exports.atomicOr = Qe; exports.atomicStore = Ye; exports.atomicSub = Re; exports.atomicXor = et; exports.ceil = _chunkTG4A6AM3cjs.m; exports.clamp = _chunkTG4A6AM3cjs.n; exports.cos = _chunkTG4A6AM3cjs.o; exports.cosh = _chunkTG4A6AM3cjs.p; exports.cross = _chunkTG4A6AM3cjs.q; exports.discard = Ve; exports.distance = _chunkTG4A6AM3cjs.H; exports.div = _chunkTG4A6AM3cjs.g; exports.dot = 
_chunkTG4A6AM3cjs.r; exports.eq = b; exports.exp = _chunkTG4A6AM3cjs.C; exports.exp2 = _chunkTG4A6AM3cjs.D; exports.floor = _chunkTG4A6AM3cjs.t; exports.fract = _chunkTG4A6AM3cjs.u; exports.ge = Fe; exports.gt = Ne; exports.identity2 = _chunkUMHCZDPAcjs.Ha; exports.identity3 = _chunkUMHCZDPAcjs.Ia; exports.identity4 = _chunkUMHCZDPAcjs.Ja; exports.isCloseTo = Xe; exports.le = Ge; exports.length = _chunkTG4A6AM3cjs.v; exports.log = _chunkTG4A6AM3cjs.w; exports.log2 = _chunkTG4A6AM3cjs.x; exports.lt = w; exports.max = _chunkTG4A6AM3cjs.y; exports.min = _chunkTG4A6AM3cjs.z; exports.mix = _chunkTG4A6AM3cjs.F; exports.mul = _chunkTG4A6AM3cjs.f; exports.ne = he; exports.neg = _chunkTG4A6AM3cjs.I; exports.normalize = _chunkTG4A6AM3cjs.s; exports.not = m; exports.or = B; exports.pack2x16float = at; exports.pack4x8unorm = nt; exports.pow = _chunkTG4A6AM3cjs.E; exports.reflect = _chunkTG4A6AM3cjs.G; exports.rotateX4 = Oe; exports.rotateY4 = Le; exports.rotateZ4 = _e; exports.rotationX4 = _chunkUMHCZDPAcjs.Ma; exports.rotationY4 = _chunkUMHCZDPAcjs.Na; exports.rotationZ4 = _chunkUMHCZDPAcjs.Oa; exports.scale4 = Ee; exports.scaling4 = _chunkUMHCZDPAcjs.La; exports.select = Je; exports.sign = _chunkTG4A6AM3cjs.A; exports.sin = _chunkTG4A6AM3cjs.B; exports.sqrt = _chunkTG4A6AM3cjs.J; exports.storageBarrier = qe; exports.sub = _chunkTG4A6AM3cjs.e; exports.tanh = _chunkTG4A6AM3cjs.K; exports.textureBarrier = Ce; exports.textureDimensions = st; exports.textureLoad = Tt; exports.textureSample = ut; exports.textureSampleLevel = pt; exports.textureStore = dt; exports.translate4 = Ue; exports.translation4 = _chunkUMHCZDPAcjs.Ka; exports.unpack2x16float = rt; exports.unpack4x8unorm = ot; exports.workgroupBarrier = Me;
+
"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { newObj[key] = obj[key]; } } } newObj.default = obj; return newObj; } }var _chunk2VTISQYPcjs = require('../chunk-2VTISQYP.cjs');var de=_chunk2VTISQYPcjs.ya.call(void 0, ()=>{throw new Error("`discard` relies on GPU resources and cannot be executed outside of a draw call")},()=>_chunk2VTISQYPcjs.wa.call(void 0, "discard;",_chunk2VTISQYPcjs.z),"discard");var ye=_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.abs(e):_chunk2VTISQYPcjs.mc.abs[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`abs(${e})`,e.dataType),"abs"),fe= exports.acos =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.acos(e):_chunk2VTISQYPcjs.mc.acos[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`acos(${e})`,e.dataType),"acos"),le= exports.acosh =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.acosh(e):_chunk2VTISQYPcjs.mc.acosh[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`acosh(${e})`,e.dataType),"acosh"),xe= exports.asin =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.asin(e):_chunk2VTISQYPcjs.mc.asin[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`asin(${e})`,e.dataType),"asin"),be= exports.asinh =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.asinh(e):_chunk2VTISQYPcjs.mc.asinh[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`asinh(${e})`,e.dataType),"asinh"),he= exports.atan =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.atan(e):_chunk2VTISQYPcjs.mc.atan[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atan(${e})`,e.dataType),"atan"),we= exports.atanh =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.atanh(e):_chunk2VTISQYPcjs.mc.atanh[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atanh(${e})`,e.dataType),"atanh"),ge= exports.atan2 =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>typeof e=="number"&&typeof t=="number"?Math.atan2(e,t):_chunk2VTISQYPcjs.mc.atan2[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atan2(${e}, ${t})`,e.dataType),"atan2","unify"),Ie= exports.ceil =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.ceil(e):_chunk2VTISQYPcjs.mc.ceil[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`ceil(${e})`,e.dataType),"ceil"),ve= exports.clamp =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>typeof e=="number"?Math.min(Math.max(t,e),a):_chunk2VTISQYPcjs.mc.clamp[e.kind](e,t,a),(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`clamp(${e}, ${t}, ${a})`,e.dataType),"clamp"),$e= exports.cos =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.cos(e):_chunk2VTISQYPcjs.mc.cos[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`cos(${e})`,e.dataType),"cos"),Ae= exports.cosh =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.cosh(e):_chunk2VTISQYPcjs.mc.cosh[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`cosh(${e})`,e.dataType),"cosh"),Se= exports.countLeadingZeros =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for countLeadingZeros not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`countLeadingZeros(${e})`,e.dataType),"countLeadingZeros"),Ve= exports.countOneBits =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for countOneBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`countOneBits(${e})`,e.dataType),"countOneBits"),Fe= exports.countTrailingZeros =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for countTrailingZeros not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`countTrailingZeros(${e})`,e.dataType),"countTrailingZeros"),Pe= exports.cross =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>_chunk2VTISQYPcjs.mc.cross[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`cross(${e}, ${t})`,e.dataType),"cross"),Ee= exports.degrees =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return e*180/Math.PI;throw new Error("CPU implementation for degrees on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`degrees(${e})`,e.dataType),"degrees"),Ue= exports.determinant =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for determinant not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`determinant(${e})`,_chunk2VTISQYPcjs.Fa),"determinant"),ke= exports.distance =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>typeof e=="number"&&typeof t=="number"?Math.abs(e-t):re(_chunk2VTISQYPcjs.oc.call(void 0, e,t)),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`distance(${e}, ${t})`,e.dataType.type==="f16"||e.dataType.type.endsWith("h")?_chunk2VTISQYPcjs.Ga:_chunk2VTISQYPcjs.Fa),"distance"),ne= exports.dot =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>_chunk2VTISQYPcjs.mc.dot[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dot(${e}, ${t})`,_chunk2VTISQYPcjs.Fa),"dot"),De= exports.dot4U8Packed =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("CPU implementation for dot4U8Packed not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dot4U8Packed(${e}, ${t})`,_chunk2VTISQYPcjs.Ca),"dot4U8Packed",[_chunk2VTISQYPcjs.Ca,_chunk2VTISQYPcjs.Ca]),Be= exports.dot4I8Packed =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("CPU implementation for dot4I8Packed not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dot4I8Packed(${e}, ${t})`,_chunk2VTISQYPcjs.Ea),"dot4I8Packed",[_chunk2VTISQYPcjs.Ea,_chunk2VTISQYPcjs.Ea]),Ce= exports.exp =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.exp(e):_chunk2VTISQYPcjs.mc.exp[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`exp(${e})`,e.dataType),"exp"),Oe= exports.exp2 =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?2**e:_chunk2VTISQYPcjs.mc.exp2[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`exp2(${e})`,e.dataType),"exp2"),Me= exports.extractBits =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>{throw new Error("CPU implementation for extractBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`extractBits(${e}, ${t}, ${a})`,e.dataType),"extractBits",(e,t,a)=>[e.dataType,_chunk2VTISQYPcjs.Ca,_chunk2VTISQYPcjs.Ca]),Ge= exports.faceForward =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>{throw new Error("CPU implementation for faceForward not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`faceForward(${e}, ${t}, ${a})`,e.dataType),"faceForward"),Ne= exports.firstLeadingBit =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for firstLeadingBit not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`firstLeadingBit(${e})`,e.dataType),"firstLeadingBit"),Le= exports.firstTrailingBit =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for firstTrailingBit not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`firstTrailingBit(${e})`,e.dataType),"firstTrailingBit"),qe= exports.floor =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.floor(e):_chunk2VTISQYPcjs.mc.floor[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`floor(${e})`,e.dataType),"floor"),_e= exports.fma =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>{if(typeof e=="number")return e*t+a;throw new Error("CPU implementation for fma on vectors not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`fma(${e}, ${t}, ${a})`,e.dataType),"fma"),Re= exports.fract =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?e-Math.floor(e):_chunk2VTISQYPcjs.mc.fract[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`fract(${e})`,e.dataType),"fract"),We={f32:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Fa,exp:_chunk2VTISQYPcjs.Ea}),f16:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ga,exp:_chunk2VTISQYPcjs.Ea}),abstractFloat:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Aa,exp:_chunk2VTISQYPcjs.za}),vec2f:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ia,exp:_chunk2VTISQYPcjs.Ka}),vec3f:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Na,exp:_chunk2VTISQYPcjs.Pa}),vec4f:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Sa,exp:_chunk2VTISQYPcjs.Ua}),vec2h:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ja,exp:_chunk2VTISQYPcjs.Ka}),vec3h:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Oa,exp:_chunk2VTISQYPcjs.Pa}),vec4h:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ta,exp:_chunk2VTISQYPcjs.Ua})},Ze= exports.frexp =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for frexp not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>{let t=We[e.dataType.type];if(!t)throw new Error(`Unsupported data type for frexp: ${e.dataType.type}. Supported types are f32, f16, abstractFloat, vec2f, vec3f, vec4f, vec2h, vec3h, vec4h.`);return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`frexp(${e})`,t)},"frexp"),ze= exports.insertBits =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a,i)=>{throw new Error("CPU implementation for insertBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t,a,i)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`insertBits(${e}, ${t}, ${a}, ${i})`,e.dataType),"insertBits"),Xe= exports.inverseSqrt =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return 1/Math.sqrt(e);throw new Error("CPU implementation for inverseSqrt on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`inverseSqrt(${e})`,e.dataType),"inverseSqrt"),Je= exports.ldexp =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("CPU implementation for ldexp not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`ldexp(${e}, ${t})`,e.dataType),"ldexp",(e,t)=>{switch(e.dataType.type){case"abstractFloat":return[_chunk2VTISQYPcjs.Aa,_chunk2VTISQYPcjs.za];case"f32":case"f16":return[e.dataType,_chunk2VTISQYPcjs.Ea];case"vec2f":case"vec2h":return[e.dataType,_chunk2VTISQYPcjs.Ka];case"vec3f":case"vec3h":return[e.dataType,_chunk2VTISQYPcjs.Pa];case"vec4f":case"vec4h":return[e.dataType,_chunk2VTISQYPcjs.Ua];default:throw new Error(`Unsupported data type for ldexp: ${e.dataType.type}. 
Supported types are abstractFloat, f32, f16, vec2f, vec2h, vec3f, vec3h, vec4f, vec4h.`)}}),re= exports.length =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.abs(e):_chunk2VTISQYPcjs.mc.length[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`length(${e})`,_chunk2VTISQYPcjs.Fa),"length"),je= exports.log =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.log(e):_chunk2VTISQYPcjs.mc.log[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`log(${e})`,e.dataType),"log"),Ye= exports.log2 =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.log2(e):_chunk2VTISQYPcjs.mc.log2[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`log2(${e})`,e.dataType),"log2"),He= exports.max =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>typeof e=="number"?Math.max(e,t):_chunk2VTISQYPcjs.mc.max[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`max(${e}, ${t})`,e.dataType),"max","unify"),Ke= exports.min =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>typeof e=="number"?Math.min(e,t):_chunk2VTISQYPcjs.mc.min[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`min(${e}, ${t})`,e.dataType),"min","unify"),Qe= exports.mix =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>{if(typeof e=="number"){if(typeof a!="number"||typeof t!="number")throw new Error("When e1 and e2 are numbers, the blend factor must be a number.");return e*(1-a)+t*a}if(typeof e=="number"||typeof t=="number")throw new Error("e1 and e2 need to both be vectors of the same kind.");return _chunk2VTISQYPcjs.mc.mix[e.kind](e,t,a)},(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`mix(${e}, ${t}, ${a})`,e.dataType),"mix"),et={f32:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Fa,whole:_chunk2VTISQYPcjs.Fa}),f16:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ga,whole:_chunk2VTISQYPcjs.Ga}),abstractFloat:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Aa,whole:_chunk2VTISQYPcjs.Aa}),vec2f:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ia,whole:_chunk2VTISQYPcjs.Ia}),vec3f:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Na,whole:_chunk2VTISQYPcjs.Na}),vec4f:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Sa,whole:_chunk2VTISQYPcjs.Sa}),vec2h:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ja,whole:_chunk2VTISQYPcjs.Ja}),vec3h:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Oa,whole:_chunk2VTISQYPcjs.Oa}),vec4h:_chunk2VTISQYPcjs.kc.call(void 0, {fract:_chunk2VTISQYPcjs.Ta,whole:_chunk2VTISQYPcjs.Ta})},tt= exports.modf =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for modf not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>{let t=et[e.dataType.type];if(!t)throw new Error(`Unsupported data type for modf: ${e.dataType.type}. 
Supported types are f32, f16, abstractFloat, vec2f, vec3f, vec4f, vec2h, vec3h, vec4h.`);return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`modf(${e})`,t)},"modf"),nt= exports.normalize =_chunk2VTISQYPcjs.ya.call(void 0, e=>_chunk2VTISQYPcjs.mc.normalize[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`normalize(${e})`,e.dataType),"normalize"),rt= exports.pow =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{if(typeof e=="number"&&typeof t=="number")return e**t;if(typeof e=="object"&&typeof t=="object"&&"kind"in e&&"kind"in t)return _chunk2VTISQYPcjs.mc.pow[e.kind](e,t);throw new Error("Invalid arguments to pow()")},(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`pow(${e}, ${t})`,e.dataType),"pow"),ot= exports.quantizeToF16 =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for quantizeToF16 not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`quantizeToF16(${e})`,e.dataType),"quantizeToF16"),at= exports.radians =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return e*Math.PI/180;throw new Error("CPU implementation for radians on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`radians(${e})`,e.dataType),"radians"),st= exports.reflect =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>_chunk2VTISQYPcjs.oc.call(void 0, e,_chunk2VTISQYPcjs.pc.call(void 0, 2*ne(t,e),t)),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`reflect(${e}, ${t})`,e.dataType),"reflect"),pt= exports.refract =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>{throw new Error("CPU implementation for refract not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`refract(${e}, ${t}, ${a})`,e.dataType),"refract",(e,t,a)=>[e.dataType,t.dataType,e.dataType.type==="f16"||e.dataType.type.endsWith("h")?_chunk2VTISQYPcjs.Ga:_chunk2VTISQYPcjs.Fa]),it= exports.reverseBits =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for reverseBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`reverseBits(${e})`,e.dataType),"reverseBits"),ct= exports.round =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return Math.round(e);throw new Error("CPU implementation for round on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`round(${e})`,e.dataType),"round"),mt= exports.saturate =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return Math.max(0,Math.min(1,e));throw new Error("CPU implementation for saturate on vectors not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`saturate(${e})`,e.dataType),"saturate"),Tt= exports.sign =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.sign(e):_chunk2VTISQYPcjs.mc.sign[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`sign(${e})`,e.dataType),"sign"),ut= exports.sin =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.sin(e):_chunk2VTISQYPcjs.mc.sin[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`sin(${e})`,e.dataType),"sin"),dt= exports.sinh =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return Math.sinh(e);throw new Error("CPU implementation for sinh on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`sinh(${e})`,e.dataType),"sinh"),yt= exports.smoothstep =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>typeof a=="number"?_chunk2VTISQYPcjs.lc.call(void 0, e,t,a):_chunk2VTISQYPcjs.mc.smoothstep[a.kind](e,t,a),(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`smoothstep(${e}, ${t}, ${a})`,a.dataType),"smoothstep"),ft= exports.sqrt =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.sqrt(e):_chunk2VTISQYPcjs.mc.sqrt[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`sqrt(${e})`,e.dataType),"sqrt"),lt= exports.step =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{if(typeof e=="number")return e<=t?1:0;throw new Error("CPU implementation for step on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`step(${e}, ${t})`,e.dataType),"step"),xt= exports.tan =_chunk2VTISQYPcjs.ya.call(void 0, e=>{if(typeof e=="number")return Math.tan(e);throw new Error("CPU implementation for tan on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`tan(${e})`,e.dataType),"tan"),bt= exports.tanh =_chunk2VTISQYPcjs.ya.call(void 0, e=>typeof e=="number"?Math.tanh(e):_chunk2VTISQYPcjs.mc.tanh[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`tanh(${e})`,e.dataType),"tanh"),ht= exports.transpose =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for transpose not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`transpose(${e})`,e.dataType),"transpose"),wt= exports.trunc =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("CPU implementation for trunc not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`trunc(${e})`,e.dataType),"trunc");var P=_chunk2VTISQYPcjs.pc[_chunk2VTISQYPcjs.a].jsImpl,gt=_chunk2VTISQYPcjs.$a[_chunk2VTISQYPcjs.a].jsImpl,It=_chunk2VTISQYPcjs.$a[_chunk2VTISQYPcjs.a].gpuImpl,vt=_chunk2VTISQYPcjs.ab[_chunk2VTISQYPcjs.a].jsImpl,$t=_chunk2VTISQYPcjs.ab[_chunk2VTISQYPcjs.a].gpuImpl,At=_chunk2VTISQYPcjs.bb[_chunk2VTISQYPcjs.a].jsImpl,St=_chunk2VTISQYPcjs.bb[_chunk2VTISQYPcjs.a].gpuImpl,Vt=_chunk2VTISQYPcjs.cb[_chunk2VTISQYPcjs.a].jsImpl,Ft=_chunk2VTISQYPcjs.cb[_chunk2VTISQYPcjs.a].gpuImpl,Pt=_chunk2VTISQYPcjs.db[_chunk2VTISQYPcjs.a].jsImpl,Et=_chunk2VTISQYPcjs.db[_chunk2VTISQYPcjs.a].gpuImpl,Ut= exports.translate4 =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>P(gt(t),e),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${It(t)} * ${e})`,e.dataType),"translate4"),kt= exports.scale4 =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>P(vt(t),e),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${$t(t)} * ${e})`,e.dataType),"scale4"),Dt= exports.rotateX4 =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>P(At(t),e),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${St(t)} * ${e})`,e.dataType),"rotateX4"),Bt= exports.rotateY4 =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>P(Vt(t),e),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${Ft(t)} * ${e})`,e.dataType),"rotateY4"),Ct= exports.rotateZ4 =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>P(Pt(t),e),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${Et(t)} * ${e})`,e.dataType),"rotateZ4");function $(e){return e.dataType.type.includes("2")?_chunk2VTISQYPcjs.Ma:e.dataType.type.includes("3")?_chunk2VTISQYPcjs.Ra:_chunk2VTISQYPcjs.Wa}var Ot=_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>Z(E(e,t)),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`all(${e} == ${t})`,_chunk2VTISQYPcjs.Ba),"allEq"),E= exports.eq =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>_chunk2VTISQYPcjs.mc.eq[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} == ${t})`,$(e)),"eq"),Mt= exports.ne =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>x(E(e,t)),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} != ${t})`,$(e)),"ne"),_= exports.lt =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>_chunk2VTISQYPcjs.mc.lt[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} < ${t})`,$(e)),"lt"),Gt= exports.le =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>W(_(e,t),E(e,t)),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} <= ${t})`,$(e)),"le"),Nt= exports.gt =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>oe(x(_(e,t)),x(E(e,t))),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} > ${t})`,$(e)),"gt"),Lt= exports.ge =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>x(_(e,t)),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} >= ${t})`,$(e)),"ge"),x= exports.not =_chunk2VTISQYPcjs.ya.call(void 0, e=>_chunk2VTISQYPcjs.mc.neg[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`!(${e})`,e.dataType),"not"),W= exports.or =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>_chunk2VTISQYPcjs.mc.or[e.kind](e,t),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} | ${t})`,e.dataType),"or"),oe= exports.and =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>x(W(x(e),x(t))),(e,t)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(${e} & ${t})`,e.dataType),"and"),Z= exports.all =_chunk2VTISQYPcjs.ya.call(void 
0, e=>_chunk2VTISQYPcjs.mc.all[e.kind](e),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`all(${e})`,_chunk2VTISQYPcjs.Ba),"all"),qt= exports.any =_chunk2VTISQYPcjs.ya.call(void 0, e=>!Z(x(e)),e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`any(${e})`,_chunk2VTISQYPcjs.Ba),"any"),_t= exports.isCloseTo =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a=.01)=>typeof e=="number"&&typeof t=="number"?Math.abs(e-t)<a:_chunk2VTISQYPcjs.A.call(void 0, e)&&_chunk2VTISQYPcjs.A.call(void 0, t)?_chunk2VTISQYPcjs.mc.isCloseToZero[e.kind](_chunk2VTISQYPcjs.oc.call(void 0, e,t),a):!1,(e,t,a=_chunk2VTISQYPcjs.wa.call(void 0, .01,_chunk2VTISQYPcjs.Fa))=>_chunk2VTISQYPcjs.va.call(void 0, e)&&_chunk2VTISQYPcjs.va.call(void 0, t)?_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`(abs(f32(${e}) - f32(${t})) <= ${a})`,_chunk2VTISQYPcjs.Ba):!_chunk2VTISQYPcjs.va.call(void 0, e)&&!_chunk2VTISQYPcjs.va.call(void 0, t)?_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`all(abs(${e} - ${t}) <= (${e} - ${e}) + ${a})`,_chunk2VTISQYPcjs.Ba):_chunk2VTISQYPcjs.wa.call(void 0, "false",_chunk2VTISQYPcjs.Ba),"isCloseTo"),Rt= exports.select =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>typeof a=="boolean"?a?t:e:_chunk2VTISQYPcjs.mc.select[e.kind](e,t,a),(e,t,a)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`select(${e}, ${t}, ${a})`,e.dataType),"select");var Wt=_chunk2VTISQYPcjs.ya.call(void 0, ()=>console.warn("workgroupBarrier is a no-op outside of CODEGEN mode."),()=>_chunk2VTISQYPcjs.wa.call(void 0, "workgroupBarrier()",_chunk2VTISQYPcjs.z),"workgroupBarrier"),Zt= exports.storageBarrier =_chunk2VTISQYPcjs.ya.call(void 0, ()=>console.warn("storageBarrier is a no-op outside of CODEGEN mode."),()=>_chunk2VTISQYPcjs.wa.call(void 0, "storageBarrier()",_chunk2VTISQYPcjs.z),"storageBarrier"),zt= exports.textureBarrier =_chunk2VTISQYPcjs.ya.call(void 0, ()=>console.warn("textureBarrier is a no-op outside of CODEGEN mode."),()=>_chunk2VTISQYPcjs.wa.call(void 0, "textureBarrier()",_chunk2VTISQYPcjs.z),"textureBarrier"),Xt= exports.atomicLoad =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},e=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicLoad(&${e})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicLoad"),Jt= exports.atomicStore =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(!_chunk2VTISQYPcjs.I.call(void 0, e.dataType)||e.dataType.type!=="atomic")throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`);return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicStore(&${e}, ${t})`,_chunk2VTISQYPcjs.z)},"atomicStore"),w=(e,t)=>e.dataType.type==="atomic"&&e.dataType.inner.type==="i32"?[e.dataType,_chunk2VTISQYPcjs.Ea]:[e.dataType,_chunk2VTISQYPcjs.Ca],jt= exports.atomicAdd =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicAdd(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicAdd",w),Yt= exports.atomicSub =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are 
not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicSub(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicSub",w),Ht= exports.atomicMax =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicMax(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicMax",w),Kt= exports.atomicMin =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicMin(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicMin",w),Qt= exports.atomicAnd =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicAnd(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicAnd",w),en= exports.atomicOr =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicOr(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicOr",w),tn= exports.atomicXor =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("Atomic operations are not supported outside of CODEGEN mode.")},(e,t)=>{if(_chunk2VTISQYPcjs.I.call(void 0, e.dataType)&&e.dataType.type==="atomic")return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`atomicXor(&${e}, ${t})`,e.dataType.inner);throw new Error(`Invalid atomic type: ${JSON.stringify(e.dataType,null,2)}`)},"atomicXor",w);var nn=_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dpdx(${e})`,e.dataType),"dpdx"),rn= exports.dpdxCoarse =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dpdxCoarse(${e})`,e.dataType),"dpdxCoarse"),on= exports.dpdxFine =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dpdxFine(${e})`,e.dataType),"dpdxFine"),an= exports.dpdy =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dpdy(${e})`,e.dataType),"dpdy"),sn= exports.dpdyCoarse =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, 
_chunk2VTISQYPcjs.xa`dpdyCoarse(${e})`,e.dataType),"dpdyCoarse"),pn= exports.dpdyFine =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`dpdyFine(${e})`,e.dataType),"dpdyFine"),cn= exports.fwidth =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`fwidth(${e})`,e.dataType),"fwidth"),mn= exports.fwidthCoarse =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`fwidthCoarse(${e})`,e.dataType),"fwidthCoarse"),Tn= exports.fwidthFine =_chunk2VTISQYPcjs.ya.call(void 0, e=>{throw new Error("Derivative builtins are not allowed on the cpu")},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`fwidthFine(${e})`,e.dataType),"fwidthFine");var un=_chunk2VTISQYPcjs.ya.call(void 0, e=>e.length,e=>_chunk2VTISQYPcjs.L.call(void 0, e.dataType)&&_chunk2VTISQYPcjs.J.call(void 0, e.dataType.inner)&&e.dataType.inner.elementCount>0?_chunk2VTISQYPcjs.wa.call(void 0, String(e.dataType.inner.elementCount),_chunk2VTISQYPcjs.za):_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`arrayLength(${e})`,_chunk2VTISQYPcjs.Ca),"arrayLength",e=>[_chunk2VTISQYPcjs.tc.call(void 0, e.dataType)]);var _typedbinary = require('typed-binary'); var y = _interopRequireWildcard(_typedbinary);var dn=_chunk2VTISQYPcjs.ya.call(void 0, e=>{let t=new ArrayBuffer(4);new y.BufferWriter(t).writeUint32(e);let i=new y.BufferReader(t);return _chunk2VTISQYPcjs.Ia.call(void 0, i.readFloat16(),i.readFloat16())},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`unpack2x16float(${e})`,_chunk2VTISQYPcjs.Ia),"unpack2x16float"),yn= exports.pack2x16float =_chunk2VTISQYPcjs.ya.call(void 0, e=>{let t=new ArrayBuffer(4),a=new y.BufferWriter(t);a.writeFloat16(e.x),a.writeFloat16(e.y);let i=new y.BufferReader(t);return _chunk2VTISQYPcjs.Ca.call(void 0, i.readUint32())},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`pack2x16float(${e})`,_chunk2VTISQYPcjs.Ca),"pack2x16float"),fn= exports.unpack4x8unorm =_chunk2VTISQYPcjs.ya.call(void 0, e=>{let t=new ArrayBuffer(4);new y.BufferWriter(t).writeUint32(e);let i=new y.BufferReader(t);return _chunk2VTISQYPcjs.Sa.call(void 0, i.readUint8()/255,i.readUint8()/255,i.readUint8()/255,i.readUint8()/255)},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`unpack4x8unorm(${e})`,_chunk2VTISQYPcjs.Sa),"unpack4x8unorm"),ln= exports.pack4x8unorm =_chunk2VTISQYPcjs.ya.call(void 0, e=>{let t=new ArrayBuffer(4),a=new y.BufferWriter(t);a.writeUint8(e.x*255),a.writeUint8(e.y*255),a.writeUint8(e.z*255),a.writeUint8(e.w*255);let i=new y.BufferReader(t);return _chunk2VTISQYPcjs.Ca.call(void 0, i.readUint32())},e=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`pack4x8unorm(${e})`,_chunk2VTISQYPcjs.Ca),"pack4x8unorm");var xn=_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a,i,ae)=>{throw new Error("Texture sampling relies on GPU resources and cannot be executed outside of a draw call")},(...e)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`textureSample(${e})`,_chunk2VTISQYPcjs.Sa),"textureSample"),bn= exports.textureSampleLevel =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a,i,ae)=>{throw new Error("Texture sampling relies on GPU resources and cannot be executed outside of a draw call")},(...e)=>_chunk2VTISQYPcjs.wa.call(void 0, 
_chunk2VTISQYPcjs.xa`textureSampleLevel(${e})`,_chunk2VTISQYPcjs.Sa),"textureSampleLevel"),hn={u32:_chunk2VTISQYPcjs.Va,i32:_chunk2VTISQYPcjs.Ua,f32:_chunk2VTISQYPcjs.Sa},wn= exports.textureLoad =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a)=>{throw new Error("`textureLoad` relies on GPU resources and cannot be executed outside of a draw call")},(...e)=>{let a=e[0].dataType;return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`textureLoad(${e})`,"texelDataType"in a?a.texelDataType:hn[a.channelDataType.type])},"textureLoad"),gn= exports.textureStore =_chunk2VTISQYPcjs.ya.call(void 0, (e,t,a,i)=>{throw new Error("`textureStore` relies on GPU resources and cannot be executed outside of a draw call")},(...e)=>_chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`textureStore(${e})`,_chunk2VTISQYPcjs.z),"textureStore"),In= exports.textureDimensions =_chunk2VTISQYPcjs.ya.call(void 0, (e,t)=>{throw new Error("`textureDimensions` relies on GPU resources and cannot be executed outside of a draw call")},(...e)=>{let a=e[0].dataType.dimension;return _chunk2VTISQYPcjs.wa.call(void 0, _chunk2VTISQYPcjs.xa`textureDimensions(${e})`,a==="1d"?_chunk2VTISQYPcjs.Ca:a==="3d"?_chunk2VTISQYPcjs.Qa:_chunk2VTISQYPcjs.La)},"textureDimensions");exports.abs = ye; exports.acos = fe; exports.acosh = le; exports.add = _chunk2VTISQYPcjs.nc; exports.all = Z; exports.allEq = Ot; exports.and = oe; exports.any = qt; exports.arrayLength = un; exports.asin = xe; exports.asinh = be; exports.atan = he; exports.atan2 = ge; exports.atanh = we; exports.atomicAdd = jt; exports.atomicAnd = Qt; exports.atomicLoad = Xt; exports.atomicMax = Ht; exports.atomicMin = Kt; exports.atomicOr = en; exports.atomicStore = Jt; exports.atomicSub = Yt; exports.atomicXor = tn; exports.ceil = Ie; exports.clamp = ve; exports.cos = $e; exports.cosh = Ae; exports.countLeadingZeros = Se; exports.countOneBits = Ve; exports.countTrailingZeros = Fe; exports.cross = Pe; exports.degrees = Ee; exports.determinant = Ue; exports.discard = de; exports.distance = ke; exports.div = _chunk2VTISQYPcjs.qc; exports.dot = ne; exports.dot4I8Packed = Be; exports.dot4U8Packed = De; exports.dpdx = nn; exports.dpdxCoarse = rn; exports.dpdxFine = on; exports.dpdy = an; exports.dpdyCoarse = sn; exports.dpdyFine = pn; exports.eq = E; exports.exp = Ce; exports.exp2 = Oe; exports.extractBits = Me; exports.faceForward = Ge; exports.firstLeadingBit = Ne; exports.firstTrailingBit = Le; exports.floor = qe; exports.fma = _e; exports.fract = Re; exports.frexp = Ze; exports.fwidth = cn; exports.fwidthCoarse = mn; exports.fwidthFine = Tn; exports.ge = Lt; exports.gt = Nt; exports.identity2 = _chunk2VTISQYPcjs.Ya; exports.identity3 = _chunk2VTISQYPcjs.Za; exports.identity4 = _chunk2VTISQYPcjs._a; exports.insertBits = ze; exports.inverseSqrt = Xe; exports.isCloseTo = _t; exports.ldexp = Je; exports.le = Gt; exports.length = re; exports.log = je; exports.log2 = Ye; exports.lt = _; exports.max = He; exports.min = Ke; exports.mix = Qe; exports.mod = _chunk2VTISQYPcjs.rc; exports.modf = tt; exports.mul = _chunk2VTISQYPcjs.pc; exports.ne = Mt; exports.neg = _chunk2VTISQYPcjs.sc; exports.normalize = nt; exports.not = x; exports.or = W; exports.pack2x16float = yn; exports.pack4x8unorm = ln; exports.pow = rt; exports.quantizeToF16 = ot; exports.radians = at; exports.reflect = st; exports.refract = pt; exports.reverseBits = it; exports.rotateX4 = Dt; exports.rotateY4 = Bt; exports.rotateZ4 = Ct; exports.rotationX4 = _chunk2VTISQYPcjs.bb; exports.rotationY4 = _chunk2VTISQYPcjs.cb; 
exports.rotationZ4 = _chunk2VTISQYPcjs.db; exports.round = ct; exports.saturate = mt; exports.scale4 = kt; exports.scaling4 = _chunk2VTISQYPcjs.ab; exports.select = Rt; exports.sign = Tt; exports.sin = ut; exports.sinh = dt; exports.smoothstep = yt; exports.sqrt = ft; exports.step = lt; exports.storageBarrier = Zt; exports.sub = _chunk2VTISQYPcjs.oc; exports.tan = xt; exports.tanh = bt; exports.textureBarrier = zt; exports.textureDimensions = In; exports.textureLoad = wn; exports.textureSample = xn; exports.textureSampleLevel = bn; exports.textureStore = gn; exports.translate4 = Ut; exports.translation4 = _chunk2VTISQYPcjs.$a; exports.transpose = ht; exports.trunc = wt; exports.unpack2x16float = dn; exports.unpack4x8unorm = fn; exports.workgroupBarrier = Wt;
|
2
2
|
//# sourceMappingURL=index.cjs.map
|
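Editor's note on the rebuilt std bundle above: index.cjs now re-exports the full WGSL-style helper surface from one chunk (atomics, derivative builtins such as dpdx/fwidth, textureSample/textureLoad/textureStore, pack/unpack routines, comparison, matrix and barrier helpers). The following is a minimal usage sketch, not taken from the package itself; it assumes the subpath imports resolve as 'typegpu/data' and 'typegpu/std' (matching the package/data and package/std files listed in this diff) and only exercises functions whose CPU implementations are visible in the bundle above and documented in the source maps that follow (pack2x16float, unpack2x16float, isCloseTo).

// Hypothetical CPU-side usage sketch under the assumptions stated above.
import { vec2f, vec3f } from 'typegpu/data';
import { isCloseTo, pack2x16float, unpack2x16float } from 'typegpu/std';

// isCloseTo compares element-wise with a default precision of 0.01,
// per the JSDoc embedded in the source map below.
console.log(isCloseTo(vec3f(0, 0, 0), vec3f(0.002, -0.009, 0))); // true

// pack2x16float / unpack2x16float round-trip a vec2f through a single u32 on the CPU,
// mirroring the WGSL builtins of the same name.
const packed = pack2x16float(vec2f(0.5, 1.5)); // 0.5 and 1.5 are exactly representable in f16
const unpacked = unpack2x16float(packed);
console.log(isCloseTo(unpacked, vec2f(0.5, 1.5))); // true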
package/std/index.cjs.map
CHANGED
@@ -1 +1 @@
|
|
1
|
-
{"version":3,"sources":["/Users/konradreczko/TypeGPU/wigsill/packages/typegpu/dist/std/index.cjs","../../src/std/discard.ts","../../src/std/matrix.ts","../../src/std/boolean.ts","../../src/std/atomic.ts"],"names":["discard","createDualImpl","snip","Void","translate4","matrix","vector","mul","mat4x4f","scale4","rotateX4","angle","rotateY4","rotateZ4","correspondingBooleanVectorSchema","value","vec2b","vec3b","vec4b","allEq","lhs","rhs","all","eq","bool","VectorOps","ne","not","lt","le","or","gt","and","ge","any","isCloseTo","precision","isVecInstance","sub","f32","isSnippetNumeric","select","f","cond","workgroupBarrier","storageBarrier","textureBarrier","atomicLoad","a","isWgslData"],"mappings":"AAAA,oZAA2T,yDAAsR,ICIpkBA,EAAAA,CAAUC,kCAAAA,CAErB,CAAA,EAAa,CACX,MAAM,IAAI,KAAA,CAAM,wCAAwC,CAC1D,CAAA,CAEA,CAAA,CAAA,EAAMC,kCAAAA,UAAK,CAAYC,mBAAI,CAAA,CAC3B,SACF,CAAA,CCAO,IAAMC,EAAAA,CAAaH,kCAAAA,CAEvBI,CAAAA,CAAeC,CAAAA,CAAAA,EACPC,iCAAAA,oBAAIC,CAAQ,WAAA,CAAYF,CAAM,CAAA,CAAGD,CAAM,CAAA,CAGhD,CAACA,CAAAA,CAAQC,CAAAA,CAAAA,EAAAA,CAAY,CACnB,KAAA,CAAO,CAAA,CAAA,EACJE,oBAAAA,CAAQ,WAAA,CAAYF,CAAwB,CAAA,CAC1C,KACL,CAAA,GAAA,EAAMD,CAAAA,CAAO,KAAK,CAAA,CAAA,CAAA,CAClB,QAAA,CAAUA,CAAAA,CAAO,QACnB,CAAA,CAAA,CACA,YACF,CAAA,CAQaI,EAAAA,kBAASR,kCAAAA,CAEnBI,CAAAA,CAAeC,CAAAA,CAAAA,EACPC,iCAAAA,oBAAIC,CAAQ,OAAA,CAAQF,CAAM,CAAA,CAAGD,CAAM,CAAA,CAG5C,CAACA,CAAAA,CAAQC,CAAAA,CAAAA,EAAAA,CAAY,CACnB,KAAA,CAAO,CAAA,CAAA,EACJE,oBAAAA,CAAQ,OAAA,CAAQF,CAAwB,CAAA,CACtC,KACL,CAAA,GAAA,EAAMD,CAAAA,CAAO,KAAK,CAAA,CAAA,CAAA,CAClB,QAAA,CAAUA,CAAAA,CAAO,QACnB,CAAA,CAAA,CACA,QACF,CAAA,CAQaK,EAAAA,oBAAWT,kCAAAA,CAErBI,CAAAA,CAAeM,CAAAA,CAAAA,EACPJ,iCAAAA,oBAAIC,CAAQ,SAAA,CAAUG,CAAK,CAAA,CAAGN,CAAM,CAAA,CAG7C,CAACA,CAAAA,CAAQM,CAAAA,CAAAA,EAAAA,CAAW,CAClB,KAAA,CAAO,CAAA,CAAA,EACJH,oBAAAA,CAAQ,SAAA,CAAUG,CAA0B,CAAA,CAC1C,KACL,CAAA,GAAA,EAAMN,CAAAA,CAAO,KAAK,CAAA,CAAA,CAAA,CAClB,QAAA,CAAUA,CAAAA,CAAO,QACnB,CAAA,CAAA,CACA,UACF,CAAA,CAQaO,EAAAA,oBAAWX,kCAAAA,CAErBI,CAAAA,CAAeM,CAAAA,CAAAA,EACPJ,iCAAAA,oBAAIC,CAAQ,SAAA,CAAUG,CAAK,CAAA,CAAGN,CAAM,CAAA,CAG7C,CAACA,CAAAA,CAAQM,CAAAA,CAAAA,EAAAA,CAAW,CAClB,KAAA,CAAO,CAAA,CAAA,EACJH,oBAAAA,CAAQ,SAAA,CAAUG,CAA0B,CAAA,CAC1C,KACL,CAAA,GAAA,EAAMN,CAAAA,CAAO,KAAK,CAAA,CAAA,CAAA,CAClB,QAAA,CAAUA,CAAAA,CAAO,QACnB,CAAA,CAAA,CACA,UACF,CAAA,CAQaQ,EAAAA,oBAAWZ,kCAAAA,CAErBI,CAAAA,CAAeM,CAAAA,CAAAA,EACPJ,iCAAAA,oBAAIC,CAAQ,SAAA,CAAUG,CAAK,CAAA,CAAGN,CAAM,CAAA,CAG7C,CAACA,CAAAA,CAAQM,CAAAA,CAAAA,EAAAA,CAAW,CAClB,KAAA,CAAO,CAAA,CAAA,EACJH,oBAAAA,CAAQ,SAAA,CAAUG,CAA0B,CAAA,CAC1C,KACL,CAAA,GAAA,EAAMN,CAAAA,CAAO,KAAK,CAAA,CAAA,CAAA,CAClB,QAAA,CAAUA,CAAAA,CAAO,QACnB,CAAA,CAAA,CACA,UACF,CAAA,CC/FA,SAASS,CAAAA,CAAiCC,CAAAA,CAAgB,CACxD,OAAIA,CAAAA,CAAM,QAAA,CAAS,IAAA,CAAK,QAAA,CAAS,GAAG,CAAA,CAC3BC,oBAAAA,CAELD,CAAAA,CAAM,QAAA,CAAS,IAAA,CAAK,QAAA,CAAS,GAAG,CAAA,CAC3BE,oBAAAA,CAEFC,oBACT,CAWO,IAAMC,EAAAA,CAAQlB,kCAAAA,CAEQmB,CAAAA,CAAQC,CAAAA,CAAAA,EAAWC,CAAAA,CAAIC,CAAAA,CAAGH,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAE9D,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQnB,kCAAAA,CAAK,IAAA,EAAOkB,CAAAA,CAAI,KAAK,CAAA,IAAA,EAAOC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAAKG,oBAAI,CAAA,CAC5D,OACF,CAAA,CAWaD,CAAAA,cAAKtB,kCAAAA,CAEWmB,CAAAA,CAAQC,CAAAA,CAAAA,EACjCI,mBAAAA,CAAU,EAAA,CAAGL,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAEjC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJnB,kCAAAA,CACE,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,IAAA,EAAOC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAC7BP,CAAAA,CAAiCM,CAAG,CACtC,CAAA,CACF,IACF,CAAA,CAUaM,EAAAA,cAAKzB,kCAAAA,CAEWmB,CAAAA,CAAQC,CAAAA,CAAAA,EAAWM,CAAAA,CAAIJ,CAAAA,CAAGH,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAE9D,CAACD,CAAAA,CAA
KC,CAAAA,CAAAA,EACJnB,kCAAAA,CACE,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,IAAA,EAAOC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAC7BP,CAAAA,CAAiCM,CAAG,CACtC,CAAA,CACF,IACF,CAAA,CAUaQ,CAAAA,cAAK3B,kCAAAA,CAEkBmB,CAAAA,CAAQC,CAAAA,CAAAA,EACxCI,mBAAAA,CAAU,EAAA,CAAGL,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAEjC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJnB,kCAAAA,CACE,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,GAAA,EAAMC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAC5BP,CAAAA,CAAiCM,CAAG,CACtC,CAAA,CACF,IACF,CAAA,CAUaS,EAAAA,cAAK5B,kCAAAA,CAEkBmB,CAAAA,CAAQC,CAAAA,CAAAA,EACxCS,CAAAA,CAAGF,CAAAA,CAAGR,CAAAA,CAAKC,CAAG,CAAA,CAAGE,CAAAA,CAAGH,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAE/B,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJnB,kCAAAA,CACE,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,IAAA,EAAOC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAC7BP,CAAAA,CAAiCM,CAAG,CACtC,CAAA,CACF,IACF,CAAA,CAUaW,EAAAA,cAAK9B,kCAAAA,CAEkBmB,CAAAA,CAAQC,CAAAA,CAAAA,EACxCW,CAAAA,CAAIL,CAAAA,CAAIC,CAAAA,CAAGR,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAAGM,CAAAA,CAAIJ,CAAAA,CAAGH,CAAAA,CAAKC,CAAG,CAAC,CAAC,CAAA,CAE1C,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJnB,kCAAAA,CACE,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,GAAA,EAAMC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAC5BP,CAAAA,CAAiCM,CAAG,CACtC,CAAA,CACF,IACF,CAAA,CAUaa,EAAAA,cAAKhC,kCAAAA,CAEkBmB,CAAAA,CAAQC,CAAAA,CAAAA,EAAWM,CAAAA,CAAIC,CAAAA,CAAGR,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAErE,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJnB,kCAAAA,CACE,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,IAAA,EAAOC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAC7BP,CAAAA,CAAiCM,CAAG,CACtC,CAAA,CACF,IACF,CAAA,CAUaO,CAAAA,eAAM1B,kCAAAA,CAEiBc,EAChCU,mBAAAA,CAAU,GAAA,CAAIV,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAEhCA,CAAAA,EAAUb,kCAAAA,CAAK,EAAA,EAAKa,CAAAA,CAAM,KAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACnD,KACF,CAAA,CAQae,CAAAA,cAAK7B,kCAAAA,CAEkBmB,CAAAA,CAAQC,CAAAA,CAAAA,EACxCI,mBAAAA,CAAU,EAAA,CAAGL,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAEjC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQnB,kCAAAA,CAAK,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,GAAA,EAAMC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAI,QAAQ,CAAA,CAChE,IACF,CAAA,CAQaY,CAAAA,eAAM/B,kCAAAA,CAEiBmB,CAAAA,CAAQC,CAAAA,CAAAA,EACxCM,CAAAA,CAAIG,CAAAA,CAAGH,CAAAA,CAAIP,CAAG,CAAA,CAAGO,CAAAA,CAAIN,CAAG,CAAC,CAAC,CAAA,CAE5B,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQnB,kCAAAA,CAAK,CAAA,EAAIkB,CAAAA,CAAI,KAAK,CAAA,GAAA,EAAMC,CAAAA,CAAI,KAAK,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAI,QAAQ,CAAA,CAChE,KACF,CAAA,CAUaE,CAAAA,eAAMrB,kCAAAA,CAEhBc,EAAiCU,mBAAAA,CAAU,GAAA,CAAIV,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAEhEA,CAAAA,EAAUb,kCAAAA,CAAK,IAAA,EAAOa,CAAAA,CAAM,KAAK,CAAA,CAAA,CAAA,CAAKS,oBAAI,CAAA,CAC3C,KACF,CAAA,CAQaU,EAAAA,eAAMjC,kCAAAA,CAEhBc,EAAiC,CAACO,CAAAA,CAAIK,CAAAA,CAAIZ,CAAK,CAAC,CAAA,CAEhDA,CAAAA,EAAUb,kCAAAA,CAAK,IAAA,EAAOa,CAAAA,CAAM,KAAK,CAAA,CAAA,CAAA,CAAKS,oBAAI,CAAA,CAC3C,KACF,CAAA,CAaaW,EAAAA,qBAAYlC,kCAAAA,CAGrBmB,CAAAA,CACAC,CAAAA,CACAe,CAAAA,CAAY,GAAA,CAAA,EAER,OAAOhB,CAAAA,EAAQ,QAAA,EAAY,OAAOC,CAAAA,EAAQ,QAAA,CACrC,IAAA,CAAK,GAAA,CAAID,CAAAA,CAAMC,CAAG,CAAA,CAAIe,CAAAA,CAE3BC,iCAAAA,CAAiB,CAAA,EAAKA,iCAAAA,CAAiB,CAAA,CAClCZ,mBAAAA,CAAU,aAAA,CAAcL,CAAAA,CAAI,IAAI,CAAA,CACrCkB,iCAAAA,CAAIlB,CAAKC,CAAG,CAAA,CACZe,CACF,CAAA,CAEK,CAAA,CAAA,CAGT,CAAChB,CAAAA,CAAKC,CAAAA,CAAKe,CAAAA,CAAYlC,kCAAAA,GAAK,CAAMqC,oBAAG,CAAA,CAAA,EAC/BC,iCAAAA,CAAoB,CAAA,EAAKA,iCAAAA,CAAoB,CAAA,CACxCtC,kCAAAA,CACL,SAAA,EAAYkB,CAAAA,CAAI,KAAK,CAAA,QAAA,EAAWC,CAAAA,CAAI,KAAK,CAAA,MAAA,EAASe,CAAAA,CAAU,KAAK,CAAA,CAAA,CAAA,CACjEZ,oBACF,CAAA,CAEE,CAACgB,iCAAAA,CAAoB,CAAA,EAAK,CAACA,iCAAAA,CAAoB,CAAA,CAC1CtC,kCAAAA,CAGL,QAAA,EAAWkB,CAAAA,CAAI,KAAK,CAAA,GAAA,EAAMC,CAAAA,CAAI,KAAK,CAAA,MAAA,EAASD,CAAAA,CAAI,KAAK,CAAA,GAAA,EAAMA
,CAAAA,CAAI,KAAK,CAAA,IAAA,EAAOgB,CAAAA,CAAU,KAAK,CAAA,CAAA,CAAA,CAC1FZ,oBACF,CAAA,CAEKtB,kCAAAA,OAAK,CAASsB,oBAAI,CAAA,CAE3B,WACF,CAAA,CAsBaiB,EAAAA,kBAAyBxC,kCAAAA,CAGlCyC,CAAAA,CACA,CAAA,CACAC,CAAAA,CAAAA,EAEI,OAAOA,CAAAA,EAAS,SAAA,CACXA,CAAAA,CAAO,CAAA,CAAID,CAAAA,CAEbjB,mBAAAA,CAAU,MAAA,CAAQiB,CAAAA,CAAqB,IAAI,CAAA,CAChDA,CAAAA,CACA,CAAA,CACAC,CACF,CAAA,CAGF,CAACD,CAAAA,CAAG,CAAA,CAAGC,CAAAA,CAAAA,EACLzC,kCAAAA,CAAK,OAAA,EAAUwC,CAAAA,CAAE,KAAK,CAAA,EAAA,EAAK,CAAA,CAAE,KAAK,CAAA,EAAA,EAAKC,CAAAA,CAAK,KAAK,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAE,QAAQ,CAAA,CAClE,QACF,CAAA,CCtUO,IAAME,EAAAA,CAAmB3C,kCAAAA,CAE9B,CAAA,EAAM,OAAA,CAAQ,IAAA,CAAK,kDAAkD,CAAA,CAErE,CAAA,CAAA,EAAMC,kCAAAA,oBAAK,CAAsBC,mBAAI,CAAA,CACrC,kBACF,CAAA,CAEa0C,EAAAA,0BAAiB5C,kCAAAA,CAE5B,CAAA,EAAM,OAAA,CAAQ,IAAA,CAAK,gDAAgD,CAAA,CAEnE,CAAA,CAAA,EAAMC,kCAAAA,kBAAK,CAAoBC,mBAAI,CAAA,CACnC,gBACF,CAAA,CAEa2C,EAAAA,0BAAiB7C,kCAAAA,CAE5B,CAAA,EAAM,OAAA,CAAQ,IAAA,CAAK,gDAAgD,CAAA,CAEnE,CAAA,CAAA,EAAMC,kCAAAA,kBAAK,CAAoBC,mBAAI,CAAA,CACnC,gBACF,CAAA,CAEa4C,EAAAA,sBAAa9C,kCAAAA,CAEF+C,EAAiB,CACrC,MAAM,IAAI,KAAA,CAAM,0DAA0D,CAC5E,CAAA,CAECA,CAAAA,EAAM,CACL,EAAA,CAAIC,iCAAAA,CAAWD,CAAE,QAAQ,CAAA,EAAKA,CAAAA,CAAE,QAAA,CAAS,IAAA,GAAS,QAAA,CAChD,OAAO9C,kCAAAA,CAAK,YAAA,EAAe8C,CAAAA,CAAE,KAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAE,QAAA,CAAS,KAAK,CAAA,CAEzD,MAAM,IAAI,KAAA,CACR,CAAA,qBAAA,EAAwB,IAAA,CAAK,SAAA,CAAUA,CAAAA,CAAE,QAAA,CAAU,IAAA,CAAM,CAAC,CAAC,CAAA,CAAA","file":"/Users/konradreczko/TypeGPU/wigsill/packages/typegpu/dist/std/index.cjs","sourcesContent":[null,"import { snip } from '../data/dataTypes.ts';\nimport { Void } from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../shared/generators.ts';\n\nexport const discard = createDualImpl(\n // CPU\n (): never => {\n throw new Error('discard() can only be used on the GPU.');\n },\n // GPU\n () => snip('discard;', Void),\n 'discard',\n);\n","import type { Snippet } from '../data/dataTypes.ts';\nimport { mat4x4f } from '../data/matrix.ts';\nimport type { m4x4f, v3f } from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../shared/generators.ts';\nimport { mul } from './numeric.ts';\n\n/**\n * Translates the given 4-by-4 matrix by the given vector.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {v3f} vector - The vector by which to translate the matrix.\n * @returns {m4x4f} The translated matrix.\n */\nexport const translate4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, vector: v3f) => {\n return mul(mat4x4f.translation(vector), matrix);\n },\n // GPU implementation\n (matrix, vector) => ({\n value: `(${\n (mat4x4f.translation(vector as unknown as v3f) as unknown as Snippet)\n .value\n } * ${matrix.value})`,\n dataType: matrix.dataType,\n }),\n 'translate4',\n);\n\n/**\n * Scales the given 4-by-4 matrix in each dimension by an amount given by the corresponding entry in the given vector.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {v3f} vector - A vector of three entries specifying the factor by which to scale in each dimension.\n * @returns {m4x4f} The scaled matrix.\n */\nexport const scale4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, vector: v3f) => {\n return mul(mat4x4f.scaling(vector), matrix);\n },\n // GPU implementation\n (matrix, vector) => ({\n value: `(${\n (mat4x4f.scaling(vector as unknown as v3f) as unknown as Snippet)\n .value\n } * ${matrix.value})`,\n dataType: matrix.dataType,\n }),\n 'scale4',\n);\n\n/**\n * Rotates the given 4-by-4 matrix around the x-axis by the given angle.\n * @param 
{m4x4f} matrix - The matrix to be modified.\n * @param {number} angle - The angle by which to rotate (in radians).\n * @returns {m4x4f} The rotated matrix.\n */\nexport const rotateX4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, angle: number) => {\n return mul(mat4x4f.rotationX(angle), matrix);\n },\n // GPU implementation\n (matrix, angle) => ({\n value: `(${\n (mat4x4f.rotationX(angle as unknown as number) as unknown as Snippet)\n .value\n } * ${matrix.value})`,\n dataType: matrix.dataType,\n }),\n 'rotateX4',\n);\n\n/**\n * Rotates the given 4-by-4 matrix around the y-axis by the given angle.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {number} angle - The angle by which to rotate (in radians).\n * @returns {m4x4f} The rotated matrix.\n */\nexport const rotateY4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, angle: number) => {\n return mul(mat4x4f.rotationY(angle), matrix);\n },\n // GPU implementation\n (matrix, angle) => ({\n value: `(${\n (mat4x4f.rotationY(angle as unknown as number) as unknown as Snippet)\n .value\n } * ${matrix.value})`,\n dataType: matrix.dataType,\n }),\n 'rotateY4',\n);\n\n/**\n * Rotates the given 4-by-4 matrix around the z-axis by the given angle.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {number} angle - The angle by which to rotate (in radians).\n * @returns {m4x4f} The rotated matrix.\n */\nexport const rotateZ4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, angle: number) => {\n return mul(mat4x4f.rotationZ(angle), matrix);\n },\n // GPU implementation\n (matrix, angle) => ({\n value: `(${\n (mat4x4f.rotationZ(angle as unknown as number) as unknown as Snippet)\n .value\n } * ${matrix.value})`,\n dataType: matrix.dataType,\n }),\n 'rotateZ4',\n);\n","import { snip, type Snippet } from '../data/dataTypes.ts';\nimport { bool, f32 } from '../data/numeric.ts';\nimport { vec2b, vec3b, vec4b } from '../data/vector.ts';\nimport { VectorOps } from '../data/vectorOps.ts';\nimport {\n type AnyBooleanVecInstance,\n type AnyFloatVecInstance,\n type AnyNumericVecInstance,\n type AnyVec2Instance,\n type AnyVec3Instance,\n type AnyVecInstance,\n isVecInstance,\n type v2b,\n type v3b,\n type v4b,\n} from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../shared/generators.ts';\nimport { isSnippetNumeric, sub } from './numeric.ts';\n\nfunction correspondingBooleanVectorSchema(value: Snippet) {\n if (value.dataType.type.includes('2')) {\n return vec2b;\n }\n if (value.dataType.type.includes('3')) {\n return vec3b;\n }\n return vec4b;\n}\n\n// comparison\n\n/**\n * Checks whether `lhs == rhs` on all components.\n * Equivalent to `all(eq(lhs, rhs))`.\n * @example\n * allEq(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns false\n * allEq(vec3u(0, 1, 2), vec3u(0, 1, 2)) // returns true\n */\nexport const allEq = createDualImpl(\n // CPU implementation\n <T extends AnyVecInstance>(lhs: T, rhs: T) => all(eq(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) => snip(`all(${lhs.value} == ${rhs.value})`, bool),\n 'allEq',\n);\n\n/**\n * Checks **component-wise** whether `lhs == rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`, or use `allEq`.\n * @example\n * eq(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns vec2b(true, false)\n * eq(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, true, false)\n * all(eq(vec4i(4, 3, 2, 1), vec4i(4, 3, 2, 1))) // returns true\n * allEq(vec4i(4, 3, 2, 1), vec4i(4, 3, 2, 1)) // returns true\n */\nexport const 
eq = createDualImpl(\n // CPU implementation\n <T extends AnyVecInstance>(lhs: T, rhs: T) =>\n VectorOps.eq[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) =>\n snip(\n `(${lhs.value} == ${rhs.value})`,\n correspondingBooleanVectorSchema(lhs),\n ),\n 'eq',\n);\n\n/**\n * Checks **component-wise** whether `lhs != rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `any`.\n * @example\n * ne(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns vec2b(false, true)\n * ne(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, false, true)\n * any(ne(vec4i(4, 3, 2, 1), vec4i(4, 2, 2, 1))) // returns true\n */\nexport const ne = createDualImpl(\n // CPU implementation\n <T extends AnyVecInstance>(lhs: T, rhs: T) => not(eq(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) =>\n snip(\n `(${lhs.value} != ${rhs.value})`,\n correspondingBooleanVectorSchema(lhs),\n ),\n 'ne',\n);\n\n/**\n * Checks **component-wise** whether `lhs < rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * lt(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(false, true)\n * lt(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, false, false)\n * all(lt(vec4i(1, 2, 3, 4), vec4i(2, 3, 4, 5))) // returns true\n */\nexport const lt = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) =>\n VectorOps.lt[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) =>\n snip(\n `(${lhs.value} < ${rhs.value})`,\n correspondingBooleanVectorSchema(lhs),\n ),\n 'lt',\n);\n\n/**\n * Checks **component-wise** whether `lhs <= rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * le(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(true, true)\n * le(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, true, false)\n * all(le(vec4i(1, 2, 3, 4), vec4i(2, 3, 3, 5))) // returns true\n */\nexport const le = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) =>\n or(lt(lhs, rhs), eq(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) =>\n snip(\n `(${lhs.value} <= ${rhs.value})`,\n correspondingBooleanVectorSchema(lhs),\n ),\n 'le',\n);\n\n/**\n * Checks **component-wise** whether `lhs > rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * gt(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(false, false)\n * gt(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, false, true)\n * all(gt(vec4i(2, 3, 4, 5), vec4i(1, 2, 3, 4))) // returns true\n */\nexport const gt = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) =>\n and(not(lt(lhs, rhs)), not(eq(lhs, rhs))),\n // GPU implementation\n (lhs, rhs) =>\n snip(\n `(${lhs.value} > ${rhs.value})`,\n correspondingBooleanVectorSchema(lhs),\n ),\n 'gt',\n);\n\n/**\n * Checks **component-wise** whether `lhs >= rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * ge(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(true, false)\n * ge(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, true, true)\n * all(ge(vec4i(2, 2, 4, 5), vec4i(1, 2, 3, 4))) // returns true\n */\nexport const ge = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) => not(lt(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) =>\n snip(\n `(${lhs.value} >= ${rhs.value})`,\n 
correspondingBooleanVectorSchema(lhs),\n ),\n 'ge',\n);\n\n// logical ops\n\n/**\n * Returns **component-wise** `!value`.\n * @example\n * not(vec2b(false, true)) // returns vec2b(true, false)\n * not(vec3b(true, true, false)) // returns vec3b(false, false, true)\n */\nexport const not = createDualImpl(\n // CPU implementation\n <T extends AnyBooleanVecInstance>(value: T): T =>\n VectorOps.neg[value.kind](value),\n // GPU implementation\n (value) => snip(`!(${value.value})`, value.dataType),\n 'not',\n);\n\n/**\n * Returns **component-wise** logical `or` result.\n * @example\n * or(vec2b(false, true), vec2b(false, false)) // returns vec2b(false, true)\n * or(vec3b(true, true, false), vec3b(false, true, false)) // returns vec3b(true, true, false)\n */\nexport const or = createDualImpl(\n // CPU implementation\n <T extends AnyBooleanVecInstance>(lhs: T, rhs: T) =>\n VectorOps.or[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) => snip(`(${lhs.value} | ${rhs.value})`, lhs.dataType),\n 'or',\n);\n\n/**\n * Returns **component-wise** logical `and` result.\n * @example\n * and(vec2b(false, true), vec2b(true, true)) // returns vec2b(false, true)\n * and(vec3b(true, true, false), vec3b(false, true, false)) // returns vec3b(false, true, false)\n */\nexport const and = createDualImpl(\n // CPU implementation\n <T extends AnyBooleanVecInstance>(lhs: T, rhs: T) =>\n not(or(not(lhs), not(rhs))),\n // GPU implementation\n (lhs, rhs) => snip(`(${lhs.value} & ${rhs.value})`, lhs.dataType),\n 'and',\n);\n\n// logical aggregation\n\n/**\n * Returns `true` if each component of `value` is true.\n * @example\n * all(vec2b(false, true)) // returns false\n * all(vec3b(true, true, true)) // returns true\n */\nexport const all = createDualImpl(\n // CPU implementation\n (value: AnyBooleanVecInstance) => VectorOps.all[value.kind](value),\n // GPU implementation\n (value) => snip(`all(${value.value})`, bool),\n 'all',\n);\n\n/**\n * Returns `true` if any component of `value` is true.\n * @example\n * any(vec2b(false, true)) // returns true\n * any(vec3b(false, false, false)) // returns false\n */\nexport const any = createDualImpl(\n // CPU implementation\n (value: AnyBooleanVecInstance) => !all(not(value)),\n // GPU implementation\n (value) => snip(`any(${value.value})`, bool),\n 'any',\n);\n\n// other\n\n/**\n * Checks whether the given elements differ by at most the `precision` value.\n * Checks all elements of `lhs` and `rhs` if arguments are vectors.\n * @example\n * isCloseTo(0, 0.1) // returns false\n * isCloseTo(vec3f(0, 0, 0), vec3f(0.002, -0.009, 0)) // returns true\n *\n * @param {number} precision argument that specifies the maximum allowed difference, 0.01 by default.\n */\nexport const isCloseTo = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(\n lhs: T,\n rhs: T,\n precision = 0.01,\n ) => {\n if (typeof lhs === 'number' && typeof rhs === 'number') {\n return Math.abs(lhs - rhs) < precision;\n }\n if (isVecInstance(lhs) && isVecInstance(rhs)) {\n return VectorOps.isCloseToZero[lhs.kind](\n sub(lhs, rhs),\n precision,\n );\n }\n return false;\n },\n // GPU implementation\n (lhs, rhs, precision = snip(0.01, f32)) => {\n if (isSnippetNumeric(lhs) && isSnippetNumeric(rhs)) {\n return snip(\n `(abs(f32(${lhs.value}) - f32(${rhs.value})) <= ${precision.value})`,\n bool,\n );\n }\n if (!isSnippetNumeric(lhs) && !isSnippetNumeric(rhs)) {\n return snip(\n // 
https://www.w3.org/TR/WGSL/#vector-multi-component:~:text=Binary%20arithmetic%20expressions%20with%20mixed%20scalar%20and%20vector%20operands\n // (a-a)+prec creates a vector of a.length elements, all equal to prec\n `all(abs(${lhs.value} - ${rhs.value}) <= (${lhs.value} - ${lhs.value}) + ${precision.value})`,\n bool,\n );\n }\n return snip('false', bool);\n },\n 'isCloseTo',\n);\n\nexport type SelectOverload = {\n <T extends number | boolean | AnyVecInstance>(f: T, t: T, cond: boolean): T;\n <T extends AnyVecInstance>(\n f: T,\n t: T,\n cond: T extends AnyVec2Instance ? v2b\n : T extends AnyVec3Instance ? v3b\n : v4b,\n ): T;\n};\n\n/**\n * Returns `t` if `cond` is `true`, and `f` otherwise.\n * Component-wise if `cond` is a vector.\n * @example\n * select(1, 2, false) // returns 1\n * select(1, 2, true) // returns 2\n * select(vec2i(1, 2), vec2i(3, 4), true) // returns vec2i(3, 4)\n * select(vec2i(1, 2), vec2i(3, 4), vec2b(false, true)) // returns vec2i(1, 4)\n */\nexport const select: SelectOverload = createDualImpl(\n // CPU implementation\n <T extends number | boolean | AnyVecInstance>(\n f: T,\n t: T,\n cond: AnyBooleanVecInstance | boolean,\n ) => {\n if (typeof cond === 'boolean') {\n return cond ? t : f;\n }\n return VectorOps.select[(f as AnyVecInstance).kind](\n f as AnyVecInstance,\n t as AnyVecInstance,\n cond,\n );\n },\n // GPU implementation\n (f, t, cond) =>\n snip(`select(${f.value}, ${t.value}, ${cond.value})`, f.dataType),\n 'select',\n);\n","import { snip, type Snippet } from '../data/dataTypes.ts';\nimport { i32, u32 } from '../data/numeric.ts';\nimport {\n type AnyWgslData,\n type atomicI32,\n type atomicU32,\n isWgslData,\n Void,\n} from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../shared/generators.ts';\ntype AnyAtomic = atomicI32 | atomicU32;\n\nexport const workgroupBarrier = createDualImpl(\n // CPU implementation\n () => console.warn('workgroupBarrier is a no-op outside of GPU mode.'),\n // GPU implementation\n () => snip('workgroupBarrier()', Void),\n 'workgroupBarrier',\n);\n\nexport const storageBarrier = createDualImpl(\n // CPU implementation\n () => console.warn('storageBarrier is a no-op outside of GPU mode.'),\n // GPU implementation\n () => snip('storageBarrier()', Void),\n 'storageBarrier',\n);\n\nexport const textureBarrier = createDualImpl(\n // CPU implementation\n () => console.warn('textureBarrier is a no-op outside of GPU mode.'),\n // GPU implementation\n () => snip('textureBarrier()', Void),\n 'textureBarrier',\n);\n\nexport const atomicLoad = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicLoad(&${a.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicLoad',\n);\n\nexport const atomicStore = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): void => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (!isWgslData(a.dataType) || a.dataType.type !== 'atomic') {\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n }\n return snip(`atomicStore(&${a.value}, ${value.value})`, Void);\n },\n 'atomicStore',\n);\n\nconst atomicTypeFn = (a: Snippet, _value: Snippet): 
AnyWgslData[] => {\n if (a.dataType.type === 'atomic' && a.dataType.inner.type === 'i32') {\n return [a.dataType, i32];\n }\n return [a.dataType as AnyWgslData, u32];\n};\n\nexport const atomicAdd = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicAdd(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicAdd',\n atomicTypeFn,\n);\n\nexport const atomicSub = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicSub(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicSub',\n atomicTypeFn,\n);\n\nexport const atomicMax = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicMax(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicMax',\n atomicTypeFn,\n);\n\nexport const atomicMin = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicMin(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicMin',\n atomicTypeFn,\n);\n\nexport const atomicAnd = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicAnd(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicAnd',\n atomicTypeFn,\n);\n\nexport const atomicOr = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicOr(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicOr',\n atomicTypeFn,\n);\n\nexport const atomicXor = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error('Atomic operations are not supported outside of GPU mode.');\n },\n // GPU implementation\n (a, 
value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(`atomicXor(&${a.value}, ${value.value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicXor',\n atomicTypeFn,\n);\n"]}
|
1
|
+
{"version":3,"sources":["/Users/iwo/Projects/wigsill/packages/typegpu/dist/std/index.cjs","../../src/std/discard.ts","../../src/std/numeric.ts","../../src/std/matrix.ts","../../src/std/boolean.ts","../../src/std/atomic.ts"],"names":["discard","createDualImpl","snip","Void","abs","value","VectorOps","stitch","acos","acosh","asin","asinh","atan","atanh","atan2","y","x","ceil","clamp","low","high","cos","cosh","countLeadingZeros","countOneBits","countTrailingZeros","cross","a","b","degrees","determinant","f32","distance","length","sub","f16","dot","lhs","rhs","dot4U8Packed","e1","e2","u32","dot4I8Packed","i32","exp","exp2","extractBits","offset","count","faceForward","e3","firstLeadingBit","firstTrailingBit","floor","fma","fract","FrexpResults","abstruct","abstractFloat","abstractInt","vec2f","vec2i","vec3f","vec3i","vec4f","vec4i","vec2h","vec3h","vec4h","frexp","returnType","insertBits","newbits","inverseSqrt","ldexp","_","log","log2","max","min","mix","ModfResult","modf","normalize","v","pow","base","exponent","quantizeToF16","radians","reflect","mul","refract","reverseBits","round","saturate","sign","sin","sinh","smoothstep","edge0","edge1","smoothstepScalar","sqrt","step","edge","tan","tanh","transpose","trunc","cpuMul","$internal","cpuTranslation4","translation4","gpuTranslation4","cpuScaling4","scaling4","gpuScaling4","cpuRotationX4","rotationX4","gpuRotationX4","cpuRotationY4","rotationY4","gpuRotationY4","cpuRotationZ4","rotationZ4","gpuRotationZ4","translate4","matrix","vector","scale4","rotateX4","angle","rotateY4","rotateZ4","correspondingBooleanVectorSchema","vec2b","vec3b","vec4b","allEq","all","eq","bool","ne","not","lt","le","or","gt","and","ge","any","isCloseTo","precision","isVecInstance","isSnippetNumeric","select","f","cond","workgroupBarrier","storageBarrier","textureBarrier","atomicLoad","isWgslData"],"mappings":"AAAA,oZAAqb,ICIxaA,EAAAA,CAAUC,kCAAAA,CAErB,CAAA,EAAa,CACX,MAAM,IAAI,KAAA,CACR,iFACF,CACF,CAAA,CAEA,CAAA,CAAA,EAAMC,kCAAAA,UAAK,CAAYC,mBAAI,CAAA,CAC3B,SACF,CAAA,CC8BO,IAAMC,EAAAA,CAAMH,kCAAAA,CAEWI,EACtB,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEhBC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGvCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACrD,KACF,CAAA,CAEaG,EAAAA,gBAAOP,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAMaI,EAAAA,iBAAQR,kCAAAA,CAEsBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,KAAA,CAAMA,CAAK,CAAA,CAElBC,oBAAAA,CAAU,KAAA,CAAMD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGzCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACvD,OACF,CAAA,CAMaK,EAAAA,gBAAOT,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAMaM,EAAAA,iBAAQV,kCAAAA,CAEsBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,KAAA,CAAMA,CAAK,CAAA,CAElBC,oBAAAA,CAAU,KAAA,CAAMD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGzCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACvD,OACF,CAAA,CAMaO,EAAAA,gBAAOX,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAA
M,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAMaQ,EAAAA,iBAAQZ,kCAAAA,CAEsBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,KAAA,CAAMA,CAAK,CAAA,CAElBC,oBAAAA,CAAU,KAAA,CAAMD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGzCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACvD,OACF,CAAA,CAEaS,EAAAA,iBAAQb,kCAAAA,CAEsBc,CAAAA,CAAMC,CAAAA,CAAAA,EACzC,OAAOD,CAAAA,EAAM,QAAA,EAAY,OAAOC,CAAAA,EAAM,QAAA,CACjC,IAAA,CAAK,KAAA,CAAMD,CAAAA,CAAGC,CAAC,CAAA,CAEjBV,oBAAAA,CAAU,KAAA,CAAOS,CAAAA,CAA0B,IAAI,CAAA,CACpDA,CAAAA,CACAC,CACF,CAAA,CAGF,CAACD,CAAAA,CAAGC,CAAAA,CAAAA,EAAMd,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeQ,CAAC,CAAA,EAAA,EAAKC,CAAC,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAE,QAAQ,CAAA,CACpD,OAAA,CACA,OACF,CAAA,CAMaE,EAAAA,gBAAOhB,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAMaa,EAAAA,iBAAQjB,kCAAAA,CAESI,CAAAA,CAAUc,CAAAA,CAAQC,CAAAA,CAAAA,EACxC,OAAOf,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAI,IAAA,CAAK,GAAA,CAAIc,CAAAA,CAAed,CAAK,CAAA,CAAGe,CAAc,CAAA,CAEzDd,oBAAAA,CAAU,KAAA,CAAMD,CAAAA,CAAM,IAAI,CAAA,CAC/BA,CAAAA,CACAc,CAAAA,CACAC,CACF,CAAA,CAGF,CAACf,CAAAA,CAAOc,CAAAA,CAAKC,CAAAA,CAAAA,EACXlB,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,EAAA,EAAKc,CAAG,CAAA,EAAA,EAAKC,CAAI,CAAA,CAAA,CAAA,CAAKf,CAAAA,CAAM,QAAQ,CAAA,CAC/D,OACF,CAAA,CAMagB,EAAAA,eAAMpB,kCAAAA,CAEwBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEhBC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGvCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACrD,KACF,CAAA,CAMaiB,EAAAA,gBAAOrB,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAEakB,EAAAA,6BAAoBtB,kCAAAA,CAEYI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,4IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,kBAAAA,EAA2BF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACnE,mBACF,CAAA,CAEamB,EAAAA,wBAAevB,kCAAAA,CAEiBI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,uIACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,aAAAA,EAAsBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CAC9D,cACF,CAAA,CAEaoB,EAAAA,8BAAqBxB,kCAAAA,CAEWI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,6IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,mBAAAA,EAA4BF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACpE,oBACF,CAAA,CAMaqB,EAAAA,iBAAQzB,kCAAAA,CAEG0B,CAAAA,CAAMC,CAAAA,CAAAA,EAAYtB,oBAAAA,CAAU,KAAA,CAAMqB,CAAAA,CAAE,IAAI,CAAA,CAAEA,CAAAA,CAAGC,CAAC,CAAA,CAEpE,CAACD,CAAAA,CAAGC,CAAAA,CAAAA,EAAM1B,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeoB,CAAC,CAAA,EAAA,EAAKC,CAAC,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAE,QAAQ,CAAA,CACpD,OACF,CAAA,CAEaE,EAAAA,mBAAU5B,kCAAAA,CAEoBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAASA,CAAAA,CAAQ,GAAA,CAAO,IAAA,CAAK,EAAA,CAE/B,MAAM,IAAI,KAAA,CACR,6IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,QAAAA,EAAiBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACzD,SACF,CAAA,CAEayB,EAAAA,uBAAc7B,kCAAAA,CAExBI,EAAkC,CACjC,MAAM,IAAI,KAAA,CACR,sIACF,CACF,CAAA,CAGCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,YAAAA,EAAqBF,CAAK,CAAA,CAAA,CAAA,CAAK0B,oBAAG,CAAA,CAClD,aACF,CAAA,CAEaC,EAAAA,oBAAW/B,kCAAAA,CAEmB0B,CAAAA,CAAMC,CAAAA,CAAAA,E
ACzC,OAAOD,CAAAA,EAAM,QAAA,EAAY,OAAOC,CAAAA,EAAM,QAAA,CACjC,IAAA,CAAK,GAAA,CAAID,CAAAA,CAAIC,CAAC,CAAA,CAEhBK,EAAAA,CACLC,kCAAAA,CAAIP,CAA0BC,CAAwB,CACxD,CAAA,CAGF,CAACD,CAAAA,CAAGC,CAAAA,CAAAA,EACF1B,kCAAAA,oBACEK,CAAAA,SAAAA,EAAkBoB,CAAC,CAAA,EAAA,EAAKC,CAAC,CAAA,CAAA,CAAA,CACzBD,CAAAA,CAAE,QAAA,CAAS,IAAA,GAAS,KAAA,EAASA,CAAAA,CAAE,QAAA,CAAS,IAAA,CAAK,QAAA,CAAS,GAAG,CAAA,CAAIQ,oBAAAA,CAAMJ,oBACrE,CAAA,CACF,UACF,CAAA,CAMaK,EAAAA,eAAMnC,kCAAAA,CAEEoC,CAAAA,CAAQC,CAAAA,CAAAA,EACzBhC,oBAAAA,CAAU,GAAA,CAAI+B,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAElC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQpC,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAa8B,CAAG,CAAA,EAAA,EAAKC,CAAG,CAAA,CAAA,CAAA,CAAKP,oBAAG,CAAA,CACnD,KACF,CAAA,CAEaQ,EAAAA,wBAAetC,kCAAAA,CAEzBuC,CAAAA,CAAYC,CAAAA,CAAAA,EAAuB,CAClC,MAAM,IAAI,KAAA,CACR,uIACF,CACF,CAAA,CAEA,CAACD,CAAAA,CAAIC,CAAAA,CAAAA,EAAOvC,kCAAAA,oBAAKK,CAAAA,aAAAA,EAAsBiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,CAAA,CAAA,CAAKC,oBAAG,CAAA,CACxD,cAAA,CACA,CAACA,oBAAAA,CAAKA,oBAAG,CACX,CAAA,CAEaC,EAAAA,wBAAe1C,kCAAAA,CAEzBuC,CAAAA,CAAYC,CAAAA,CAAAA,EAAuB,CAClC,MAAM,IAAI,KAAA,CACR,uIACF,CACF,CAAA,CAEA,CAACD,CAAAA,CAAIC,CAAAA,CAAAA,EAAOvC,kCAAAA,oBAAKK,CAAAA,aAAAA,EAAsBiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,CAAA,CAAA,CAAKG,oBAAG,CAAA,CACxD,cAAA,CACA,CAACA,oBAAAA,CAAKA,oBAAG,CACX,CAAA,CAMaC,EAAAA,eAAM5C,kCAAAA,CAEwBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEhBC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGvCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACrD,KACF,CAAA,CAMayC,EAAAA,gBAAO7C,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACX,CAAA,EAAKA,CAAAA,CAERC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAEa0C,EAAAA,uBAAc9C,kCAAAA,CAGvB,CAAA,CACA+C,CAAAA,CACAC,CAAAA,CAAAA,EACM,CACN,MAAM,IAAI,KAAA,CACR,sIACF,CACF,CAAA,CAEA,CAAC,CAAA,CAAGD,CAAAA,CAAQC,CAAAA,CAAAA,EACV/C,kCAAAA,oBAAKK,CAAAA,YAAAA,EAAqB,CAAC,CAAA,EAAA,EAAKyC,CAAM,CAAA,EAAA,EAAKC,CAAK,CAAA,CAAA,CAAA,CAAK,CAAA,CAAE,QAAQ,CAAA,CACjE,aAAA,CACA,CAAC,CAAA,CAAGD,CAAAA,CAAQC,CAAAA,CAAAA,EAAU,CAAC,CAAA,CAAE,QAAA,CAAyBP,oBAAAA,CAAKA,oBAAG,CAC5D,CAAA,CAEaQ,EAAAA,uBAAcjD,kCAAAA,CAEOuC,CAAAA,CAAOC,CAAAA,CAAOU,CAAAA,CAAAA,EAAa,CACzD,MAAM,IAAI,KAAA,CACR,sIACF,CACF,CAAA,CAEA,CAACX,CAAAA,CAAIC,CAAAA,CAAIU,CAAAA,CAAAA,EAAOjD,kCAAAA,oBAAKK,CAAAA,YAAAA,EAAqBiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,EAAA,EAAKU,CAAE,CAAA,CAAA,CAAA,CAAKX,CAAAA,CAAG,QAAQ,CAAA,CAC1E,aACF,CAAA,CAEaY,EAAAA,2BAAkBnD,kCAAAA,CAEcI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,0IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,gBAAAA,EAAyBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACjE,iBACF,CAAA,CAEagD,EAAAA,4BAAmBpD,kCAAAA,CAEaI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,2IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,iBAAAA,EAA0BF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CAClE,kBACF,CAAA,CAMaiD,EAAAA,iBAAQrD,kCAAAA,CAEsBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,KAAA,CAAMA,CAAK,CAAA,CAElBC,oBAAAA,CAAU,KAAA,CAAMD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGzCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACvD,OACF,CAAA,CAEakD,EAAAA,eAAMtD,kCAAAA,CAEwBuC,CAAAA,CAAOC,CAAAA,CAAOU,CAAAA,CAAAA,EAAa,CAClE,EAAA,CAAI,OAAOX,CAAAA,EAAO,QAAA,CAChB,OAAQA,CAAAA,CAAMC,CAAAA,CAAiBU,CAAAA,CAEjC,MAAM,IAAI,KAAA,CACR,yIACF,CACF,CAAA,CAEA,CAACX,CAAAA,CAAIC,CAAAA,CAAIU,CAAAA,CAAAA,EAAOjD,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,EAAA,EAA
KU,CAAE,CAAA,CAAA,CAAA,CAAKX,CAAAA,CAAG,QAAQ,CAAA,CAClE,KACF,CAAA,CAEagB,EAAAA,iBAAQvD,kCAAAA,CAEsB0B,EACnC,OAAOA,CAAAA,EAAM,QAAA,CACPA,CAAAA,CAAI,IAAA,CAAK,KAAA,CAAMA,CAAC,CAAA,CAEnBrB,oBAAAA,CAAU,KAAA,CAAMqB,CAAAA,CAAE,IAAI,CAAA,CAAEA,CAAC,CAAA,CAGjCA,CAAAA,EAAMzB,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeoB,CAAC,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAE,QAAQ,CAAA,CAC3C,OACF,CAAA,CAEM8B,EAAAA,CAAe,CACnB,GAAA,CAAKC,kCAAAA,CAAW,KAAA,CAAO3B,oBAAAA,CAAK,GAAA,CAAKa,oBAAI,CAAC,CAAA,CACtC,GAAA,CAAKc,kCAAAA,CAAW,KAAA,CAAOvB,oBAAAA,CAAK,GAAA,CAAKS,oBAAI,CAAC,CAAA,CACtC,aAAA,CAAec,kCAAAA,CAAW,KAAA,CAAOC,oBAAAA,CAAe,GAAA,CAAKC,oBAAY,CAAC,CAAA,CAClE,KAAA,CAAOF,kCAAAA,CAAW,KAAA,CAAOG,oBAAAA,CAAO,GAAA,CAAKC,oBAAM,CAAC,CAAA,CAC5C,KAAA,CAAOJ,kCAAAA,CAAW,KAAA,CAAOK,oBAAAA,CAAO,GAAA,CAAKC,oBAAM,CAAC,CAAA,CAC5C,KAAA,CAAON,kCAAAA,CAAW,KAAA,CAAOO,oBAAAA,CAAO,GAAA,CAAKC,oBAAM,CAAC,CAAA,CAC5C,KAAA,CAAOR,kCAAAA,CAAW,KAAA,CAAOS,oBAAAA,CAAO,GAAA,CAAKL,oBAAM,CAAC,CAAA,CAC5C,KAAA,CAAOJ,kCAAAA,CAAW,KAAA,CAAOU,oBAAAA,CAAO,GAAA,CAAKJ,oBAAM,CAAC,CAAA,CAC5C,KAAA,CAAON,kCAAAA,CAAW,KAAA,CAAOW,oBAAAA,CAAO,GAAA,CAAKH,oBAAM,CAAC,CAC9C,CAAA,CASaI,EAAAA,iBAAuBrE,kCAAAA,CAEjCI,EAGI,CACH,MAAM,IAAI,KAAA,CACR,gIACF,CACF,CAAA,CAECA,CAAAA,EAAU,CACT,IAAMkE,CAAAA,CACJd,EAAAA,CAAapD,CAAAA,CAAM,QAAA,CAAS,IAAiC,CAAA,CAE/D,EAAA,CAAI,CAACkE,CAAAA,CACH,MAAM,IAAI,KAAA,CACR,CAAA,iCAAA,EAAoClE,CAAAA,CAAM,QAAA,CAAS,IAAI,CAAA,wFAAA,CACzD,CAAA,CAGF,OAAOH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKkE,CAAU,CACjD,CAAA,CACA,OACF,CAAA,CAEaC,EAAAA,sBAAavE,kCAAAA,CAGtB,CAAA,CACAwE,CAAAA,CACAzB,CAAAA,CACAC,CAAAA,CAAAA,EACM,CACN,MAAM,IAAI,KAAA,CACR,qIACF,CACF,CAAA,CAEA,CAAC,CAAA,CAAGwB,CAAAA,CAASzB,CAAAA,CAAQC,CAAAA,CAAAA,EACnB/C,kCAAAA,oBAAKK,CAAAA,WAAAA,EAAoB,CAAC,CAAA,EAAA,EAAKkE,CAAO,CAAA,EAAA,EAAKzB,CAAM,CAAA,EAAA,EAAKC,CAAK,CAAA,CAAA,CAAA,CAAK,CAAA,CAAE,QAAQ,CAAA,CAC5E,YACF,CAAA,CAEayB,EAAAA,uBAAczE,kCAAAA,CAEgBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAAQ,CAAA,CAAI,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAE7B,MAAM,IAAI,KAAA,CACR,iJACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,YAAAA,EAAqBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CAC7D,aACF,CAAA,CAmBasE,EAAAA,iBAAuB1E,kCAAAA,CAGhCuC,CAAAA,CACAC,CAAAA,CAAAA,EACM,CACN,MAAM,IAAI,KAAA,CACR,gIACF,CACF,CAAA,CAEA,CAACD,CAAAA,CAAIC,CAAAA,CAAAA,EAAOvC,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAG,QAAQ,CAAA,CACzD,OAAA,CACA,CAACA,CAAAA,CAAIoC,CAAAA,CAAAA,EAAM,CACT,MAAA,CAAQpC,CAAAA,CAAG,QAAA,CAAS,IAAA,CAAM,CACxB,IAAK,eAAA,CACH,MAAO,CAACmB,oBAAAA,CAAeC,oBAAW,CAAA,CACpC,IAAK,KAAA,CACL,IAAK,KAAA,CACH,MAAO,CAACpB,CAAAA,CAAG,QAAA,CAAUI,oBAAG,CAAA,CAC1B,IAAK,OAAA,CACL,IAAK,OAAA,CACH,MAAO,CAACJ,CAAAA,CAAG,QAAA,CAAUsB,oBAAK,CAAA,CAC5B,IAAK,OAAA,CACL,IAAK,OAAA,CACH,MAAO,CAACtB,CAAAA,CAAG,QAAA,CAAUwB,oBAAK,CAAA,CAC5B,IAAK,OAAA,CACL,IAAK,OAAA,CACH,MAAO,CAACxB,CAAAA,CAAG,QAAA,CAAU0B,oBAAK,CAAA,CAC5B,OAAA,CACE,MAAM,IAAI,KAAA,CACR,CAAA,iCAAA,EAAoC1B,CAAAA,CAAG,QAAA,CAAS,IAAI,CAAA,wFAAA,CACtD,CACJ,CACF,CACF,CAAA,CAMaP,EAAAA,kBAAShC,kCAAAA,CAEqBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEhBC,oBAAAA,CAAU,MAAA,CAAOD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAG1CA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,OAAAA,EAAgBF,CAAK,CAAA,CAAA,CAAA,CAAK0B,oBAAG,CAAA,CAC7C,QACF,CAAA,CAMa8C,EAAAA,eAAM5E,kCAAAA,CAEwBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEhBC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGvCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACrD,KACF,CAAA,CAMayE,EAAAA,gBAAO7E,kCA
AAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAMa0E,EAAAA,eAAM9E,kCAAAA,CAEW0B,CAAAA,CAAMC,CAAAA,CAAAA,EAC5B,OAAOD,CAAAA,EAAM,QAAA,CACR,IAAA,CAAK,GAAA,CAAIA,CAAAA,CAAGC,CAAW,CAAA,CAEzBtB,oBAAAA,CAAU,GAAA,CAAIqB,CAAAA,CAAE,IAAI,CAAA,CAAEA,CAAAA,CAAGC,CAAW,CAAA,CAG7C,CAACD,CAAAA,CAAGC,CAAAA,CAAAA,EAAM1B,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaoB,CAAC,CAAA,EAAA,EAAKC,CAAC,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAE,QAAQ,CAAA,CAClD,KAAA,CACA,OACF,CAAA,CAMaqD,EAAAA,eAAM/E,kCAAAA,CAEW0B,CAAAA,CAAMC,CAAAA,CAAAA,EAC5B,OAAOD,CAAAA,EAAM,QAAA,CACR,IAAA,CAAK,GAAA,CAAIA,CAAAA,CAAGC,CAAW,CAAA,CAEzBtB,oBAAAA,CAAU,GAAA,CAAIqB,CAAAA,CAAE,IAAI,CAAA,CAAEA,CAAAA,CAAGC,CAAW,CAAA,CAG7C,CAACD,CAAAA,CAAGC,CAAAA,CAAAA,EAAM1B,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaoB,CAAC,CAAA,EAAA,EAAKC,CAAC,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAE,QAAQ,CAAA,CAClD,KAAA,CACA,OACF,CAAA,CAQasD,EAAAA,eAAmBhF,kCAAAA,CAEWuC,CAAAA,CAAOC,CAAAA,CAAOU,CAAAA,CAAAA,EAAsB,CAC3E,EAAA,CAAI,OAAOX,CAAAA,EAAO,QAAA,CAAU,CAC1B,EAAA,CAAI,OAAOW,CAAAA,EAAO,QAAA,EAAY,OAAOV,CAAAA,EAAO,QAAA,CAC1C,MAAM,IAAI,KAAA,CACR,gEACF,CAAA,CAEF,OAAQD,CAAAA,CAAAA,CAAM,CAAA,CAAIW,CAAAA,CAAAA,CAAMV,CAAAA,CAAKU,CAC/B,CAEA,EAAA,CAAI,OAAOX,CAAAA,EAAO,QAAA,EAAY,OAAOC,CAAAA,EAAO,QAAA,CAC1C,MAAM,IAAI,KAAA,CAAM,qDAAqD,CAAA,CAGvE,OAAOnC,oBAAAA,CAAU,GAAA,CAAIkC,CAAAA,CAAG,IAAI,CAAA,CAAEA,CAAAA,CAAIC,CAAAA,CAAIU,CAAE,CAC1C,CAAA,CAEA,CAACX,CAAAA,CAAIC,CAAAA,CAAIU,CAAAA,CAAAA,EAAOjD,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,EAAA,EAAKU,CAAE,CAAA,CAAA,CAAA,CAAKX,CAAAA,CAAG,QAAQ,CAAA,CAClE,KACF,CAAA,CAEM0C,EAAAA,CAAa,CACjB,GAAA,CAAKxB,kCAAAA,CAAW,KAAA,CAAO3B,oBAAAA,CAAK,KAAA,CAAOA,oBAAI,CAAC,CAAA,CACxC,GAAA,CAAK2B,kCAAAA,CAAW,KAAA,CAAOvB,oBAAAA,CAAK,KAAA,CAAOA,oBAAI,CAAC,CAAA,CACxC,aAAA,CAAeuB,kCAAAA,CAAW,KAAA,CAAOC,oBAAAA,CAAe,KAAA,CAAOA,oBAAc,CAAC,CAAA,CACtE,KAAA,CAAOD,kCAAAA,CAAW,KAAA,CAAOG,oBAAAA,CAAO,KAAA,CAAOA,oBAAM,CAAC,CAAA,CAC9C,KAAA,CAAOH,kCAAAA,CAAW,KAAA,CAAOK,oBAAAA,CAAO,KAAA,CAAOA,oBAAM,CAAC,CAAA,CAC9C,KAAA,CAAOL,kCAAAA,CAAW,KAAA,CAAOO,oBAAAA,CAAO,KAAA,CAAOA,oBAAM,CAAC,CAAA,CAC9C,KAAA,CAAOP,kCAAAA,CAAW,KAAA,CAAOS,oBAAAA,CAAO,KAAA,CAAOA,oBAAM,CAAC,CAAA,CAC9C,KAAA,CAAOT,kCAAAA,CAAW,KAAA,CAAOU,oBAAAA,CAAO,KAAA,CAAOA,oBAAM,CAAC,CAAA,CAC9C,KAAA,CAAOV,kCAAAA,CAAW,KAAA,CAAOW,oBAAAA,CAAO,KAAA,CAAOA,oBAAM,CAAC,CAChD,CAAA,CASac,EAAAA,gBAAqBlF,kCAAAA,CAESI,EAAa,CACpD,MAAM,IAAI,KAAA,CACR,+HACF,CACF,CAAA,CAECA,CAAAA,EAAU,CACT,IAAMkE,CAAAA,CACJW,EAAAA,CAAW7E,CAAAA,CAAM,QAAA,CAAS,IAA+B,CAAA,CAE3D,EAAA,CAAI,CAACkE,CAAAA,CACH,MAAM,IAAI,KAAA,CACR,CAAA,gCAAA,EAAmClE,CAAAA,CAAM,QAAA,CAAS,IAAI,CAAA,wFAAA,CACxD,CAAA,CAGF,OAAOH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKkE,CAAU,CAChD,CAAA,CACA,MACF,CAAA,CAEaa,EAAAA,qBAAYnF,kCAAAA,CAESoF,EAAY/E,oBAAAA,CAAU,SAAA,CAAU+E,CAAAA,CAAE,IAAI,CAAA,CAAEA,CAAC,CAAA,CAExEA,CAAAA,EAAMnF,kCAAAA,oBAAKK,CAAAA,UAAAA,EAAmB8E,CAAC,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAE,QAAQ,CAAA,CAC/C,WACF,CAAA,CAOaC,EAAAA,eAAmBrF,kCAAAA,CAEWsF,CAAAA,CAASC,CAAAA,CAAAA,EAAmB,CACnE,EAAA,CAAI,OAAOD,CAAAA,EAAS,QAAA,EAAY,OAAOC,CAAAA,EAAa,QAAA,CAClD,OAAQD,CAAAA,EAAQC,CAAAA,CAElB,EAAA,CACE,OAAOD,CAAAA,EAAS,QAAA,EAChB,OAAOC,CAAAA,EAAa,QAAA,EACpB,MAAA,GAAUD,CAAAA,EACV,MAAA,GAAUC,CAAAA,CAEV,OAAOlF,oBAAAA,CAAU,GAAA,CAAIiF,CAAAA,CAAK,IAAI,CAAA,CAAEA,CAAAA,CAAMC,CAAQ,CAAA,CAEhD,MAAM,IAAI,KAAA,CAAM,4BAA4B,CAC9C,CAAA,CAEA,CAACD,CAAAA,CAAMC,CAAAA,CAAAA,EAAatF,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAagF,CAAI,CAAA,EA
AA,EAAKC,CAAQ,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAK,QAAQ,CAAA,CACzE,KACF,CAAA,CAEaE,EAAAA,yBAAgBxF,kCAAAA,CAEgBI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,wIACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,cAAAA,EAAuBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CAC/D,eACF,CAAA,CAEaqF,EAAAA,mBAAUzF,kCAAAA,CAEoBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAASA,CAAAA,CAAQ,IAAA,CAAK,EAAA,CAAM,GAAA,CAE9B,MAAM,IAAI,KAAA,CACR,6IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,QAAAA,EAAiBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACzD,SACF,CAAA,CAEasF,EAAAA,mBAAU1F,kCAAAA,CAEWuC,CAAAA,CAAOC,CAAAA,CAAAA,EACrCP,kCAAAA,CAAIM,CAAIoD,kCAAAA,CAAI,CAAIxD,EAAAA,CAAIK,CAAAA,CAAID,CAAE,CAAA,CAAGC,CAAE,CAAC,CAAA,CAElC,CAACD,CAAAA,CAAIC,CAAAA,CAAAA,EAAOvC,kCAAAA,oBAAKK,CAAAA,QAAAA,EAAiBiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAG,QAAQ,CAAA,CAC3D,SACF,CAAA,CAEaqD,EAAAA,mBAAU5F,kCAAAA,CAEWuC,CAAAA,CAAOC,CAAAA,CAAOU,CAAAA,CAAAA,EAAkB,CAC9D,MAAM,IAAI,KAAA,CACR,kIACF,CACF,CAAA,CAEA,CAACX,CAAAA,CAAIC,CAAAA,CAAIU,CAAAA,CAAAA,EAAOjD,kCAAAA,oBAAKK,CAAAA,QAAAA,EAAiBiC,CAAE,CAAA,EAAA,EAAKC,CAAE,CAAA,EAAA,EAAKU,CAAE,CAAA,CAAA,CAAA,CAAKX,CAAAA,CAAG,QAAQ,CAAA,CACtE,SAAA,CACA,CAACA,CAAAA,CAAIC,CAAAA,CAAIU,CAAAA,CAAAA,EAAO,CACdX,CAAAA,CAAG,QAAA,CACHC,CAAAA,CAAG,QAAA,CACHD,CAAAA,CAAG,QAAA,CAAS,IAAA,GAAS,KAAA,EAASA,CAAAA,CAAG,QAAA,CAAS,IAAA,CAAK,QAAA,CAAS,GAAG,CAAA,CAAIL,oBAAAA,CAAMJ,oBACvE,CACF,CAAA,CAEa+D,EAAAA,uBAAc7F,kCAAAA,CAEkBI,EAAgB,CACzD,MAAM,IAAI,KAAA,CACR,sIACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,YAAAA,EAAqBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CAC7D,aACF,CAAA,CAEa0F,EAAAA,iBAAQ9F,kCAAAA,CAEsBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAAO,IAAA,CAAK,KAAA,CAAMA,CAAK,CAAA,CAEzB,MAAM,IAAI,KAAA,CACR,2IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACvD,OACF,CAAA,CAEa2F,EAAAA,oBAAW/F,kCAAAA,CAEmBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAAO,IAAA,CAAK,GAAA,CAAI,CAAA,CAAG,IAAA,CAAK,GAAA,CAAI,CAAA,CAAGA,CAAK,CAAC,CAAA,CAEvC,MAAM,IAAI,KAAA,CACR,8IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,SAAAA,EAAkBF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CAC1D,UACF,CAAA,CAEa4F,EAAAA,gBAAOhG,kCAAAA,CAGhB,EAEI,OAAO,CAAA,EAAM,QAAA,CACR,IAAA,CAAK,IAAA,CAAK,CAAC,CAAA,CAEbK,oBAAAA,CAAU,IAAA,CAAK,CAAA,CAAE,IAAI,CAAA,CAAE,CAAC,CAAA,CAGhC,CAAA,EAAMJ,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAc,CAAC,CAAA,CAAA,CAAA,CAAK,CAAA,CAAE,QAAQ,CAAA,CAC1C,MACF,CAAA,CAMa2F,EAAAA,eAAMjG,kCAAAA,CAEwBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEhBC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGvCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACrD,KACF,CAAA,CAEa8F,EAAAA,gBAAOlG,kCAAAA,CAEuBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAAO,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAExB,MAAM,IAAI,KAAA,CACR,0IACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAEa+F,EAAAA,sBAAanG,kCAAAA,CAEiBoG,CAAAA,CAAUC,CAAAA,CAAUtF,CAAAA,CAAAA,EACvD,OAAOA,CAAAA,EAAM,QAAA,CACRuF,kCAAAA,CACLF,CACAC,CAAAA,CACAtF,CACF,CAAA,CAEKV,oBAAAA,CAAU,UAAA,CAAWU,CAAAA,CAAE,IAAI,CAAA,CAChCqF,CAAAA,CACAC,CAAAA,CACAtF,CACF,CAAA,CAGF,CAACqF,CAAAA,CAAOC,CAAAA,CAAOtF,CAAAA,CAAAA,EACbd,kCAAAA,oBAAKK,CAAAA,WAAAA,EAAoB8F,CAAK,CAAA,EAAA,EAAKC,CAAK,CAAA,EAAA,EAAKtF,CAAC,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAE,QAAQ,CAAA,CAC/D,YACF,CAAA,CAEawF,EAAAA,gBAAOvG,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,C
AAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAEaoG,EAAAA,gBAAOxG,kCAAAA,CAEuByG,CAAAA,CAAS1F,CAAAA,CAAAA,EAAY,CAC5D,EAAA,CAAI,OAAO0F,CAAAA,EAAS,QAAA,CAClB,OAAQA,CAAAA,EAAS1F,CAAAA,CAAe,CAAA,CAAM,CAAA,CAExC,MAAM,IAAI,KAAA,CACR,0IACF,CACF,CAAA,CAEA,CAAC0F,CAAAA,CAAM1F,CAAAA,CAAAA,EAAMd,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcmG,CAAI,CAAA,EAAA,EAAK1F,CAAC,CAAA,CAAA,CAAA,CAAK0F,CAAAA,CAAK,QAAQ,CAAA,CAC5D,MACF,CAAA,CAEaC,EAAAA,eAAM1G,kCAAAA,CAEwBI,EAAgB,CACvD,EAAA,CAAI,OAAOA,CAAAA,EAAU,QAAA,CACnB,OAAO,IAAA,CAAK,GAAA,CAAIA,CAAK,CAAA,CAEvB,MAAM,IAAI,KAAA,CACR,yIACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACrD,KACF,CAAA,CAEauG,EAAAA,gBAAO3G,kCAAAA,CAEuBI,EACnC,OAAOA,CAAAA,EAAU,QAAA,CACZ,IAAA,CAAK,IAAA,CAAKA,CAAK,CAAA,CAEjBC,oBAAAA,CAAU,IAAA,CAAKD,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAGxCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,KAAAA,EAAcF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACtD,MACF,CAAA,CAEawG,EAAAA,qBAAY5G,kCAAAA,CAEtB,EAAsB,CACrB,MAAM,IAAI,KAAA,CACR,oIACF,CACF,CAAA,CAEC,CAAA,EAAMC,kCAAAA,oBAAKK,CAAAA,UAAAA,EAAmB,CAAC,CAAA,CAAA,CAAA,CAAK,CAAA,CAAE,QAAQ,CAAA,CAC/C,WACF,CAAA,CAEauG,EAAAA,iBAAQ7G,kCAAAA,CAEsBI,EAAgB,CACvD,MAAM,IAAI,KAAA,CACR,gIACF,CACF,CAAA,CAECA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,MAAAA,EAAeF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACvD,OACF,CAAA,CCxiCA,IAAM0G,CAAAA,CAASnB,oBAAAA,CAAIoB,mBAAS,CAAA,CAAE,MAAA,CAExBC,EAAAA,CAAkBC,oBAAAA,CAAaF,mBAAS,CAAA,CAAE,MAAA,CAC1CG,EAAAA,CAAkBD,oBAAAA,CAAaF,mBAAS,CAAA,CAAE,OAAA,CAE1CI,EAAAA,CAAcC,oBAAAA,CAASL,mBAAS,CAAA,CAAE,MAAA,CAClCM,EAAAA,CAAcD,oBAAAA,CAASL,mBAAS,CAAA,CAAE,OAAA,CAElCO,EAAAA,CAAgBC,oBAAAA,CAAWR,mBAAS,CAAA,CAAE,MAAA,CACtCS,EAAAA,CAAgBD,oBAAAA,CAAWR,mBAAS,CAAA,CAAE,OAAA,CAEtCU,EAAAA,CAAgBC,oBAAAA,CAAWX,mBAAS,CAAA,CAAE,MAAA,CACtCY,EAAAA,CAAgBD,oBAAAA,CAAWX,mBAAS,CAAA,CAAE,OAAA,CAEtCa,EAAAA,CAAgBC,oBAAAA,CAAWd,mBAAS,CAAA,CAAE,MAAA,CACtCe,EAAAA,CAAgBD,oBAAAA,CAAWd,mBAAS,CAAA,CAAE,OAAA,CAQ/BgB,EAAAA,sBAAa/H,kCAAAA,CAEvBgI,CAAAA,CAAeC,CAAAA,CAAAA,EAAgBnB,CAAAA,CAAOE,EAAAA,CAAgBiB,CAAM,CAAA,CAAGD,CAAM,CAAA,CAEtE,CAACA,CAAAA,CAAQC,CAAAA,CAAAA,EACPhI,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU4G,EAAAA,CAAgBe,CAAM,CAAC,CAAA,GAAA,EAAMD,CAAM,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAO,QAAQ,CAAA,CACxE,YACF,CAAA,CAQaE,EAAAA,kBAASlI,kCAAAA,CAEnBgI,CAAAA,CAAeC,CAAAA,CAAAA,EAAgBnB,CAAAA,CAAOK,EAAAA,CAAYc,CAAM,CAAA,CAAGD,CAAM,CAAA,CAElE,CAACA,CAAAA,CAAQC,CAAAA,CAAAA,EACPhI,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAW+G,EAAAA,CAAYY,CAAM,CAAE,CAAA,GAAA,EAAMD,CAAM,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAO,QAAQ,CAAA,CACtE,QACF,CAAA,CAQaG,EAAAA,oBAAWnI,kCAAAA,CAErBgI,CAAAA,CAAeI,CAAAA,CAAAA,EAAkBtB,CAAAA,CAAOQ,EAAAA,CAAcc,CAAK,CAAA,CAAGJ,CAAM,CAAA,CAErE,CAACA,CAAAA,CAAQI,CAAAA,CAAAA,EACPnI,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAWkH,EAAAA,CAAcY,CAAK,CAAE,CAAA,GAAA,EAAMJ,CAAM,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAO,QAAQ,CAAA,CACvE,UACF,CAAA,CAQaK,EAAAA,oBAAWrI,kCAAAA,CAErBgI,CAAAA,CAAeI,CAAAA,CAAAA,EAAkBtB,CAAAA,CAAOW,EAAAA,CAAcW,CAAK,CAAA,CAAGJ,CAAM,CAAA,CAErE,CAACA,CAAAA,CAAQI,CAAAA,CAAAA,EACPnI,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAWqH,EAAAA,CAAcS,CAAK,CAAE,CAAA,GAAA,EAAMJ,CAAM,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAO,QAAQ,CAAA,CACvE,UACF,CAAA,CAQaM,EAAAA,oBAAWtI,kCAAAA,CAErBgI,CAAAA,CAAeI,CAAAA,CAAAA,EAAkBtB,CAAAA,CAAOc,EAAAA,CAAcQ,CAAK,CAAA,CAAGJ,CAAM,CAAA,CAErE,CAACA,CAAAA,CAAQI,CAAAA,CAAAA,EACPnI,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAWwH,EAAAA,CAAcM,CAAK,CAAE,CAAA,GAAA,EAAMJ,CAAM,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAO,QAAQ,CAAA,CACvE
,UACF,CAAA,CCpFA,SAASO,CAAAA,CAAiCnI,CAAAA,CAAgB,CACxD,OAAIA,CAAAA,CAAM,QAAA,CAAS,IAAA,CAAK,QAAA,CAAS,GAAG,CAAA,CAC3BoI,oBAAAA,CAELpI,CAAAA,CAAM,QAAA,CAAS,IAAA,CAAK,QAAA,CAAS,GAAG,CAAA,CAC3BqI,oBAAAA,CAEFC,oBACT,CAWO,IAAMC,EAAAA,CAAQ3I,kCAAAA,CAEQoC,CAAAA,CAAQC,CAAAA,CAAAA,EAAWuG,CAAAA,CAAIC,CAAAA,CAAGzG,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAE9D,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQpC,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAa8B,CAAG,CAAA,IAAA,EAAOC,CAAG,CAAA,CAAA,CAAA,CAAKyG,oBAAI,CAAA,CACtD,OACF,CAAA,CAWaD,CAAAA,cAAK7I,kCAAAA,CAEWoC,CAAAA,CAAQC,CAAAA,CAAAA,EACjChC,oBAAAA,CAAU,EAAA,CAAG+B,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAEjC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,IAAA,EAAOC,CAAG,CAAA,CAAA,CAAA,CAAKkG,CAAAA,CAAiCnG,CAAG,CAAC,CAAA,CACxE,IACF,CAAA,CAUa2G,EAAAA,cAAK/I,kCAAAA,CAEWoC,CAAAA,CAAQC,CAAAA,CAAAA,EAAW2G,CAAAA,CAAIH,CAAAA,CAAGzG,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAE9D,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,IAAA,EAAOC,CAAG,CAAA,CAAA,CAAA,CAAKkG,CAAAA,CAAiCnG,CAAG,CAAC,CAAA,CACxE,IACF,CAAA,CAUa6G,CAAAA,cAAKjJ,kCAAAA,CAEkBoC,CAAAA,CAAQC,CAAAA,CAAAA,EACxChC,oBAAAA,CAAU,EAAA,CAAG+B,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAEjC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,GAAA,EAAMC,CAAG,CAAA,CAAA,CAAA,CAAKkG,CAAAA,CAAiCnG,CAAG,CAAC,CAAA,CACvE,IACF,CAAA,CAUa8G,EAAAA,cAAKlJ,kCAAAA,CAEkBoC,CAAAA,CAAQC,CAAAA,CAAAA,EACxC8G,CAAAA,CAAGF,CAAAA,CAAG7G,CAAAA,CAAKC,CAAG,CAAA,CAAGwG,CAAAA,CAAGzG,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAE/B,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,IAAA,EAAOC,CAAG,CAAA,CAAA,CAAA,CAAKkG,CAAAA,CAAiCnG,CAAG,CAAC,CAAA,CACxE,IACF,CAAA,CAUagH,EAAAA,cAAKpJ,kCAAAA,CAEkBoC,CAAAA,CAAQC,CAAAA,CAAAA,EACxCgH,EAAAA,CAAIL,CAAAA,CAAIC,CAAAA,CAAG7G,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAAG2G,CAAAA,CAAIH,CAAAA,CAAGzG,CAAAA,CAAKC,CAAG,CAAC,CAAC,CAAA,CAE1C,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,GAAA,EAAMC,CAAG,CAAA,CAAA,CAAA,CAAKkG,CAAAA,CAAiCnG,CAAG,CAAC,CAAA,CACvE,IACF,CAAA,CAUakH,EAAAA,cAAKtJ,kCAAAA,CAEkBoC,CAAAA,CAAQC,CAAAA,CAAAA,EAAW2G,CAAAA,CAAIC,CAAAA,CAAG7G,CAAAA,CAAKC,CAAG,CAAC,CAAA,CAErE,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EACJpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,IAAA,EAAOC,CAAG,CAAA,CAAA,CAAA,CAAKkG,CAAAA,CAAiCnG,CAAG,CAAC,CAAA,CACxE,IACF,CAAA,CAUa4G,CAAAA,eAAMhJ,kCAAAA,CAEiBI,EAChCC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAEhCA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,EAAAA,EAAWF,CAAK,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAM,QAAQ,CAAA,CACnD,KACF,CAAA,CAQa+I,CAAAA,cAAKnJ,kCAAAA,CAEkBoC,CAAAA,CAAQC,CAAAA,CAAAA,EACxChC,oBAAAA,CAAU,EAAA,CAAG+B,CAAAA,CAAI,IAAI,CAAA,CAAEA,CAAAA,CAAKC,CAAG,CAAA,CAEjC,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,GAAA,EAAMC,CAAG,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAI,QAAQ,CAAA,CAC1D,IACF,CAAA,CAQaiH,EAAAA,eAAMrJ,kCAAAA,CAEiBoC,CAAAA,CAAQC,CAAAA,CAAAA,EACxC2G,CAAAA,CAAIG,CAAAA,CAAGH,CAAAA,CAAI5G,CAAG,CAAA,CAAG4G,CAAAA,CAAI3G,CAAG,CAAC,CAAC,CAAA,CAE5B,CAACD,CAAAA,CAAKC,CAAAA,CAAAA,EAAQpC,kCAAAA,oBAAKK,CAAAA,CAAAA,EAAU8B,CAAG,CAAA,GAAA,EAAMC,CAAG,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAI,QAAQ,CAAA,CAC1D,KACF,CAAA,CAUawG,CAAAA,eAAM5I,kCAAAA,CAEhBI,EAAiCC,oBAAAA,CAAU,GAAA,CAAID,CAAAA,CAAM,IAAI,CAAA,CAAEA,CAAK,CAAA,CAEhEA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,CAAA,CAAK0I,oBAAI,CAAA,CAC3C,KACF,CAAA,CAQaS,EAAAA,eAAMvJ,kCAAAA,CAEhBI,EAAiC,CAACwI,CAAAA,CAAII,CAAAA,CAAI5I,CAAK,CAAC,CAAA,CAEhDA,CAAAA,EAAUH,kCAAAA,oBAAKK,CAAAA,IAAAA,EAAaF,CAAK,CAAA,CAAA,C
AAA,CAAK0I,oBAAI,CAAA,CAC3C,KACF,CAAA,CAaaU,EAAAA,qBAAYxJ,kCAAAA,CAGrBoC,CAAAA,CACAC,CAAAA,CACAoH,CAAAA,CAAY,GAAA,CAAA,EAER,OAAOrH,CAAAA,EAAQ,QAAA,EAAY,OAAOC,CAAAA,EAAQ,QAAA,CACrC,IAAA,CAAK,GAAA,CAAID,CAAAA,CAAMC,CAAG,CAAA,CAAIoH,CAAAA,CAE3BC,iCAAAA,CAAiB,CAAA,EAAKA,iCAAAA,CAAiB,CAAA,CAClCrJ,oBAAAA,CAAU,aAAA,CAAc+B,CAAAA,CAAI,IAAI,CAAA,CACrCH,kCAAAA,CAAIG,CAAKC,CAAG,CAAA,CACZoH,CACF,CAAA,CAEK,CAAA,CAAA,CAGT,CAACrH,CAAAA,CAAKC,CAAAA,CAAKoH,CAAAA,CAAYxJ,kCAAAA,GAAK,CAAM6B,oBAAG,CAAA,CAAA,EAC/B6H,kCAAAA,CAAoB,CAAA,EAAKA,kCAAAA,CAAoB,CAAA,CACxC1J,kCAAAA,oBACLK,CAAAA,SAAAA,EAAkB8B,CAAG,CAAA,QAAA,EAAWC,CAAG,CAAA,MAAA,EAASoH,CAAS,CAAA,CAAA,CAAA,CACrDX,oBACF,CAAA,CAEE,CAACa,kCAAAA,CAAoB,CAAA,EAAK,CAACA,kCAAAA,CAAoB,CAAA,CAC1C1J,kCAAAA,oBAGLK,CAAAA,QAAAA,EAAiB8B,CAAG,CAAA,GAAA,EAAMC,CAAG,CAAA,MAAA,EAASD,CAAG,CAAA,GAAA,EAAMA,CAAG,CAAA,IAAA,EAAOqH,CAAS,CAAA,CAAA,CAAA,CAClEX,oBACF,CAAA,CAEK7I,kCAAAA,OAAK,CAAS6I,oBAAI,CAAA,CAE3B,WACF,CAAA,CAsBac,EAAAA,kBAAyB5J,kCAAAA,CAGlC6J,CAAAA,CACA,CAAA,CACAC,CAAAA,CAAAA,EAEI,OAAOA,CAAAA,EAAS,SAAA,CACXA,CAAAA,CAAO,CAAA,CAAID,CAAAA,CAEbxJ,oBAAAA,CAAU,MAAA,CAAQwJ,CAAAA,CAAqB,IAAI,CAAA,CAChDA,CAAAA,CACA,CAAA,CACAC,CACF,CAAA,CAGF,CAACD,CAAAA,CAAG,CAAA,CAAGC,CAAAA,CAAAA,EAAS7J,kCAAAA,oBAAKK,CAAAA,OAAAA,EAAgBuJ,CAAC,CAAA,EAAA,EAAK,CAAC,CAAA,EAAA,EAAKC,CAAI,CAAA,CAAA,CAAA,CAAKD,CAAAA,CAAE,QAAQ,CAAA,CACpE,QACF,CAAA,CCnTO,IAAME,EAAAA,CAAmB/J,kCAAAA,CAE9B,CAAA,EAAM,OAAA,CAAQ,IAAA,CAAK,sDAAsD,CAAA,CAEzE,CAAA,CAAA,EAAMC,kCAAAA,oBAAK,CAAsBC,mBAAI,CAAA,CACrC,kBACF,CAAA,CAEa8J,EAAAA,0BAAiBhK,kCAAAA,CAE5B,CAAA,EAAM,OAAA,CAAQ,IAAA,CAAK,oDAAoD,CAAA,CAEvE,CAAA,CAAA,EAAMC,kCAAAA,kBAAK,CAAoBC,mBAAI,CAAA,CACnC,gBACF,CAAA,CAEa+J,EAAAA,0BAAiBjK,kCAAAA,CAE5B,CAAA,EAAM,OAAA,CAAQ,IAAA,CAAK,oDAAoD,CAAA,CAEvE,CAAA,CAAA,EAAMC,kCAAAA,kBAAK,CAAoBC,mBAAI,CAAA,CACnC,gBACF,CAAA,CAEagK,EAAAA,sBAAalK,kCAAAA,CAEF0B,EAAiB,CACrC,MAAM,IAAI,KAAA,CACR,8DACF,CACF,CAAA,CAECA,CAAAA,EAAM,CACL,EAAA,CAAIyI,iCAAAA,CAAWzI,CAAE,QAAQ,CAAA,EAAKA,CAAAA,CAAE,QAAA,CAAS,IAAA,GAAS,QAAA,CAChD,OAAOzB,kCAAAA,oBAAKK,CAAAA,YAAAA,EAAqBoB,CAAC,CAAA,CAAA,CAAA,CAAKA,CAAAA,CAAE,QAAA,CAAS,KAAK,CAAA,CAEzD,MAAM,IAAI,KAAA,CACR,CAAA,qBAAA,EAAwB,IAAA,CAAK,SAAA,CAAUA,CAAAA,CAAE,QAAA,CAAU,IAAA,CAAM,CAAC,CAAC,CAAA,CAAA","file":"/Users/iwo/Projects/wigsill/packages/typegpu/dist/std/index.cjs","sourcesContent":[null,"import { snip } from '../data/snippet.ts';\nimport { Void } from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../core/function/dualImpl.ts';\n\nexport const discard = createDualImpl(\n // CPU\n (): never => {\n throw new Error(\n '`discard` relies on GPU resources and cannot be executed outside of a draw call',\n );\n },\n // GPU\n () => snip('discard;', Void),\n 'discard',\n);\n","import { snip } from '../data/snippet.ts';\nimport { smoothstepScalar } from '../data/numberOps.ts';\nimport {\n abstractFloat,\n abstractInt,\n f16,\n f32,\n i32,\n u32,\n} from '../data/numeric.ts';\nimport { VectorOps } from '../data/vectorOps.ts';\nimport type {\n AnyFloat32VecInstance,\n AnyFloatVecInstance,\n AnyIntegerVecInstance,\n AnyMatInstance,\n AnyNumericVecInstance,\n AnySignedVecInstance,\n AnyWgslData,\n v2i,\n v3f,\n v3h,\n v3i,\n v4i,\n} from '../data/wgslTypes.ts';\nimport type { Infer } from '../shared/repr.ts';\nimport { createDualImpl } from '../core/function/dualImpl.ts';\nimport { abstruct } from '../data/struct.ts';\nimport { mul, sub } from './operators.ts';\nimport {\n vec2f,\n vec2h,\n vec2i,\n vec3f,\n vec3h,\n vec3i,\n vec4f,\n vec4h,\n vec4i,\n} from '../data/vector.ts';\nimport { stitch } from 
'../core/resolve/stitch.ts';\n\ntype NumVec = AnyNumericVecInstance;\n\nexport const abs = createDualImpl(\n // CPU implementation\n <T extends NumVec | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.abs(value) as T;\n }\n return VectorOps.abs[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`abs(${value})`, value.dataType),\n 'abs',\n);\n\nexport const acos = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.acos(value) as T;\n }\n return VectorOps.acos[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`acos(${value})`, value.dataType),\n 'acos',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#acosh-builtin\n */\nexport const acosh = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.acosh(value) as T;\n }\n return VectorOps.acosh[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`acosh(${value})`, value.dataType),\n 'acosh',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#asin-builtin\n */\nexport const asin = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.asin(value) as T;\n }\n return VectorOps.asin[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`asin(${value})`, value.dataType),\n 'asin',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#asinh-builtin\n */\nexport const asinh = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.asinh(value) as T;\n }\n return VectorOps.asinh[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`asinh(${value})`, value.dataType),\n 'asinh',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#atan-builtin\n */\nexport const atan = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.atan(value) as T;\n }\n return VectorOps.atan[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`atan(${value})`, value.dataType),\n 'atan',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#atanh-builtin\n */\nexport const atanh = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.atanh(value) as T;\n }\n return VectorOps.atanh[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`atanh(${value})`, value.dataType),\n 'atanh',\n);\n\nexport const atan2 = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(y: T, x: T): T => {\n if (typeof y === 'number' && typeof x === 'number') {\n return Math.atan2(y, x) as T;\n }\n return VectorOps.atan2[(y as AnyFloatVecInstance).kind](\n y as never,\n x as never,\n ) as T;\n },\n // GPU implementation\n (y, x) => snip(stitch`atan2(${y}, ${x})`, y.dataType),\n 'atan2',\n 'unify',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#ceil-builtin\n */\nexport const ceil = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.ceil(value) 
as T;\n }\n return VectorOps.ceil[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`ceil(${value})`, value.dataType),\n 'ceil',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#clamp\n */\nexport const clamp = createDualImpl(\n // CPU implementation\n <T extends NumVec | number>(value: T, low: T, high: T): T => {\n if (typeof value === 'number') {\n return Math.min(Math.max(low as number, value), high as number) as T;\n }\n return VectorOps.clamp[value.kind](\n value,\n low as NumVec,\n high as NumVec,\n ) as T;\n },\n // GPU implementation\n (value, low, high) =>\n snip(stitch`clamp(${value}, ${low}, ${high})`, value.dataType),\n 'clamp',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#cos-builtin\n */\nexport const cos = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.cos(value) as T;\n }\n return VectorOps.cos[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`cos(${value})`, value.dataType),\n 'cos',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#cosh-builtin\n */\nexport const cosh = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.cosh(value) as T;\n }\n return VectorOps.cosh[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`cosh(${value})`, value.dataType),\n 'cosh',\n);\n\nexport const countLeadingZeros = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for countLeadingZeros not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`countLeadingZeros(${value})`, value.dataType),\n 'countLeadingZeros',\n);\n\nexport const countOneBits = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for countOneBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`countOneBits(${value})`, value.dataType),\n 'countOneBits',\n);\n\nexport const countTrailingZeros = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for countTrailingZeros not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`countTrailingZeros(${value})`, value.dataType),\n 'countTrailingZeros',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#cross-builtin\n */\nexport const cross = createDualImpl(\n // CPU implementation\n <T extends v3f | v3h>(a: T, b: T): T => VectorOps.cross[a.kind](a, b),\n // GPU implementation\n (a, b) => snip(stitch`cross(${a}, ${b})`, a.dataType),\n 'cross',\n);\n\nexport const degrees = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return ((value * 180) / Math.PI) as T;\n }\n throw new Error(\n 'CPU implementation for degrees on vectors not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`degrees(${value})`, value.dataType),\n 'degrees',\n);\n\nexport const determinant = createDualImpl(\n // CPU implementation\n (value: AnyMatInstance): number => {\n throw new Error(\n 'CPU implementation for determinant not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n // TODO: The return type is potentially wrong here, it should return whatever the matrix element type is.\n (value) => snip(stitch`determinant(${value})`, f32),\n 'determinant',\n);\n\nexport const distance = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(a: T, b: T): number => {\n if (typeof a === 'number' && typeof b === 'number') {\n return Math.abs(a - b);\n }\n return length(\n sub(a as AnyFloatVecInstance, b as AnyFloatVecInstance),\n ) as number;\n },\n // GPU implementation\n (a, b) =>\n snip(\n stitch`distance(${a}, ${b})`,\n a.dataType.type === 'f16' || a.dataType.type.endsWith('h') ? f16 : f32,\n ),\n 'distance',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#dot-builtin\n */\nexport const dot = createDualImpl(\n // CPU implementation\n <T extends NumVec>(lhs: T, rhs: T): number =>\n VectorOps.dot[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) => snip(stitch`dot(${lhs}, ${rhs})`, f32),\n 'dot',\n);\n\nexport const dot4U8Packed = createDualImpl(\n // CPU implementation\n (e1: number, e2: number): number => {\n throw new Error(\n 'CPU implementation for dot4U8Packed not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e1, e2) => snip(stitch`dot4U8Packed(${e1}, ${e2})`, u32),\n 'dot4U8Packed',\n [u32, u32],\n);\n\nexport const dot4I8Packed = createDualImpl(\n // CPU implementation\n (e1: number, e2: number): number => {\n throw new Error(\n 'CPU implementation for dot4I8Packed not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e1, e2) => snip(stitch`dot4I8Packed(${e1}, ${e2})`, i32),\n 'dot4I8Packed',\n [i32, i32],\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#exp-builtin\n */\nexport const exp = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.exp(value) as T;\n }\n return VectorOps.exp[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`exp(${value})`, value.dataType),\n 'exp',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#exp2-builtin\n */\nexport const exp2 = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return (2 ** value) as T;\n }\n return VectorOps.exp2[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`exp2(${value})`, value.dataType),\n 'exp2',\n);\n\nexport const extractBits = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(\n e: T,\n offset: number,\n count: number,\n ): T => {\n throw new Error(\n 'CPU implementation for extractBits not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e, offset, count) =>\n snip(stitch`extractBits(${e}, ${offset}, ${count})`, e.dataType),\n 'extractBits',\n (e, offset, count) => [e.dataType as AnyWgslData, u32, u32],\n);\n\nexport const faceForward = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance>(e1: T, e2: T, e3: T): T => {\n throw new Error(\n 'CPU implementation for faceForward not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e1, e2, e3) => snip(stitch`faceForward(${e1}, ${e2}, ${e3})`, e1.dataType),\n 'faceForward',\n);\n\nexport const firstLeadingBit = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for firstLeadingBit not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`firstLeadingBit(${value})`, value.dataType),\n 'firstLeadingBit',\n);\n\nexport const firstTrailingBit = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for firstTrailingBit not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`firstTrailingBit(${value})`, value.dataType),\n 'firstTrailingBit',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#floor-builtin\n */\nexport const floor = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.floor(value) as T;\n }\n return VectorOps.floor[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`floor(${value})`, value.dataType),\n 'floor',\n);\n\nexport const fma = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(e1: T, e2: T, e3: T): T => {\n if (typeof e1 === 'number') {\n return (e1 * (e2 as number) + (e3 as number)) as T;\n }\n throw new Error(\n 'CPU implementation for fma on vectors not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e1, e2, e3) => snip(stitch`fma(${e1}, ${e2}, ${e3})`, e1.dataType),\n 'fma',\n);\n\nexport const fract = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(a: T): T => {\n if (typeof a === 'number') {\n return (a - Math.floor(a)) as T;\n }\n return VectorOps.fract[a.kind](a) as T;\n },\n // GPU implementation\n (a) => snip(stitch`fract(${a})`, a.dataType),\n 'fract',\n);\n\nconst FrexpResults = {\n f32: abstruct({ fract: f32, exp: i32 }),\n f16: abstruct({ fract: f16, exp: i32 }),\n abstractFloat: abstruct({ fract: abstractFloat, exp: abstractInt }),\n vec2f: abstruct({ fract: vec2f, exp: vec2i }),\n vec3f: abstruct({ fract: vec3f, exp: vec3i }),\n vec4f: abstruct({ fract: vec4f, exp: vec4i }),\n vec2h: abstruct({ fract: vec2h, exp: vec2i }),\n vec3h: abstruct({ fract: vec3h, exp: vec3i }),\n vec4h: abstruct({ fract: vec4h, exp: vec4i }),\n} as const;\n\ntype FrexpOverload = {\n (value: number): Infer<typeof FrexpResults['f32']>;\n <T extends AnyFloatVecInstance>(\n value: T,\n ): Infer<typeof FrexpResults[T['kind']]>;\n};\n\nexport const frexp: FrexpOverload = createDualImpl(\n // CPU implementation\n (value: number): {\n fract: number;\n exp: number;\n } => {\n throw new Error(\n 'CPU implementation for frexp not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => {\n const returnType =\n FrexpResults[value.dataType.type as keyof typeof FrexpResults];\n\n if (!returnType) {\n throw new Error(\n `Unsupported data type for frexp: ${value.dataType.type}. Supported types are f32, f16, abstractFloat, vec2f, vec3f, vec4f, vec2h, vec3h, vec4h.`,\n );\n }\n\n return snip(stitch`frexp(${value})`, returnType);\n },\n 'frexp',\n);\n\nexport const insertBits = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(\n e: T,\n newbits: T,\n offset: number,\n count: number,\n ): T => {\n throw new Error(\n 'CPU implementation for insertBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e, newbits, offset, count) =>\n snip(stitch`insertBits(${e}, ${newbits}, ${offset}, ${count})`, e.dataType),\n 'insertBits',\n);\n\nexport const inverseSqrt = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return (1 / Math.sqrt(value)) as T;\n }\n throw new Error(\n 'CPU implementation for inverseSqrt on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`inverseSqrt(${value})`, value.dataType),\n 'inverseSqrt',\n);\n\ntype FloatVecInstanceToIntVecInstance<T extends AnyFloatVecInstance> = {\n 'vec2f': v2i;\n 'vec3f': v3i;\n 'vec4f': v4i;\n 'vec2h': v2i;\n 'vec3h': v3i;\n 'vec4h': v4i;\n}[T['kind']];\n\ntype LdexpOverload = {\n (e1: number, e2: number): number;\n <T extends AnyFloatVecInstance>(\n e1: T,\n e2: FloatVecInstanceToIntVecInstance<T>,\n ): T;\n};\n\nexport const ldexp: LdexpOverload = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(\n e1: T,\n e2: AnyIntegerVecInstance | number,\n ): T => {\n throw new Error(\n 'CPU implementation for ldexp not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e1, e2) => snip(stitch`ldexp(${e1}, ${e2})`, e1.dataType),\n 'ldexp',\n (e1, _) => {\n switch (e1.dataType.type) {\n case 'abstractFloat':\n return [abstractFloat, abstractInt];\n case 'f32':\n case 'f16':\n return [e1.dataType, i32];\n case 'vec2f':\n case 'vec2h':\n return [e1.dataType, vec2i];\n case 'vec3f':\n case 'vec3h':\n return [e1.dataType, vec3i];\n case 'vec4f':\n case 'vec4h':\n return [e1.dataType, vec4i];\n default:\n throw new Error(\n `Unsupported data type for ldexp: ${e1.dataType.type}. Supported types are abstractFloat, f32, f16, vec2f, vec2h, vec3f, vec3h, vec4f, vec4h.`,\n );\n }\n },\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#length-builtin\n */\nexport const length = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): number => {\n if (typeof value === 'number') {\n return Math.abs(value);\n }\n return VectorOps.length[value.kind](value);\n },\n // GPU implementation\n (value) => snip(stitch`length(${value})`, f32),\n 'length',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#log-builtin\n */\nexport const log = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.log(value) as T;\n }\n return VectorOps.log[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`log(${value})`, value.dataType),\n 'log',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#log2-builtin\n */\nexport const log2 = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.log2(value) as T;\n }\n return VectorOps.log2[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`log2(${value})`, value.dataType),\n 'log2',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#max-float-builtin\n */\nexport const max = createDualImpl(\n // CPU implementation\n <T extends NumVec | number>(a: T, b: T): T => {\n if (typeof a === 'number') {\n return Math.max(a, b as number) as T;\n }\n return VectorOps.max[a.kind](a, b as NumVec) as T;\n },\n // GPU implementation\n (a, b) => snip(stitch`max(${a}, ${b})`, a.dataType),\n 'max',\n 'unify',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#min-float-builtin\n */\nexport const min = createDualImpl(\n // CPU implementation\n <T extends NumVec | number>(a: T, b: T): T => {\n if (typeof a === 'number') {\n return Math.min(a, b as number) as T;\n }\n return VectorOps.min[a.kind](a, b as NumVec) as T;\n },\n // GPU implementation\n (a, b) => snip(stitch`min(${a}, ${b})`, a.dataType),\n 'min',\n 'unify',\n);\n\ntype MixOverload = {\n (e1: number, e2: number, e3: number): number;\n <T extends AnyFloatVecInstance>(e1: T, e2: T, e3: number): T;\n <T extends AnyFloatVecInstance>(e1: T, e2: T, e3: T): T;\n};\n\nexport const mix: MixOverload = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(e1: T, e2: T, e3: T | number): T => {\n if (typeof e1 === 'number') {\n if (typeof e3 !== 'number' || typeof e2 !== 'number') {\n throw new Error(\n 'When e1 and e2 are numbers, the blend factor must be a number.',\n );\n }\n return (e1 * (1 - e3) + e2 * e3) as T;\n }\n\n if (typeof e1 === 'number' || typeof e2 === 'number') {\n throw new Error('e1 and e2 need to both be vectors of the same 
kind.');\n }\n\n return VectorOps.mix[e1.kind](e1, e2, e3) as T;\n },\n // GPU implementation\n (e1, e2, e3) => snip(stitch`mix(${e1}, ${e2}, ${e3})`, e1.dataType),\n 'mix',\n);\n\nconst ModfResult = {\n f32: abstruct({ fract: f32, whole: f32 }),\n f16: abstruct({ fract: f16, whole: f16 }),\n abstractFloat: abstruct({ fract: abstractFloat, whole: abstractFloat }),\n vec2f: abstruct({ fract: vec2f, whole: vec2f }),\n vec3f: abstruct({ fract: vec3f, whole: vec3f }),\n vec4f: abstruct({ fract: vec4f, whole: vec4f }),\n vec2h: abstruct({ fract: vec2h, whole: vec2h }),\n vec3h: abstruct({ fract: vec3h, whole: vec3h }),\n vec4h: abstruct({ fract: vec4h, whole: vec4h }),\n} as const;\n\ntype ModfOverload = {\n (value: number): Infer<typeof ModfResult['f32']>;\n <T extends AnyFloatVecInstance>(\n value: T,\n ): Infer<typeof ModfResult[T['kind']]>;\n};\n\nexport const modf: ModfOverload = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T) => {\n throw new Error(\n 'CPU implementation for modf not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => {\n const returnType =\n ModfResult[value.dataType.type as keyof typeof ModfResult];\n\n if (!returnType) {\n throw new Error(\n `Unsupported data type for modf: ${value.dataType.type}. Supported types are f32, f16, abstractFloat, vec2f, vec3f, vec4f, vec2h, vec3h, vec4h.`,\n );\n }\n\n return snip(stitch`modf(${value})`, returnType);\n },\n 'modf',\n);\n\nexport const normalize = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance>(v: T): T => VectorOps.normalize[v.kind](v),\n // GPU implementation\n (v) => snip(stitch`normalize(${v})`, v.dataType),\n 'normalize',\n);\n\ntype PowOverload = {\n (base: number, exponent: number): number;\n <T extends AnyFloatVecInstance>(base: T, exponent: T): T;\n};\n\nexport const pow: PowOverload = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(base: T, exponent: T): T => {\n if (typeof base === 'number' && typeof exponent === 'number') {\n return (base ** exponent) as T;\n }\n if (\n typeof base === 'object' &&\n typeof exponent === 'object' &&\n 'kind' in base &&\n 'kind' in exponent\n ) {\n return VectorOps.pow[base.kind](base, exponent) as T;\n }\n throw new Error('Invalid arguments to pow()');\n },\n // GPU implementation\n (base, exponent) => snip(stitch`pow(${base}, ${exponent})`, base.dataType),\n 'pow',\n);\n\nexport const quantizeToF16 = createDualImpl(\n // CPU implementation\n <T extends AnyFloat32VecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for quantizeToF16 not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`quantizeToF16(${value})`, value.dataType),\n 'quantizeToF16',\n);\n\nexport const radians = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return ((value * Math.PI) / 180) as T;\n }\n throw new Error(\n 'CPU implementation for radians on vectors not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`radians(${value})`, value.dataType),\n 'radians',\n);\n\nexport const reflect = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance>(e1: T, e2: T): T =>\n sub(e1, mul(2 * dot(e2, e1), e2)),\n // GPU implementation\n (e1, e2) => snip(stitch`reflect(${e1}, ${e2})`, e1.dataType),\n 'reflect',\n);\n\nexport const refract = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance>(e1: T, e2: T, e3: number): T => {\n throw new Error(\n 'CPU implementation for refract not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e1, e2, e3) => snip(stitch`refract(${e1}, ${e2}, ${e3})`, e1.dataType),\n 'refract',\n (e1, e2, e3) => [\n e1.dataType as AnyWgslData,\n e2.dataType as AnyWgslData,\n e1.dataType.type === 'f16' || e1.dataType.type.endsWith('h') ? f16 : f32,\n ],\n);\n\nexport const reverseBits = createDualImpl(\n // CPU implementation\n <T extends AnyIntegerVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for reverseBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`reverseBits(${value})`, value.dataType),\n 'reverseBits',\n);\n\nexport const round = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.round(value) as T;\n }\n throw new Error(\n 'CPU implementation for round on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`round(${value})`, value.dataType),\n 'round',\n);\n\nexport const saturate = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.max(0, Math.min(1, value)) as T;\n }\n throw new Error(\n 'CPU implementation for saturate on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`saturate(${value})`, value.dataType),\n 'saturate',\n);\n\nexport const sign = createDualImpl(\n // CPU implementation\n <T extends AnySignedVecInstance | number>(\n e: T,\n ): T => {\n if (typeof e === 'number') {\n return Math.sign(e) as T;\n }\n return VectorOps.sign[e.kind](e) as T;\n },\n // GPU implementation\n (e) => snip(stitch`sign(${e})`, e.dataType),\n 'sign',\n);\n\n/**\n * @privateRemarks\n * https://www.w3.org/TR/WGSL/#sin-builtin\n */\nexport const sin = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.sin(value) as T;\n }\n return VectorOps.sin[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`sin(${value})`, value.dataType),\n 'sin',\n);\n\nexport const sinh = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.sinh(value) as T;\n }\n throw new Error(\n 'CPU implementation for sinh on vectors not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`sinh(${value})`, value.dataType),\n 'sinh',\n);\n\nexport const smoothstep = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(edge0: T, edge1: T, x: T): T => {\n if (typeof x === 'number') {\n return smoothstepScalar(\n edge0 as number,\n edge1 as number,\n x as number,\n ) as T;\n }\n return VectorOps.smoothstep[x.kind](\n edge0 as AnyFloatVecInstance,\n edge1 as AnyFloatVecInstance,\n x as AnyFloatVecInstance,\n ) as T;\n },\n // GPU implementation\n (edge0, edge1, x) =>\n snip(stitch`smoothstep(${edge0}, ${edge1}, ${x})`, x.dataType),\n 'smoothstep',\n);\n\nexport const sqrt = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.sqrt(value) as T;\n }\n return VectorOps.sqrt[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`sqrt(${value})`, value.dataType),\n 'sqrt',\n);\n\nexport const step = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(edge: T, x: T): T => {\n if (typeof edge === 'number') {\n return (edge <= (x as number) ? 1.0 : 0.0) as T;\n }\n throw new Error(\n 'CPU implementation for step on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (edge, x) => snip(stitch`step(${edge}, ${x})`, edge.dataType),\n 'step',\n);\n\nexport const tan = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.tan(value) as T;\n }\n throw new Error(\n 'CPU implementation for tan on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`tan(${value})`, value.dataType),\n 'tan',\n);\n\nexport const tanh = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n if (typeof value === 'number') {\n return Math.tanh(value) as T;\n }\n return VectorOps.tanh[value.kind](value) as T;\n },\n // GPU implementation\n (value) => snip(stitch`tanh(${value})`, value.dataType),\n 'tanh',\n);\n\nexport const transpose = createDualImpl(\n // CPU implementation\n (e: AnyMatInstance) => {\n throw new Error(\n 'CPU implementation for transpose not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (e) => snip(stitch`transpose(${e})`, e.dataType),\n 'transpose',\n);\n\nexport const trunc = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(value: T): T => {\n throw new Error(\n 'CPU implementation for trunc not implemented yet. 
Please submit an issue at https://github.com/software-mansion/TypeGPU/issues',\n );\n },\n // GPU implementation\n (value) => snip(stitch`trunc(${value})`, value.dataType),\n 'trunc',\n);\n","import { stitch } from '../core/resolve/stitch.ts';\nimport { snip } from '../data/snippet.ts';\nimport {\n rotationX4,\n rotationY4,\n rotationZ4,\n scaling4,\n translation4,\n} from '../data/matrix.ts';\nimport type { m4x4f, v3f } from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../core/function/dualImpl.ts';\nimport { mul } from './operators.ts';\nimport { $internal } from '../shared/symbols.ts';\n\nconst cpuMul = mul[$internal].jsImpl;\n\nconst cpuTranslation4 = translation4[$internal].jsImpl;\nconst gpuTranslation4 = translation4[$internal].gpuImpl;\n\nconst cpuScaling4 = scaling4[$internal].jsImpl;\nconst gpuScaling4 = scaling4[$internal].gpuImpl;\n\nconst cpuRotationX4 = rotationX4[$internal].jsImpl;\nconst gpuRotationX4 = rotationX4[$internal].gpuImpl;\n\nconst cpuRotationY4 = rotationY4[$internal].jsImpl;\nconst gpuRotationY4 = rotationY4[$internal].gpuImpl;\n\nconst cpuRotationZ4 = rotationZ4[$internal].jsImpl;\nconst gpuRotationZ4 = rotationZ4[$internal].gpuImpl;\n\n/**\n * Translates the given 4-by-4 matrix by the given vector.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {v3f} vector - The vector by which to translate the matrix.\n * @returns {m4x4f} The translated matrix.\n */\nexport const translate4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, vector: v3f) => cpuMul(cpuTranslation4(vector), matrix),\n // GPU implementation\n (matrix, vector) =>\n snip(stitch`(${gpuTranslation4(vector)} * ${matrix})`, matrix.dataType),\n 'translate4',\n);\n\n/**\n * Scales the given 4-by-4 matrix in each dimension by an amount given by the corresponding entry in the given vector.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {v3f} vector - A vector of three entries specifying the factor by which to scale in each dimension.\n * @returns {m4x4f} The scaled matrix.\n */\nexport const scale4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, vector: v3f) => cpuMul(cpuScaling4(vector), matrix),\n // GPU implementation\n (matrix, vector) =>\n snip(stitch`(${(gpuScaling4(vector))} * ${matrix})`, matrix.dataType),\n 'scale4',\n);\n\n/**\n * Rotates the given 4-by-4 matrix around the x-axis by the given angle.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {number} angle - The angle by which to rotate (in radians).\n * @returns {m4x4f} The rotated matrix.\n */\nexport const rotateX4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, angle: number) => cpuMul(cpuRotationX4(angle), matrix),\n // GPU implementation\n (matrix, angle) =>\n snip(stitch`(${(gpuRotationX4(angle))} * ${matrix})`, matrix.dataType),\n 'rotateX4',\n);\n\n/**\n * Rotates the given 4-by-4 matrix around the y-axis by the given angle.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {number} angle - The angle by which to rotate (in radians).\n * @returns {m4x4f} The rotated matrix.\n */\nexport const rotateY4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, angle: number) => cpuMul(cpuRotationY4(angle), matrix),\n // GPU implementation\n (matrix, angle) =>\n snip(stitch`(${(gpuRotationY4(angle))} * ${matrix})`, matrix.dataType),\n 'rotateY4',\n);\n\n/**\n * Rotates the given 4-by-4 matrix around the z-axis by the given angle.\n * @param {m4x4f} matrix - The matrix to be modified.\n * @param {number} angle - The 
angle by which to rotate (in radians).\n * @returns {m4x4f} The rotated matrix.\n */\nexport const rotateZ4 = createDualImpl(\n // CPU implementation\n (matrix: m4x4f, angle: number) => cpuMul(cpuRotationZ4(angle), matrix),\n // GPU implementation\n (matrix, angle) =>\n snip(stitch`(${(gpuRotationZ4(angle))} * ${matrix})`, matrix.dataType),\n 'rotateZ4',\n);\n","import { stitch } from '../core/resolve/stitch.ts';\nimport { isSnippetNumeric, snip, type Snippet } from '../data/snippet.ts';\nimport { bool, f32 } from '../data/numeric.ts';\nimport { vec2b, vec3b, vec4b } from '../data/vector.ts';\nimport { VectorOps } from '../data/vectorOps.ts';\nimport {\n type AnyBooleanVecInstance,\n type AnyFloatVecInstance,\n type AnyNumericVecInstance,\n type AnyVec2Instance,\n type AnyVec3Instance,\n type AnyVecInstance,\n isVecInstance,\n type v2b,\n type v3b,\n type v4b,\n} from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../core/function/dualImpl.ts';\nimport { sub } from './operators.ts';\n\nfunction correspondingBooleanVectorSchema(value: Snippet) {\n if (value.dataType.type.includes('2')) {\n return vec2b;\n }\n if (value.dataType.type.includes('3')) {\n return vec3b;\n }\n return vec4b;\n}\n\n// comparison\n\n/**\n * Checks whether `lhs == rhs` on all components.\n * Equivalent to `all(eq(lhs, rhs))`.\n * @example\n * allEq(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns false\n * allEq(vec3u(0, 1, 2), vec3u(0, 1, 2)) // returns true\n */\nexport const allEq = createDualImpl(\n // CPU implementation\n <T extends AnyVecInstance>(lhs: T, rhs: T) => all(eq(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) => snip(stitch`all(${lhs} == ${rhs})`, bool),\n 'allEq',\n);\n\n/**\n * Checks **component-wise** whether `lhs == rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`, or use `allEq`.\n * @example\n * eq(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns vec2b(true, false)\n * eq(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, true, false)\n * all(eq(vec4i(4, 3, 2, 1), vec4i(4, 3, 2, 1))) // returns true\n * allEq(vec4i(4, 3, 2, 1), vec4i(4, 3, 2, 1)) // returns true\n */\nexport const eq = createDualImpl(\n // CPU implementation\n <T extends AnyVecInstance>(lhs: T, rhs: T) =>\n VectorOps.eq[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) =>\n snip(stitch`(${lhs} == ${rhs})`, correspondingBooleanVectorSchema(lhs)),\n 'eq',\n);\n\n/**\n * Checks **component-wise** whether `lhs != rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `any`.\n * @example\n * ne(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns vec2b(false, true)\n * ne(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, false, true)\n * any(ne(vec4i(4, 3, 2, 1), vec4i(4, 2, 2, 1))) // returns true\n */\nexport const ne = createDualImpl(\n // CPU implementation\n <T extends AnyVecInstance>(lhs: T, rhs: T) => not(eq(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) =>\n snip(stitch`(${lhs} != ${rhs})`, correspondingBooleanVectorSchema(lhs)),\n 'ne',\n);\n\n/**\n * Checks **component-wise** whether `lhs < rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * lt(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(false, true)\n * lt(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, false, false)\n * all(lt(vec4i(1, 2, 3, 4), vec4i(2, 3, 4, 5))) // returns true\n */\nexport const lt = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) =>\n 
VectorOps.lt[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) =>\n snip(stitch`(${lhs} < ${rhs})`, correspondingBooleanVectorSchema(lhs)),\n 'lt',\n);\n\n/**\n * Checks **component-wise** whether `lhs <= rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * le(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(true, true)\n * le(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, true, false)\n * all(le(vec4i(1, 2, 3, 4), vec4i(2, 3, 3, 5))) // returns true\n */\nexport const le = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) =>\n or(lt(lhs, rhs), eq(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) =>\n snip(stitch`(${lhs} <= ${rhs})`, correspondingBooleanVectorSchema(lhs)),\n 'le',\n);\n\n/**\n * Checks **component-wise** whether `lhs > rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * gt(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(false, false)\n * gt(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, false, true)\n * all(gt(vec4i(2, 3, 4, 5), vec4i(1, 2, 3, 4))) // returns true\n */\nexport const gt = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) =>\n and(not(lt(lhs, rhs)), not(eq(lhs, rhs))),\n // GPU implementation\n (lhs, rhs) =>\n snip(stitch`(${lhs} > ${rhs})`, correspondingBooleanVectorSchema(lhs)),\n 'gt',\n);\n\n/**\n * Checks **component-wise** whether `lhs >= rhs`.\n * This function does **not** return `bool`, for that use-case, wrap the result in `all`.\n * @example\n * ge(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(true, false)\n * ge(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, true, true)\n * all(ge(vec4i(2, 2, 4, 5), vec4i(1, 2, 3, 4))) // returns true\n */\nexport const ge = createDualImpl(\n // CPU implementation\n <T extends AnyNumericVecInstance>(lhs: T, rhs: T) => not(lt(lhs, rhs)),\n // GPU implementation\n (lhs, rhs) =>\n snip(stitch`(${lhs} >= ${rhs})`, correspondingBooleanVectorSchema(lhs)),\n 'ge',\n);\n\n// logical ops\n\n/**\n * Returns **component-wise** `!value`.\n * @example\n * not(vec2b(false, true)) // returns vec2b(true, false)\n * not(vec3b(true, true, false)) // returns vec3b(false, false, true)\n */\nexport const not = createDualImpl(\n // CPU implementation\n <T extends AnyBooleanVecInstance>(value: T): T =>\n VectorOps.neg[value.kind](value),\n // GPU implementation\n (value) => snip(stitch`!(${value})`, value.dataType),\n 'not',\n);\n\n/**\n * Returns **component-wise** logical `or` result.\n * @example\n * or(vec2b(false, true), vec2b(false, false)) // returns vec2b(false, true)\n * or(vec3b(true, true, false), vec3b(false, true, false)) // returns vec3b(true, true, false)\n */\nexport const or = createDualImpl(\n // CPU implementation\n <T extends AnyBooleanVecInstance>(lhs: T, rhs: T) =>\n VectorOps.or[lhs.kind](lhs, rhs),\n // GPU implementation\n (lhs, rhs) => snip(stitch`(${lhs} | ${rhs})`, lhs.dataType),\n 'or',\n);\n\n/**\n * Returns **component-wise** logical `and` result.\n * @example\n * and(vec2b(false, true), vec2b(true, true)) // returns vec2b(false, true)\n * and(vec3b(true, true, false), vec3b(false, true, false)) // returns vec3b(false, true, false)\n */\nexport const and = createDualImpl(\n // CPU implementation\n <T extends AnyBooleanVecInstance>(lhs: T, rhs: T) =>\n not(or(not(lhs), not(rhs))),\n // GPU implementation\n (lhs, rhs) => snip(stitch`(${lhs} & ${rhs})`, 
lhs.dataType),\n 'and',\n);\n\n// logical aggregation\n\n/**\n * Returns `true` if each component of `value` is true.\n * @example\n * all(vec2b(false, true)) // returns false\n * all(vec3b(true, true, true)) // returns true\n */\nexport const all = createDualImpl(\n // CPU implementation\n (value: AnyBooleanVecInstance) => VectorOps.all[value.kind](value),\n // GPU implementation\n (value) => snip(stitch`all(${value})`, bool),\n 'all',\n);\n\n/**\n * Returns `true` if any component of `value` is true.\n * @example\n * any(vec2b(false, true)) // returns true\n * any(vec3b(false, false, false)) // returns false\n */\nexport const any = createDualImpl(\n // CPU implementation\n (value: AnyBooleanVecInstance) => !all(not(value)),\n // GPU implementation\n (value) => snip(stitch`any(${value})`, bool),\n 'any',\n);\n\n// other\n\n/**\n * Checks whether the given elements differ by at most the `precision` value.\n * Checks all elements of `lhs` and `rhs` if arguments are vectors.\n * @example\n * isCloseTo(0, 0.1) // returns false\n * isCloseTo(vec3f(0, 0, 0), vec3f(0.002, -0.009, 0)) // returns true\n *\n * @param {number} precision argument that specifies the maximum allowed difference, 0.01 by default.\n */\nexport const isCloseTo = createDualImpl(\n // CPU implementation\n <T extends AnyFloatVecInstance | number>(\n lhs: T,\n rhs: T,\n precision = 0.01,\n ) => {\n if (typeof lhs === 'number' && typeof rhs === 'number') {\n return Math.abs(lhs - rhs) < precision;\n }\n if (isVecInstance(lhs) && isVecInstance(rhs)) {\n return VectorOps.isCloseToZero[lhs.kind](\n sub(lhs, rhs),\n precision,\n );\n }\n return false;\n },\n // GPU implementation\n (lhs, rhs, precision = snip(0.01, f32)) => {\n if (isSnippetNumeric(lhs) && isSnippetNumeric(rhs)) {\n return snip(\n stitch`(abs(f32(${lhs}) - f32(${rhs})) <= ${precision})`,\n bool,\n );\n }\n if (!isSnippetNumeric(lhs) && !isSnippetNumeric(rhs)) {\n return snip(\n // https://www.w3.org/TR/WGSL/#vector-multi-component:~:text=Binary%20arithmetic%20expressions%20with%20mixed%20scalar%20and%20vector%20operands\n // (a-a)+prec creates a vector of a.length elements, all equal to prec\n stitch`all(abs(${lhs} - ${rhs}) <= (${lhs} - ${lhs}) + ${precision})`,\n bool,\n );\n }\n return snip('false', bool);\n },\n 'isCloseTo',\n);\n\nexport type SelectOverload = {\n <T extends number | boolean | AnyVecInstance>(f: T, t: T, cond: boolean): T;\n <T extends AnyVecInstance>(\n f: T,\n t: T,\n cond: T extends AnyVec2Instance ? v2b\n : T extends AnyVec3Instance ? v3b\n : v4b,\n ): T;\n};\n\n/**\n * Returns `t` if `cond` is `true`, and `f` otherwise.\n * Component-wise if `cond` is a vector.\n * @example\n * select(1, 2, false) // returns 1\n * select(1, 2, true) // returns 2\n * select(vec2i(1, 2), vec2i(3, 4), true) // returns vec2i(3, 4)\n * select(vec2i(1, 2), vec2i(3, 4), vec2b(false, true)) // returns vec2i(1, 4)\n */\nexport const select: SelectOverload = createDualImpl(\n // CPU implementation\n <T extends number | boolean | AnyVecInstance>(\n f: T,\n t: T,\n cond: AnyBooleanVecInstance | boolean,\n ) => {\n if (typeof cond === 'boolean') {\n return cond ? 
t : f;\n }\n return VectorOps.select[(f as AnyVecInstance).kind](\n f as AnyVecInstance,\n t as AnyVecInstance,\n cond,\n );\n },\n // GPU implementation\n (f, t, cond) => snip(stitch`select(${f}, ${t}, ${cond})`, f.dataType),\n 'select',\n);\n","import { stitch } from '../core/resolve/stitch.ts';\nimport { snip, type Snippet } from '../data/snippet.ts';\nimport { i32, u32 } from '../data/numeric.ts';\nimport {\n type AnyWgslData,\n type atomicI32,\n type atomicU32,\n isWgslData,\n Void,\n} from '../data/wgslTypes.ts';\nimport { createDualImpl } from '../core/function/dualImpl.ts';\ntype AnyAtomic = atomicI32 | atomicU32;\n\nexport const workgroupBarrier = createDualImpl(\n // CPU implementation\n () => console.warn('workgroupBarrier is a no-op outside of CODEGEN mode.'),\n // CODEGEN implementation\n () => snip('workgroupBarrier()', Void),\n 'workgroupBarrier',\n);\n\nexport const storageBarrier = createDualImpl(\n // CPU implementation\n () => console.warn('storageBarrier is a no-op outside of CODEGEN mode.'),\n // CODEGEN implementation\n () => snip('storageBarrier()', Void),\n 'storageBarrier',\n);\n\nexport const textureBarrier = createDualImpl(\n // CPU implementation\n () => console.warn('textureBarrier is a no-op outside of CODEGEN mode.'),\n // CODEGEN implementation\n () => snip('textureBarrier()', Void),\n 'textureBarrier',\n);\n\nexport const atomicLoad = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicLoad(&${a})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicLoad',\n);\n\nexport const atomicStore = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): void => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (!isWgslData(a.dataType) || a.dataType.type !== 'atomic') {\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n }\n return snip(stitch`atomicStore(&${a}, ${value})`, Void);\n },\n 'atomicStore',\n);\n\nconst atomicTypeFn = (a: Snippet, _value: Snippet): AnyWgslData[] => {\n if (a.dataType.type === 'atomic' && a.dataType.inner.type === 'i32') {\n return [a.dataType, i32];\n }\n return [a.dataType as AnyWgslData, u32];\n};\n\nexport const atomicAdd = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicAdd(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicAdd',\n atomicTypeFn,\n);\n\nexport const atomicSub = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicSub(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid 
atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicSub',\n atomicTypeFn,\n);\n\nexport const atomicMax = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicMax(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicMax',\n atomicTypeFn,\n);\n\nexport const atomicMin = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicMin(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicMin',\n atomicTypeFn,\n);\n\nexport const atomicAnd = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicAnd(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicAnd',\n atomicTypeFn,\n);\n\nexport const atomicOr = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicOr(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicOr',\n atomicTypeFn,\n);\n\nexport const atomicXor = createDualImpl(\n // CPU implementation\n <T extends AnyAtomic>(a: T, value: number): number => {\n throw new Error(\n 'Atomic operations are not supported outside of CODEGEN mode.',\n );\n },\n // CODEGEN implementation\n (a, value) => {\n if (isWgslData(a.dataType) && a.dataType.type === 'atomic') {\n return snip(stitch`atomicXor(&${a}, ${value})`, a.dataType.inner);\n }\n throw new Error(\n `Invalid atomic type: ${JSON.stringify(a.dataType, null, 2)}`,\n );\n },\n 'atomicXor',\n atomicTypeFn,\n);\n"]}