typegpu 0.9.0 → 0.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/README.md +1 -1
  2. package/builtin-DdtWpk2t.js +818 -0
  3. package/builtin-DdtWpk2t.js.map +1 -0
  4. package/chunk-BYypO7fO.js +18 -0
  5. package/common/index.d.ts +8 -23
  6. package/common/index.d.ts.map +1 -0
  7. package/common/index.js +7 -5
  8. package/common/index.js.map +1 -1
  9. package/data/index.d.ts +7 -401
  10. package/data/index.d.ts.map +1 -0
  11. package/data/index.js +164 -1
  12. package/data/index.js.map +1 -1
  13. package/deepEqual-DQxK4vdp.js +413 -0
  14. package/deepEqual-DQxK4vdp.js.map +1 -0
  15. package/extensions-DIVuAfBM.js +2032 -0
  16. package/extensions-DIVuAfBM.js.map +1 -0
  17. package/fullScreenTriangle-CfFyQd_0.js +543 -0
  18. package/fullScreenTriangle-CfFyQd_0.js.map +1 -0
  19. package/index.d.ts +124 -310
  20. package/index.d.ts.map +1 -0
  21. package/index.js +6282 -153
  22. package/index.js.map +1 -1
  23. package/indexNamedExports-oL6tyaJ9.d.ts +5697 -0
  24. package/indexNamedExports-oL6tyaJ9.d.ts.map +1 -0
  25. package/operators-d-PMVTo7.js +4158 -0
  26. package/operators-d-PMVTo7.js.map +1 -0
  27. package/package.json +3 -2
  28. package/std/index.d.ts +7 -621
  29. package/std/index.d.ts.map +1 -0
  30. package/std/index.js +165 -1
  31. package/std/index.js.map +1 -1
  32. package/texture-BagDrrks.js +205 -0
  33. package/texture-BagDrrks.js.map +1 -0
  34. package/chunk-5ABKYSJD.js +0 -2
  35. package/chunk-5ABKYSJD.js.map +0 -1
  36. package/chunk-D5UYO3OX.js +0 -3
  37. package/chunk-D5UYO3OX.js.map +0 -1
  38. package/chunk-EHLRP4V2.js +0 -2
  39. package/chunk-EHLRP4V2.js.map +0 -1
  40. package/chunk-LMPPDGRD.js +0 -2
  41. package/chunk-LMPPDGRD.js.map +0 -1
  42. package/chunk-MBB2XFH6.js +0 -2
  43. package/chunk-MBB2XFH6.js.map +0 -1
  44. package/chunk-SHSILTWI.js +0 -10
  45. package/chunk-SHSILTWI.js.map +0 -1
  46. package/comptime-DKpw1IVu.d.ts +0 -28
  47. package/matrix-C4IFKU1R.d.ts +0 -123
  48. package/tgpuConstant-BOn7U_lv.d.ts +0 -4031
@@ -0,0 +1,2032 @@
1
+ import { At as stitch, B as vec2h, C as translation4, E as dualImpl, G as vec3h, Gn as isVecInstance, H as vec2u, J as vec4b, K as vec3i, Mt as getResolutionCtx, N as abstruct, Pn as isHalfPrecisionSchema, Q as vec4u, R as vec2b, S as scaling4, T as MissingCpuImplError, U as vec3b, Un as isVec, V as vec2i, Vn as isPtr, W as vec3f, X as vec4h, Y as vec4f, Z as vec4i, _ as mat4x4f, at as i32, b as rotationY4, c as bitcastU32toF32Impl, et as abstractFloat, ft as unify, g as mat3x3f, h as mat2x2f, ht as safeStringify, i as mul, it as f32, jn as isAtomic, kn as Void, l as bitcastU32toI32Impl, mn as snip, nt as bool, o as sub, or as $gpuCallable, pn as isSnippetNumeric, q as vec3u, qn as isWgslArray, rt as f16, s as VectorOps, sn as SignatureNotSupportedError, st as u32, tt as abstractInt, u as smoothstepScalar, w as comptime, wt as ptrFn, x as rotationZ4, xt as isRef, y as rotationX4, z as vec2f } from "./operators-d-PMVTo7.js";
2
+ import { r as isWgslTexture } from "./texture-BagDrrks.js";
3
+ import * as TB from "typed-binary";
4
+
5
+ //#region src/core/texture/textureFormats.ts
6
// Aspect descriptors shared by the depth/stencil texture formats below.
// A depth aspect always samples as f32 / vec4f; texelSize is the byte size
// per texel, or "non-copyable" when the layout is implementation-defined.
const DEPTH_ASPECT_NON_COPYABLE = {
  channelType: f32,
  vectorType: vec4f,
  sampleTypes: ["depth", "unfilterable-float"],
  texelSize: "non-copyable",
};
// 16-bit depth (depth16unorm).
const DEPTH_ASPECT_16 = {
  channelType: f32,
  vectorType: vec4f,
  sampleTypes: ["depth", "unfilterable-float"],
  texelSize: 2,
};
// 32-bit float depth (depth32float variants).
const DEPTH_ASPECT_32 = {
  channelType: f32,
  vectorType: vec4f,
  sampleTypes: ["depth", "unfilterable-float"],
  texelSize: 4,
};
// Stencil data is always one unsigned byte per texel.
const STENCIL_ASPECT = {
  channelType: u32,
  vectorType: vec4u,
  sampleTypes: ["uint"],
  texelSize: 1,
};
30
// Memoizes format metadata so each GPUTextureFormat string is parsed once.
const formatInfoCache = /* @__PURE__ */ new Map();
/**
 * Returns the (cached) metadata record for a texture format.
 */
function getTextureFormatInfo(format) {
  const cached = formatInfoCache.get(format);
  if (cached !== void 0) {
    return cached;
  }
  const fresh = createFormatInfo(format);
  formatInfoCache.set(format, fresh);
  return fresh;
}
39
/**
 * Builds the metadata record for a texture format: channel/vector schemas,
 * texel byte size, allowed sample types, render-attachment capability and,
 * when applicable, depth/stencil aspect descriptors.
 */
function createFormatInfo(format) {
  const channelType = parseChannelType(format);
  let vectorType = vec4f;
  if (channelType === u32) {
    vectorType = vec4u;
  } else if (channelType === i32) {
    vectorType = vec4i;
  }
  const info = {
    channelType,
    vectorType,
    texelSize: parseTexelSize(format),
    sampleTypes: parseSampleTypes(format),
    canRenderAttachment: canRenderAttachment(format),
  };
  const depthAspect = getDepthAspect(format);
  if (depthAspect) {
    info.depthAspect = depthAspect;
  }
  if (format.includes("stencil")) {
    info.stencilAspect = STENCIL_ASPECT;
  }
  return info;
}
53
/**
 * Maps a depth-capable format to its depth-aspect descriptor;
 * returns undefined for formats without a depth aspect.
 */
function getDepthAspect(format) {
  switch (format) {
    case "depth16unorm":
      return DEPTH_ASPECT_16;
    case "depth32float":
    case "depth32float-stencil8":
      return DEPTH_ASPECT_32;
    case "depth24plus":
    case "depth24plus-stencil8":
      // depth24plus layout is implementation-defined, hence non-copyable.
      return DEPTH_ASPECT_NON_COPYABLE;
    default:
      return void 0;
  }
}
58
/**
 * Whether a texture of this format may be used as a render attachment.
 * All block-compressed families (bc/etc2/eac/astc) and rgb9e5ufloat cannot.
 */
function canRenderAttachment(format) {
  const compressedPrefixes = ["bc", "etc2", "eac", "astc"];
  if (compressedPrefixes.some((prefix) => format.startsWith(prefix))) {
    return false;
  }
  return format !== "rgb9e5ufloat";
}
63
/**
 * Scalar schema used when sampling this format: u32 for unsigned-int
 * formats (including stencil8), i32 for signed-int, f32 otherwise.
 */
function parseChannelType(format) {
  if (format === "stencil8" || format.includes("uint")) {
    return u32;
  }
  if (format.includes("sint")) {
    return i32;
  }
  return f32;
}
69
/**
 * Bytes per texel (per block for compressed formats), or "non-copyable"
 * for depth24plus variants whose layout is implementation-defined.
 */
function parseTexelSize(format) {
  // Color formats such as rg16float encode their size in the name:
  // channel count × bits per channel / 8.
  const sized = format.match(/^(rgba|bgra|rg|r)(8|16|32)/);
  if (sized) {
    const [, channels, bits] = sized;
    if (channels && bits) {
      return (channels.length * Number(bits)) / 8;
    }
  }
  if (format === "stencil8") return 1;
  if (format === "depth16unorm") return 2;
  if (format === "depth32float") return 4;
  if (format === "depth32float-stencil8") return 5;
  if (format === "depth24plus" || format === "depth24plus-stencil8") {
    return "non-copyable";
  }
  // 64-bit compressed blocks.
  if (/^(bc[14]-|etc2-rgb8|eac-r11)/.test(format)) return 8;
  // 128-bit compressed blocks.
  if (/^(bc|astc-|etc2-rgba|eac-rg)/.test(format)) return 16;
  // Remaining packed formats (rgb10a2, rg11b10, rgb9e5, ...) are 4 bytes.
  return 4;
}
81
/**
 * GPUTextureSampleType values this format supports when bound for sampling.
 */
function parseSampleTypes(format) {
  if (format === "stencil8" || format.includes("uint")) {
    return ["uint"];
  }
  if (format.includes("sint")) {
    return ["sint"];
  }
  if (format.includes("depth")) {
    return ["depth", "unfilterable-float"];
  }
  // 16-bit (u|s)norm formats sample as unfilterable floats.
  if (/^(r|rg|rgba)16(u|s)norm$/.test(format)) {
    return ["unfilterable-float"];
  }
  return ["float", "unfilterable-float"];
}
89
// 32-bit float color formats are only filterable when the device enables
// the optional "float32-filterable" feature.
const FLOAT32_FORMATS = new Set(["r32float", "rg32float", "rgba32float"]);
/**
 * Sample types actually usable on the given device for this format,
 * accounting for the "float32-filterable" feature.
 */
function getEffectiveSampleTypes(device, format) {
  const filterableFloat32 = device.features.has("float32-filterable");
  if (FLOAT32_FORMATS.has(format) && !filterableFloat32) {
    return ["unfilterable-float"];
  }
  return getTextureFormatInfo(format).sampleTypes;
}
98
+
99
+ //#endregion
100
+ //#region src/std/numeric.ts
101
// Signature where the single argument's schema is also the return schema.
const unaryIdentitySignature = (arg) => ({
  argTypes: [arg],
  returnType: arg,
});
// Unifies all arguments to a common schema when possible, falling back to
// the arguments as given; the result type is the first unified schema.
const variadicUnifySignature = (...args) => {
  const unified = unify(args) ?? args;
  return {
    argTypes: unified,
    returnType: unified[0],
  };
};
// Like variadicUnifySignature, but unification is restricted to `restrict`
// and a failure to unify is a signature error.
const unifyRestrictedSignature = (restrict) => (...args) => {
  const unified = unify(args, restrict);
  if (!unified) {
    throw new SignatureNotSupportedError(args, restrict);
  }
  return {
    argTypes: unified,
    returnType: unified[0],
  };
};
122
// Folds a binary CPU implementation over any number of arguments.
function variadicReduce(combine) {
  return (first, ...others) =>
    others.reduce((acc, next) => combine(acc, next), first);
}
// Emits nested WGSL calls (e.g. max(max(a, b), c)) for a variadic builtin.
function variadicStitch(wrapper) {
  return (_ctx, [first, ...others]) => {
    let expr = stitch`${first}`;
    for (const next of others) {
      expr = stitch`${wrapper}(${expr}, ${next})`;
    }
    return expr;
  };
}
136
// Schema candidate sets used to restrict builtin signatures.
const anyFloatPrimitive = [f32, f16, abstractFloat];
const anyFloatVec = [vec2f, vec3f, vec4f, vec2h, vec3h, vec4h];
const anyFloat = [...anyFloatPrimitive, ...anyFloatVec];
// Concrete (non-abstract) integer scalars and vectors.
const anyConcreteIntegerPrimitive = [i32, u32];
const anyConcreteIntegerVec = [vec2i, vec3i, vec4i, vec2u, vec3u, vec4u];
const anyConcreteInteger = [
  ...anyConcreteIntegerPrimitive,
  ...anyConcreteIntegerVec,
];
160
// Each builtin below is a `dualImpl`: `normalImpl` runs on the CPU (JS),
// `codegenImpl` emits the equivalent WGSL call, and `signature` resolves
// argument/return schemas. Scalars use Math; vectors go through VectorOps.
function cpuAbs(v) {
  return typeof v === "number" ? Math.abs(v) : VectorOps.abs[v.kind](v);
}
const abs = dualImpl({
  name: "abs",
  signature: unaryIdentitySignature,
  normalImpl: cpuAbs,
  codegenImpl: (_ctx, [value]) => stitch`abs(${value})`
});
function cpuAcos(v) {
  return typeof v === "number" ? Math.acos(v) : VectorOps.acos[v.kind](v);
}
const acos = dualImpl({
  name: "acos",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAcos,
  codegenImpl: (_ctx, [value]) => stitch`acos(${value})`
});
function cpuAcosh(v) {
  return typeof v === "number" ? Math.acosh(v) : VectorOps.acosh[v.kind](v);
}
const acosh = dualImpl({
  name: "acosh",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAcosh,
  codegenImpl: (_ctx, [value]) => stitch`acosh(${value})`
});
function cpuAsin(v) {
  return typeof v === "number" ? Math.asin(v) : VectorOps.asin[v.kind](v);
}
const asin = dualImpl({
  name: "asin",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAsin,
  codegenImpl: (_ctx, [value]) => stitch`asin(${value})`
});
function cpuAsinh(v) {
  return typeof v === "number" ? Math.asinh(v) : VectorOps.asinh[v.kind](v);
}
const asinh = dualImpl({
  name: "asinh",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAsinh,
  codegenImpl: (_ctx, [value]) => stitch`asinh(${value})`
});
function cpuAtan(v) {
  return typeof v === "number" ? Math.atan(v) : VectorOps.atan[v.kind](v);
}
const atan = dualImpl({
  name: "atan",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAtan,
  codegenImpl: (_ctx, [value]) => stitch`atan(${value})`
});
function cpuAtanh(v) {
  return typeof v === "number" ? Math.atanh(v) : VectorOps.atanh[v.kind](v);
}
const atanh = dualImpl({
  name: "atanh",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAtanh,
  codegenImpl: (_ctx, [value]) => stitch`atanh(${value})`
});
function cpuAtan2(y, x) {
  if (typeof y === "number" && typeof x === "number") {
    return Math.atan2(y, x);
  }
  return VectorOps.atan2[y.kind](y, x);
}
const atan2 = dualImpl({
  name: "atan2",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuAtan2,
  codegenImpl: (_ctx, [y, x]) => stitch`atan2(${y}, ${x})`
});
function cpuCeil(v) {
  return typeof v === "number" ? Math.ceil(v) : VectorOps.ceil[v.kind](v);
}
const ceil = dualImpl({
  name: "ceil",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuCeil,
  codegenImpl: (_ctx, [value]) => stitch`ceil(${value})`
});
250
// CPU clamp: scalars via nested Math.min/max, vectors via VectorOps.
function cpuClamp(value, low, high) {
  if (typeof value !== "number") {
    return VectorOps.clamp[value.kind](value, low, high);
  }
  return Math.min(Math.max(low, value), high);
}
254
const clamp = dualImpl({
  name: "clamp",
  signature: variadicUnifySignature,
  normalImpl: cpuClamp,
  codegenImpl: (_ctx, [value, low, high]) => stitch`clamp(${value}, ${low}, ${high})`
});
function cpuCos(v) {
  return typeof v === "number" ? Math.cos(v) : VectorOps.cos[v.kind](v);
}
const cos = dualImpl({
  name: "cos",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuCos,
  codegenImpl: (_ctx, [value]) => stitch`cos(${value})`
});
function cpuCosh(v) {
  return typeof v === "number" ? Math.cosh(v) : VectorOps.cosh[v.kind](v);
}
const cosh = dualImpl({
  name: "cosh",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuCosh,
  codegenImpl: (_ctx, [value]) => stitch`cosh(${value})`
});
// The bit-counting builtins are GPU-only for now: `normalImpl` carries the
// message reported when they are invoked on the CPU.
const countLeadingZeros = dualImpl({
  name: "countLeadingZeros",
  signature: unifyRestrictedSignature(anyConcreteInteger),
  normalImpl: "CPU implementation for countLeadingZeros not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`countLeadingZeros(${value})`
});
const countOneBits = dualImpl({
  name: "countOneBits",
  signature: unifyRestrictedSignature(anyConcreteInteger),
  normalImpl: "CPU implementation for countOneBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`countOneBits(${value})`
});
const countTrailingZeros = dualImpl({
  name: "countTrailingZeros",
  signature: unifyRestrictedSignature(anyConcreteInteger),
  normalImpl: "CPU implementation for countTrailingZeros not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`countTrailingZeros(${value})`
});
// cross is only defined for 3-component float vectors.
const cross = dualImpl({
  name: "cross",
  signature: unifyRestrictedSignature([vec3f, vec3h]),
  normalImpl: (a, b) => VectorOps.cross[a.kind](a, b),
  codegenImpl: (_ctx, [a, b]) => stitch`cross(${a}, ${b})`
});
304
// CPU radians→degrees conversion (scalars only for now).
function cpuDegrees(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for degrees on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return (value * 180) / Math.PI;
}
308
const degrees = dualImpl({
  name: "degrees",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuDegrees,
  codegenImpl: (_ctx, [value]) => stitch`degrees(${value})`
});
// determinant only accepts square float matrices and always yields f32.
const determinant = dualImpl({
  name: "determinant",
  signature: (arg) => {
    const isSquareMatrix =
      arg.type === "mat2x2f" || arg.type === "mat3x3f" || arg.type === "mat4x4f";
    if (!isSquareMatrix) {
      throw new SignatureNotSupportedError([arg], [mat2x2f, mat3x3f, mat4x4f]);
    }
    return {
      argTypes: [arg],
      returnType: f32
    };
  },
  normalImpl: "CPU implementation for determinant not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`determinant(${value})`
});
330
// CPU distance: |a - b| for scalars; for vectors, the length of the
// difference vector.
function cpuDistance(a, b) {
  if (typeof a === "number" && typeof b === "number") {
    return Math.abs(a - b);
  }
  return length(sub(a, b));
}
334
const distance = dualImpl({
  name: "distance",
  signature: (...args) => {
    const unified = unify(args, anyFloat);
    if (!unified) throw new SignatureNotSupportedError(args, anyFloat);
    return {
      argTypes: unified,
      // Distance is always scalar; half-precision inputs yield f16.
      returnType: isHalfPrecisionSchema(unified[0]) ? f16 : f32
    };
  },
  normalImpl: cpuDistance,
  codegenImpl: (_ctx, [a, b]) => stitch`distance(${a}, ${b})`
});
const dot = dualImpl({
  name: "dot",
  signature: (...args) => ({
    argTypes: args,
    // A dot product's result is the vectors' component type.
    returnType: args[0].primitive
  }),
  normalImpl: (lhs, rhs) => VectorOps.dot[lhs.kind](lhs, rhs),
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`dot(${lhs}, ${rhs})`
});
const dot4U8Packed = dualImpl({
  name: "dot4U8Packed",
  signature: {
    argTypes: [u32, u32],
    returnType: u32
  },
  normalImpl: "CPU implementation for dot4U8Packed not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e1, e2]) => stitch`dot4U8Packed(${e1}, ${e2})`
});
const dot4I8Packed = dualImpl({
  name: "dot4I8Packed",
  signature: {
    argTypes: [u32, u32],
    returnType: i32
  },
  normalImpl: "CPU implementation for dot4I8Packed not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e1, e2]) => stitch`dot4I8Packed(${e1}, ${e2})`
});
function cpuExp(v) {
  return typeof v === "number" ? Math.exp(v) : VectorOps.exp[v.kind](v);
}
const exp = dualImpl({
  name: "exp",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuExp,
  codegenImpl: (_ctx, [value]) => stitch`exp(${value})`
});
function cpuExp2(v) {
  return typeof v === "number" ? 2 ** v : VectorOps.exp2[v.kind](v);
}
const exp2 = dualImpl({
  name: "exp2",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuExp2,
  codegenImpl: (_ctx, [value]) => stitch`exp2(${value})`
});
// extractBits: the value schema is restricted to concrete integers; offset
// and count are always u32.
const extractBits = dualImpl({
  name: "extractBits",
  signature: (arg, _offset, _count) => {
    const restricted = unify([arg], anyConcreteInteger)?.[0];
    if (!restricted) {
      throw new SignatureNotSupportedError([arg], anyConcreteInteger);
    }
    return {
      argTypes: [restricted, u32, u32],
      returnType: restricted
    };
  },
  normalImpl: "CPU implementation for extractBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e, offset, count]) => stitch`extractBits(${e}, ${offset}, ${count})`
});
const faceForward = dualImpl({
  name: "faceForward",
  signature: unifyRestrictedSignature(anyFloatVec),
  normalImpl: "CPU implementation for faceForward not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e1, e2, e3]) => stitch`faceForward(${e1}, ${e2}, ${e3})`
});
const firstLeadingBit = dualImpl({
  name: "firstLeadingBit",
  signature: unaryIdentitySignature,
  normalImpl: "CPU implementation for firstLeadingBit not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`firstLeadingBit(${value})`
});
const firstTrailingBit = dualImpl({
  name: "firstTrailingBit",
  signature: unifyRestrictedSignature(anyConcreteInteger),
  normalImpl: "CPU implementation for firstTrailingBit not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`firstTrailingBit(${value})`
});
function cpuFloor(v) {
  return typeof v === "number" ? Math.floor(v) : VectorOps.floor[v.kind](v);
}
const floor = dualImpl({
  name: "floor",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuFloor,
  codegenImpl: (_ctx, [arg]) => stitch`floor(${arg})`
});
439
// CPU fused multiply-add (scalars only for now): e1 * e2 + e3.
function cpuFma(e1, e2, e3) {
  if (typeof e1 !== "number") {
    throw new MissingCpuImplError("CPU implementation for fma on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return e1 * e2 + e3;
}
443
const fma = dualImpl({
  name: "fma",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuFma,
  codegenImpl: (_ctx, [e1, e2, e3]) => stitch`fma(${e1}, ${e2}, ${e3})`
});
449
// CPU fract: fractional part, always in [0, 1) (value - floor(value)).
function cpuFract(v) {
  if (typeof v !== "number") {
    return VectorOps.fract[v.kind](v);
  }
  return v - Math.floor(v);
}
453
const fract = dualImpl({
  name: "fract",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuFract,
  codegenImpl: (_ctx, [a]) => stitch`fract(${a})`
});
// Return-struct schemas for `frexp`, keyed by input schema type.
const FrexpResults = {
  f32: abstruct({ fract: f32, exp: i32 }),
  f16: abstruct({ fract: f16, exp: i32 }),
  abstractFloat: abstruct({ fract: abstractFloat, exp: abstractInt }),
  vec2f: abstruct({ fract: vec2f, exp: vec2i }),
  vec3f: abstruct({ fract: vec3f, exp: vec3i }),
  vec4f: abstruct({ fract: vec4f, exp: vec4i }),
  vec2h: abstruct({ fract: vec2h, exp: vec2i }),
  vec3h: abstruct({ fract: vec3h, exp: vec3i }),
  vec4h: abstruct({ fract: vec4h, exp: vec4i })
};
const frexp = dualImpl({
  name: "frexp",
  normalImpl: "CPU implementation for frexp not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  signature: (value) => {
    const returnType = FrexpResults[value.type];
    if (!returnType) throw new SignatureNotSupportedError([value], anyFloat);
    return {
      argTypes: [value],
      returnType
    };
  },
  codegenImpl: (_ctx, [value]) => stitch`frexp(${value})`
});
// insertBits: e and newbits unify to a concrete integer; offset and count
// are always u32.
const insertBits = dualImpl({
  name: "insertBits",
  signature: (e, newbits, _offset, _count) => {
    const unified = unify([e, newbits], anyConcreteInteger);
    if (!unified) {
      throw new SignatureNotSupportedError([e, newbits], anyConcreteInteger);
    }
    return {
      argTypes: [...unified, u32, u32],
      returnType: unified[0]
    };
  },
  normalImpl: "CPU implementation for insertBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e, newbits, offset, count]) => stitch`insertBits(${e}, ${newbits}, ${offset}, ${count})`
});
function cpuInverseSqrt(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for inverseSqrt on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return 1 / Math.sqrt(value);
}
const inverseSqrt = dualImpl({
  name: "inverseSqrt",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuInverseSqrt,
  codegenImpl: (_ctx, [value]) => stitch`inverseSqrt(${value})`
});
// ldexp pairs each float schema with the matching integer exponent schema.
const ldexp = dualImpl({
  name: "ldexp",
  signature: (e1, _e2) => {
    const exponentSchemas = {
      abstractFloat: abstractInt,
      f32: i32,
      f16: i32,
      vec2f: vec2i,
      vec2h: vec2i,
      vec3f: vec3i,
      vec3h: vec3i,
      vec4f: vec4i,
      vec4h: vec4i
    };
    const exponent = exponentSchemas[e1.type];
    if (!exponent) {
      throw new Error(`Unsupported data type for ldexp: ${e1.type}. Supported types are abstractFloat, f32, f16, vec2f, vec2h, vec3f, vec3h, vec4f, vec4h.`);
    }
    return {
      argTypes: [e1, exponent],
      returnType: e1
    };
  },
  normalImpl: "CPU implementation for ldexp not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e1, e2]) => stitch`ldexp(${e1}, ${e2})`
});
function cpuLength(v) {
  return typeof v === "number" ? Math.abs(v) : VectorOps.length[v.kind](v);
}
const length = dualImpl({
  name: "length",
  signature: (arg) => {
    const unified = unify([arg], anyFloat);
    if (!unified) throw new SignatureNotSupportedError([arg], anyFloat);
    return {
      argTypes: unified,
      // Length is always scalar; half-precision inputs yield f16.
      returnType: isHalfPrecisionSchema(unified[0]) ? f16 : f32
    };
  },
  normalImpl: cpuLength,
  codegenImpl: (_ctx, [arg]) => stitch`length(${arg})`
});
function cpuLog(v) {
  return typeof v === "number" ? Math.log(v) : VectorOps.log[v.kind](v);
}
const log = dualImpl({
  name: "log",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuLog,
  codegenImpl: (_ctx, [value]) => stitch`log(${value})`
});
function cpuLog2(v) {
  return typeof v === "number" ? Math.log2(v) : VectorOps.log2[v.kind](v);
}
const log2 = dualImpl({
  name: "log2",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuLog2,
  codegenImpl: (_ctx, [value]) => stitch`log2(${value})`
});
function cpuMax(a, b) {
  return typeof a === "number" ? Math.max(a, b) : VectorOps.max[a.kind](a, b);
}
// max/min accept any number of arguments; CPU folds pairwise, WGSL nests.
const max = dualImpl({
  name: "max",
  signature: variadicUnifySignature,
  normalImpl: variadicReduce(cpuMax),
  codegenImpl: variadicStitch("max")
});
function cpuMin(a, b) {
  return typeof a === "number" ? Math.min(a, b) : VectorOps.min[a.kind](a, b);
}
const min = dualImpl({
  name: "min",
  signature: variadicUnifySignature,
  normalImpl: variadicReduce(cpuMin),
  codegenImpl: variadicStitch("min")
});
628
// CPU linear interpolation between e1 and e2 by blend factor e3.
function cpuMix(e1, e2, e3) {
  if (typeof e1 === "number") {
    if (typeof e3 !== "number" || typeof e2 !== "number") {
      throw new Error("When e1 and e2 are numbers, the blend factor must be a number.");
    }
    return e1 * (1 - e3) + e2 * e3;
  }
  if (typeof e1 === "number" || typeof e2 === "number") {
    throw new Error("e1 and e2 need to both be vectors of the same kind.");
  }
  return VectorOps.mix[e1.kind](e1, e2, e3);
}
636
const mix = dualImpl({
  name: "mix",
  signature: (e1, e2, e3) => {
    // vec × vec × scalar overload: the blend factor unifies to the vectors'
    // component type.
    if (e1.type.startsWith("vec") && !e3.type.startsWith("vec")) {
      const factor = unify([e3], [e1.primitive]);
      if (!factor) throw new SignatureNotSupportedError([e3], [e1.primitive]);
      return {
        argTypes: [e1, e2, factor[0]],
        returnType: e1
      };
    }
    const unified = unify([e1, e2, e3], anyFloat);
    if (!unified) throw new SignatureNotSupportedError([e1, e2, e3], anyFloat);
    return {
      argTypes: unified,
      returnType: unified[0]
    };
  },
  normalImpl: cpuMix,
  codegenImpl: (_ctx, [e1, e2, e3]) => stitch`mix(${e1}, ${e2}, ${e3})`
});
// Return-struct schemas for `modf`, keyed by input schema type.
const ModfResult = {
  f32: abstruct({ fract: f32, whole: f32 }),
  f16: abstruct({ fract: f16, whole: f16 }),
  abstractFloat: abstruct({ fract: abstractFloat, whole: abstractFloat }),
  vec2f: abstruct({ fract: vec2f, whole: vec2f }),
  vec3f: abstruct({ fract: vec3f, whole: vec3f }),
  vec4f: abstruct({ fract: vec4f, whole: vec4f }),
  vec2h: abstruct({ fract: vec2h, whole: vec2h }),
  vec3h: abstruct({ fract: vec3h, whole: vec3h }),
  vec4h: abstruct({ fract: vec4h, whole: vec4h })
};
const modf = dualImpl({
  name: "modf",
  signature: (e) => {
    const returnType = ModfResult[e.type];
    if (!returnType) {
      throw new Error(`Unsupported data type for modf: ${e.type}. Supported types are f32, f16, abstractFloat, vec2f, vec3f, vec4f, vec2h, vec3h, vec4h.`);
    }
    return {
      argTypes: [e],
      returnType
    };
  },
  normalImpl: "CPU implementation for modf not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`modf(${value})`
});
const normalize = dualImpl({
  name: "normalize",
  signature: unifyRestrictedSignature(anyFloatVec),
  normalImpl: (v) => VectorOps.normalize[v.kind](v),
  codegenImpl: (_ctx, [value]) => stitch`normalize(${value})`
});
726
// CPU pow: exponentiation for scalars, componentwise for vector pairs.
function powCpu(base, exponent) {
  if (typeof base === "number" && typeof exponent === "number") {
    return base ** exponent;
  }
  if (isVecInstance(base) && isVecInstance(exponent)) {
    return VectorOps.pow[base.kind](base, exponent);
  }
  throw new Error(`Invalid arguments to pow(): '${base}' '${exponent}'`);
}
731
const pow = dualImpl({
  name: "pow",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: powCpu,
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`pow(${lhs}, ${rhs})`
});
// quantizeToF16 only applies to f32 scalars and f32 vectors.
const quantizeToF16 = dualImpl({
  name: "quantizeToF16",
  signature: (arg) => {
    const candidates = [vec2f, vec3f, vec4f, f32];
    const unified = unify([arg], candidates)?.[0];
    if (!unified) throw new SignatureNotSupportedError([arg], candidates);
    return {
      argTypes: [unified],
      returnType: unified
    };
  },
  normalImpl: "CPU implementation for quantizeToF16 not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`quantizeToF16(${value})`
});
756
// CPU degrees→radians conversion (scalars only for now).
function cpuRadians(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for radians on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return (value * Math.PI) / 180;
}
760
const radians = dualImpl({
  name: "radians",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuRadians,
  codegenImpl: (_ctx, [value]) => stitch`radians(${value})`
});
const reflect = dualImpl({
  name: "reflect",
  signature: (...args) => {
    const unified = unify(args, anyFloatVec);
    if (!unified) throw new SignatureNotSupportedError(args, anyFloatVec);
    return {
      argTypes: unified,
      returnType: unified[0]
    };
  },
  // r = e1 - 2 * dot(e2, e1) * e2
  normalImpl: (e1, e2) => sub(e1, mul(2 * dot(e2, e1), e2)),
  codegenImpl: (_ctx, [e1, e2]) => stitch`reflect(${e1}, ${e2})`
});
const refract = dualImpl({
  name: "refract",
  normalImpl: "CPU implementation for refract not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [e1, e2, e3]) => stitch`refract(${e1}, ${e2}, ${e3})`,
  signature: (e1, e2, _e3) => ({
    // The index of refraction is a scalar matching the vectors' precision.
    argTypes: [e1, e2, isHalfPrecisionSchema(e1) ? f16 : f32],
    returnType: e1
  })
});
const reverseBits = dualImpl({
  name: "reverseBits",
  signature: unifyRestrictedSignature(anyConcreteInteger),
  normalImpl: "CPU implementation for reverseBits not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [value]) => stitch`reverseBits(${value})`
});
798
// CPU `round`: WGSL rounds exact halves to the nearest even integer,
// unlike Math.round which always rounds halves up.
function cpuRound(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for round on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  const lower = Math.floor(value);
  if (value === lower + 0.5) {
    // Exactly halfway: pick the even neighbour.
    return lower % 2 === 0 ? lower : lower + 1;
  }
  return Math.round(value);
}
809
/** `round(x)` — rounds to the nearest integer, halves to even (WGSL semantics). */
const round = dualImpl({
  name: "round",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuRound,
  codegenImpl: (_ctx, [v]) => stitch`round(${v})`
});
/**
 * CPU fallback for `saturate`: clamps a scalar into [0, 1].
 * Throws for vector inputs, which are not implemented on the CPU yet.
 */
function cpuSaturate(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for saturate on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return Math.min(1, Math.max(0, value));
}
/** `saturate(x)` — clamps each component to the [0, 1] range. */
const saturate = dualImpl({
  name: "saturate",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuSaturate,
  codegenImpl: (_ctx, [v]) => stitch`saturate(${v})`
});
/** CPU fallback for `sign`: Math.sign for scalars, VectorOps dispatch for vectors. */
function cpuSign(e) {
  return typeof e === "number" ? Math.sign(e) : VectorOps.sign[e.kind](e);
}
/** `sign(x)` — -1, 0 or 1 matching the sign of each component. */
const sign = dualImpl({
  name: "sign",
  signature: (arg) => {
    // Unlike most float-only builtins, `sign` also accepts signed integers.
    const candidates = [
      ...anyFloat,
      i32,
      vec2i,
      vec3i,
      vec4i
    ];
    const unified = unify([arg], candidates)?.[0];
    if (!unified) {
      throw new SignatureNotSupportedError([arg], candidates);
    }
    return { argTypes: [unified], returnType: unified };
  },
  normalImpl: cpuSign,
  codegenImpl: (_ctx, [e]) => stitch`sign(${e})`
});
/** CPU fallback for `sin`: Math.sin for scalars, VectorOps dispatch for vectors. */
function cpuSin(value) {
  return typeof value === "number" ? Math.sin(value) : VectorOps.sin[value.kind](value);
}
/** `sin(x)` — sine, component-wise on vectors. */
const sin = dualImpl({
  name: "sin",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuSin,
  codegenImpl: (_ctx, [v]) => stitch`sin(${v})`
});
/**
 * CPU fallback for `sinh` (scalars only).
 * Throws for vector inputs, which are not implemented on the CPU yet.
 */
function cpuSinh(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for sinh on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return Math.sinh(value);
}
/** `sinh(x)` — hyperbolic sine, component-wise on vectors. */
const sinh = dualImpl({
  name: "sinh",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuSinh,
  codegenImpl: (_ctx, [v]) => stitch`sinh(${v})`
});
/** CPU fallback for `smoothstep`: scalar helper for numbers, VectorOps dispatch for vectors. */
function cpuSmoothstep(edge0, edge1, x) {
  return typeof x === "number"
    ? smoothstepScalar(edge0, edge1, x)
    : VectorOps.smoothstep[x.kind](edge0, edge1, x);
}
/** `smoothstep(edge0, edge1, x)` — Hermite interpolation between 0 and 1. */
const smoothstep = dualImpl({
  name: "smoothstep",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuSmoothstep,
  codegenImpl: (_ctx, [edge0, edge1, x]) => stitch`smoothstep(${edge0}, ${edge1}, ${x})`
});
/** CPU fallback for `sqrt`: Math.sqrt for scalars, VectorOps dispatch for vectors. */
function cpuSqrt(value) {
  return typeof value === "number" ? Math.sqrt(value) : VectorOps.sqrt[value.kind](value);
}
/** `sqrt(x)` — square root, component-wise on vectors. */
const sqrt = dualImpl({
  name: "sqrt",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuSqrt,
  codegenImpl: (_ctx, [v]) => stitch`sqrt(${v})`
});
/**
 * CPU fallback for `step` (scalars only): 1 when edge <= x, else 0.
 * Throws for vector inputs, which are not implemented on the CPU yet.
 */
function cpuStep(edge, x) {
  if (typeof edge !== "number") {
    throw new MissingCpuImplError("CPU implementation for step on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return edge <= x ? 1 : 0;
}
/** `step(edge, x)` — 1.0 where edge <= x, 0.0 elsewhere. */
const step = dualImpl({
  name: "step",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuStep,
  codegenImpl: (_ctx, [edge, x]) => stitch`step(${edge}, ${x})`
});
/**
 * CPU fallback for `tan` (scalars only).
 * Throws for vector inputs, which are not implemented on the CPU yet.
 */
function cpuTan(value) {
  if (typeof value !== "number") {
    throw new MissingCpuImplError("CPU implementation for tan on vectors not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues");
  }
  return Math.tan(value);
}
/** `tan(x)` — tangent, component-wise on vectors. */
const tan = dualImpl({
  name: "tan",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuTan,
  codegenImpl: (_ctx, [v]) => stitch`tan(${v})`
});
/** CPU fallback for `tanh`: Math.tanh for scalars, VectorOps dispatch for vectors. */
function cpuTanh(value) {
  return typeof value === "number" ? Math.tanh(value) : VectorOps.tanh[value.kind](value);
}
/** `tanh(x)` — hyperbolic tangent, component-wise on vectors. */
const tanh = dualImpl({
  name: "tanh",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: cpuTanh,
  codegenImpl: (_ctx, [v]) => stitch`tanh(${v})`
});
/** `transpose(m)` — matrix transpose. GPU-only for now. */
const transpose = dualImpl({
  name: "transpose",
  signature: unaryIdentitySignature,
  normalImpl: "CPU implementation for transpose not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [m]) => stitch`transpose(${m})`
});
/** `trunc(x)` — truncates toward zero. GPU-only for now. */
const trunc = dualImpl({
  name: "trunc",
  signature: unifyRestrictedSignature(anyFloat),
  normalImpl: "CPU implementation for trunc not implemented yet. Please submit an issue at https://github.com/software-mansion/TypeGPU/issues",
  codegenImpl: (_ctx, [v]) => stitch`trunc(${v})`
});
+
932
+ //#endregion
933
+ //#region src/std/array.ts
934
/**
 * Element count of the array behind a pointer schema, or 0 when the pointer
 * does not target a fixed-size WGSL array (e.g. runtime-sized arrays).
 */
function sizeOfPointedToArray(dataType) {
  if (!isPtr(dataType) || !isWgslArray(dataType.inner)) {
    return 0;
  }
  return dataType.inner.elementCount;
}
/** `arrayLength(&arr)` — number of elements; constant-folded for statically sized arrays. */
const arrayLength = dualImpl({
  name: "arrayLength",
  signature: (arg) => {
    // Accept both a pointer and a bare array (which gets wrapped in a ptr).
    const ptrArg = isPtr(arg) ? arg : ptrFn(arg);
    const staticLength = sizeOfPointedToArray(ptrArg);
    return {
      argTypes: [ptrArg],
      // Statically-sized arrays resolve to a compile-time abstract integer.
      returnType: staticLength > 0 ? abstractInt : u32
    };
  },
  normalImpl: (arr) => (isRef(arr) ? arr.$.length : arr.length),
  codegenImpl(_ctx, [arr]) {
    const staticLength = sizeOfPointedToArray(arr.dataType);
    if (staticLength > 0) {
      return `${staticLength}`;
    }
    return stitch`arrayLength(${arr})`;
  }
});
+
951
+ //#endregion
952
+ //#region src/std/bitcast.ts
953
/** Reinterprets u32 bits as f32 (scalar or vec2u/vec3u/vec4u → vec*f). */
const bitcastU32toF32 = dualImpl({
  name: "bitcastU32toF32",
  normalImpl: (value) =>
    typeof value === "number"
      ? bitcastU32toF32Impl(value)
      : VectorOps.bitcastU32toF32[value.kind](value),
  codegenImpl: (_ctx, [n]) => stitch`bitcast<f32>(${n})`,
  signature: (...args) => {
    const uargs = unify(args, [u32]) ?? args;
    const [first] = uargs;
    let returnType = f32;
    if (isVec(first)) {
      // Map each unsigned vector width to its float counterpart.
      returnType = first.type === "vec2u" ? vec2f : first.type === "vec3u" ? vec3f : vec4f;
    }
    return { argTypes: uargs, returnType };
  }
});
/** Reinterprets u32 bits as i32 (scalar or vec2u/vec3u/vec4u → vec*i). */
const bitcastU32toI32 = dualImpl({
  name: "bitcastU32toI32",
  normalImpl: (value) =>
    typeof value === "number"
      ? bitcastU32toI32Impl(value)
      : VectorOps.bitcastU32toI32[value.kind](value),
  codegenImpl: (_ctx, [n]) => stitch`bitcast<i32>(${n})`,
  signature: (...args) => {
    const uargs = unify(args, [u32]) ?? args;
    const [first] = uargs;
    let returnType = i32;
    if (isVec(first)) {
      // Map each unsigned vector width to its signed counterpart.
      returnType = first.type === "vec2u" ? vec2i : first.type === "vec3u" ? vec3i : vec4i;
    }
    return { argTypes: uargs, returnType };
  }
});
+
984
+ //#endregion
985
+ //#region src/std/packing.ts
986
/**
 * Unpacks one u32 into two f16 values as a vec2f.
 * @privateRemarks
 * https://gpuweb.github.io/gpuweb/wgsl/#unpack2x16float-builtin
 */
const unpack2x16float = dualImpl({
  name: "unpack2x16float",
  normalImpl: (packed) => {
    // Round-trip through a 4-byte scratch buffer to reinterpret the bits.
    const scratch = /* @__PURE__ */ new ArrayBuffer(4);
    new TB.BufferWriter(scratch).writeUint32(packed);
    const reader = new TB.BufferReader(scratch);
    const low = reader.readFloat16();
    const high = reader.readFloat16();
    return vec2f(low, high);
  },
  signature: {
    argTypes: [u32],
    returnType: vec2f
  },
  codegenImpl: (_ctx, [packed]) => stitch`unpack2x16float(${packed})`
});
/**
 * Packs a vec2f into one u32 as two f16 values.
 * @privateRemarks
 * https://gpuweb.github.io/gpuweb/wgsl/#pack2x16float-builtin
 */
const pack2x16float = dualImpl({
  name: "pack2x16float",
  normalImpl: (v) => {
    const scratch = /* @__PURE__ */ new ArrayBuffer(4);
    const writer = new TB.BufferWriter(scratch);
    writer.writeFloat16(v.x);
    writer.writeFloat16(v.y);
    return u32(new TB.BufferReader(scratch).readUint32());
  },
  signature: {
    argTypes: [vec2f],
    returnType: u32
  },
  codegenImpl: (_ctx, [v]) => stitch`pack2x16float(${v})`
});
/**
 * Unpacks one u32 into four normalized [0, 1] components as a vec4f.
 * @privateRemarks
 * https://gpuweb.github.io/gpuweb/wgsl/#unpack4x8unorm-builtin
 */
const unpack4x8unorm = dualImpl({
  name: "unpack4x8unorm",
  normalImpl: (packed) => {
    const scratch = /* @__PURE__ */ new ArrayBuffer(4);
    new TB.BufferWriter(scratch).writeUint32(packed);
    const reader = new TB.BufferReader(scratch);
    // Bytes are read in order, each rescaled from [0, 255] to [0, 1].
    const a = reader.readUint8();
    const b = reader.readUint8();
    const c = reader.readUint8();
    const d = reader.readUint8();
    return vec4f(a / 255, b / 255, c / 255, d / 255);
  },
  signature: {
    argTypes: [u32],
    returnType: vec4f
  },
  codegenImpl: (_ctx, [packed]) => stitch`unpack4x8unorm(${packed})`
});
/**
 * Packs a vec4f of normalized [0, 1] components into one u32.
 * @privateRemarks
 * https://gpuweb.github.io/gpuweb/wgsl/#pack4x8unorm-builtin
 */
const pack4x8unorm = dualImpl({
  name: "pack4x8unorm",
  normalImpl: (v) => {
    const scratch = /* @__PURE__ */ new ArrayBuffer(4);
    const writer = new TB.BufferWriter(scratch);
    writer.writeUint8(v.x * 255);
    writer.writeUint8(v.y * 255);
    writer.writeUint8(v.z * 255);
    writer.writeUint8(v.w * 255);
    return u32(new TB.BufferReader(scratch).readUint32());
  },
  signature: {
    argTypes: [vec4f],
    returnType: u32
  },
  codegenImpl: (_ctx, [v]) => stitch`pack4x8unorm(${v})`
});
+
1063
+ //#endregion
1064
+ //#region src/wgslExtensions.ts
1065
// Maps each supported WGSL language extension to the WebGPU feature name
// that must be enabled on the device for it to be usable.
const wgslExtensionToFeatureName = {
  f16: "shader-f16",
  clip_distances: "clip-distances",
  dual_source_blending: "dual-source-blending",
  subgroups: "subgroups",
  primitive_index: "primitive-index"
};
// The list of known WGSL extensions, derived from the map above
// (key insertion order is preserved).
const wgslExtensions = Object.keys(wgslExtensionToFeatureName);
+
1080
+ //#endregion
1081
+ //#region src/std/boolean.ts
1082
/** Boolean vector schema with the same component count as `dataType` (vec2/3/4). */
function correspondingBooleanVectorSchema(dataType) {
  const typeName = dataType.type;
  if (typeName.includes("2")) {
    return vec2b;
  }
  return typeName.includes("3") ? vec3b : vec4b;
}
/**
 * Checks whether `lhs == rhs` on all components.
 * Equivalent to `all(eq(lhs, rhs))`.
 * @example
 * allEq(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns false
 * allEq(vec3u(0, 1, 2), vec3u(0, 1, 2)) // returns true
 */
const allEq = dualImpl({
  name: "allEq",
  signature: (...argTypes) => ({
    argTypes,
    returnType: bool
  }),
  normalImpl: (lhs, rhs) => cpuAll(cpuEq(lhs, rhs)),
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`all(${lhs} == ${rhs})`
});
// Component-wise equality, dispatched by vector kind.
const cpuEq = (lhs, rhs) => VectorOps.eq[lhs.kind](lhs, rhs);
/**
 * Checks **component-wise** whether `lhs == rhs`.
 * This function does **not** return `bool`, for that use-case, wrap the result in `all`, or use `allEq`.
 * @example
 * eq(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns vec2b(true, false)
 * eq(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, true, false)
 * all(eq(vec4i(4, 3, 2, 1), vec4i(4, 3, 2, 1))) // returns true
 * allEq(vec4i(4, 3, 2, 1), vec4i(4, 3, 2, 1)) // returns true
 */
const eq = dualImpl({
  name: "eq",
  signature: (...argTypes) => ({
    argTypes,
    returnType: correspondingBooleanVectorSchema(argTypes[0])
  }),
  normalImpl: cpuEq,
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} == ${rhs})`
});
/**
 * Checks **component-wise** whether `lhs != rhs`.
 * This function does **not** return `bool`, for that use-case, wrap the result in `any`.
 * @example
 * ne(vec2f(0.0, 1.0), vec2f(0.0, 2.0)) // returns vec2b(false, true)
 * ne(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, false, true)
 * any(ne(vec4i(4, 3, 2, 1), vec4i(4, 2, 2, 1))) // returns true
 */
const ne = dualImpl({
  name: "ne",
  signature: (...argTypes) => ({
    argTypes,
    returnType: correspondingBooleanVectorSchema(argTypes[0])
  }),
  // a != b  ==  !(a == b), component-wise.
  normalImpl: (lhs, rhs) => cpuNot(cpuEq(lhs, rhs)),
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} != ${rhs})`
});
// Component-wise less-than, dispatched by vector kind.
const cpuLt = (lhs, rhs) => VectorOps.lt[lhs.kind](lhs, rhs);
/**
 * Checks **component-wise** whether `lhs < rhs`.
 * This function does **not** return `bool`, for that use-case, wrap the result in `all`.
 * @example
 * lt(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(false, true)
 * lt(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, false, false)
 * all(lt(vec4i(1, 2, 3, 4), vec4i(2, 3, 4, 5))) // returns true
 */
const lt = dualImpl({
  name: "lt",
  signature: (...argTypes) => ({
    argTypes,
    returnType: correspondingBooleanVectorSchema(argTypes[0])
  }),
  normalImpl: cpuLt,
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} < ${rhs})`
});
/**
 * Checks **component-wise** whether `lhs <= rhs`.
 * This function does **not** return `bool`, for that use-case, wrap the result in `all`.
 * @example
 * le(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(true, true)
 * le(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(true, true, false)
 * all(le(vec4i(1, 2, 3, 4), vec4i(2, 3, 3, 5))) // returns true
 */
const le = dualImpl({
  name: "le",
  signature: (...argTypes) => ({
    argTypes,
    returnType: correspondingBooleanVectorSchema(argTypes[0])
  }),
  // a <= b  ==  (a < b) || (a == b), component-wise.
  normalImpl: (lhs, rhs) => cpuOr(cpuLt(lhs, rhs), cpuEq(lhs, rhs)),
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} <= ${rhs})`
});
/**
 * Checks **component-wise** whether `lhs > rhs`.
 * This function does **not** return `bool`, for that use-case, wrap the result in `all`.
 * @example
 * gt(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(false, false)
 * gt(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, false, true)
 * all(gt(vec4i(2, 3, 4, 5), vec4i(1, 2, 3, 4))) // returns true
 */
const gt = dualImpl({
  name: "gt",
  signature: (...argTypes) => ({
    argTypes,
    returnType: correspondingBooleanVectorSchema(argTypes[0])
  }),
  // a > b  ==  !(a < b) && !(a == b), component-wise.
  normalImpl: (lhs, rhs) => cpuAnd(cpuNot(cpuLt(lhs, rhs)), cpuNot(cpuEq(lhs, rhs))),
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} > ${rhs})`
});
/**
 * Checks **component-wise** whether `lhs >= rhs`.
 * This function does **not** return `bool`, for that use-case, wrap the result in `all`.
 * @example
 * ge(vec2f(0.0, 0.0), vec2f(0.0, 1.0)) // returns vec2b(true, false)
 * ge(vec3u(0, 1, 2), vec3u(2, 1, 0)) // returns vec3b(false, true, true)
 * all(ge(vec4i(2, 2, 4, 5), vec4i(1, 2, 3, 4))) // returns true
 */
const ge = dualImpl({
  name: "ge",
  signature: (...argTypes) => ({
    argTypes,
    returnType: correspondingBooleanVectorSchema(argTypes[0])
  }),
  // a >= b  ==  !(a < b), component-wise.
  normalImpl: (lhs, rhs) => cpuNot(cpuLt(lhs, rhs)),
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} >= ${rhs})`
});
// Component-wise boolean negation. NOTE(review): dispatches through
// VectorOps.neg — presumably negation doubles as `not` for boolean vectors.
const cpuNot = (value) => VectorOps.neg[value.kind](value);
/**
 * Returns **component-wise** `!value`.
 * @example
 * not(vec2b(false, true)) // returns vec2b(true, false)
 * not(vec3b(true, true, false)) // returns vec3b(false, false, true)
 */
const not = dualImpl({
  name: "not",
  signature: (...argTypes) => ({
    argTypes,
    returnType: argTypes[0]
  }),
  normalImpl: cpuNot,
  codegenImpl: (_ctx, [v]) => stitch`!(${v})`
});
// Component-wise logical or, dispatched by vector kind.
const cpuOr = (lhs, rhs) => VectorOps.or[lhs.kind](lhs, rhs);
/**
 * Returns **component-wise** logical `or` result.
 * @example
 * or(vec2b(false, true), vec2b(false, false)) // returns vec2b(false, true)
 * or(vec3b(true, true, false), vec3b(false, true, false)) // returns vec3b(true, true, false)
 */
const or = dualImpl({
  name: "or",
  signature: (...argTypes) => ({
    argTypes,
    returnType: argTypes[0]
  }),
  normalImpl: cpuOr,
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} | ${rhs})`
});
// Component-wise logical and, via De Morgan: a && b == !(!a || !b).
const cpuAnd = (lhs, rhs) => cpuNot(cpuOr(cpuNot(lhs), cpuNot(rhs)));
/**
 * Returns **component-wise** logical `and` result.
 * @example
 * and(vec2b(false, true), vec2b(true, true)) // returns vec2b(false, true)
 * and(vec3b(true, true, false), vec3b(false, true, false)) // returns vec3b(false, true, false)
 */
const and = dualImpl({
  name: "and",
  signature: (...argTypes) => ({
    argTypes,
    returnType: argTypes[0]
  }),
  normalImpl: cpuAnd,
  codegenImpl: (_ctx, [lhs, rhs]) => stitch`(${lhs} & ${rhs})`
});
// True when every component of a boolean vector is true.
const cpuAll = (value) => VectorOps.all[value.kind](value);
/**
 * Returns `true` if each component of `value` is true.
 * @example
 * all(vec2b(false, true)) // returns false
 * all(vec3b(true, true, true)) // returns true
 */
const all = dualImpl({
  name: "all",
  signature: (...argTypes) => ({
    argTypes,
    returnType: bool
  }),
  normalImpl: cpuAll,
  codegenImpl: (_ctx, [v]) => stitch`all(${v})`
});
/**
 * Returns `true` if any component of `value` is true.
 * @example
 * any(vec2b(false, true)) // returns true
 * any(vec3b(false, false, false)) // returns false
 */
const any = dualImpl({
  name: "any",
  signature: (...argTypes) => ({
    argTypes,
    returnType: bool
  }),
  // any(v)  ==  !all(!v)
  normalImpl: (v) => !cpuAll(cpuNot(v)),
  codegenImpl: (_ctx, [v]) => stitch`any(${v})`
});
/**
 * Checks whether the given elements differ by at most the `precision` value.
 * Checks all elements of `lhs` and `rhs` if arguments are vectors.
 * Returns `false` when the arguments are of mismatched categories
 * (one scalar, one vector).
 * @example
 * isCloseTo(0, 0.1) // returns false
 * isCloseTo(vec3f(0, 0, 0), vec3f(0.002, -0.009, 0)) // returns true
 *
 * @param {number} precision argument that specifies the maximum allowed difference, 0.01 by default.
 */
const isCloseTo = dualImpl({
  name: "isCloseTo",
  signature: (...args) => ({
    argTypes: args,
    returnType: bool
  }),
  normalImpl: (lhs, rhs, precision = .01) => {
    // `<=` (not `<`) so the CPU scalar path matches the generated WGSL below
    // and the documented "differ by at most `precision`" contract.
    if (typeof lhs === "number" && typeof rhs === "number") return Math.abs(lhs - rhs) <= precision;
    if (isVecInstance(lhs) && isVecInstance(rhs)) return VectorOps.isCloseToZero[lhs.kind](sub(lhs, rhs), precision);
    return false;
  },
  codegenImpl: (_ctx, [lhs, rhs, precision = snip(.01, f32, "constant")]) => {
    if (isSnippetNumeric(lhs) && isSnippetNumeric(rhs)) return stitch`(abs(f32(${lhs}) - f32(${rhs})) <= ${precision})`;
    // `(lhs - lhs) + precision` broadcasts the scalar precision to the vector type.
    if (!isSnippetNumeric(lhs) && !isSnippetNumeric(rhs)) return stitch`all(abs(${lhs} - ${rhs}) <= (${lhs} - ${lhs}) + ${precision})`;
    return "false";
  }
});
/**
 * CPU fallback for `select`: returns `t` when `cond` is true, `f` otherwise.
 * Boolean-vector conditions are dispatched component-wise through VectorOps.
 */
function cpuSelect(f, t, cond) {
  if (typeof cond !== "boolean") {
    return VectorOps.select[f.kind](f, t, cond);
  }
  return cond ? t : f;
}
/**
 * Returns `t` if `cond` is `true`, and `f` otherwise.
 * Component-wise if `cond` is a vector.
 * @example
 * select(1, 2, false) // returns 1
 * select(1, 2, true) // returns 2
 * select(vec2i(1, 2), vec2i(3, 4), true) // returns vec2i(3, 4)
 * select(vec2i(1, 2), vec2i(3, 4), vec2b(false, true)) // returns vec2i(1, 4)
 */
const select = dualImpl({
  name: "select",
  signature: (f, t, cond) => {
    // The two branch values must unify to a common type.
    const unified = unify([f, t]) ?? [f, t];
    const [uf, ut] = unified;
    return {
      argTypes: [uf, ut, cond],
      returnType: uf
    };
  },
  normalImpl: cpuSelect,
  codegenImpl: (_ctx, [f, t, cond]) => stitch`select(${f}, ${t}, ${cond})`
});
+
1343
+ //#endregion
1344
+ //#region src/std/discard.ts
1345
/** WGSL `discard` statement — fragment-shader only; has no CPU equivalent. */
const discard = dualImpl({
  name: "discard",
  normalImpl: "`discard` relies on GPU resources and cannot be executed outside of a draw call",
  signature: { argTypes: [], returnType: Void },
  codegenImpl: () => "discard;"
});
+
1355
+ //#endregion
1356
+ //#region src/std/matrix.ts
1357
// Bound GPU codegen entry points of the matrix-constructor builtins, used to
// inline their generated WGSL inside the transformation helpers below.
const gpuTranslation4 = translation4[$gpuCallable].call.bind(translation4);
const gpuScaling4 = scaling4[$gpuCallable].call.bind(scaling4);
const gpuRotationX4 = rotationX4[$gpuCallable].call.bind(rotationX4);
const gpuRotationY4 = rotationY4[$gpuCallable].call.bind(rotationY4);
const gpuRotationZ4 = rotationZ4[$gpuCallable].call.bind(rotationZ4);
/**
 * Translates the given 4-by-4 matrix by the given vector.
 * @param {m4x4f} matrix - The matrix to be modified.
 * @param {v3f} vector - The vector by which to translate the matrix.
 * @returns {m4x4f} The translated matrix.
 */
const translate4 = dualImpl({
  name: "translate4",
  // Pre-multiplies: translation * matrix.
  normalImpl: (matrix, vector) => mul(translation4(vector), matrix),
  signature: { argTypes: [mat4x4f, vec3f], returnType: mat4x4f },
  codegenImpl: (ctx, [matrix, vector]) => stitch`(${gpuTranslation4(ctx, [vector])} * ${matrix})`
});
/**
 * Scales the given 4-by-4 matrix in each dimension by an amount given by the corresponding entry in the given vector.
 * @param {m4x4f} matrix - The matrix to be modified.
 * @param {v3f} vector - A vector of three entries specifying the factor by which to scale in each dimension.
 * @returns {m4x4f} The scaled matrix.
 */
const scale4 = dualImpl({
  name: "scale4",
  normalImpl: (matrix, vector) => mul(scaling4(vector), matrix),
  signature: { argTypes: [mat4x4f, vec3f], returnType: mat4x4f },
  codegenImpl: (ctx, [matrix, vector]) => stitch`(${gpuScaling4(ctx, [vector])} * ${matrix})`
});
// Shared signature of the three axis-rotation helpers: (mat4x4f, angle) -> mat4x4f.
const rotateSignature = {
  argTypes: [mat4x4f, f32],
  returnType: mat4x4f
};
/**
 * Rotates the given 4-by-4 matrix around the x-axis by the given angle.
 * @param {m4x4f} matrix - The matrix to be modified.
 * @param {number} angle - The angle by which to rotate (in radians).
 * @returns {m4x4f} The rotated matrix.
 */
const rotateX4 = dualImpl({
  name: "rotateX4",
  normalImpl: (matrix, angle) => mul(rotationX4(angle), matrix),
  signature: rotateSignature,
  codegenImpl: (ctx, [matrix, angle]) => stitch`(${gpuRotationX4(ctx, [angle])} * ${matrix})`
});
/**
 * Rotates the given 4-by-4 matrix around the y-axis by the given angle.
 * @param {m4x4f} matrix - The matrix to be modified.
 * @param {number} angle - The angle by which to rotate (in radians).
 * @returns {m4x4f} The rotated matrix.
 */
const rotateY4 = dualImpl({
  name: "rotateY4",
  normalImpl: (matrix, angle) => mul(rotationY4(angle), matrix),
  signature: rotateSignature,
  codegenImpl: (ctx, [matrix, angle]) => stitch`(${gpuRotationY4(ctx, [angle])} * ${matrix})`
});
/**
 * Rotates the given 4-by-4 matrix around the z-axis by the given angle.
 * @param {m4x4f} matrix - The matrix to be modified.
 * @param {number} angle - The angle by which to rotate (in radians).
 * @returns {m4x4f} The rotated matrix.
 */
const rotateZ4 = dualImpl({
  name: "rotateZ4",
  normalImpl: (matrix, angle) => mul(rotationZ4(angle), matrix),
  signature: rotateSignature,
  codegenImpl: (ctx, [matrix, angle]) => stitch`(${gpuRotationZ4(ctx, [angle])} * ${matrix})`
});
+
1433
+ //#endregion
1434
+ //#region src/std/atomic.ts
1435
// GPU synchronization barriers. None of these do anything on the CPU, so each
// normalImpl is an explanatory message instead of a function.
const workgroupBarrier = dualImpl({
  name: "workgroupBarrier",
  normalImpl: "workgroupBarrier is a no-op outside of CODEGEN mode.",
  signature: { argTypes: [], returnType: Void },
  codegenImpl: () => "workgroupBarrier()"
});
const storageBarrier = dualImpl({
  name: "storageBarrier",
  normalImpl: "storageBarrier is a no-op outside of CODEGEN mode.",
  signature: { argTypes: [], returnType: Void },
  codegenImpl: () => "storageBarrier()"
});
const textureBarrier = dualImpl({
  name: "textureBarrier",
  normalImpl: "textureBarrier is a no-op outside of CODEGEN mode.",
  signature: { argTypes: [], returnType: Void },
  codegenImpl: () => "textureBarrier()"
});
// Atomic builtins are GPU-only as well.
const atomicNormalError = "Atomic operations are not supported outside of CODEGEN mode.";
const atomicLoad = dualImpl({
  name: "atomicLoad",
  normalImpl: atomicNormalError,
  signature: (a) => {
    if (!isAtomic(a)) {
      throw new Error(`Invalid atomic type: ${safeStringify(a)}`);
    }
    return { argTypes: [a], returnType: a.inner };
  },
  codegenImpl: (_ctx, [a]) => stitch`atomicLoad(&${a})`
});
// Signature of write-only atomics (atomicStore): (atomic<T>, T) -> void.
function atomicActionSignature(a) {
  if (!isAtomic(a)) {
    throw new Error(`Invalid atomic type: ${safeStringify(a)}`);
  }
  return {
    argTypes: [a, a.inner.type === "u32" ? u32 : i32],
    returnType: Void
  };
}
// Signature of read-modify-write atomics: (atomic<T>, T) -> T (the old value).
function atomicOpSignature(a) {
  if (!isAtomic(a)) {
    throw new Error(`Invalid atomic type: ${safeStringify(a)}`);
  }
  const paramType = a.inner.type === "u32" ? u32 : i32;
  return { argTypes: [a, paramType], returnType: paramType };
}
const atomicStore = dualImpl({
  name: "atomicStore",
  normalImpl: atomicNormalError,
  signature: atomicActionSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicStore(&${a}, ${v})`
});
const atomicAdd = dualImpl({
  name: "atomicAdd",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicAdd(&${a}, ${v})`
});
const atomicSub = dualImpl({
  name: "atomicSub",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicSub(&${a}, ${v})`
});
const atomicMax = dualImpl({
  name: "atomicMax",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicMax(&${a}, ${v})`
});
const atomicMin = dualImpl({
  name: "atomicMin",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicMin(&${a}, ${v})`
});
const atomicAnd = dualImpl({
  name: "atomicAnd",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicAnd(&${a}, ${v})`
});
const atomicOr = dualImpl({
  name: "atomicOr",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicOr(&${a}, ${v})`
});
const atomicXor = dualImpl({
  name: "atomicXor",
  normalImpl: atomicNormalError,
  signature: atomicOpSignature,
  codegenImpl: (_ctx, [a, v]) => stitch`atomicXor(&${a}, ${v})`
});
+
1539
+ //#endregion
1540
+ //#region src/std/derivative.ts
1541
// Derivative builtins (dpdx/dpdy/fwidth families) need neighboring fragment
// invocations, so they can only exist in generated shader code.
const derivativeNormalError = "Derivative builtins are not allowed on the CPU";
// Every derivative builtin is unary and type-preserving.
const derivativeSignature = (value) => ({
  argTypes: [value],
  returnType: value
});
const dpdx = dualImpl({
  name: "dpdx",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`dpdx(${v})`
});
const dpdxCoarse = dualImpl({
  name: "dpdxCoarse",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`dpdxCoarse(${v})`
});
const dpdxFine = dualImpl({
  name: "dpdxFine",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`dpdxFine(${v})`
});
const dpdy = dualImpl({
  name: "dpdy",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`dpdy(${v})`
});
const dpdyCoarse = dualImpl({
  name: "dpdyCoarse",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`dpdyCoarse(${v})`
});
const dpdyFine = dualImpl({
  name: "dpdyFine",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`dpdyFine(${v})`
});
const fwidth = dualImpl({
  name: "fwidth",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`fwidth(${v})`
});
const fwidthCoarse = dualImpl({
  name: "fwidthCoarse",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`fwidthCoarse(${v})`
});
const fwidthFine = dualImpl({
  name: "fwidthFine",
  normalImpl: derivativeNormalError,
  signature: derivativeSignature,
  codegenImpl: (_ctx, [v]) => stitch`fwidthFine(${v})`
});
+
1624
+ //#endregion
1625
+ //#region src/std/texture.ts
1626
/** CPU stub: texture sampling requires GPU resources. Always throws. */
function sampleCpu(_texture, _sampler, _coords, _offsetOrArrayIndex, _maybeOffset) {
  throw new MissingCpuImplError("Texture sampling relies on GPU resources and cannot be executed outside of a draw call");
}
const textureSample = dualImpl({
  name: "textureSample",
  normalImpl: sampleCpu,
  codegenImpl: (_ctx, args) => stitch`textureSample(${args})`,
  signature: (...args) => {
    // Depth textures sample to a single f32; everything else yields vec4f.
    const isDepth = args[0].type.startsWith("texture_depth");
    return { argTypes: args, returnType: isDepth ? f32 : vec4f };
  }
});
/** CPU stub: biased texture sampling requires GPU resources. Always throws. */
function sampleBiasCpu(_texture, _sampler, _coords, _biasOrArrayIndex, _biasOrOffset, _maybeOffset) {
  throw new MissingCpuImplError("Texture sampling with bias relies on GPU resources and cannot be executed outside of a draw call");
}
const textureSampleBias = dualImpl({
  name: "textureSampleBias",
  normalImpl: sampleBiasCpu,
  codegenImpl: (_ctx, args) => stitch`textureSampleBias(${args})`,
  signature: (...args) => ({
    argTypes: args,
    returnType: vec4f
  })
});
/** CPU stub: explicit-level texture sampling requires GPU resources. Always throws. */
function sampleLevelCpu(_texture, _sampler, _coords, _level, _offsetOrArrayIndex, _maybeOffset) {
  throw new MissingCpuImplError("Texture sampling relies on GPU resources and cannot be executed outside of a draw call");
}
const textureSampleLevel = dualImpl({
  name: "textureSampleLevel",
  normalImpl: sampleLevelCpu,
  codegenImpl: (_ctx, args) => stitch`textureSampleLevel(${args})`,
  signature: (...args) => {
    const isDepth = args[0].type.startsWith("texture_depth");
    return { argTypes: args, returnType: isDepth ? f32 : vec4f };
  }
});
/** CPU stub: `textureLoad` requires GPU resources. Always throws. */
function textureLoadCpu(_texture, _coords, _levelOrArrayIndex) {
  throw new MissingCpuImplError("`textureLoad` relies on GPU resources and cannot be executed outside of a draw call");
}
const textureLoad = dualImpl({
  name: "textureLoad",
  normalImpl: textureLoadCpu,
  codegenImpl: (_ctx, args) => stitch`textureLoad(${args})`,
  signature: (...args) => {
    const [texture] = args;
    if (isWgslTexture(texture)) {
      // Sampled textures: the result type follows the declared sample type.
      const sampleType = texture.sampleType;
      let returnType = vec4i;
      if (texture.type.startsWith("texture_depth")) returnType = f32;
      else if (sampleType.type === "f32") returnType = vec4f;
      else if (sampleType.type === "u32") returnType = vec4u;
      return { argTypes: args, returnType };
    }
    // Storage textures: the loaded vector type follows the texel format.
    return {
      argTypes: args,
      returnType: getTextureFormatInfo(texture.format).vectorType
    };
  }
});
/** CPU stub: `textureStore` requires GPU resources. Always throws. */
function textureStoreCpu(_texture, _coords, _arrayIndexOrValue, _maybeValue) {
  throw new MissingCpuImplError("`textureStore` relies on GPU resources and cannot be executed outside of a draw call");
}
const textureStore = dualImpl({
  name: "textureStore",
  normalImpl: textureStoreCpu,
  codegenImpl: (_ctx, args) => stitch`textureStore(${args})`,
  signature: (...args) => ({
    argTypes: args,
    returnType: Void
  })
});
/** CPU stub: `textureDimensions` requires GPU resources. Always throws. */
function textureDimensionsCpu(_texture, _level) {
  throw new MissingCpuImplError("`textureDimensions` relies on GPU resources and cannot be executed outside of a draw call");
}
+ const textureDimensions = dualImpl({
1706
+ name: "textureDimensions",
1707
+ normalImpl: textureDimensionsCpu,
1708
+ codegenImpl: (_ctx, args) => stitch`textureDimensions(${args})`,
1709
+ signature: (...args) => {
1710
+ const dim = args[0].dimension;
1711
+ if (dim === "1d") return {
1712
+ argTypes: args,
1713
+ returnType: u32
1714
+ };
1715
+ if (dim === "3d") return {
1716
+ argTypes: args,
1717
+ returnType: vec3u
1718
+ };
1719
+ return {
1720
+ argTypes: args,
1721
+ returnType: vec2u
1722
+ };
1723
+ }
1724
+ });
1725
+ const textureGatherCpu = (..._args) => {
1726
+ throw new Error("Texture gather relies on GPU resources and cannot be executed outside of a draw call");
1727
+ };
1728
+ const sampleTypeToVecType = {
1729
+ f32: vec4f,
1730
+ i32: vec4i,
1731
+ u32: vec4u
1732
+ };
1733
+ const textureGather = dualImpl({
1734
+ name: "textureGather",
1735
+ normalImpl: textureGatherCpu,
1736
+ codegenImpl: (_ctx, args) => stitch`textureGather(${args})`,
1737
+ signature: (...args) => {
1738
+ if (args[0].type.startsWith("texture")) {
1739
+ const [texture, sampler, coords, _, ...rest] = args;
1740
+ return {
1741
+ argTypes: texture.type === "texture_depth_2d_array" || texture.type === "texture_depth_cube_array" ? [
1742
+ texture,
1743
+ sampler,
1744
+ coords,
1745
+ [u32, i32],
1746
+ ...rest
1747
+ ] : args,
1748
+ returnType: vec4f
1749
+ };
1750
+ }
1751
+ const [_, texture, sampler, coords, ...rest] = args;
1752
+ return {
1753
+ argTypes: texture.type === "texture_2d_array" || texture.type === "texture_cube_array" ? [
1754
+ [u32, i32],
1755
+ texture,
1756
+ sampler,
1757
+ coords,
1758
+ [u32, i32],
1759
+ ...rest
1760
+ ] : [
1761
+ [u32, i32],
1762
+ texture,
1763
+ sampler,
1764
+ coords,
1765
+ ...rest
1766
+ ],
1767
+ returnType: sampleTypeToVecType[texture.sampleType.type]
1768
+ };
1769
+ }
1770
+ });
1771
+ function textureSampleCompareCpu(_texture, _sampler, _coords, _depthRefOrArrayIndex, _depthRefOrOffset, _maybeOffset) {
1772
+ throw new MissingCpuImplError("Texture comparison sampling relies on GPU resources and cannot be executed outside of a draw call");
1773
+ }
1774
+ const textureSampleCompare = dualImpl({
1775
+ name: "textureSampleCompare",
1776
+ normalImpl: textureSampleCompareCpu,
1777
+ codegenImpl: (_ctx, args) => stitch`textureSampleCompare(${args})`,
1778
+ signature: (...args) => ({
1779
+ argTypes: args,
1780
+ returnType: f32
1781
+ })
1782
+ });
1783
+ function textureSampleCompareLevelCpu(_texture, _sampler, _coords, _depthRefOrArrayIndex, _depthRefOrOffset, _maybeOffset) {
1784
+ throw new MissingCpuImplError("Texture comparison sampling with level relies on GPU resources and cannot be executed outside of a draw call");
1785
+ }
1786
+ const textureSampleCompareLevel = dualImpl({
1787
+ name: "textureSampleCompareLevel",
1788
+ normalImpl: textureSampleCompareLevelCpu,
1789
+ codegenImpl: (ctx, args) => stitch`textureSampleCompareLevel(${args})`,
1790
+ signature: (...args) => ({
1791
+ argTypes: args,
1792
+ returnType: f32
1793
+ })
1794
+ });
1795
+ function textureSampleBaseClampToEdgeCpu(_texture, _sampler, _coords) {
1796
+ throw new MissingCpuImplError("Texture sampling with base clamp to edge is not supported outside of GPU mode.");
1797
+ }
1798
+ const textureSampleBaseClampToEdge = dualImpl({
1799
+ name: "textureSampleBaseClampToEdge",
1800
+ normalImpl: textureSampleBaseClampToEdgeCpu,
1801
+ codegenImpl: (_ctx, args) => stitch`textureSampleBaseClampToEdge(${args})`,
1802
+ signature: (...args) => ({
1803
+ argTypes: args,
1804
+ returnType: vec4f
1805
+ })
1806
+ });
1807
+
1808
+ //#endregion
1809
+ //#region src/std/subgroup.ts
1810
+ const errorMessage = "Subgroup operations can only be used in the GPU context.";
1811
+ const subgroupAdd = dualImpl({
1812
+ name: "subgroupAdd",
1813
+ signature: (arg) => ({
1814
+ argTypes: [arg],
1815
+ returnType: arg
1816
+ }),
1817
+ normalImpl: errorMessage,
1818
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupAdd(${arg})`
1819
+ });
1820
+ const subgroupExclusiveAdd = dualImpl({
1821
+ name: "subgroupExclusiveAdd",
1822
+ signature: (arg) => ({
1823
+ argTypes: [arg],
1824
+ returnType: arg
1825
+ }),
1826
+ normalImpl: errorMessage,
1827
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupExclusiveAdd(${arg})`
1828
+ });
1829
+ const subgroupInclusiveAdd = dualImpl({
1830
+ name: "subgroupInclusiveAdd",
1831
+ signature: (arg) => ({
1832
+ argTypes: [arg],
1833
+ returnType: arg
1834
+ }),
1835
+ normalImpl: errorMessage,
1836
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupInclusiveAdd(${arg})`
1837
+ });
1838
+ const subgroupAll = dualImpl({
1839
+ name: "subgroupAll",
1840
+ signature: {
1841
+ argTypes: [bool],
1842
+ returnType: bool
1843
+ },
1844
+ normalImpl: errorMessage,
1845
+ codegenImpl: (_ctx, [e]) => stitch`subgroupAll(${e})`
1846
+ });
1847
+ const subgroupAnd = dualImpl({
1848
+ name: "subgroupAnd",
1849
+ signature: (arg) => ({
1850
+ argTypes: [arg],
1851
+ returnType: arg
1852
+ }),
1853
+ normalImpl: errorMessage,
1854
+ codegenImpl: (_ctx, [e]) => stitch`subgroupAnd(${e})`
1855
+ });
1856
+ const subgroupAny = dualImpl({
1857
+ name: "subgroupAny",
1858
+ signature: {
1859
+ argTypes: [bool],
1860
+ returnType: bool
1861
+ },
1862
+ normalImpl: errorMessage,
1863
+ codegenImpl: (_ctx, [e]) => stitch`subgroupAny(${e})`
1864
+ });
1865
+ const subgroupBallot = dualImpl({
1866
+ name: "subgroupBallot",
1867
+ signature: {
1868
+ argTypes: [bool],
1869
+ returnType: vec4u
1870
+ },
1871
+ normalImpl: errorMessage,
1872
+ codegenImpl: (_ctx, [e]) => stitch`subgroupBallot(${e})`
1873
+ });
1874
+ const subgroupBroadcast = dualImpl({
1875
+ name: "subgroupBroadcast",
1876
+ signature: (...args) => {
1877
+ const id = unify([args[1]], [i32, u32]);
1878
+ if (!id) throw new Error(`subgroupBroadcast's second argument has to be compatible with i32 or u32. Got: ${args[1].type}`);
1879
+ return {
1880
+ argTypes: [args[0], id[0]],
1881
+ returnType: args[0]
1882
+ };
1883
+ },
1884
+ normalImpl: errorMessage,
1885
+ codegenImpl: (_ctx, [e, index]) => stitch`subgroupBroadcast(${e}, ${index})`
1886
+ });
1887
+ const subgroupBroadcastFirst = dualImpl({
1888
+ name: "subgroupBroadcastFirst",
1889
+ signature: (arg) => ({
1890
+ argTypes: [arg],
1891
+ returnType: arg
1892
+ }),
1893
+ normalImpl: errorMessage,
1894
+ codegenImpl: (_ctx, [e]) => stitch`subgroupBroadcastFirst(${e})`
1895
+ });
1896
+ const subgroupElect = dualImpl({
1897
+ name: "subgroupElect",
1898
+ signature: {
1899
+ argTypes: [],
1900
+ returnType: bool
1901
+ },
1902
+ normalImpl: errorMessage,
1903
+ codegenImpl: () => stitch`subgroupElect()`
1904
+ });
1905
+ const subgroupMax = dualImpl({
1906
+ name: "subgroupMax",
1907
+ signature: (arg) => ({
1908
+ argTypes: [arg],
1909
+ returnType: arg
1910
+ }),
1911
+ normalImpl: errorMessage,
1912
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupMax(${arg})`
1913
+ });
1914
+ const subgroupMin = dualImpl({
1915
+ name: "subgroupMin",
1916
+ signature: (arg) => ({
1917
+ argTypes: [arg],
1918
+ returnType: arg
1919
+ }),
1920
+ normalImpl: errorMessage,
1921
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupMin(${arg})`
1922
+ });
1923
+ const subgroupMul = dualImpl({
1924
+ name: "subgroupMul",
1925
+ signature: (arg) => ({
1926
+ argTypes: [arg],
1927
+ returnType: arg
1928
+ }),
1929
+ normalImpl: errorMessage,
1930
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupMul(${arg})`
1931
+ });
1932
+ const subgroupExclusiveMul = dualImpl({
1933
+ name: "subgroupExclusiveMul",
1934
+ signature: (arg) => ({
1935
+ argTypes: [arg],
1936
+ returnType: arg
1937
+ }),
1938
+ normalImpl: errorMessage,
1939
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupExclusiveMul(${arg})`
1940
+ });
1941
+ const subgroupInclusiveMul = dualImpl({
1942
+ name: "subgroupInclusiveMul",
1943
+ signature: (arg) => ({
1944
+ argTypes: [arg],
1945
+ returnType: arg
1946
+ }),
1947
+ normalImpl: errorMessage,
1948
+ codegenImpl: (_ctx, [arg]) => stitch`subgroupInclusiveMul(${arg})`
1949
+ });
1950
+ const subgroupOr = dualImpl({
1951
+ name: "subgroupOr",
1952
+ signature: (arg) => ({
1953
+ argTypes: [arg],
1954
+ returnType: arg
1955
+ }),
1956
+ normalImpl: errorMessage,
1957
+ codegenImpl: (_ctx, [e]) => stitch`subgroupOr(${e})`
1958
+ });
1959
+ const subgroupShuffle = dualImpl({
1960
+ name: "subgroupShuffle",
1961
+ signature: (...args) => {
1962
+ const id = unify([args[1]], [i32, u32]);
1963
+ if (!id) throw new Error(`subgroupShuffle's second argument has to be compatible with i32 or u32. Got: ${args[1].type}`);
1964
+ return {
1965
+ argTypes: [args[0], id[0]],
1966
+ returnType: args[0]
1967
+ };
1968
+ },
1969
+ normalImpl: errorMessage,
1970
+ codegenImpl: (_ctx, [e, index]) => stitch`subgroupShuffle(${e}, ${index})`
1971
+ });
1972
+ const subgroupShuffleDown = dualImpl({
1973
+ name: "subgroupShuffleDown",
1974
+ signature: (...args) => {
1975
+ const delta = unify([args[1]], [u32]);
1976
+ if (!delta) throw new Error(`subgroupShuffleDown's second argument has to be compatible with u32. Got: ${args[1].type}`);
1977
+ return {
1978
+ argTypes: [args[0], delta[0]],
1979
+ returnType: args[0]
1980
+ };
1981
+ },
1982
+ normalImpl: errorMessage,
1983
+ codegenImpl: (_ctx, [e, delta]) => stitch`subgroupShuffleDown(${e}, ${delta})`
1984
+ });
1985
+ const subgroupShuffleUp = dualImpl({
1986
+ name: "subgroupShuffleUp",
1987
+ signature: (...args) => {
1988
+ const delta = unify([args[1]], [u32]);
1989
+ if (!delta) throw new Error(`subgroupShuffleUp's second argument has to be compatible with u32. Got: ${args[1].type}`);
1990
+ return {
1991
+ argTypes: [args[0], delta[0]],
1992
+ returnType: args[0]
1993
+ };
1994
+ },
1995
+ normalImpl: errorMessage,
1996
+ codegenImpl: (_ctx, [e, delta]) => stitch`subgroupShuffleUp(${e}, ${delta})`
1997
+ });
1998
+ const subgroupShuffleXor = dualImpl({
1999
+ name: "subgroupShuffleXor",
2000
+ signature: (...args) => {
2001
+ const mask = unify([args[1]], [u32]);
2002
+ if (!mask) throw new Error(`subgroupShuffleXor's second argument has to be compatible with u32. Got: ${args[1].type}`);
2003
+ return {
2004
+ argTypes: [args[0], mask[0]],
2005
+ returnType: args[0]
2006
+ };
2007
+ },
2008
+ normalImpl: errorMessage,
2009
+ codegenImpl: (_ctx, [e, mask]) => stitch`subgroupShuffleXor(${e}, ${mask})`
2010
+ });
2011
+ const subgroupXor = dualImpl({
2012
+ name: "subgroupXor",
2013
+ signature: (arg) => ({
2014
+ argTypes: [arg],
2015
+ returnType: arg
2016
+ }),
2017
+ normalImpl: errorMessage,
2018
+ codegenImpl: (_ctx, [e]) => stitch`subgroupXor(${e})`
2019
+ });
2020
+
2021
+ //#endregion
2022
+ //#region src/std/extensions.ts
2023
+ const extensionEnabled = comptime((extensionName) => {
2024
+ const resolutionCtx = getResolutionCtx();
2025
+ if (!resolutionCtx) throw new Error("Functions using `extensionEnabled` cannot be called directly. Either generate WGSL from them, or use tgpu['~unstable'].simulate(...)");
2026
+ if (typeof extensionName !== "string" || !wgslExtensions.includes(extensionName)) throw new Error(`extensionEnabled has to be called with a string literal representing a valid WGSL extension name. Got: '${extensionName}'`);
2027
+ return (resolutionCtx.enableExtensions ?? []).includes(extensionName);
2028
+ });
2029
+
2030
+ //#endregion
2031
+ export { workgroupBarrier as $, extractBits as $t, textureSampleCompareLevel as A, sqrt as An, acos as At, fwidthCoarse as B, cosh as Bt, textureDimensions as C, reverseBits as Cn, pack4x8unorm as Ct, textureSampleBaseClampToEdge as D, sin as Dn, bitcastU32toI32 as Dt, textureSample as E, sign as En, bitcastU32toF32 as Et, dpdxFine as F, trunc as Fn, atan2 as Ft, atomicMax as G, degrees as Gt, atomicAdd as H, countOneBits as Ht, dpdy as I, getEffectiveSampleTypes as In, atanh as It, atomicStore as J, dot as Jt, atomicMin as K, determinant as Kt, dpdyCoarse as L, getTextureFormatInfo as Ln, ceil as Lt, textureStore as M, tan as Mn, asin as Mt, dpdx as N, tanh as Nn, asinh as Nt, textureSampleBias as O, sinh as On, arrayLength as Ot, dpdxCoarse as P, transpose as Pn, atan as Pt, textureBarrier as Q, exp2 as Qt, dpdyFine as R, clamp as Rt, subgroupXor as S, refract as Sn, pack2x16float as St, textureLoad as T, saturate as Tn, unpack4x8unorm as Tt, atomicAnd as U, countTrailingZeros as Ut, fwidthFine as V, countLeadingZeros as Vt, atomicLoad as W, cross as Wt, atomicXor as X, dot4U8Packed as Xt, atomicSub as Y, dot4I8Packed as Yt, storageBarrier as Z, exp as Zt, subgroupOr as _, normalize as _n, not as _t, subgroupAny as a, fract as an, discard as at, subgroupShuffleUp as b, radians as bn, wgslExtensionToFeatureName as bt, subgroupBroadcastFirst as c, inverseSqrt as cn, and as ct, subgroupExclusiveMul as d, log as dn, ge as dt, faceForward as en, rotateX4 as et, subgroupInclusiveAdd as f, log2 as fn, gt as ft, subgroupMul as g, modf as gn, ne as gt, subgroupMin as h, mix as hn, lt as ht, subgroupAnd as i, fma as in, translate4 as it, textureSampleLevel as j, step as jn, acosh as jt, textureSampleCompare as k, smoothstep as kn, abs as kt, subgroupElect as l, ldexp as ln, any as lt, subgroupMax as m, min as mn, le as mt, subgroupAdd as n, firstTrailingBit as nn, rotateZ4 as nt, subgroupBallot as o, frexp as on, all as ot, subgroupInclusiveMul as p, max as pn, isCloseTo 
as pt, atomicOr as q, distance as qt, subgroupAll as r, floor as rn, scale4 as rt, subgroupBroadcast as s, insertBits as sn, allEq as st, extensionEnabled as t, firstLeadingBit as tn, rotateY4 as tt, subgroupExclusiveAdd as u, length as un, eq as ut, subgroupShuffle as v, pow as vn, or as vt, textureGather as w, round as wn, unpack2x16float as wt, subgroupShuffleXor as x, reflect as xn, wgslExtensions as xt, subgroupShuffleDown as y, quantizeToF16 as yn, select as yt, fwidth as z, cos as zt };
2032
+ //# sourceMappingURL=extensions-DIVuAfBM.js.map