webgpu-computed 0.0.15 → 0.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +96 -358
  2. package/package.json +1 -1
  3. package/src/index.js +126 -124
package/README.md CHANGED
@@ -1,20 +1,23 @@
1
1
  # webgpu-computed
2
2
 
3
- 🌐 Other language versions:
4
- - [简体中文](https://github.com/xiaguochuqiu/webgpu-computed/blob/main/README.zh.md)
3
+ 🌐 Other languages:
5
4
 
6
- A simplified WebGPU computing library that encapsulates tedious initialization and buffer management, allowing developers to focus on WGSL shader logic.
5
+ * [中文](https://github.com/xiaguochuqiu/webgpu-computed/blob/main/README.zh.md)
6
+
7
+ A simplified WebGPU compute library that wraps verbose initialization and buffer management, allowing developers to focus on WGSL shader logic.
7
8
 
8
9
  ## Features
9
10
 
10
- - 🚀 Simplified WebGPU initialization
11
- - 📦 Automatic buffer management and layout calculation
12
- - 🔧 Support for complex data structures (vectors, matrices)
13
- - ⚡ High-performance GPU computing
14
- - 📚 Built-in common WGSL functions
15
- -Support for Node.js environment
16
- - 🛠️ TypeScript support
17
- - 📖 Detailed English documentation and examples
11
+ * 🚀 Simplified WebGPU initialization
12
+ * 📦 Automatic buffer management and layout calculation
13
+ * 🔧 Supports complex data structures (vectors, matrices)
14
+ * ⚡ High-performance GPU computation
15
+ * 📚 Built-in common WGSL functions
16
+ * ✅ Node.js environment support
17
+ * 🛠️ TypeScript support
18
+ * 📖 Detailed documentation and examples
19
+ * 🔄 Buffer reuse support
20
+ * ⚛️ Atomic operations support (u32)
18
21
 
19
22
  ## Installation
20
23
 
@@ -26,7 +29,7 @@ npm install webgpu-computed
26
29
 
27
30
  ### 1. Initialize WebGPU
28
31
 
29
- Before using any computing features, you need to initialize the WebGPU environment:
32
+ Before using any compute features, you need to initialize the WebGPU environment:
30
33
 
31
34
  ```javascript
32
35
  import { GpuComputed } from 'webgpu-computed';
@@ -34,13 +37,13 @@ import { GpuComputed } from 'webgpu-computed';
34
37
  // Initialize WebGPU
35
38
  await GpuComputed.init();
36
39
 
37
- // After using in Node.js environment, please call:
40
+ // In Node.js, call this after usage:
38
41
  // GpuComputed.destroy()
39
42
  ```
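
Since the snippet above only hints at the Node.js flow, here is a minimal end-to-end sketch of that lifecycle; it reuses the vector-addition pattern shown later in this README, and the note about loading the `webgpu` Node bindings is inferred from `src/index.js` further down in this diff.

```javascript
import { GpuComputed } from 'webgpu-computed';

try {
  // In Node.js (no `window`), init() loads the `webgpu` bindings and patches navigator.gpu.
  await GpuComputed.init();

  const results = await GpuComputed.computed({
    code: `output[index] = values[index] * 2.0;`,
    data: { values: [1.0, 2.0, 3.0], output: new Array(3).fill(0) },
    synchronize: ['output'],
    workgroupCount: [1]
  });
  console.log(results[0]); // expected: [2, 4, 6]
} finally {
  // Release the GPU device when finished, as recommended above for Node.js.
  GpuComputed.destroy();
}
```
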
40
43
 
41
- ### 2. Perform Simple Computation
44
+ ### 2. Execute a Simple Computation
42
45
 
43
- Here is a simple vector addition example:
46
+ Below is a simple vector addition example:
44
47
 
45
48
  ```javascript
46
49
  import { GpuComputed } from 'webgpu-computed';
@@ -86,113 +89,99 @@ const code = `
86
89
  output[index].vel = positions[index].vel * 2.0;
87
90
  `;
88
91
 
89
- // Execute computation
90
92
  GpuComputed.computed({
91
93
  code,
92
94
  data,
93
- synchronize: ["output"], // Fields to return
94
- workgroupCount: [1] // Number of workgroups
95
+ synchronize: ["output"],
96
+ workgroupCount: [1]
95
97
  }).then(results => {
96
- console.log(results); // [[1.100000023841858,2.200000047683716,3.299999952316284,0,0.20000000298023224,0.4000000059604645,0.6000000238418579,0,4.400000095367432,5.5,6.599999904632568,0,0.800000011920929,1,1.2000000476837158,0]]
98
+ console.log(results);
97
99
  })
98
100
  ```
99
101
 
100
- ### 4. Manually Create GpuComputed Instance
102
+ ### 4. Using Different Data Types
101
103
 
102
- If you need more fine-grained control, you can directly create a GpuComputed instance:
104
+ #### Unsigned Integer (u32)
103
105
 
104
106
  ```javascript
105
107
  import { GpuComputed } from 'webgpu-computed';
106
108
 
107
- // 1. Define data template
108
- const template = {
109
- inputA: [] as number[],
110
- inputB: [] as number[],
111
- output: [] as number[]
109
+ const data = {
110
+ counters: new Uint32Array([0, 1, 2, 3]),
111
+ output: new Uint32Array(4)
112
112
  };
113
113
 
114
- // 2. Create instance
115
- const gpuComputed = new GpuComputed(template, {
116
- code: `
117
- output[index] = inputA[index] + inputB[index];
118
- `,
119
- workgroupSize: [32, 1, 1] // Optional: custom workgroup size
114
+ const code = `
115
+ output[index] = counters[index] * 2u;
116
+ `;
117
+
118
+ const results = await GpuComputed.computed({
119
+ code,
120
+ data,
121
+ synchronize: ["output"],
122
+ workgroupCount: [1]
120
123
  });
121
124
 
122
- // 3. Initialize pipeline
123
- await gpuComputed.initPipeline();
125
+ console.log(results[0]); // [0, 2, 4, 6]
126
+ ```
127
+
128
+ #### Atomic Operations
129
+
130
+ ```javascript
131
+ import { GpuComputed, AtomicUint32Array } from 'webgpu-computed';
124
132
 
125
- // 4. Prepare data
126
133
  const data = {
127
- inputA: [1.0, 2.0, 3.0, 4.0],
128
- inputB: [0.5, 1.5, 2.5, 3.5],
129
- output: new Array(4).fill(0)
134
+ atomicCounter: new AtomicUint32Array([0]),
135
+ output: new Uint32Array(4)
130
136
  };
131
137
 
132
- // 5. Create bind group
133
- const bindGroup = gpuComputed.createBindGroup(data);
138
+ const code = `
139
+ let old = atomicAdd(&atomicCounter[0], 1u);
140
+ output[index] = old + 1u;
141
+ `;
134
142
 
135
- // 6. Execute computation
136
- const results = await gpuComputed.computed(bindGroup, [1], ['output']);
143
+ const results = await GpuComputed.computed({
144
+ code,
145
+ data,
146
+ synchronize: ["output"],
147
+ workgroupCount: [1]
148
+ });
137
149
 
138
- console.log(results[0]); // [1.5, 3.5, 5.5, 7.5]
150
+ console.log(results[0]); // [1, 2, 3, 4]
139
151
  ```
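
A note on the atomic example above: judging from `src/index.js` in this diff, `AtomicUint32Array` is a `Uint32Array` subclass, and a field of that type is declared in the generated WGSL as `array<atomic<u32>>`, whereas a plain `Uint32Array` field becomes `array<u32>`. The sketch below contrasts the two; the generated-binding comments are paraphrased from the library source, not quoted from its docs.

```javascript
import { GpuComputed, AtomicUint32Array } from 'webgpu-computed';

const data = {
  // Plain typed array -> generated as: var<storage, read_write> hits: array<u32>;
  hits: new Uint32Array(4),
  // AtomicUint32Array -> generated as: var<storage, read_write> total: array<atomic<u32>>;
  total: new AtomicUint32Array([0])
};

// Atomic built-ins such as atomicAdd are only valid on the atomic field.
const code = `
  let old = atomicAdd(&total[0], 1u);
  hits[index] = old;
`;

const results = await GpuComputed.computed({
  code,
  data,
  synchronize: ['hits', 'total'],
  workgroupCount: [1]
});

console.log(results); // [hits, total] returned as plain arrays
```
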
140
152
 
141
- #### Using Struct Data
153
+ ### 5. Manually Creating a GpuComputed Instance
154
+
155
+ If you need more fine-grained control, you can create a GpuComputed instance directly:
142
156
 
143
157
  ```javascript
144
- // Define struct template
145
- const structTemplate = {
146
- particles: {
147
- layout: [
148
- { name: 'position', type: 'vec3' },
149
- { name: 'velocity', type: 'vec3' },
150
- { name: 'mass', type: 'f32' }
151
- ]
152
- },
153
- output: {
154
- layout: [
155
- { name: 'position', type: 'vec3' },
156
- { name: 'velocity', type: 'vec3' },
157
- { name: 'mass', type: 'f32' }
158
- ]
159
- }
158
+ import { GpuComputed } from 'webgpu-computed';
159
+
160
+ const template = {
161
+ inputA: [] as number[],
162
+ inputB: [] as number[],
163
+ output: [] as number[]
160
164
  };
161
165
 
162
- const gpuComputed = new GpuComputed(structTemplate, {
166
+ const gpuComputed = new GpuComputed(template, {
163
167
  code: `
164
- output[index].position = particles[index].position + particles[index].velocity;
165
- output[index].velocity = particles[index].velocity * 2.0;
166
- output[index].mass = particles[index].mass * 1.5;
167
- `
168
+ output[index] = inputA[index] + inputB[index];
169
+ `,
170
+ workgroupSize: [32, 1, 1]
168
171
  });
169
172
 
170
173
  await gpuComputed.initPipeline();
171
174
 
172
175
  const data = {
173
- particles: [
174
- { position: [1, 2, 3], velocity: [0.1, 0.2, 0.3], mass: 1.0 },
175
- { position: [4, 5, 6], velocity: [0.4, 0.5, 0.6], mass: 2.0 }
176
- ],
177
- output: [
178
- { position: [0, 0, 0], velocity: [0, 0, 0], mass: 0 },
179
- { position: [0, 0, 0], velocity: [0, 0, 0], mass: 0 }
180
- ]
176
+ inputA: [1.0, 2.0, 3.0, 4.0],
177
+ inputB: [0.5, 1.5, 2.5, 3.5],
178
+ output: new Array(4).fill(0)
181
179
  };
182
180
 
183
181
  const bindGroup = gpuComputed.createBindGroup(data);
184
182
  const results = await gpuComputed.computed(bindGroup, [1], ['output']);
185
183
 
186
- console.log(results[0]); // Mapped data
187
- ```
188
-
189
- #### Data Mapping
190
-
191
- When using structs, you can use the `dataMap` method to map results back to the original structure:
192
-
193
- ```javascript
194
- const mappedData = gpuComputed.dataMap(results[0], 'output');
195
- console.log(mappedData); // Returns structured object array
184
+ console.log(results[0]);
196
185
  ```
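
One caveat about the template above: `[] as number[]` is TypeScript cast syntax inside a block marked `javascript`. In plain JavaScript the same idea can be expressed with empty `Float32Array` placeholders, which — per `src/index.js` in this diff — is enough to mark a field as an `f32` storage buffer. A minimal plain-JS variant:

```javascript
import { GpuComputed } from 'webgpu-computed';

// Empty typed arrays mark each field as an f32 storage buffer; real data is supplied later.
const template = {
  inputA: new Float32Array(),
  inputB: new Float32Array(),
  output: new Float32Array()
};

const gpuComputed = new GpuComputed(template, {
  code: `output[index] = inputA[index] + inputB[index];`,
  workgroupSize: [32, 1, 1]
});

await gpuComputed.initPipeline();
// createBindGroup(...) and computed(...) are then used exactly as in the example above.
```
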
197
186
 
198
187
  ## API Reference
@@ -205,292 +194,41 @@ console.log(mappedData); // Returns structured object array
205
194
 
206
195
  Initializes the WebGPU environment. Must be called before using other features.
207
196
 
208
- **Returns**: `Promise<void>`
209
-
210
- **Throws**: If the browser does not support WebGPU or fails to obtain adapter/device
211
-
212
197
  ##### `GpuComputed.computed(options)`
213
198
 
214
199
  Executes a GPU computation task.
215
200
 
216
201
  **Parameters**:
217
202
 
218
- - `code` (string): WGSL computation code
219
- - `data` (object): Input/output data object
220
- - `workgroupCount` (array): Number of workgroups [x, y?, z?]
221
- - `workgroupSize` (array, optional): Workgroup size, default [32, 1, 1]
222
- - `globalInvocationIdName` (string, optional): Global invocation ID variable name, default "grid"
223
- - `workgroupIndexName` (string, optional): Workgroup index variable name, default "index"
224
- - `synchronize` (array, optional): Array of buffer names to synchronize back to CPU
225
- - `beforeCodes` (array, optional): WGSL code snippets before the computation function
226
- - `onSuccess` (function, optional): Success callback function
227
-
228
- **Returns**: `Promise<Array<Float32Array>>` - Data from synchronized buffers
229
-
230
- ### Data Types
231
-
232
- Supports the following WGSL types:
233
-
234
- - `f32`: Single-precision float
235
- - `vec2`: 2D vector
236
- - `vec3`: 3D vector
237
- - `vec4`: 4D vector
238
- - `mat3x3`: 3x3 matrix
239
- - `mat4x4`: 4x4 matrix
240
-
241
- ### Built-in WGSL Functions
242
-
243
- The library provides some commonly used WGSL helper functions:
244
-
245
- #### Quaternion Rotation
246
-
247
- ```wgsl
248
- fn quat_rotate(q: vec4<f32>, v: vec3<f32>) -> vec3<f32>
249
- ```
250
-
251
- Usage example:
252
-
253
- ```javascript
254
- import { WGSL_Fun } from 'webgpu-computed';
255
-
256
- await GpuComputed.computed({
257
- code: "",
258
- data: {....},
259
- beforeCodes:[WGSL_Fun.quat_rotate]
260
- })
261
- ```
262
-
263
- #### Point in OBB Detection
264
-
265
- ```wgsl
266
- fn point_in_obb(point: vec3<f32>, center: vec3<f32>, halfSize: vec3<f32>, quat: vec4<f32>) -> bool
267
- ```
268
-
269
- ## Advanced Usage
270
-
271
- ### Custom Workgroup Configuration
272
-
273
- ```javascript
274
- await GpuComputed.computed({
275
- code: '...',
276
- data: {...},
277
- workgroupCount: [4, 4], // 16 workgroups
278
- workgroupSize: [16, 16], // 256 threads per workgroup
279
- });
280
- ```
281
-
282
- ### Synchronizing Data Back to CPU
283
-
284
- ```javascript
285
- const results = await GpuComputed.computed({
286
- code: '...',
287
- data: {...},
288
- synchronize: ['output'], // Specify buffers to synchronize
289
- workgroupCount: [1]
290
- });
291
-
292
- // results contains synchronized data
293
- ```
294
-
295
- ### Callback Function
296
-
297
- ```javascript
298
- await GpuComputed.computed({
299
- code: '...',
300
- data: {...},
301
- workgroupCount: [1],
302
- onSuccess: ({ gpuComputed, group, results }) => {
303
- console.log('Computation completed', results);
304
- }
305
- });
306
- ```
307
-
308
- ## Example Project
309
-
310
- ```js
311
- import { GpuComputed } from "webgpu-computed"
312
- import * as WGSL_Fun from "webgpu-computed"
313
-
314
- // 1. Initialize WebGPU
315
- console.log('Initializing WebGPU...');
316
- await GpuComputed.init();
317
- console.log('WebGPU initialized successfully');
318
-
319
- // 2. Simple array computation example
320
- console.log('\n=== Simple Array Computation ===');
321
- const simpleData = {
322
- inputA: [1.0, 2.0, 3.0, 4.0],
323
- inputB: [0.5, 1.5, 2.5, 3.5],
324
- output: new Array(4).fill(0)
325
- };
326
-
327
- const simpleCode = `
328
- output[index] = inputA[index] + inputB[index];
329
- `;
330
-
331
- const simpleResults = await GpuComputed.computed({
332
- code: simpleCode,
333
- data: simpleData,
334
- workgroupCount: [1],
335
- synchronize: ['output']
336
- });
337
-
338
- console.log('Simple computation result:', simpleResults[0]); // [1.5, 3.5, 5.5, 7.5]
339
-
340
- // 3. Complex data structure example (struct)
341
- console.log('\n=== Complex Data Structure Computation ===');
342
- const complexData = {
343
- particles: [
344
- { position: [1.0, 2.0, 3.0], velocity: [0.1, 0.2, 0.3], mass: 1.0 },
345
- { position: [4.0, 5.0, 6.0], velocity: [0.4, 0.5, 0.6], mass: 2.0 }
346
- ],
347
- output: [
348
- { position: [0, 0, 0], velocity: [0, 0, 0], mass: 0 },
349
- { position: [0, 0, 0], velocity: [0, 0, 0], mass: 0 }
350
- ]
351
- };
352
-
353
- const complexCode = `
354
- output[index].position = particles[index].position + particles[index].velocity;
355
- output[index].velocity = particles[index].velocity * 2.0;
356
- output[index].mass = particles[index].mass * 1.5;
357
- `;
358
-
359
- const complexResults = await GpuComputed.computed({
360
- code: complexCode,
361
- data: complexData,
362
- workgroupCount: [1],
363
- synchronize: ['output']
364
- });
365
-
366
- console.log('Complex computation result:', complexResults[0]);
367
-
368
- // 4. Using built-in WGSL functions example
369
- console.log('\n=== Using Built-in WGSL Functions ===');
370
- const wgslFunData = {
371
- points: [
372
- {
373
- x: 1.0, y: 0.0, z: 0.0
374
- },
375
- {
376
- x: 0.0, y: 1.0, z: 0.0
377
- },
378
- {
379
- x: -1.0, y: 0.0, z: 0.0
380
- }
381
- ],
382
- obbCenter: [0.0, 0.0, 0.0],
383
- obbHalfSize: [2.0, 2.0, 2.0],
384
- obbRotation: [0.0, 0.0, 0.0, 1.0], // Unit quaternion, no rotation
385
- results: new Array(3).fill(0)
386
- };
387
-
388
- const wgslFunCode = `
389
- let point = vec3(points[index].x, points[index].y, points[index].z);
390
- let center = vec3<f32>(obbCenter[0], obbCenter[1], obbCenter[2]);
391
- let halfSize = vec3<f32>(obbHalfSize[0], obbHalfSize[1], obbHalfSize[2]);
392
- let quat = vec4<f32>(obbRotation[0], obbRotation[1], obbRotation[2], obbRotation[3]);
393
-
394
- if (point_in_obb(point, center, halfSize, quat)) {
395
- results[index] = 1.0;
396
- } else {
397
- results[index] = 0.0;
398
- }
399
- `;
400
-
401
- const wgslFunResults = await GpuComputed.computed({
402
- code: wgslFunCode,
403
- data: wgslFunData,
404
- workgroupCount: [1],
405
- beforeCodes: [WGSL_Fun.quat_rotate, WGSL_Fun.point_in_obb, /** Add your own function code */],
406
- synchronize: ['results']
407
- });
408
-
409
- console.log('OBB detection result:', wgslFunResults[0]); // [1, 1, 1] All points are inside the OBB
410
-
411
- // 5. Custom workgroup configuration example
412
- console.log('\n=== Custom Workgroup Configuration ===');
413
- const largeData = {
414
- largeArray: new Array(1024).fill(0).map((_, i) => i * 1.0),
415
- output: new Array(1024).fill(0)
416
- };
417
-
418
- const largeCode = `
419
- output[index] = largeArray[index] * 2.0;
420
- `;
421
-
422
- const largeResults = await GpuComputed.computed({
423
- code: largeCode,
424
- data: largeData,
425
- workgroupCount: [32], // 32 workgroups
426
- workgroupSize: [32, 1, 1], // 32 threads per workgroup, total 1024 threads
427
- synchronize: ['output']
428
- });
429
-
430
- console.log('Large array computation result (first 10):', largeResults[0].slice(0, 10));
431
-
432
- // 6. Using callback function example
433
- console.log('\n=== Using Callback Function ===');
434
- const callbackData = {
435
- values: [10.0, 20.0, 30.0],
436
- squares: new Array(3).fill(0)
437
- };
438
-
439
- const callbackCode = `
440
- squares[index] = values[index] * values[index];
441
- `;
442
-
443
- await GpuComputed.computed({
444
- code: callbackCode,
445
- data: callbackData,
446
- workgroupCount: [1],
447
- synchronize: ['squares'],
448
- onSuccess: ({ gpuComputed, group, results }) => {
449
- console.log('Callback triggered, square computation result:', results[0]); // [100, 400, 900]
450
- }
451
- });
452
-
453
- // 7. Multi-dimensional workgroup example
454
- console.log('\n=== Multi-dimensional Workgroup ===');
455
- const matrixData = {
456
- matrixA: new Array(16).fill(0).map((_, i) => i * 1.0),
457
- matrixB: new Array(16).fill(0).map((_, i) => (i + 1) * 1.0),
458
- result: new Array(16).fill(0)
459
- };
460
-
461
- const matrixCode = `
462
- let x = index % 4u;
463
- let y = index / 4u;
464
- let idx = y * 4u + x;
465
- result[idx] = matrixA[idx] + matrixB[idx];
466
- `;
467
-
468
- const matrixResults = await GpuComputed.computed({
469
- code: matrixCode,
470
- data: matrixData,
471
- workgroupCount: [4, 4], // 4x4 workgroup grid
472
- workgroupSize: [1, 1, 1], // 1 thread per workgroup
473
- synchronize: ['result']
474
- });
475
-
476
- console.log('Matrix computation result:', matrixResults[0]);
477
-
478
- console.log('\nAll feature examples completed!');
479
- ```
203
+ * `code`: WGSL computation code
204
+ * `data`: Input/output data object
205
+ * `workgroupCount`: Workgroup count
206
+ * `workgroupSize`: Workgroup size
207
+ * `globalInvocationIdName`: Global invocation ID variable name
208
+ * `workgroupIndexName`: Workgroup index variable name
209
+ * `synchronize`: Buffers to synchronize back to CPU
210
+ * `beforeCodes`: WGSL code snippets before the main code
211
+ * `onSuccess`: Success callback
212
+
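
These options can be combined in a single call. Below is a sketch assembled from examples that appear elsewhere in this diff (per the previous README text, the defaults are `workgroupSize: [32, 1, 1]`, `globalInvocationIdName: "grid"` and `workgroupIndexName: "index"`):

```javascript
import { GpuComputed, WGSL_Fun } from 'webgpu-computed';

const results = await GpuComputed.computed({
  // Helper WGSL emitted ahead of the generated main() entry point.
  beforeCodes: [WGSL_Fun.quat_rotate],
  code: `
    // Identity quaternion, so the rotated vector equals the input.
    let v = quat_rotate(vec4<f32>(0.0, 0.0, 0.0, 1.0), vec3<f32>(points[index], 0.0, 0.0));
    output[index] = v.x;
  `,
  data: {
    points: [1.0, 2.0, 3.0, 4.0],
    output: new Array(4).fill(0)
  },
  workgroupCount: [1],        // number of workgroups to dispatch
  workgroupSize: [32, 1, 1],  // threads per workgroup
  synchronize: ['output'],    // buffers copied back to the CPU
  onSuccess: ({ results }) => console.log('computed', results[0])
});
```
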
213
+ ## Supported Types
214
+
215
+ * `f32`
216
+ * `u32`
217
+ * `vec2`
218
+ * `vec3`
219
+ * `vec4`
220
+ * `mat3x3`
221
+ * `mat4x4`
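
Vector and matrix fields are typically used through struct-style data, as the previous version of this README showed at length; a condensed sketch of that pattern follows (the `map: true` option, visible in `src/index.js` in this diff, converts the flat result back into objects):

```javascript
import { GpuComputed } from 'webgpu-computed';

const data = {
  particles: [
    { position: [1, 2, 3], velocity: [0.1, 0.2, 0.3], mass: 1.0 },
    { position: [4, 5, 6], velocity: [0.4, 0.5, 0.6], mass: 2.0 }
  ],
  output: [
    { position: [0, 0, 0], velocity: [0, 0, 0], mass: 0 },
    { position: [0, 0, 0], velocity: [0, 0, 0], mass: 0 }
  ]
};

const code = `
  output[index].position = particles[index].position + particles[index].velocity;
  output[index].velocity = particles[index].velocity * 2.0;
  output[index].mass = particles[index].mass * 1.5;
`;

const results = await GpuComputed.computed({
  code,
  data,
  synchronize: ['output'],
  workgroupCount: [1],
  map: true // map the flat f32 buffer back into { position, velocity, mass } objects
});

console.log(results[0]);
```
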
480
222
 
481
223
  ## Browser Support
482
224
 
483
- - Chrome 113+
484
- - Edge 113+
485
- - Firefox (partial support)
486
- - Safari (partial support)
225
+ * Chrome 113+
226
+ * Edge 113+
227
+ * Firefox (partial)
228
+ * Safari (partial)
487
229
 
488
230
  Ensure the browser supports the WebGPU API.
489
231
 
490
- ## Contributing
491
-
492
- Welcome to submit Issues and Pull Requests!
493
-
494
232
  ## License
495
233
 
496
- ISC License
234
+ ISC License
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "webgpu-computed",
3
- "version": "0.0.15",
3
+ "version": "0.0.16",
4
4
  "description": "对webgpu的封装,处理了繁琐的前置工作,只关注wgsl本身逻辑",
5
5
  "main": "./src/index.js",
6
6
  "scripts": {
package/src/index.js CHANGED
@@ -1,71 +1,71 @@
1
- async function z(s, e = !0) {
1
+ async function z(f, e = !0) {
2
2
  if (typeof global < "u" && typeof require < "u")
3
- return require(s);
3
+ return require(f);
4
4
  {
5
5
  let r = await import(
6
6
  /* @vite-ignore */
7
- s
7
+ f
8
8
  );
9
9
  return e && (r = r.default), r;
10
10
  }
11
11
  }
12
- function q(s) {
13
- if (s instanceof Float32Array) return "f32";
14
- if (s instanceof Int32Array) return "i32";
15
- if (s instanceof Uint32Array) return "u32";
12
+ function q(f) {
13
+ if (f instanceof Float32Array) return "f32";
14
+ if (f instanceof Int32Array) return "i32";
15
+ if (f instanceof Uint32Array) return "u32";
16
16
  throw new Error(
17
- `Unsupported ArrayBufferView type: ${s.constructor.name}`
17
+ `Unsupported ArrayBufferView type: ${f.constructor.name}`
18
18
  );
19
19
  }
20
- function S(s) {
21
- return s && s[0].toUpperCase() + s.slice(1);
20
+ function S(f) {
21
+ return f && f[0].toUpperCase() + f.slice(1);
22
22
  }
23
- function x(s) {
24
- return O.includes(s);
23
+ function x(f) {
24
+ return G.includes(f);
25
25
  }
26
- function m(s) {
27
- return s && Array.isArray(s) && s.length ? s.every((e) => e && typeof e == "object" && "name" in e && "type" in e && x(e.type)) : !1;
26
+ function m(f) {
27
+ return f && Array.isArray(f) && f.length ? f.every((e) => e && typeof e == "object" && "name" in e && "type" in e && x(e.type)) : !1;
28
28
  }
29
- function $(s) {
30
- return s && "layout" in s && m(s.layout);
29
+ function B(f) {
30
+ return f && "layout" in f && m(f.layout);
31
31
  }
32
- function _(s, e) {
32
+ function _(f, e) {
33
33
  const r = E[e];
34
- return (r - s % r) % r;
34
+ return (r - f % r) % r;
35
35
  }
36
- function P(s, e) {
37
- return s + (e - s % e) % e;
36
+ function P(f, e) {
37
+ return f + (e - f % e) % e;
38
38
  }
39
- function U(s) {
40
- const e = Object.keys(s), r = e.map((c) => {
41
- const l = s[c];
39
+ function U(f) {
40
+ const e = Object.keys(f), r = e.map((c) => {
41
+ const l = f[c];
42
42
  if (Array.isArray(l)) {
43
43
  for (const d of ["vec2", "vec3", "vec4", "mat3x3", "mat4x4"])
44
- if (B[d] === l.length) return d;
44
+ if ($[d] === l.length) return d;
45
45
  throw new Error(`${c} 不支持的数组长度 ${l.length}`);
46
46
  }
47
47
  if (typeof l == "number") return "f32";
48
48
  throw new Error(`${c} 不支持的类型`);
49
49
  });
50
50
  if (e.length !== r.length) throw new Error("keys 与 types 长度不一致");
51
- let i = 0;
52
- const f = e.map((c, l) => {
51
+ let s = 0;
52
+ const u = e.map((c, l) => {
53
53
  const d = r[l];
54
- i += _(i, d);
55
- const u = {
54
+ s += _(s, d);
55
+ const a = {
56
56
  name: c,
57
57
  type: d,
58
- offset: i,
59
- size: B[d]
58
+ offset: s,
59
+ size: $[d]
60
60
  };
61
- return i += B[d], u;
61
+ return s += $[d], a;
62
62
  }), t = Math.max(...r.map((c) => E[c]));
63
63
  return {
64
- stride: i + (t - i % t) % t,
65
- layout: f
64
+ stride: s + (t - s % t) % t,
65
+ layout: u
66
66
  };
67
67
  }
68
- const B = {
68
+ const $ = {
69
69
  f32: 1,
70
70
  u32: 1,
71
71
  vec2: 2,
@@ -86,8 +86,8 @@ const B = {
86
86
  };
87
87
  class D extends Uint32Array {
88
88
  }
89
- const O = ["f32", "u32", "vec2", "vec3", "vec4", "mat3x3", "mat4x4"];
90
- let w = null, v = null;
89
+ const G = ["f32", "u32", "vec2", "vec3", "vec4", "mat3x3", "mat4x4"];
90
+ let w = null, A = null;
91
91
  class T {
92
92
  template;
93
93
  option;
@@ -105,9 +105,9 @@ class T {
105
105
  const r = (t) => {
106
106
  let n = 0;
107
107
  t.forEach((c) => {
108
- n += _(n, c.type), c.offset = n, c.size = B[c.type], n += c.size;
108
+ n += _(n, c.type), c.offset = n, c.size = $[c.type], n += c.size;
109
109
  });
110
- }, i = (t) => {
110
+ }, s = (t) => {
111
111
  r(t.layout);
112
112
  const n = t.layout[t.layout.length - 1], c = n.offset + n.size;
113
113
  let l = 1;
@@ -116,15 +116,15 @@ class T {
116
116
  };
117
117
  Object.keys(e).forEach((t) => {
118
118
  const n = e[t];
119
- m(n) ? r(n) : $(n) ? i(n) : Array.isArray(n) && typeof n[0] == "number" && (e[t] = new Float32Array());
119
+ m(n) ? r(n) : B(n) ? s(n) : Array.isArray(n) && typeof n[0] == "number" && (e[t] = new Float32Array());
120
120
  });
121
121
  }
122
122
  /** 获取Gpu设备
123
123
  * @returns
124
124
  */
125
125
  async getDevice() {
126
- if (!w || !v) throw new Error("webgpu未初始化或不可用");
127
- return { adapter: w, device: v };
126
+ if (!w || !A) throw new Error("webgpu未初始化或不可用");
127
+ return { adapter: w, device: A };
128
128
  }
129
129
  /**
130
130
  * 初始化计算管线
@@ -132,8 +132,8 @@ class T {
132
132
  async initPipeline() {
133
133
  if (!this.template) throw new Error("初始化计算管线错误,未找到可用数据模版");
134
134
  await T.init();
135
- const e = this.template, { device: r } = await this.getDevice(), i = [], f = [], t = [];
136
- this.device = r, Object.keys(e).forEach((a, y) => {
135
+ const e = this.template, { device: r } = await this.getDevice(), s = [], u = [], t = [];
136
+ this.device = r, Object.keys(e).forEach((i, y) => {
137
137
  if (t.push({
138
138
  binding: y,
139
139
  // 绑定到组里的0号位插槽
@@ -142,15 +142,15 @@ class T {
142
142
  buffer: {
143
143
  type: "storage"
144
144
  }
145
- }), m(e[a])) {
146
- const g = e[a], h = g.map((b) => `${b.name}:${b.type === "f32" ? "f32" : b.type + "<f32>"}`).join(","), A = `${S(a)}Struct`;
147
- i.push(`struct ${S(a)}Struct {${h}};`), f.push(`@group(0) @binding(${y}) var<storage, read_write> ${a}: ${A};`);
148
- } else if ($(e[a])) {
149
- const g = e[a], h = g.layout.map((b) => `${b.name}:${b.type === "f32" ? "f32" : b.type + "<f32>"}`).join(","), A = `${S(a)}Struct`;
150
- i.push(`struct ${A} {${h}};`), f.push(`@group(0) @binding(${y}) var<storage, read_write> ${a}: array<${A}>;`);
151
- } else if (ArrayBuffer.isView(e[a]) && !(e[a] instanceof DataView)) {
152
- const g = e[a], h = q(g);
153
- g instanceof D ? f.push(`@group(0) @binding(${y}) var<storage, read_write> ${a}: array<atomic<${h}>>;`) : f.push(`@group(0) @binding(${y}) var<storage, read_write> ${a}: array<${h}>;`);
145
+ }), m(e[i])) {
146
+ const g = e[i], h = g.map((b) => `${b.name}:${b.type === "f32" ? "f32" : b.type + "<f32>"}`).join(","), v = `${S(i)}Struct`;
147
+ s.push(`struct ${S(i)}Struct {${h}};`), u.push(`@group(0) @binding(${y}) var<storage, read_write> ${i}: ${v};`);
148
+ } else if (B(e[i])) {
149
+ const g = e[i], h = g.layout.map((b) => `${b.name}:${b.type === "f32" ? "f32" : b.type + "<f32>"}`).join(","), v = `${S(i)}Struct`;
150
+ s.push(`struct ${v} {${h}};`), u.push(`@group(0) @binding(${y}) var<storage, read_write> ${i}: array<${v}>;`);
151
+ } else if (ArrayBuffer.isView(e[i]) && !(e[i] instanceof DataView)) {
152
+ const g = e[i], h = q(g);
153
+ g instanceof D ? u.push(`@group(0) @binding(${y}) var<storage, read_write> ${i}: array<atomic<${h}>>;`) : u.push(`@group(0) @binding(${y}) var<storage, read_write> ${i}: array<${h}>;`);
154
154
  }
155
155
  });
156
156
  const {
@@ -158,18 +158,18 @@ class T {
158
158
  workgroupSize: c = [32, 1, 1],
159
159
  globalInvocationIdName: l = "grid",
160
160
  workgroupIndexName: d = "index",
161
- code: u = ""
161
+ code: a = ""
162
162
  } = this.option ?? {}, o = (
163
163
  /*wgsl*/
164
164
  `
165
- ${i.join("")}
166
- ${f.join("")}
165
+ ${s.join("")}
166
+ ${u.join("")}
167
167
  ${n.join(" ") ?? ""}
168
168
 
169
169
  @compute @workgroup_size(${c.join(",")})
170
170
  fn main(@builtin(global_invocation_id) ${l}: vec3<u32>) {
171
171
  var ${d} = ${l}.x;
172
- ${u}
172
+ ${a}
173
173
  }
174
174
  `
175
175
  );
@@ -196,50 +196,52 @@ ${f.join("")}
196
196
  createBindGroup(e, r) {
197
197
  if (!this.template) throw new Error("创建buffer组错误,未找到可用数据模版");
198
198
  if (!this.device) throw new Error("创建buffer组错误,未找到可用的gpu设备,请确保初始化完计算管线");
199
- const i = this.device, f = this.template, t = [], n = r?.buffers?.reduce((u, o) => (u.set(o.name, o.buffer), u), /* @__PURE__ */ new Map());
200
- function c(u, o, p = 0, a) {
201
- if (!a) {
199
+ const s = this.device, u = this.template, t = [], n = r?.buffers?.reduce((a, o) => (a.set(o.name, o.buffer), a), /* @__PURE__ */ new Map());
200
+ function c(a, o, p = 0, i) {
201
+ if (ArrayBuffer.isView(a) || Array.isArray(a)) return a;
202
+ if (!i) {
202
203
  const y = o[o.length - 1], g = y.offset + y.size;
203
204
  let h = 1;
204
- for (const A of o) h = Math.max(h, E[A.type]);
205
- a = a ?? new Array(P(g, h)).fill(0);
205
+ for (const v of o) h = Math.max(h, E[v.type]);
206
+ i = i ?? new Float32Array(P(g, h)).fill(0);
206
207
  }
207
208
  return o.forEach((y) => {
208
- let g = u[y.name];
209
+ let g = a[y.name];
209
210
  Array.isArray(g) || (g = [g]);
210
211
  for (let h = 0; h < y.size; h++)
211
- a[p + y.offset + h] = Number(g[h] ?? 0);
212
- }), a;
212
+ i[p + y.offset + h] = Number(g[h] ?? 0);
213
+ }), i;
213
214
  }
214
- function l(u, o) {
215
- const p = new Array(o.stride * u.length).fill(0);
216
- return u.forEach((a, y) => {
215
+ function l(a, o) {
216
+ if (ArrayBuffer.isView(a) || typeof a[0] == "number") return a;
217
+ const p = new Float32Array(o.stride * a.length).fill(0);
218
+ return a.forEach((i, y) => {
217
219
  const g = y * o.stride;
218
- c(a, o.layout, g, p);
220
+ c(i, o.layout, g, p);
219
221
  }), p;
220
222
  }
221
- return Object.keys(f).forEach((u) => {
222
- if (!(u in e)) {
223
- if (n && n.has(u))
224
- return t.push({ name: u, buffer: n.get(u) });
225
- throw new Error(`传入的数据中,不存在${u}字段`);
223
+ return Object.keys(u).forEach((a) => {
224
+ if (!(a in e)) {
225
+ if (n && n.has(a))
226
+ return t.push({ name: a, buffer: n.get(a) });
227
+ throw new Error(`传入的数据中,不存在${a}字段`);
226
228
  }
227
- const o = f[u], p = e[u];
228
- let a = [];
229
- m(o) ? a = c(p, o) : $(o) ? a = l(p, o) : (Array.isArray(p) || ArrayBuffer.isView(p)) && (a = p);
229
+ const o = u[a], p = e[a];
230
+ let i = [];
231
+ m(o) ? i = c(p, o) : B(o) ? i = l(p, o) : (Array.isArray(p) || ArrayBuffer.isView(p)) && (i = p);
230
232
  let y = null;
231
- if (o instanceof Float32Array ? y = new Float32Array(a) : o instanceof Uint32Array ? y = new Uint32Array(a) : o instanceof Int32Array ? y = new Int32Array(a) : y = new Float32Array(a), !y) throw new Error("不支持的数组类型" + o);
232
- const g = i.createBuffer({
233
+ if (o instanceof Float32Array ? y = new Float32Array(i) : o instanceof Uint32Array ? y = new Uint32Array(i) : o instanceof Int32Array ? y = new Int32Array(i) : y = new Float32Array(i), !y) throw new Error("不支持的数组类型" + o);
234
+ const g = s.createBuffer({
233
235
  size: y.byteLength,
234
236
  usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE
235
237
  });
236
- i.queue.writeBuffer(g, 0, y), t.push({ name: u, buffer: g });
238
+ s.queue.writeBuffer(g, 0, y), t.push({ name: a, buffer: g });
237
239
  }), {
238
- group: i.createBindGroup({
240
+ group: s.createBindGroup({
239
241
  layout: this.groupLayout,
240
- entries: t.map((u, o) => ({
242
+ entries: t.map((a, o) => ({
241
243
  binding: o,
242
- resource: { buffer: u.buffer }
244
+ resource: { buffer: a.buffer }
243
245
  }))
244
246
  }),
245
247
  buffers: t
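
This hunk also shows where the "🔄 Buffer reuse support" bullet from the README maps onto the code: when a template field is absent from the supplied data, `createBindGroup`'s optional second argument is searched for an existing GPU buffer by name and that buffer is bound instead of allocating a new one. A minimal usage sketch, inferred from the signature above (names are illustrative; `gpuComputed` is assumed to be an initialized instance with `input` and `output` fields):

```javascript
// First pass: buffers are created for every field.
const groupA = gpuComputed.createBindGroup({
  input: [1, 2, 3, 4],
  output: new Array(4).fill(0)
});
await gpuComputed.computed(groupA, [1], ['output']);

// Second pass: only `input` changes; the existing GPU buffer for `output` is reused.
const groupB = gpuComputed.createBindGroup(
  { input: [5, 6, 7, 8] },
  { buffers: groupA.buffers.filter((b) => b.name === 'output') }
);
await gpuComputed.computed(groupB, [1], ['output']);
```
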
@@ -251,23 +253,23 @@ ${f.join("")}
251
253
  */
252
254
  dataMap(e, r) {
253
255
  if (!(r in this.template)) throw new Error("未找到数据字段:" + r);
254
- if ($(this.template[r])) {
255
- const i = this.template[r], f = e.length / i.stride, t = [];
256
- for (let n = 0; n < f; n++) {
257
- const c = n * i.stride, l = {};
258
- i.layout.forEach((d) => {
259
- const u = e.slice(c + d.offset, c + d.offset + d.size);
260
- l[d.name] = u.length === 1 ? u[0] : u;
256
+ if (B(this.template[r])) {
257
+ const s = this.template[r], u = e.length / s.stride, t = [];
258
+ for (let n = 0; n < u; n++) {
259
+ const c = n * s.stride, l = {};
260
+ s.layout.forEach((d) => {
261
+ const a = e.slice(c + d.offset, c + d.offset + d.size);
262
+ l[d.name] = a.length === 1 ? a[0] : a;
261
263
  }), t.push(l);
262
264
  }
263
265
  return t;
264
266
  }
265
267
  if (m(this.template[r])) {
266
- const i = this.template[r], f = {};
267
- return i.forEach((t) => {
268
+ const s = this.template[r], u = {};
269
+ return s.forEach((t) => {
268
270
  const n = e.slice(t.offset, t.offset + t.size);
269
- f[t.name] = n.length === 1 ? n[0] : n;
270
- }), f;
271
+ u[t.name] = n.length === 1 ? n[0] : n;
272
+ }), u;
271
273
  }
272
274
  return e;
273
275
  }
@@ -277,69 +279,69 @@ ${f.join("")}
277
279
  * @param synchronize 需要同步的数据字段
278
280
  * @returns
279
281
  */
280
- async computed(e, r, i = []) {
282
+ async computed(e, r, s = []) {
281
283
  if (!this.pipeline) throw new Error("未找到可用计算管线,请确保计算管线已经创建成功");
282
- const f = this.device, t = this.pipeline, n = f.createCommandEncoder(), c = n.beginComputePass();
284
+ const u = this.device, t = this.pipeline, n = u.createCommandEncoder(), c = n.beginComputePass();
283
285
  c.setPipeline(t), c.setBindGroup(0, e.group), c.dispatchWorkgroups(r[0], r[1], r[2]), c.end();
284
286
  const l = e.buffers?.map((o) => {
285
- if (i?.includes(o.name)) {
286
- const p = f.createBuffer({
287
+ if (s?.includes(o.name)) {
288
+ const p = u.createBuffer({
287
289
  size: o.buffer.size,
288
290
  usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST
289
291
  });
290
292
  return n.copyBufferToBuffer(o.buffer, 0, p, 0, p.size), { buffer: p, name: o.name };
291
293
  }
292
294
  }).filter((o) => !!o);
293
- f.queue.submit([n.finish()]), await f.queue.onSubmittedWorkDone();
295
+ u.queue.submit([n.finish()]), await u.queue.onSubmittedWorkDone();
294
296
  const d = /* @__PURE__ */ new Map();
295
297
  return await Promise.all(
296
298
  l.map(async (o) => {
297
299
  await o.buffer.mapAsync(GPUMapMode.READ);
298
300
  const p = o.buffer.getMappedRange();
299
- let a = null;
300
- this.template[o.name] instanceof Float32Array ? a = new Float32Array(p) : this.template[o.name] instanceof Uint32Array ? a = new Uint32Array(p) : this.template[o.name] instanceof Int32Array ? a = new Int32Array(p) : a = new Float32Array(p);
301
- const y = [...a];
301
+ let i = null;
302
+ this.template[o.name] instanceof Float32Array ? i = new Float32Array(p) : this.template[o.name] instanceof Uint32Array ? i = new Uint32Array(p) : this.template[o.name] instanceof Int32Array ? i = new Int32Array(p) : i = new Float32Array(p);
303
+ const y = [...i];
302
304
  d.set(o.name, y);
303
305
  })
304
- ), i.map((o) => d.get(o));
306
+ ), s.map((o) => d.get(o));
305
307
  }
306
308
  /** 初始化gpu设备
307
309
  * @returns
308
310
  */
309
311
  static async init() {
310
- if (!(w && v)) {
312
+ if (!(w && A)) {
311
313
  if (typeof globalThis < "u" && typeof window > "u") {
312
314
  const { create: e, globals: r } = await z("webgpu", !1);
313
315
  Object.assign(globalThis, r), globalThis.navigator || (globalThis.navigator = {}), Object.assign(globalThis.navigator, { gpu: e([]) });
314
316
  }
315
317
  if (!navigator.gpu) throw new Error("该环境不支持webgpu");
316
318
  if (w || (w = await navigator.gpu.requestAdapter({})), !w) throw new Error("获取适配器失败");
317
- if (v = await w.requestDevice(), !w) throw new Error("获取设备失败");
319
+ if (A = await w.requestDevice(), !w) throw new Error("获取设备失败");
318
320
  }
319
321
  }
320
322
  /** 注销gpu设备
321
323
  */
322
324
  static destroy() {
323
- v && v.destroy(), v = null;
325
+ A && A.destroy(), A = null;
324
326
  }
325
327
  /**
326
328
  * @param data
327
329
  */
328
330
  static buildBufferTypeByData(e) {
329
- return Object.keys(e).reduce((i, f) => {
330
- let t = e[f];
331
+ return Object.keys(e).reduce((s, u) => {
332
+ let t = e[u];
331
333
  if (Array.isArray(t) && typeof t[0] == "number" && (t = new Float32Array()), Array.isArray(t))
332
334
  if (typeof t[0] == "object" || t.length) {
333
335
  const n = U(t[0]);
334
- i[f] = n;
335
- } else console.log(`字段:${f}, 不支持该值对应数据类型或数组为空`);
336
+ s[u] = n;
337
+ } else console.log(`字段:${u}, 不支持该值对应数据类型或数组为空`);
336
338
  else if (ArrayBuffer.isView(t) && !(t instanceof DataView))
337
- i[f] = t;
339
+ s[u] = t;
338
340
  else if (typeof t == "object") {
339
341
  const n = U(t);
340
- i[f] = n.layout;
341
- } else console.log(`字段:${f}, 不支持的数据类型`);
342
- return i;
342
+ s[u] = n.layout;
343
+ } else console.log(`字段:${u}, 不支持的数据类型`);
344
+ return s;
343
345
  }, {});
344
346
  }
345
347
  /** 通过数据创建
@@ -347,8 +349,8 @@ ${f.join("")}
347
349
  * @returns
348
350
  */
349
351
  static async fromByData(e) {
350
- let { data: r, ...i } = e;
351
- const f = this.buildBufferTypeByData(r), t = new T(f, i);
352
+ let { data: r, ...s } = e;
353
+ const u = this.buildBufferTypeByData(r), t = new T(u, s);
352
354
  return await t.initPipeline(), t;
353
355
  }
354
356
  /** 快捷计算方法
@@ -356,12 +358,12 @@ ${f.join("")}
356
358
  * @returns
357
359
  */
358
360
  static async computed(e) {
359
- let { data: r, map: i = !1, workgroupCount: f, synchronize: t, onSuccess: n, ...c } = e;
360
- const l = await this.fromByData({ data: r, ...c }), d = l.createBindGroup(r), u = await l.computed(d, f, t);
361
- return n && n({ gpuComputed: l, group: d, results: u }), i ? u.map((o, p) => l.dataMap(o, t[p])) : u;
361
+ let { data: r, map: s = !1, workgroupCount: u, synchronize: t, onSuccess: n, ...c } = e;
362
+ const l = await this.fromByData({ data: r, ...c }), d = l.createBindGroup(r), a = await l.computed(d, u, t);
363
+ return n && n({ gpuComputed: l, group: d, results: a }), s ? a.map((o, p) => l.dataMap(o, t[p])) : a;
362
364
  }
363
365
  }
364
- const j = (
366
+ const M = (
365
367
  /* wgsl */
366
368
  `
367
369
  fn quat_rotate(q: vec4<f32>, v: vec3<f32>) -> vec3<f32> {
@@ -370,7 +372,7 @@ const j = (
370
372
  return v + q.w * t + cross(q.xyz, t);
371
373
  }
372
374
  `
373
- ), G = (
375
+ ), O = (
374
376
  /* wgsl */
375
377
  `
376
378
  fn point_in_obb(
@@ -390,13 +392,13 @@ const j = (
390
392
  return all(abs(pLocal) <= halfSize);
391
393
  }
392
394
  `
393
- ), M = /* @__PURE__ */ Object.freeze(/* @__PURE__ */ Object.defineProperty({
395
+ ), j = /* @__PURE__ */ Object.freeze(/* @__PURE__ */ Object.defineProperty({
394
396
  __proto__: null,
395
- point_in_obb: G,
396
- quat_rotate: j
397
+ point_in_obb: O,
398
+ quat_rotate: M
397
399
  }, Symbol.toStringTag, { value: "Module" }));
398
400
  export {
399
401
  D as AtomicUint32Array,
400
402
  T as GpuComputed,
401
- M as WGSL_Fun
403
+ j as WGSL_Fun
402
404
  };