@simulatte/webgpu-doe 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1124 @@
1
+ /*
2
+ * doe_napi.c — N-API binding for libdoe_webgpu (Doe WebGPU runtime).
3
+ *
4
+ * Loads the Doe shared library at runtime via dlopen and exposes the core
5
+ * WebGPU compute surface to JavaScript through Node.js N-API.
6
+ *
7
+ * All WGPUInstance/Adapter/Device/Buffer/etc. handles are wrapped as
8
+ * napi_external values. Struct descriptors are marshaled from JS objects.
9
+ * Async operations (requestAdapter, requestDevice, bufferMapAsync) use
10
+ * synchronous WaitAny blocking — acceptable for headless compute.
11
+ */
12
+
13
+ #include <node_api.h>
14
+ #include <stdlib.h>
15
+ #include <string.h>
16
+ #include <stdint.h>
17
+
18
+ #ifdef _WIN32
19
+ #include <windows.h>
20
+ #define LIB_OPEN(p) LoadLibraryA(p)
21
+ #define LIB_SYM(h, n) ((void*)GetProcAddress((HMODULE)(h), n))
22
+ #define LIB_CLOSE(h) FreeLibrary((HMODULE)(h))
23
+ #else
24
+ #include <dlfcn.h>
25
+ #define LIB_OPEN(p) dlopen(p, RTLD_NOW | RTLD_LOCAL)
26
+ #define LIB_SYM(h, n) dlsym(h, n)
27
+ #define LIB_CLOSE(h) dlclose(h)
28
+ #endif
29
+
30
+ /* ================================================================
31
+ * WebGPU C ABI type definitions (matching wgpu_types.zig)
32
+ * ================================================================ */
33
+
34
/* Opaque handle types. The native runtime owns the pointees; JS only
 * shuttles the raw pointers around as napi_external values. */
typedef void* WGPUInstance;
typedef void* WGPUAdapter;
typedef void* WGPUDevice;
typedef void* WGPUQueue;
typedef void* WGPUBuffer;
typedef void* WGPUShaderModule;
typedef void* WGPUComputePipeline;
typedef void* WGPURenderPipeline;
typedef void* WGPUBindGroupLayout;
typedef void* WGPUBindGroup;
typedef void* WGPUPipelineLayout;
typedef void* WGPUCommandEncoder;
typedef void* WGPUCommandBuffer;
typedef void* WGPUComputePassEncoder;
typedef void* WGPUQuerySet;
typedef void* WGPUTexture;
typedef void* WGPUTextureView;
typedef void* WGPUSampler;
typedef uint64_t WGPUFlags;   /* bitflag container (usage, visibility, ...) */
typedef uint32_t WGPUBool;    /* ABI-stable boolean: 0 or 1 */

/* Sentinels defined by the WebGPU C ABI. */
#define WGPU_STRLEN SIZE_MAX        /* string length unknown / NUL-terminated */
#define WGPU_WHOLE_SIZE UINT64_MAX  /* "use the remainder of the buffer" */

typedef struct { uint64_t id; } WGPUFuture;                          /* async-op token, resolved via WaitAny */
typedef struct { const char* data; size_t length; } WGPUStringView;  /* non-owning string slice */
typedef struct { WGPUFuture future; WGPUBool completed; } WGPUFutureWaitInfo;

/* Header embedded at the start of every chainable extension struct. */
typedef struct { void* next; uint32_t sType; } WGPUChainedStruct;

typedef enum {
    /* Enables blocking wgpuInstanceWaitAny with a timeout. */
    WGPUInstanceFeatureName_TimedWaitAny = 0x00000001,
} WGPUInstanceFeatureName;

/* Instance limits; only the WaitAny queue depth is used here. */
typedef struct {
    WGPUChainedStruct* nextInChain;
    size_t timedWaitAnyMaxCount;
} WGPUInstanceLimits;

typedef struct {
    WGPUChainedStruct* nextInChain;
    size_t requiredFeatureCount;
    const WGPUInstanceFeatureName* requiredFeatures;
    const WGPUInstanceLimits* requiredLimits;
} WGPUInstanceDescriptor;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
    uint64_t usage;               /* WGPUBufferUsage bitflags */
    uint64_t size;                /* byte size */
    WGPUBool mappedAtCreation;
} WGPUBufferDescriptor;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
} WGPUCommandEncoderDescriptor;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
} WGPUCommandBufferDescriptor;

/* Chained extension carrying WGSL source; chain.sType selects it. */
typedef struct {
    WGPUChainedStruct chain;
    WGPUStringView code;
} WGPUShaderSourceWGSL;

typedef struct {
    void* nextInChain;            /* points at a WGPUShaderSourceWGSL */
    WGPUStringView label;
} WGPUShaderModuleDescriptor;

typedef struct {
    void* nextInChain;
    WGPUShaderModule module;
    WGPUStringView entryPoint;
    size_t constantCount;
    void* constants;
} WGPUComputeState;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
    void* layout;                 /* WGPUPipelineLayout or NULL for auto layout */
    WGPUComputeState compute;
} WGPUComputePipelineDescriptor;

typedef struct {
    void* nextInChain;
    uint32_t type;                /* buffer binding type enum */
    WGPUBool hasDynamicOffset;
    uint64_t minBindingSize;
} WGPUBufferBindingLayout;

typedef struct {
    void* nextInChain;
    uint32_t type;
} WGPUSamplerBindingLayout;

typedef struct {
    void* nextInChain;
    uint32_t sampleType;
    uint32_t viewDimension;
    WGPUBool multisampled;
} WGPUTextureBindingLayout;

typedef struct {
    void* nextInChain;
    uint32_t access;
    uint32_t format;
    uint32_t viewDimension;
} WGPUStorageTextureBindingLayout;

/* One entry per binding slot; unused sub-layouts stay zeroed. */
typedef struct {
    void* nextInChain;
    uint32_t binding;
    uint64_t visibility;          /* shader-stage bitflags */
    uint32_t bindingArraySize;
    WGPUBufferBindingLayout buffer;
    WGPUSamplerBindingLayout sampler;
    WGPUTextureBindingLayout texture;
    WGPUStorageTextureBindingLayout storageTexture;
} WGPUBindGroupLayoutEntry;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
    size_t entryCount;
    const WGPUBindGroupLayoutEntry* entries;
} WGPUBindGroupLayoutDescriptor;

typedef struct {
    void* nextInChain;
    uint32_t binding;
    WGPUBuffer buffer;
    uint64_t offset;
    uint64_t size;                /* WGPU_WHOLE_SIZE binds to end of buffer */
    WGPUSampler sampler;
    WGPUTextureView textureView;
} WGPUBindGroupEntry;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
    WGPUBindGroupLayout layout;
    size_t entryCount;
    const WGPUBindGroupEntry* entries;
} WGPUBindGroupDescriptor;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
    size_t bindGroupLayoutCount;
    const WGPUBindGroupLayout* bindGroupLayouts;
    uint32_t immediateSize;
} WGPUPipelineLayoutDescriptor;

typedef struct {
    void* nextInChain;
    WGPUStringView label;
    void* timestampWrites;        /* unused by this binding */
} WGPUComputePassDescriptor;

/* Callback types — fired by the runtime when the matching future resolves.
 * userdata1/userdata2 are pass-through pointers supplied at request time. */
typedef void (*WGPURequestAdapterCallback)(
    uint32_t status, WGPUAdapter adapter, WGPUStringView message,
    void* userdata1, void* userdata2);

typedef void (*WGPURequestDeviceCallback)(
    uint32_t status, WGPUDevice device, WGPUStringView message,
    void* userdata1, void* userdata2);

typedef void (*WGPUBufferMapCallback)(
    uint32_t status, WGPUStringView message,
    void* userdata1, void* userdata2);
211
+
212
+ /* ================================================================
213
+ * Function pointer types and global storage
214
+ * ================================================================ */
215
+
216
/* DECL_PFN(ret, name, params): declares the function-pointer type PFN_<name>
 * and a file-local slot pfn_<name>, NULL until doe_load_library() fills it. */
#define DECL_PFN(ret, name, params) typedef ret (*PFN_##name) params; static PFN_##name pfn_##name = NULL

DECL_PFN(WGPUInstance, wgpuCreateInstance, (const void*));
DECL_PFN(void, wgpuInstanceRelease, (WGPUInstance));
DECL_PFN(uint32_t, wgpuInstanceWaitAny, (WGPUInstance, size_t, WGPUFutureWaitInfo*, uint64_t));
DECL_PFN(void, wgpuInstanceProcessEvents, (WGPUInstance));
DECL_PFN(void, wgpuAdapterRelease, (WGPUAdapter));
DECL_PFN(WGPUBool, wgpuAdapterHasFeature, (WGPUAdapter, uint32_t));
DECL_PFN(void, wgpuDeviceRelease, (WGPUDevice));
DECL_PFN(WGPUBool, wgpuDeviceHasFeature, (WGPUDevice, uint32_t));
DECL_PFN(WGPUQueue, wgpuDeviceGetQueue, (WGPUDevice));
DECL_PFN(WGPUBuffer, wgpuDeviceCreateBuffer, (WGPUDevice, const WGPUBufferDescriptor*));
DECL_PFN(WGPUShaderModule, wgpuDeviceCreateShaderModule, (WGPUDevice, const WGPUShaderModuleDescriptor*));
DECL_PFN(void, wgpuShaderModuleRelease, (WGPUShaderModule));
DECL_PFN(WGPUComputePipeline, wgpuDeviceCreateComputePipeline, (WGPUDevice, const WGPUComputePipelineDescriptor*));
DECL_PFN(void, wgpuComputePipelineRelease, (WGPUComputePipeline));
DECL_PFN(WGPUBindGroupLayout, wgpuDeviceCreateBindGroupLayout, (WGPUDevice, const WGPUBindGroupLayoutDescriptor*));
DECL_PFN(void, wgpuBindGroupLayoutRelease, (WGPUBindGroupLayout));
DECL_PFN(WGPUBindGroup, wgpuDeviceCreateBindGroup, (WGPUDevice, const WGPUBindGroupDescriptor*));
DECL_PFN(void, wgpuBindGroupRelease, (WGPUBindGroup));
DECL_PFN(WGPUPipelineLayout, wgpuDeviceCreatePipelineLayout, (WGPUDevice, const WGPUPipelineLayoutDescriptor*));
DECL_PFN(void, wgpuPipelineLayoutRelease, (WGPUPipelineLayout));
DECL_PFN(WGPUCommandEncoder, wgpuDeviceCreateCommandEncoder, (WGPUDevice, const WGPUCommandEncoderDescriptor*));
DECL_PFN(void, wgpuCommandEncoderRelease, (WGPUCommandEncoder));
DECL_PFN(WGPUComputePassEncoder, wgpuCommandEncoderBeginComputePass, (WGPUCommandEncoder, const WGPUComputePassDescriptor*));
DECL_PFN(void, wgpuCommandEncoderCopyBufferToBuffer, (WGPUCommandEncoder, WGPUBuffer, uint64_t, WGPUBuffer, uint64_t, uint64_t));
DECL_PFN(WGPUCommandBuffer, wgpuCommandEncoderFinish, (WGPUCommandEncoder, const WGPUCommandBufferDescriptor*));
DECL_PFN(void, wgpuComputePassEncoderSetPipeline, (WGPUComputePassEncoder, WGPUComputePipeline));
DECL_PFN(void, wgpuComputePassEncoderSetBindGroup, (WGPUComputePassEncoder, uint32_t, WGPUBindGroup, size_t, const uint32_t*));
DECL_PFN(void, wgpuComputePassEncoderDispatchWorkgroups, (WGPUComputePassEncoder, uint32_t, uint32_t, uint32_t));
DECL_PFN(void, wgpuComputePassEncoderEnd, (WGPUComputePassEncoder));
DECL_PFN(void, wgpuComputePassEncoderRelease, (WGPUComputePassEncoder));
DECL_PFN(void, wgpuQueueSubmit, (WGPUQueue, size_t, const WGPUCommandBuffer*));
DECL_PFN(void, wgpuQueueWriteBuffer, (WGPUQueue, WGPUBuffer, uint64_t, const void*, size_t));
DECL_PFN(void, wgpuQueueRelease, (WGPUQueue));
DECL_PFN(void, wgpuBufferRelease, (WGPUBuffer));
DECL_PFN(void, wgpuBufferUnmap, (WGPUBuffer));
DECL_PFN(const void*, wgpuBufferGetConstMappedRange, (WGPUBuffer, size_t, size_t));
DECL_PFN(void*, wgpuBufferGetMappedRange, (WGPUBuffer, size_t, size_t));
DECL_PFN(void, wgpuCommandBufferRelease, (WGPUCommandBuffer));

/* Flat helpers exported by libdoe_webgpu for FFI-friendly adapter/device
 * requests (callback + two userdata pointers instead of a nested struct). */
DECL_PFN(WGPUFuture, doeRequestAdapterFlat, (WGPUInstance, const void*, uint32_t, WGPURequestAdapterCallback, void*, void*));
DECL_PFN(WGPUFuture, doeRequestDeviceFlat, (WGPUAdapter, const void*, uint32_t, WGPURequestDeviceCallback, void*, void*));
/* Callback-info struct passed BY VALUE to wgpuBufferMapAsync. */
typedef struct {
    void* nextInChain;
    uint32_t mode;                /* callback mode; 1 = WaitAnyOnly */
    WGPUBufferMapCallback callback;
    void* userdata1;
    void* userdata2;
} WGPUBufferMapCallbackInfo;

/* Declared by hand (not via DECL_PFN) because the struct parameter is
 * passed by value; resolved from the "wgpuBufferMapAsync" export. */
typedef WGPUFuture (*PFN_wgpuBufferMapAsync2)(WGPUBuffer, uint64_t, size_t, size_t, WGPUBufferMapCallbackInfo);
static PFN_wgpuBufferMapAsync2 pfn_wgpuBufferMapAsync2 = NULL;

/* Handle of the currently loaded shared library; NULL when unloaded. */
static void* g_lib = NULL;
272
+
273
+ /* ================================================================
274
+ * N-API utility helpers
275
+ * ================================================================ */
276
+
277
/* Throw a JS Error and bail out of the current binding returning NULL. */
#define NAPI_THROW(env, msg) do { napi_throw_error(env, NULL, msg); return NULL; } while(0)
/* Fetch exactly n call arguments into _args[]. Intentionally NOT wrapped in
 * do/while — it declares _argc/_args in the caller's scope for later use. */
#define NAPI_ASSERT_ARGC(env, info, n) \
  size_t _argc = n; napi_value _args[n]; \
  if (napi_get_cb_info(env, info, &_argc, _args, NULL, NULL) != napi_ok) NAPI_THROW(env, "napi_get_cb_info failed")
281
+
282
+ static void* unwrap_ptr(napi_env env, napi_value val) {
283
+ void* ptr = NULL;
284
+ napi_get_value_external(env, val, &ptr);
285
+ return ptr;
286
+ }
287
+
288
+ static napi_value wrap_ptr(napi_env env, void* ptr) {
289
+ napi_value result;
290
+ if (napi_create_external(env, ptr, NULL, NULL, &result) != napi_ok) return NULL;
291
+ return result;
292
+ }
293
+
294
+ static uint32_t get_uint32_prop(napi_env env, napi_value obj, const char* key) {
295
+ napi_value val;
296
+ napi_get_named_property(env, obj, key, &val);
297
+ uint32_t out = 0;
298
+ napi_get_value_uint32(env, val, &out);
299
+ return out;
300
+ }
301
+
302
+ static int64_t get_int64_prop(napi_env env, napi_value obj, const char* key) {
303
+ napi_value val;
304
+ napi_get_named_property(env, obj, key, &val);
305
+ int64_t out = 0;
306
+ napi_get_value_int64(env, val, &out);
307
+ return out;
308
+ }
309
+
310
+ static bool get_bool_prop(napi_env env, napi_value obj, const char* key) {
311
+ napi_value val;
312
+ napi_get_named_property(env, obj, key, &val);
313
+ napi_valuetype vt;
314
+ napi_typeof(env, val, &vt);
315
+ if (vt != napi_boolean) return false;
316
+ bool out = false;
317
+ napi_get_value_bool(env, val, &out);
318
+ return out;
319
+ }
320
+
321
+ static bool has_prop(napi_env env, napi_value obj, const char* key) {
322
+ bool result = false;
323
+ napi_has_named_property(env, obj, key, &result);
324
+ return result;
325
+ }
326
+
327
+ static napi_value get_prop(napi_env env, napi_value obj, const char* key) {
328
+ napi_value val;
329
+ napi_get_named_property(env, obj, key, &val);
330
+ return val;
331
+ }
332
+
333
+ static napi_valuetype prop_type(napi_env env, napi_value obj, const char* key) {
334
+ napi_value val;
335
+ napi_get_named_property(env, obj, key, &val);
336
+ napi_valuetype vt;
337
+ napi_typeof(env, val, &vt);
338
+ return vt;
339
+ }
340
+
341
+ /* ================================================================
342
+ * Library loading
343
+ * ================================================================ */
344
+
345
+ #define LOAD_SYM(name) pfn_##name = (PFN_##name)LIB_SYM(g_lib, #name)
346
+
347
+ static napi_value doe_load_library(napi_env env, napi_callback_info info) {
348
+ NAPI_ASSERT_ARGC(env, info, 1);
349
+ size_t path_len = 0;
350
+ napi_get_value_string_utf8(env, _args[0], NULL, 0, &path_len);
351
+ char* path = (char*)malloc(path_len + 1);
352
+ napi_get_value_string_utf8(env, _args[0], path, path_len + 1, &path_len);
353
+
354
+ if (g_lib) { LIB_CLOSE(g_lib); g_lib = NULL; }
355
+ g_lib = LIB_OPEN(path);
356
+ free(path);
357
+ if (!g_lib) NAPI_THROW(env, "Failed to load libdoe_webgpu");
358
+
359
+ LOAD_SYM(wgpuCreateInstance);
360
+ LOAD_SYM(wgpuInstanceRelease);
361
+ LOAD_SYM(wgpuInstanceWaitAny);
362
+ LOAD_SYM(wgpuInstanceProcessEvents);
363
+ LOAD_SYM(wgpuAdapterRelease);
364
+ LOAD_SYM(wgpuAdapterHasFeature);
365
+ LOAD_SYM(wgpuDeviceRelease);
366
+ LOAD_SYM(wgpuDeviceHasFeature);
367
+ LOAD_SYM(wgpuDeviceGetQueue);
368
+ LOAD_SYM(wgpuDeviceCreateBuffer);
369
+ LOAD_SYM(wgpuDeviceCreateShaderModule);
370
+ LOAD_SYM(wgpuShaderModuleRelease);
371
+ LOAD_SYM(wgpuDeviceCreateComputePipeline);
372
+ LOAD_SYM(wgpuComputePipelineRelease);
373
+ LOAD_SYM(wgpuDeviceCreateBindGroupLayout);
374
+ LOAD_SYM(wgpuBindGroupLayoutRelease);
375
+ LOAD_SYM(wgpuDeviceCreateBindGroup);
376
+ LOAD_SYM(wgpuBindGroupRelease);
377
+ LOAD_SYM(wgpuDeviceCreatePipelineLayout);
378
+ LOAD_SYM(wgpuPipelineLayoutRelease);
379
+ LOAD_SYM(wgpuDeviceCreateCommandEncoder);
380
+ LOAD_SYM(wgpuCommandEncoderRelease);
381
+ LOAD_SYM(wgpuCommandEncoderBeginComputePass);
382
+ LOAD_SYM(wgpuCommandEncoderCopyBufferToBuffer);
383
+ LOAD_SYM(wgpuCommandEncoderFinish);
384
+ LOAD_SYM(wgpuComputePassEncoderSetPipeline);
385
+ LOAD_SYM(wgpuComputePassEncoderSetBindGroup);
386
+ LOAD_SYM(wgpuComputePassEncoderDispatchWorkgroups);
387
+ LOAD_SYM(wgpuComputePassEncoderEnd);
388
+ LOAD_SYM(wgpuComputePassEncoderRelease);
389
+ LOAD_SYM(wgpuQueueSubmit);
390
+ LOAD_SYM(wgpuQueueWriteBuffer);
391
+ LOAD_SYM(wgpuQueueRelease);
392
+ LOAD_SYM(wgpuBufferRelease);
393
+ LOAD_SYM(wgpuBufferUnmap);
394
+ LOAD_SYM(wgpuBufferGetConstMappedRange);
395
+ LOAD_SYM(wgpuBufferGetMappedRange);
396
+ LOAD_SYM(wgpuCommandBufferRelease);
397
+ LOAD_SYM(doeRequestAdapterFlat);
398
+ LOAD_SYM(doeRequestDeviceFlat);
399
+ pfn_wgpuBufferMapAsync2 = (PFN_wgpuBufferMapAsync2)LIB_SYM(g_lib, "wgpuBufferMapAsync");
400
+
401
+ napi_value result;
402
+ napi_get_boolean(env, true, &result);
403
+ return result;
404
+ }
405
+
406
+ /* ================================================================
407
+ * Instance
408
+ * ================================================================ */
409
+
410
+ static napi_value doe_create_instance(napi_env env, napi_callback_info info) {
411
+ (void)info;
412
+ if (!pfn_wgpuCreateInstance) NAPI_THROW(env, "Library not loaded");
413
+
414
+ WGPUInstanceFeatureName features[] = { WGPUInstanceFeatureName_TimedWaitAny };
415
+ WGPUInstanceLimits limits = { .nextInChain = NULL, .timedWaitAnyMaxCount = 64 };
416
+ WGPUInstanceDescriptor desc = {
417
+ .nextInChain = NULL,
418
+ .requiredFeatureCount = 1,
419
+ .requiredFeatures = features,
420
+ .requiredLimits = &limits,
421
+ };
422
+ WGPUInstance inst = pfn_wgpuCreateInstance(&desc);
423
+ if (!inst) NAPI_THROW(env, "wgpuCreateInstance returned NULL");
424
+ return wrap_ptr(env, inst);
425
+ }
426
+
427
+ static napi_value doe_instance_release(napi_env env, napi_callback_info info) {
428
+ NAPI_ASSERT_ARGC(env, info, 1);
429
+ void* inst = unwrap_ptr(env, _args[0]);
430
+ if (inst) pfn_wgpuInstanceRelease(inst);
431
+ return NULL;
432
+ }
433
+
434
+ /* ================================================================
435
+ * Adapter (synchronous requestAdapter via WaitAny)
436
+ * ================================================================ */
437
+
438
+ typedef struct {
439
+ uint32_t status;
440
+ WGPUAdapter adapter;
441
+ } AdapterRequestResult;
442
+
443
+ static void adapter_callback(uint32_t status, WGPUAdapter adapter,
444
+ WGPUStringView message, void* userdata1, void* userdata2) {
445
+ (void)message; (void)userdata2;
446
+ AdapterRequestResult* r = (AdapterRequestResult*)userdata1;
447
+ r->status = status;
448
+ r->adapter = adapter;
449
+ }
450
+
451
+ static napi_value doe_request_adapter(napi_env env, napi_callback_info info) {
452
+ NAPI_ASSERT_ARGC(env, info, 1);
453
+ WGPUInstance inst = unwrap_ptr(env, _args[0]);
454
+ if (!inst) NAPI_THROW(env, "Invalid instance");
455
+
456
+ AdapterRequestResult result = {0, NULL};
457
+ WGPUFuture future = pfn_doeRequestAdapterFlat(
458
+ inst, NULL, 1 /* WaitAnyOnly */, adapter_callback, &result, NULL);
459
+
460
+ WGPUFutureWaitInfo wait_info = { .future = future, .completed = 0 };
461
+ uint32_t wait_status = pfn_wgpuInstanceWaitAny(
462
+ inst, 1, &wait_info, (uint64_t)5000000000ULL);
463
+
464
+ if (wait_status != 1 || result.status != 1 || !result.adapter)
465
+ NAPI_THROW(env, "requestAdapter failed");
466
+
467
+ return wrap_ptr(env, result.adapter);
468
+ }
469
+
470
+ static napi_value doe_adapter_release(napi_env env, napi_callback_info info) {
471
+ NAPI_ASSERT_ARGC(env, info, 1);
472
+ void* adapter = unwrap_ptr(env, _args[0]);
473
+ if (adapter) pfn_wgpuAdapterRelease(adapter);
474
+ return NULL;
475
+ }
476
+
477
+ /* ================================================================
478
+ * Device (synchronous requestDevice via WaitAny)
479
+ * ================================================================ */
480
+
481
+ typedef struct {
482
+ uint32_t status;
483
+ WGPUDevice device;
484
+ } DeviceRequestResult;
485
+
486
+ static void device_callback(uint32_t status, WGPUDevice device,
487
+ WGPUStringView message, void* userdata1, void* userdata2) {
488
+ (void)message; (void)userdata2;
489
+ DeviceRequestResult* r = (DeviceRequestResult*)userdata1;
490
+ r->status = status;
491
+ r->device = device;
492
+ }
493
+
494
+ static napi_value doe_request_device(napi_env env, napi_callback_info info) {
495
+ NAPI_ASSERT_ARGC(env, info, 2);
496
+ WGPUInstance inst = unwrap_ptr(env, _args[0]);
497
+ WGPUAdapter adapter = unwrap_ptr(env, _args[1]);
498
+ if (!inst || !adapter) NAPI_THROW(env, "Invalid instance or adapter");
499
+
500
+ DeviceRequestResult result = {0, NULL};
501
+ WGPUFuture future = pfn_doeRequestDeviceFlat(
502
+ adapter, NULL, 1 /* WaitAnyOnly */, device_callback, &result, NULL);
503
+
504
+ WGPUFutureWaitInfo wait_info = { .future = future, .completed = 0 };
505
+ uint32_t wait_status = pfn_wgpuInstanceWaitAny(
506
+ inst, 1, &wait_info, (uint64_t)5000000000ULL);
507
+
508
+ if (wait_status != 1 || result.status != 1 || !result.device)
509
+ NAPI_THROW(env, "requestDevice failed");
510
+
511
+ return wrap_ptr(env, result.device);
512
+ }
513
+
514
+ static napi_value doe_device_release(napi_env env, napi_callback_info info) {
515
+ NAPI_ASSERT_ARGC(env, info, 1);
516
+ void* device = unwrap_ptr(env, _args[0]);
517
+ if (device) pfn_wgpuDeviceRelease(device);
518
+ return NULL;
519
+ }
520
+
521
+ static napi_value doe_device_get_queue(napi_env env, napi_callback_info info) {
522
+ NAPI_ASSERT_ARGC(env, info, 1);
523
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
524
+ if (!device) NAPI_THROW(env, "Invalid device");
525
+ WGPUQueue queue = pfn_wgpuDeviceGetQueue(device);
526
+ return wrap_ptr(env, queue);
527
+ }
528
+
529
+ /* ================================================================
530
+ * Buffer
531
+ * ================================================================ */
532
+
533
+ static napi_value doe_create_buffer(napi_env env, napi_callback_info info) {
534
+ NAPI_ASSERT_ARGC(env, info, 2);
535
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
536
+ if (!device) NAPI_THROW(env, "Invalid device");
537
+
538
+ WGPUBufferDescriptor desc;
539
+ memset(&desc, 0, sizeof(desc));
540
+ desc.usage = (uint64_t)get_int64_prop(env, _args[1], "usage");
541
+ desc.size = (uint64_t)get_int64_prop(env, _args[1], "size");
542
+ desc.mappedAtCreation = get_bool_prop(env, _args[1], "mappedAtCreation") ? 1 : 0;
543
+
544
+ WGPUBuffer buf = pfn_wgpuDeviceCreateBuffer(device, &desc);
545
+ if (!buf) NAPI_THROW(env, "createBuffer failed");
546
+ return wrap_ptr(env, buf);
547
+ }
548
+
549
+ static napi_value doe_buffer_release(napi_env env, napi_callback_info info) {
550
+ NAPI_ASSERT_ARGC(env, info, 1);
551
+ void* buf = unwrap_ptr(env, _args[0]);
552
+ if (buf) pfn_wgpuBufferRelease(buf);
553
+ return NULL;
554
+ }
555
+
556
+ static napi_value doe_buffer_unmap(napi_env env, napi_callback_info info) {
557
+ NAPI_ASSERT_ARGC(env, info, 1);
558
+ WGPUBuffer buf = unwrap_ptr(env, _args[0]);
559
+ if (buf) pfn_wgpuBufferUnmap(buf);
560
+ return NULL;
561
+ }
562
+
563
+ typedef struct {
564
+ uint32_t status;
565
+ } BufferMapResult;
566
+
567
+ static void buffer_map_callback(uint32_t status, WGPUStringView message,
568
+ void* userdata1, void* userdata2) {
569
+ (void)message; (void)userdata2;
570
+ BufferMapResult* r = (BufferMapResult*)userdata1;
571
+ r->status = status;
572
+ }
573
+
574
+ /* bufferMapSync(instance, buffer, mode, offset, size) */
575
+ static napi_value doe_buffer_map_sync(napi_env env, napi_callback_info info) {
576
+ NAPI_ASSERT_ARGC(env, info, 5);
577
+ WGPUInstance inst = unwrap_ptr(env, _args[0]);
578
+ WGPUBuffer buf = unwrap_ptr(env, _args[1]);
579
+ uint32_t mode;
580
+ napi_get_value_uint32(env, _args[2], &mode);
581
+ int64_t offset_i, size_i;
582
+ napi_get_value_int64(env, _args[3], &offset_i);
583
+ napi_get_value_int64(env, _args[4], &size_i);
584
+
585
+ BufferMapResult result = {0};
586
+ WGPUBufferMapCallbackInfo cb_info = {
587
+ .nextInChain = NULL,
588
+ .mode = 1, /* WaitAnyOnly */
589
+ .callback = buffer_map_callback,
590
+ .userdata1 = &result,
591
+ .userdata2 = NULL,
592
+ };
593
+
594
+ WGPUFuture future = pfn_wgpuBufferMapAsync2(buf, (uint64_t)mode,
595
+ (size_t)offset_i, (size_t)size_i, cb_info);
596
+
597
+ WGPUFutureWaitInfo wait_info = { .future = future, .completed = 0 };
598
+ uint32_t wait_status = pfn_wgpuInstanceWaitAny(
599
+ inst, 1, &wait_info, (uint64_t)5000000000ULL);
600
+
601
+ if (wait_status != 1 || result.status != 1)
602
+ NAPI_THROW(env, "bufferMapAsync failed");
603
+
604
+ napi_value ok;
605
+ napi_get_boolean(env, true, &ok);
606
+ return ok;
607
+ }
608
+
609
+ /* bufferGetMappedRange(buffer, offset, size) → ArrayBuffer */
610
+ static napi_value doe_buffer_get_mapped_range(napi_env env, napi_callback_info info) {
611
+ NAPI_ASSERT_ARGC(env, info, 3);
612
+ WGPUBuffer buf = unwrap_ptr(env, _args[0]);
613
+ int64_t offset_i, size_i;
614
+ napi_get_value_int64(env, _args[1], &offset_i);
615
+ napi_get_value_int64(env, _args[2], &size_i);
616
+
617
+ const void* data = pfn_wgpuBufferGetConstMappedRange(buf, (size_t)offset_i, (size_t)size_i);
618
+ if (!data) NAPI_THROW(env, "getMappedRange returned NULL");
619
+
620
+ /* Copy native data into a JS ArrayBuffer */
621
+ void* ab_data = NULL;
622
+ napi_value ab;
623
+ napi_create_arraybuffer(env, (size_t)size_i, &ab_data, &ab);
624
+ memcpy(ab_data, data, (size_t)size_i);
625
+ return ab;
626
+ }
627
+
628
+ /* ================================================================
629
+ * Shader Module
630
+ * ================================================================ */
631
+
632
+ static napi_value doe_create_shader_module(napi_env env, napi_callback_info info) {
633
+ NAPI_ASSERT_ARGC(env, info, 2);
634
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
635
+ if (!device) NAPI_THROW(env, "Invalid device");
636
+
637
+ /* _args[1] is the WGSL source code string */
638
+ size_t code_len = 0;
639
+ napi_get_value_string_utf8(env, _args[1], NULL, 0, &code_len);
640
+ char* code = (char*)malloc(code_len + 1);
641
+ napi_get_value_string_utf8(env, _args[1], code, code_len + 1, &code_len);
642
+
643
+ WGPUShaderSourceWGSL wgsl_source = {
644
+ .chain = { .next = NULL, .sType = 0x00000002 /* ShaderSourceWGSL */ },
645
+ .code = { .data = code, .length = code_len },
646
+ };
647
+ WGPUShaderModuleDescriptor desc = {
648
+ .nextInChain = (void*)&wgsl_source,
649
+ .label = { .data = NULL, .length = 0 },
650
+ };
651
+
652
+ WGPUShaderModule mod = pfn_wgpuDeviceCreateShaderModule(device, &desc);
653
+ free(code);
654
+ if (!mod) NAPI_THROW(env, "createShaderModule failed");
655
+ return wrap_ptr(env, mod);
656
+ }
657
+
658
+ static napi_value doe_shader_module_release(napi_env env, napi_callback_info info) {
659
+ NAPI_ASSERT_ARGC(env, info, 1);
660
+ void* mod = unwrap_ptr(env, _args[0]);
661
+ if (mod) pfn_wgpuShaderModuleRelease(mod);
662
+ return NULL;
663
+ }
664
+
665
+ /* ================================================================
666
+ * Compute Pipeline
667
+ * createComputePipeline(device, shaderModule, entryPoint, pipelineLayout?)
668
+ * ================================================================ */
669
+
670
+ static napi_value doe_create_compute_pipeline(napi_env env, napi_callback_info info) {
671
+ NAPI_ASSERT_ARGC(env, info, 4);
672
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
673
+ WGPUShaderModule shader = unwrap_ptr(env, _args[1]);
674
+ if (!device || !shader) NAPI_THROW(env, "Invalid device or shader");
675
+
676
+ size_t ep_len = 0;
677
+ napi_get_value_string_utf8(env, _args[2], NULL, 0, &ep_len);
678
+ char* ep = (char*)malloc(ep_len + 1);
679
+ napi_get_value_string_utf8(env, _args[2], ep, ep_len + 1, &ep_len);
680
+
681
+ /* pipelineLayout can be null (auto layout) */
682
+ napi_valuetype layout_type;
683
+ napi_typeof(env, _args[3], &layout_type);
684
+ void* layout = NULL;
685
+ if (layout_type == napi_external) layout = unwrap_ptr(env, _args[3]);
686
+
687
+ WGPUComputePipelineDescriptor desc;
688
+ memset(&desc, 0, sizeof(desc));
689
+ desc.layout = layout;
690
+ desc.compute.module = shader;
691
+ desc.compute.entryPoint.data = ep;
692
+ desc.compute.entryPoint.length = ep_len;
693
+
694
+ WGPUComputePipeline pipeline = pfn_wgpuDeviceCreateComputePipeline(device, &desc);
695
+ free(ep);
696
+ if (!pipeline) NAPI_THROW(env, "createComputePipeline failed");
697
+ return wrap_ptr(env, pipeline);
698
+ }
699
+
700
+ static napi_value doe_compute_pipeline_release(napi_env env, napi_callback_info info) {
701
+ NAPI_ASSERT_ARGC(env, info, 1);
702
+ void* p = unwrap_ptr(env, _args[0]);
703
+ if (p) pfn_wgpuComputePipelineRelease(p);
704
+ return NULL;
705
+ }
706
+
707
+ /* ================================================================
708
+ * Bind Group Layout
709
+ * createBindGroupLayout(device, entries[])
710
+ * Each entry: { binding, visibility, buffer?: { type }, storageTexture?: { ... } }
711
+ * ================================================================ */
712
+
713
+ static uint32_t buffer_binding_type_from_string(napi_env env, napi_value val) {
714
+ napi_valuetype vt;
715
+ napi_typeof(env, val, &vt);
716
+ if (vt != napi_string) return 0x00000001; /* Undefined */
717
+ char buf[32] = {0};
718
+ size_t len = 0;
719
+ napi_get_value_string_utf8(env, val, buf, sizeof(buf), &len);
720
+ if (strcmp(buf, "uniform") == 0) return 0x00000002;
721
+ if (strcmp(buf, "storage") == 0) return 0x00000003;
722
+ if (strcmp(buf, "read-only-storage") == 0) return 0x00000004;
723
+ return 0x00000001;
724
+ }
725
+
726
+ static napi_value doe_create_bind_group_layout(napi_env env, napi_callback_info info) {
727
+ NAPI_ASSERT_ARGC(env, info, 2);
728
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
729
+ if (!device) NAPI_THROW(env, "Invalid device");
730
+
731
+ uint32_t entry_count = 0;
732
+ napi_get_array_length(env, _args[1], &entry_count);
733
+
734
+ WGPUBindGroupLayoutEntry* entries = (WGPUBindGroupLayoutEntry*)calloc(
735
+ entry_count, sizeof(WGPUBindGroupLayoutEntry));
736
+
737
+ for (uint32_t i = 0; i < entry_count; i++) {
738
+ napi_value elem;
739
+ napi_get_element(env, _args[1], i, &elem);
740
+
741
+ entries[i].binding = get_uint32_prop(env, elem, "binding");
742
+ entries[i].visibility = (uint64_t)get_int64_prop(env, elem, "visibility");
743
+
744
+ if (has_prop(env, elem, "buffer") && prop_type(env, elem, "buffer") == napi_object) {
745
+ napi_value buf_obj = get_prop(env, elem, "buffer");
746
+ entries[i].buffer.type = buffer_binding_type_from_string(
747
+ env, get_prop(env, buf_obj, "type"));
748
+ if (has_prop(env, buf_obj, "hasDynamicOffset"))
749
+ entries[i].buffer.hasDynamicOffset = get_bool_prop(env, buf_obj, "hasDynamicOffset") ? 1 : 0;
750
+ if (has_prop(env, buf_obj, "minBindingSize"))
751
+ entries[i].buffer.minBindingSize = (uint64_t)get_int64_prop(env, buf_obj, "minBindingSize");
752
+ }
753
+
754
+ if (has_prop(env, elem, "storageTexture") && prop_type(env, elem, "storageTexture") == napi_object) {
755
+ napi_value st_obj = get_prop(env, elem, "storageTexture");
756
+ entries[i].storageTexture.access = get_uint32_prop(env, st_obj, "access");
757
+ entries[i].storageTexture.format = get_uint32_prop(env, st_obj, "format");
758
+ entries[i].storageTexture.viewDimension = get_uint32_prop(env, st_obj, "viewDimension");
759
+ }
760
+ }
761
+
762
+ WGPUBindGroupLayoutDescriptor desc = {
763
+ .nextInChain = NULL,
764
+ .label = { .data = NULL, .length = 0 },
765
+ .entryCount = entry_count,
766
+ .entries = entries,
767
+ };
768
+
769
+ WGPUBindGroupLayout layout = pfn_wgpuDeviceCreateBindGroupLayout(device, &desc);
770
+ free(entries);
771
+ if (!layout) NAPI_THROW(env, "createBindGroupLayout failed");
772
+ return wrap_ptr(env, layout);
773
+ }
774
+
775
+ static napi_value doe_bind_group_layout_release(napi_env env, napi_callback_info info) {
776
+ NAPI_ASSERT_ARGC(env, info, 1);
777
+ void* p = unwrap_ptr(env, _args[0]);
778
+ if (p) pfn_wgpuBindGroupLayoutRelease(p);
779
+ return NULL;
780
+ }
781
+
782
+ /* ================================================================
783
+ * Bind Group
784
+ * createBindGroup(device, layout, entries[])
785
+ * Each entry: { binding, buffer, offset?, size? }
786
+ * ================================================================ */
787
+
788
+ static napi_value doe_create_bind_group(napi_env env, napi_callback_info info) {
789
+ NAPI_ASSERT_ARGC(env, info, 3);
790
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
791
+ WGPUBindGroupLayout layout = unwrap_ptr(env, _args[1]);
792
+ if (!device || !layout) NAPI_THROW(env, "Invalid device or layout");
793
+
794
+ uint32_t entry_count = 0;
795
+ napi_get_array_length(env, _args[2], &entry_count);
796
+
797
+ WGPUBindGroupEntry* entries = (WGPUBindGroupEntry*)calloc(
798
+ entry_count, sizeof(WGPUBindGroupEntry));
799
+
800
+ for (uint32_t i = 0; i < entry_count; i++) {
801
+ napi_value elem;
802
+ napi_get_element(env, _args[2], i, &elem);
803
+
804
+ entries[i].binding = get_uint32_prop(env, elem, "binding");
805
+
806
+ if (has_prop(env, elem, "buffer") && prop_type(env, elem, "buffer") == napi_external)
807
+ entries[i].buffer = unwrap_ptr(env, get_prop(env, elem, "buffer"));
808
+
809
+ if (has_prop(env, elem, "offset"))
810
+ entries[i].offset = (uint64_t)get_int64_prop(env, elem, "offset");
811
+
812
+ entries[i].size = WGPU_WHOLE_SIZE;
813
+ if (has_prop(env, elem, "size"))
814
+ entries[i].size = (uint64_t)get_int64_prop(env, elem, "size");
815
+ }
816
+
817
+ WGPUBindGroupDescriptor desc = {
818
+ .nextInChain = NULL,
819
+ .label = { .data = NULL, .length = 0 },
820
+ .layout = layout,
821
+ .entryCount = entry_count,
822
+ .entries = entries,
823
+ };
824
+
825
+ WGPUBindGroup group = pfn_wgpuDeviceCreateBindGroup(device, &desc);
826
+ free(entries);
827
+ if (!group) NAPI_THROW(env, "createBindGroup failed");
828
+ return wrap_ptr(env, group);
829
+ }
830
+
831
+ static napi_value doe_bind_group_release(napi_env env, napi_callback_info info) {
832
+ NAPI_ASSERT_ARGC(env, info, 1);
833
+ void* p = unwrap_ptr(env, _args[0]);
834
+ if (p) pfn_wgpuBindGroupRelease(p);
835
+ return NULL;
836
+ }
837
+
838
+ /* ================================================================
839
+ * Pipeline Layout
840
+ * createPipelineLayout(device, bindGroupLayouts[])
841
+ * ================================================================ */
842
+
843
+ static napi_value doe_create_pipeline_layout(napi_env env, napi_callback_info info) {
844
+ NAPI_ASSERT_ARGC(env, info, 2);
845
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
846
+ if (!device) NAPI_THROW(env, "Invalid device");
847
+
848
+ uint32_t layout_count = 0;
849
+ napi_get_array_length(env, _args[1], &layout_count);
850
+
851
+ WGPUBindGroupLayout* layouts = (WGPUBindGroupLayout*)calloc(
852
+ layout_count, sizeof(WGPUBindGroupLayout));
853
+ for (uint32_t i = 0; i < layout_count; i++) {
854
+ napi_value elem;
855
+ napi_get_element(env, _args[1], i, &elem);
856
+ layouts[i] = unwrap_ptr(env, elem);
857
+ }
858
+
859
+ WGPUPipelineLayoutDescriptor desc = {
860
+ .nextInChain = NULL,
861
+ .label = { .data = NULL, .length = 0 },
862
+ .bindGroupLayoutCount = layout_count,
863
+ .bindGroupLayouts = layouts,
864
+ .immediateSize = 0,
865
+ };
866
+
867
+ WGPUPipelineLayout pl = pfn_wgpuDeviceCreatePipelineLayout(device, &desc);
868
+ free(layouts);
869
+ if (!pl) NAPI_THROW(env, "createPipelineLayout failed");
870
+ return wrap_ptr(env, pl);
871
+ }
872
+
873
+ static napi_value doe_pipeline_layout_release(napi_env env, napi_callback_info info) {
874
+ NAPI_ASSERT_ARGC(env, info, 1);
875
+ void* p = unwrap_ptr(env, _args[0]);
876
+ if (p) pfn_wgpuPipelineLayoutRelease(p);
877
+ return NULL;
878
+ }
879
+
880
+ /* ================================================================
881
+ * Command Encoder
882
+ * ================================================================ */
883
+
884
+ static napi_value doe_create_command_encoder(napi_env env, napi_callback_info info) {
885
+ NAPI_ASSERT_ARGC(env, info, 1);
886
+ WGPUDevice device = unwrap_ptr(env, _args[0]);
887
+ if (!device) NAPI_THROW(env, "Invalid device");
888
+
889
+ WGPUCommandEncoderDescriptor desc = {
890
+ .nextInChain = NULL,
891
+ .label = { .data = NULL, .length = 0 },
892
+ };
893
+ WGPUCommandEncoder enc = pfn_wgpuDeviceCreateCommandEncoder(device, &desc);
894
+ if (!enc) NAPI_THROW(env, "createCommandEncoder failed");
895
+ return wrap_ptr(env, enc);
896
+ }
897
+
898
+ static napi_value doe_command_encoder_release(napi_env env, napi_callback_info info) {
899
+ NAPI_ASSERT_ARGC(env, info, 1);
900
+ void* p = unwrap_ptr(env, _args[0]);
901
+ if (p) pfn_wgpuCommandEncoderRelease(p);
902
+ return NULL;
903
+ }
904
+
905
+ static napi_value doe_command_encoder_copy_buffer_to_buffer(napi_env env, napi_callback_info info) {
906
+ NAPI_ASSERT_ARGC(env, info, 6);
907
+ WGPUCommandEncoder enc = unwrap_ptr(env, _args[0]);
908
+ WGPUBuffer src = unwrap_ptr(env, _args[1]);
909
+ int64_t src_offset; napi_get_value_int64(env, _args[2], &src_offset);
910
+ WGPUBuffer dst = unwrap_ptr(env, _args[3]);
911
+ int64_t dst_offset; napi_get_value_int64(env, _args[4], &dst_offset);
912
+ int64_t size; napi_get_value_int64(env, _args[5], &size);
913
+
914
+ pfn_wgpuCommandEncoderCopyBufferToBuffer(enc, src, (uint64_t)src_offset,
915
+ dst, (uint64_t)dst_offset, (uint64_t)size);
916
+ return NULL;
917
+ }
918
+
919
+ static napi_value doe_command_encoder_finish(napi_env env, napi_callback_info info) {
920
+ NAPI_ASSERT_ARGC(env, info, 1);
921
+ WGPUCommandEncoder enc = unwrap_ptr(env, _args[0]);
922
+ if (!enc) NAPI_THROW(env, "Invalid encoder");
923
+
924
+ WGPUCommandBufferDescriptor desc = {
925
+ .nextInChain = NULL,
926
+ .label = { .data = NULL, .length = 0 },
927
+ };
928
+ WGPUCommandBuffer cmd = pfn_wgpuCommandEncoderFinish(enc, &desc);
929
+ if (!cmd) NAPI_THROW(env, "commandEncoderFinish failed");
930
+ return wrap_ptr(env, cmd);
931
+ }
932
+
933
+ static napi_value doe_command_buffer_release(napi_env env, napi_callback_info info) {
934
+ NAPI_ASSERT_ARGC(env, info, 1);
935
+ void* p = unwrap_ptr(env, _args[0]);
936
+ if (p) pfn_wgpuCommandBufferRelease(p);
937
+ return NULL;
938
+ }
939
+
940
+ /* ================================================================
941
+ * Compute Pass
942
+ * ================================================================ */
943
+
944
+ static napi_value doe_begin_compute_pass(napi_env env, napi_callback_info info) {
945
+ NAPI_ASSERT_ARGC(env, info, 1);
946
+ WGPUCommandEncoder enc = unwrap_ptr(env, _args[0]);
947
+ if (!enc) NAPI_THROW(env, "Invalid encoder");
948
+
949
+ WGPUComputePassDescriptor desc = {
950
+ .nextInChain = NULL,
951
+ .label = { .data = NULL, .length = 0 },
952
+ .timestampWrites = NULL,
953
+ };
954
+ WGPUComputePassEncoder pass = pfn_wgpuCommandEncoderBeginComputePass(enc, &desc);
955
+ if (!pass) NAPI_THROW(env, "beginComputePass failed");
956
+ return wrap_ptr(env, pass);
957
+ }
958
+
959
+ static napi_value doe_compute_pass_set_pipeline(napi_env env, napi_callback_info info) {
960
+ NAPI_ASSERT_ARGC(env, info, 2);
961
+ pfn_wgpuComputePassEncoderSetPipeline(
962
+ unwrap_ptr(env, _args[0]), unwrap_ptr(env, _args[1]));
963
+ return NULL;
964
+ }
965
+
966
+ static napi_value doe_compute_pass_set_bind_group(napi_env env, napi_callback_info info) {
967
+ NAPI_ASSERT_ARGC(env, info, 3);
968
+ WGPUComputePassEncoder pass = unwrap_ptr(env, _args[0]);
969
+ uint32_t index; napi_get_value_uint32(env, _args[1], &index);
970
+ WGPUBindGroup group = unwrap_ptr(env, _args[2]);
971
+ pfn_wgpuComputePassEncoderSetBindGroup(pass, index, group, 0, NULL);
972
+ return NULL;
973
+ }
974
+
975
+ static napi_value doe_compute_pass_dispatch(napi_env env, napi_callback_info info) {
976
+ NAPI_ASSERT_ARGC(env, info, 4);
977
+ WGPUComputePassEncoder pass = unwrap_ptr(env, _args[0]);
978
+ uint32_t x, y, z;
979
+ napi_get_value_uint32(env, _args[1], &x);
980
+ napi_get_value_uint32(env, _args[2], &y);
981
+ napi_get_value_uint32(env, _args[3], &z);
982
+ pfn_wgpuComputePassEncoderDispatchWorkgroups(pass, x, y, z);
983
+ return NULL;
984
+ }
985
+
986
+ static napi_value doe_compute_pass_end(napi_env env, napi_callback_info info) {
987
+ NAPI_ASSERT_ARGC(env, info, 1);
988
+ pfn_wgpuComputePassEncoderEnd(unwrap_ptr(env, _args[0]));
989
+ return NULL;
990
+ }
991
+
992
+ static napi_value doe_compute_pass_release(napi_env env, napi_callback_info info) {
993
+ NAPI_ASSERT_ARGC(env, info, 1);
994
+ void* p = unwrap_ptr(env, _args[0]);
995
+ if (p) pfn_wgpuComputePassEncoderRelease(p);
996
+ return NULL;
997
+ }
998
+
999
+ /* ================================================================
1000
+ * Queue
1001
+ * ================================================================ */
1002
+
1003
+ static napi_value doe_queue_submit(napi_env env, napi_callback_info info) {
1004
+ NAPI_ASSERT_ARGC(env, info, 2);
1005
+ WGPUQueue queue = unwrap_ptr(env, _args[0]);
1006
+ if (!queue) NAPI_THROW(env, "Invalid queue");
1007
+
1008
+ uint32_t cmd_count = 0;
1009
+ napi_get_array_length(env, _args[1], &cmd_count);
1010
+
1011
+ WGPUCommandBuffer* cmds = (WGPUCommandBuffer*)calloc(
1012
+ cmd_count, sizeof(WGPUCommandBuffer));
1013
+ for (uint32_t i = 0; i < cmd_count; i++) {
1014
+ napi_value elem;
1015
+ napi_get_element(env, _args[1], i, &elem);
1016
+ cmds[i] = unwrap_ptr(env, elem);
1017
+ }
1018
+
1019
+ pfn_wgpuQueueSubmit(queue, cmd_count, cmds);
1020
+ free(cmds);
1021
+ return NULL;
1022
+ }
1023
+
1024
+ /* queueWriteBuffer(queue, buffer, offset, typedArray) */
1025
+ static napi_value doe_queue_write_buffer(napi_env env, napi_callback_info info) {
1026
+ NAPI_ASSERT_ARGC(env, info, 4);
1027
+ WGPUQueue queue = unwrap_ptr(env, _args[0]);
1028
+ WGPUBuffer buf = unwrap_ptr(env, _args[1]);
1029
+ int64_t offset; napi_get_value_int64(env, _args[2], &offset);
1030
+
1031
+ void* data = NULL;
1032
+ size_t byte_length = 0;
1033
+ bool is_typedarray = false;
1034
+ napi_is_typedarray(env, _args[3], &is_typedarray);
1035
+ if (is_typedarray) {
1036
+ napi_typedarray_type ta_type;
1037
+ size_t ta_length;
1038
+ napi_value ab;
1039
+ size_t byte_offset;
1040
+ napi_get_typedarray_info(env, _args[3], &ta_type, &ta_length, &data, &ab, &byte_offset);
1041
+ /* Get byte length from the arraybuffer */
1042
+ napi_get_arraybuffer_info(env, ab, NULL, &byte_length);
1043
+ byte_length = byte_length - byte_offset;
1044
+ data = (char*)data;
1045
+ } else {
1046
+ bool is_ab = false;
1047
+ napi_is_arraybuffer(env, _args[3], &is_ab);
1048
+ if (is_ab) {
1049
+ napi_get_arraybuffer_info(env, _args[3], &data, &byte_length);
1050
+ } else {
1051
+ bool is_buffer = false;
1052
+ napi_is_buffer(env, _args[3], &is_buffer);
1053
+ if (is_buffer) {
1054
+ napi_get_buffer_info(env, _args[3], &data, &byte_length);
1055
+ } else {
1056
+ NAPI_THROW(env, "queueWriteBuffer: data must be TypedArray, ArrayBuffer, or Buffer");
1057
+ }
1058
+ }
1059
+ }
1060
+
1061
+ pfn_wgpuQueueWriteBuffer(queue, buf, (uint64_t)offset, data, byte_length);
1062
+ return NULL;
1063
+ }
1064
+
1065
+ static napi_value doe_queue_release(napi_env env, napi_callback_info info) {
1066
+ NAPI_ASSERT_ARGC(env, info, 1);
1067
+ void* p = unwrap_ptr(env, _args[0]);
1068
+ if (p) pfn_wgpuQueueRelease(p);
1069
+ return NULL;
1070
+ }
1071
+
1072
+ /* ================================================================
1073
+ * Module initialization
1074
+ * ================================================================ */
1075
+
1076
+ #define EXPORT_FN(name, fn) { name, 0, fn, 0, 0, 0, napi_default, 0 }
1077
+
1078
+ static napi_value doe_module_init(napi_env env, napi_value exports) {
1079
+ napi_property_descriptor descriptors[] = {
1080
+ EXPORT_FN("loadLibrary", doe_load_library),
1081
+ EXPORT_FN("createInstance", doe_create_instance),
1082
+ EXPORT_FN("instanceRelease", doe_instance_release),
1083
+ EXPORT_FN("requestAdapter", doe_request_adapter),
1084
+ EXPORT_FN("adapterRelease", doe_adapter_release),
1085
+ EXPORT_FN("requestDevice", doe_request_device),
1086
+ EXPORT_FN("deviceRelease", doe_device_release),
1087
+ EXPORT_FN("deviceGetQueue", doe_device_get_queue),
1088
+ EXPORT_FN("createBuffer", doe_create_buffer),
1089
+ EXPORT_FN("bufferRelease", doe_buffer_release),
1090
+ EXPORT_FN("bufferUnmap", doe_buffer_unmap),
1091
+ EXPORT_FN("bufferMapSync", doe_buffer_map_sync),
1092
+ EXPORT_FN("bufferGetMappedRange", doe_buffer_get_mapped_range),
1093
+ EXPORT_FN("createShaderModule", doe_create_shader_module),
1094
+ EXPORT_FN("shaderModuleRelease", doe_shader_module_release),
1095
+ EXPORT_FN("createComputePipeline", doe_create_compute_pipeline),
1096
+ EXPORT_FN("computePipelineRelease", doe_compute_pipeline_release),
1097
+ EXPORT_FN("createBindGroupLayout", doe_create_bind_group_layout),
1098
+ EXPORT_FN("bindGroupLayoutRelease", doe_bind_group_layout_release),
1099
+ EXPORT_FN("createBindGroup", doe_create_bind_group),
1100
+ EXPORT_FN("bindGroupRelease", doe_bind_group_release),
1101
+ EXPORT_FN("createPipelineLayout", doe_create_pipeline_layout),
1102
+ EXPORT_FN("pipelineLayoutRelease", doe_pipeline_layout_release),
1103
+ EXPORT_FN("createCommandEncoder", doe_create_command_encoder),
1104
+ EXPORT_FN("commandEncoderRelease", doe_command_encoder_release),
1105
+ EXPORT_FN("commandEncoderCopyBufferToBuffer", doe_command_encoder_copy_buffer_to_buffer),
1106
+ EXPORT_FN("commandEncoderFinish", doe_command_encoder_finish),
1107
+ EXPORT_FN("commandBufferRelease", doe_command_buffer_release),
1108
+ EXPORT_FN("beginComputePass", doe_begin_compute_pass),
1109
+ EXPORT_FN("computePassSetPipeline", doe_compute_pass_set_pipeline),
1110
+ EXPORT_FN("computePassSetBindGroup", doe_compute_pass_set_bind_group),
1111
+ EXPORT_FN("computePassDispatchWorkgroups", doe_compute_pass_dispatch),
1112
+ EXPORT_FN("computePassEnd", doe_compute_pass_end),
1113
+ EXPORT_FN("computePassRelease", doe_compute_pass_release),
1114
+ EXPORT_FN("queueSubmit", doe_queue_submit),
1115
+ EXPORT_FN("queueWriteBuffer", doe_queue_write_buffer),
1116
+ EXPORT_FN("queueRelease", doe_queue_release),
1117
+ };
1118
+
1119
+ size_t count = sizeof(descriptors) / sizeof(descriptors[0]);
1120
+ napi_define_properties(env, exports, count, descriptors);
1121
+ return exports;
1122
+ }
1123
+
1124
+ NAPI_MODULE(NODE_GYP_MODULE_NAME, doe_module_init)