@ruvector/attention-wasm 2.0.4 → 2.2.0

This diff shows the published contents of two package versions as they appear in their public registries. It is provided for informational purposes only.
@@ -1,252 +1,285 @@
  /* tslint:disable */
  /* eslint-disable */

+ /**
+ * Adam optimizer
+ */
  export class WasmAdam {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new Adam optimizer
- *
- * # Arguments
- * * `param_count` - Number of parameters
- * * `learning_rate` - Learning rate
- */
- constructor(param_count: number, learning_rate: number);
- /**
- * Perform optimization step
- *
- * # Arguments
- * * `params` - Current parameter values (will be updated in-place)
- * * `gradients` - Gradient values
- */
- step(params: Float32Array, gradients: Float32Array): void;
- /**
- * Reset optimizer state
- */
- reset(): void;
- /**
- * Get current learning rate
- */
- learning_rate: number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Create a new Adam optimizer
+ *
+ * # Arguments
+ * * `param_count` - Number of parameters
+ * * `learning_rate` - Learning rate
+ */
+ constructor(param_count: number, learning_rate: number);
+ /**
+ * Reset optimizer state
+ */
+ reset(): void;
+ /**
+ * Perform optimization step
+ *
+ * # Arguments
+ * * `params` - Current parameter values (will be updated in-place)
+ * * `gradients` - Gradient values
+ */
+ step(params: Float32Array, gradients: Float32Array): void;
+ /**
+ * Get current learning rate
+ */
+ learning_rate: number;
  }

+ /**
+ * AdamW optimizer (Adam with decoupled weight decay)
+ */
  export class WasmAdamW {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new AdamW optimizer
- *
- * # Arguments
- * * `param_count` - Number of parameters
- * * `learning_rate` - Learning rate
- * * `weight_decay` - Weight decay coefficient
- */
- constructor(param_count: number, learning_rate: number, weight_decay: number);
- /**
- * Perform optimization step with weight decay
- */
- step(params: Float32Array, gradients: Float32Array): void;
- /**
- * Reset optimizer state
- */
- reset(): void;
- /**
- * Get weight decay
- */
- readonly weight_decay: number;
- /**
- * Get current learning rate
- */
- learning_rate: number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Create a new AdamW optimizer
+ *
+ * # Arguments
+ * * `param_count` - Number of parameters
+ * * `learning_rate` - Learning rate
+ * * `weight_decay` - Weight decay coefficient
+ */
+ constructor(param_count: number, learning_rate: number, weight_decay: number);
+ /**
+ * Reset optimizer state
+ */
+ reset(): void;
+ /**
+ * Perform optimization step with weight decay
+ */
+ step(params: Float32Array, gradients: Float32Array): void;
+ /**
+ * Get current learning rate
+ */
+ learning_rate: number;
+ /**
+ * Get weight decay
+ */
+ readonly weight_decay: number;
  }

+ /**
+ * Flash attention mechanism
+ */
  export class WasmFlashAttention {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new flash attention instance
- *
- * # Arguments
- * * `dim` - Embedding dimension
- * * `block_size` - Block size for tiling
- */
- constructor(dim: number, block_size: number);
- /**
- * Compute flash attention
- */
- compute(query: Float32Array, keys: any, values: any): Float32Array;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute flash attention
+ */
+ compute(query: Float32Array, keys: any, values: any): Float32Array;
+ /**
+ * Create a new flash attention instance
+ *
+ * # Arguments
+ * * `dim` - Embedding dimension
+ * * `block_size` - Block size for tiling
+ */
+ constructor(dim: number, block_size: number);
  }

+ /**
+ * Hyperbolic attention mechanism
+ */
  export class WasmHyperbolicAttention {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new hyperbolic attention instance
- *
- * # Arguments
- * * `dim` - Embedding dimension
- * * `curvature` - Hyperbolic curvature parameter
- */
- constructor(dim: number, curvature: number);
- /**
- * Compute hyperbolic attention
- */
- compute(query: Float32Array, keys: any, values: any): Float32Array;
- /**
- * Get the curvature
- */
- readonly curvature: number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute hyperbolic attention
+ */
+ compute(query: Float32Array, keys: any, values: any): Float32Array;
+ /**
+ * Create a new hyperbolic attention instance
+ *
+ * # Arguments
+ * * `dim` - Embedding dimension
+ * * `curvature` - Hyperbolic curvature parameter
+ */
+ constructor(dim: number, curvature: number);
+ /**
+ * Get the curvature
+ */
+ readonly curvature: number;
  }

+ /**
+ * InfoNCE contrastive loss for training
+ */
  export class WasmInfoNCELoss {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new InfoNCE loss instance
- *
- * # Arguments
- * * `temperature` - Temperature parameter for softmax
- */
- constructor(temperature: number);
- /**
- * Compute InfoNCE loss
- *
- * # Arguments
- * * `anchor` - Anchor embedding
- * * `positive` - Positive example embedding
- * * `negatives` - Array of negative example embeddings
- */
- compute(anchor: Float32Array, positive: Float32Array, negatives: any): number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute InfoNCE loss
+ *
+ * # Arguments
+ * * `anchor` - Anchor embedding
+ * * `positive` - Positive example embedding
+ * * `negatives` - Array of negative example embeddings
+ */
+ compute(anchor: Float32Array, positive: Float32Array, negatives: any): number;
+ /**
+ * Create a new InfoNCE loss instance
+ *
+ * # Arguments
+ * * `temperature` - Temperature parameter for softmax
+ */
+ constructor(temperature: number);
  }

+ /**
+ * Learning rate scheduler
+ */
  export class WasmLRScheduler {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new learning rate scheduler with warmup and cosine decay
- *
- * # Arguments
- * * `initial_lr` - Initial learning rate
- * * `warmup_steps` - Number of warmup steps
- * * `total_steps` - Total training steps
- */
- constructor(initial_lr: number, warmup_steps: number, total_steps: number);
- /**
- * Advance to next step
- */
- step(): void;
- /**
- * Reset scheduler
- */
- reset(): void;
- /**
- * Get learning rate for current step
- */
- get_lr(): number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Get learning rate for current step
+ */
+ get_lr(): number;
+ /**
+ * Create a new learning rate scheduler with warmup and cosine decay
+ *
+ * # Arguments
+ * * `initial_lr` - Initial learning rate
+ * * `warmup_steps` - Number of warmup steps
+ * * `total_steps` - Total training steps
+ */
+ constructor(initial_lr: number, warmup_steps: number, total_steps: number);
+ /**
+ * Reset scheduler
+ */
+ reset(): void;
+ /**
+ * Advance to next step
+ */
+ step(): void;
  }

+ /**
+ * Linear attention (Performer-style)
+ */
  export class WasmLinearAttention {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new linear attention instance
- *
- * # Arguments
- * * `dim` - Embedding dimension
- * * `num_features` - Number of random features
- */
- constructor(dim: number, num_features: number);
- /**
- * Compute linear attention
- */
- compute(query: Float32Array, keys: any, values: any): Float32Array;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute linear attention
+ */
+ compute(query: Float32Array, keys: any, values: any): Float32Array;
+ /**
+ * Create a new linear attention instance
+ *
+ * # Arguments
+ * * `dim` - Embedding dimension
+ * * `num_features` - Number of random features
+ */
+ constructor(dim: number, num_features: number);
  }

+ /**
+ * Local-global attention mechanism
+ */
  export class WasmLocalGlobalAttention {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new local-global attention instance
- *
- * # Arguments
- * * `dim` - Embedding dimension
- * * `local_window` - Size of local attention window
- * * `global_tokens` - Number of global attention tokens
- */
- constructor(dim: number, local_window: number, global_tokens: number);
- /**
- * Compute local-global attention
- */
- compute(query: Float32Array, keys: any, values: any): Float32Array;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute local-global attention
+ */
+ compute(query: Float32Array, keys: any, values: any): Float32Array;
+ /**
+ * Create a new local-global attention instance
+ *
+ * # Arguments
+ * * `dim` - Embedding dimension
+ * * `local_window` - Size of local attention window
+ * * `global_tokens` - Number of global attention tokens
+ */
+ constructor(dim: number, local_window: number, global_tokens: number);
  }

+ /**
+ * Mixture of Experts (MoE) attention
+ */
  export class WasmMoEAttention {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new MoE attention instance
- *
- * # Arguments
- * * `dim` - Embedding dimension
- * * `num_experts` - Number of expert attention mechanisms
- * * `top_k` - Number of experts to use per query
- */
- constructor(dim: number, num_experts: number, top_k: number);
- /**
- * Compute MoE attention
- */
- compute(query: Float32Array, keys: any, values: any): Float32Array;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute MoE attention
+ */
+ compute(query: Float32Array, keys: any, values: any): Float32Array;
+ /**
+ * Create a new MoE attention instance
+ *
+ * # Arguments
+ * * `dim` - Embedding dimension
+ * * `num_experts` - Number of expert attention mechanisms
+ * * `top_k` - Number of experts to use per query
+ */
+ constructor(dim: number, num_experts: number, top_k: number);
  }

+ /**
+ * Multi-head attention mechanism
+ */
  export class WasmMultiHeadAttention {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new multi-head attention instance
- *
- * # Arguments
- * * `dim` - Embedding dimension
- * * `num_heads` - Number of attention heads
- */
- constructor(dim: number, num_heads: number);
- /**
- * Compute multi-head attention
- */
- compute(query: Float32Array, keys: any, values: any): Float32Array;
- /**
- * Get the dimension
- */
- readonly dim: number;
- /**
- * Get the number of heads
- */
- readonly num_heads: number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Compute multi-head attention
+ */
+ compute(query: Float32Array, keys: any, values: any): Float32Array;
+ /**
+ * Create a new multi-head attention instance
+ *
+ * # Arguments
+ * * `dim` - Embedding dimension
+ * * `num_heads` - Number of attention heads
+ */
+ constructor(dim: number, num_heads: number);
+ /**
+ * Get the dimension
+ */
+ readonly dim: number;
+ /**
+ * Get the number of heads
+ */
+ readonly num_heads: number;
  }

+ /**
+ * SGD optimizer with momentum
+ */
  export class WasmSGD {
- free(): void;
- [Symbol.dispose](): void;
- /**
- * Create a new SGD optimizer
- *
- * # Arguments
- * * `param_count` - Number of parameters
- * * `learning_rate` - Learning rate
- * * `momentum` - Momentum coefficient (default: 0)
- */
- constructor(param_count: number, learning_rate: number, momentum?: number | null);
- /**
- * Perform optimization step
- */
- step(params: Float32Array, gradients: Float32Array): void;
- /**
- * Reset optimizer state
- */
- reset(): void;
- /**
- * Get current learning rate
- */
- learning_rate: number;
+ free(): void;
+ [Symbol.dispose](): void;
+ /**
+ * Create a new SGD optimizer
+ *
+ * # Arguments
+ * * `param_count` - Number of parameters
+ * * `learning_rate` - Learning rate
+ * * `momentum` - Momentum coefficient (default: 0)
+ */
+ constructor(param_count: number, learning_rate: number, momentum?: number | null);
+ /**
+ * Reset optimizer state
+ */
+ reset(): void;
+ /**
+ * Perform optimization step
+ */
+ step(params: Float32Array, gradients: Float32Array): void;
+ /**
+ * Get current learning rate
+ */
+ learning_rate: number;
  }

  /**
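
The hunk above consists of class-level doc comments and member reordering; the callable surface of each class is unchanged between 2.0.4 and 2.2.0. For orientation, a minimal usage sketch of the optimizer and scheduler declarations as written: the import pattern assumes the usual wasm-bindgen package layout (default init export plus named classes), and the parameter count and learning rates are illustrative, not values from the package's docs.

```typescript
import init, { WasmAdam, WasmLRScheduler } from "@ruvector/attention-wasm";

async function run(): Promise<void> {
  await init(); // instantiate the wasm module before constructing any class

  const paramCount = 128; // illustrative size
  const params = new Float32Array(paramCount);    // updated in-place by step()
  const gradients = new Float32Array(paramCount); // filled by your backward pass

  const adam = new WasmAdam(paramCount, 1e-3);
  const scheduler = new WasmLRScheduler(1e-3, 100, 10_000); // warmup + cosine decay

  for (let i = 0; i < 10_000; i++) {
    // ... compute `gradients` here ...
    adam.learning_rate = scheduler.get_lr(); // writable per the declarations
    adam.step(params, gradients);            // in-place parameter update
    scheduler.step();
  }

  // wasm-bindgen objects own wasm linear memory; release them explicitly
  // (or use `using` with [Symbol.dispose] on TS 5.2+)
  scheduler.free();
  adam.free();
}
```
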
@@ -328,95 +361,95 @@ export function version(): string;
  export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module;

  export interface InitOutput {
- readonly memory: WebAssembly.Memory;
- readonly __wbg_wasmadam_free: (a: number, b: number) => void;
- readonly __wbg_wasmadamw_free: (a: number, b: number) => void;
- readonly __wbg_wasmflashattention_free: (a: number, b: number) => void;
- readonly __wbg_wasmhyperbolicattention_free: (a: number, b: number) => void;
- readonly __wbg_wasminfonceloss_free: (a: number, b: number) => void;
- readonly __wbg_wasmlinearattention_free: (a: number, b: number) => void;
- readonly __wbg_wasmmoeattention_free: (a: number, b: number) => void;
- readonly __wbg_wasmmultiheadattention_free: (a: number, b: number) => void;
- readonly __wbg_wasmsgd_free: (a: number, b: number) => void;
- readonly attention_weights: (a: number, b: number, c: number, d: number) => void;
- readonly available_mechanisms: () => number;
- readonly batch_normalize: (a: number, b: number, c: number) => void;
- readonly cosine_similarity: (a: number, b: number, c: number, d: number, e: number) => void;
- readonly l2_norm: (a: number, b: number) => number;
- readonly log: (a: number, b: number) => void;
- readonly log_error: (a: number, b: number) => void;
- readonly normalize: (a: number, b: number, c: number, d: number) => void;
- readonly pairwise_distances: (a: number, b: number) => void;
- readonly random_orthogonal_matrix: (a: number, b: number) => void;
- readonly scaled_dot_attention: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly softmax: (a: number, b: number, c: number) => void;
- readonly version: (a: number) => void;
- readonly wasmadam_learning_rate: (a: number) => number;
- readonly wasmadam_new: (a: number, b: number) => number;
- readonly wasmadam_reset: (a: number) => void;
- readonly wasmadam_set_learning_rate: (a: number, b: number) => void;
- readonly wasmadam_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmadamw_new: (a: number, b: number, c: number) => number;
- readonly wasmadamw_reset: (a: number) => void;
- readonly wasmadamw_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmadamw_weight_decay: (a: number) => number;
- readonly wasmflashattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmflashattention_new: (a: number, b: number) => number;
- readonly wasmhyperbolicattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmhyperbolicattention_curvature: (a: number) => number;
- readonly wasmhyperbolicattention_new: (a: number, b: number) => number;
- readonly wasminfonceloss_compute: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
- readonly wasminfonceloss_new: (a: number) => number;
- readonly wasmlinearattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmlinearattention_new: (a: number, b: number) => number;
- readonly wasmlocalglobalattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmlocalglobalattention_new: (a: number, b: number, c: number) => number;
- readonly wasmlrscheduler_get_lr: (a: number) => number;
- readonly wasmlrscheduler_new: (a: number, b: number, c: number) => number;
- readonly wasmlrscheduler_reset: (a: number) => void;
- readonly wasmlrscheduler_step: (a: number) => void;
- readonly wasmmoeattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmmoeattention_new: (a: number, b: number, c: number) => number;
- readonly wasmmultiheadattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly wasmmultiheadattention_dim: (a: number) => number;
- readonly wasmmultiheadattention_new: (a: number, b: number, c: number) => void;
- readonly wasmmultiheadattention_num_heads: (a: number) => number;
- readonly wasmsgd_learning_rate: (a: number) => number;
- readonly wasmsgd_new: (a: number, b: number, c: number) => number;
- readonly wasmsgd_reset: (a: number) => void;
- readonly wasmsgd_set_learning_rate: (a: number, b: number) => void;
- readonly wasmsgd_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly init: () => void;
- readonly wasmadamw_set_learning_rate: (a: number, b: number) => void;
- readonly wasmadamw_learning_rate: (a: number) => number;
- readonly __wbg_wasmlocalglobalattention_free: (a: number, b: number) => void;
- readonly __wbg_wasmlrscheduler_free: (a: number, b: number) => void;
- readonly __wbindgen_export: (a: number, b: number) => number;
- readonly __wbindgen_export2: (a: number, b: number, c: number, d: number) => number;
- readonly __wbindgen_export3: (a: number) => void;
- readonly __wbindgen_export4: (a: number, b: number, c: number) => void;
- readonly __wbindgen_add_to_stack_pointer: (a: number) => number;
- readonly __wbindgen_start: () => void;
+ readonly memory: WebAssembly.Memory;
+ readonly __wbg_wasmadam_free: (a: number, b: number) => void;
+ readonly __wbg_wasmadamw_free: (a: number, b: number) => void;
+ readonly __wbg_wasmflashattention_free: (a: number, b: number) => void;
+ readonly __wbg_wasmhyperbolicattention_free: (a: number, b: number) => void;
+ readonly __wbg_wasminfonceloss_free: (a: number, b: number) => void;
+ readonly __wbg_wasmlinearattention_free: (a: number, b: number) => void;
+ readonly __wbg_wasmmoeattention_free: (a: number, b: number) => void;
+ readonly __wbg_wasmmultiheadattention_free: (a: number, b: number) => void;
+ readonly __wbg_wasmsgd_free: (a: number, b: number) => void;
+ readonly attention_weights: (a: number, b: number, c: number, d: number) => void;
+ readonly available_mechanisms: () => number;
+ readonly batch_normalize: (a: number, b: number, c: number) => void;
+ readonly cosine_similarity: (a: number, b: number, c: number, d: number, e: number) => void;
+ readonly l2_norm: (a: number, b: number) => number;
+ readonly log: (a: number, b: number) => void;
+ readonly log_error: (a: number, b: number) => void;
+ readonly normalize: (a: number, b: number, c: number, d: number) => void;
+ readonly pairwise_distances: (a: number, b: number) => void;
+ readonly random_orthogonal_matrix: (a: number, b: number) => void;
+ readonly scaled_dot_attention: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly softmax: (a: number, b: number, c: number) => void;
+ readonly version: (a: number) => void;
+ readonly wasmadam_learning_rate: (a: number) => number;
+ readonly wasmadam_new: (a: number, b: number) => number;
+ readonly wasmadam_reset: (a: number) => void;
+ readonly wasmadam_set_learning_rate: (a: number, b: number) => void;
+ readonly wasmadam_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmadamw_new: (a: number, b: number, c: number) => number;
+ readonly wasmadamw_reset: (a: number) => void;
+ readonly wasmadamw_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmadamw_weight_decay: (a: number) => number;
+ readonly wasmflashattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmflashattention_new: (a: number, b: number) => number;
+ readonly wasmhyperbolicattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmhyperbolicattention_curvature: (a: number) => number;
+ readonly wasmhyperbolicattention_new: (a: number, b: number) => number;
+ readonly wasminfonceloss_compute: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
+ readonly wasminfonceloss_new: (a: number) => number;
+ readonly wasmlinearattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmlinearattention_new: (a: number, b: number) => number;
+ readonly wasmlocalglobalattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmlocalglobalattention_new: (a: number, b: number, c: number) => number;
+ readonly wasmlrscheduler_get_lr: (a: number) => number;
+ readonly wasmlrscheduler_new: (a: number, b: number, c: number) => number;
+ readonly wasmlrscheduler_reset: (a: number) => void;
+ readonly wasmlrscheduler_step: (a: number) => void;
+ readonly wasmmoeattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmmoeattention_new: (a: number, b: number, c: number) => number;
+ readonly wasmmultiheadattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly wasmmultiheadattention_dim: (a: number) => number;
+ readonly wasmmultiheadattention_new: (a: number, b: number, c: number) => void;
+ readonly wasmmultiheadattention_num_heads: (a: number) => number;
+ readonly wasmsgd_learning_rate: (a: number) => number;
+ readonly wasmsgd_new: (a: number, b: number, c: number) => number;
+ readonly wasmsgd_reset: (a: number) => void;
+ readonly wasmsgd_set_learning_rate: (a: number, b: number) => void;
+ readonly wasmsgd_step: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
+ readonly init: () => void;
+ readonly wasmadamw_set_learning_rate: (a: number, b: number) => void;
+ readonly wasmadamw_learning_rate: (a: number) => number;
+ readonly __wbg_wasmlocalglobalattention_free: (a: number, b: number) => void;
+ readonly __wbg_wasmlrscheduler_free: (a: number, b: number) => void;
+ readonly __wbindgen_export: (a: number, b: number) => number;
+ readonly __wbindgen_export2: (a: number, b: number, c: number, d: number) => number;
+ readonly __wbindgen_export3: (a: number) => void;
+ readonly __wbindgen_export4: (a: number, b: number, c: number) => void;
+ readonly __wbindgen_add_to_stack_pointer: (a: number) => number;
+ readonly __wbindgen_start: () => void;
  }

  export type SyncInitInput = BufferSource | WebAssembly.Module;

  /**
- * Instantiates the given `module`, which can either be bytes or
- * a precompiled `WebAssembly.Module`.
- *
- * @param {{ module: SyncInitInput }} module - Passing `SyncInitInput` directly is deprecated.
- *
- * @returns {InitOutput}
- */
+ * Instantiates the given `module`, which can either be bytes or
+ * a precompiled `WebAssembly.Module`.
+ *
+ * @param {{ module: SyncInitInput }} module - Passing `SyncInitInput` directly is deprecated.
+ *
+ * @returns {InitOutput}
+ */
  export function initSync(module: { module: SyncInitInput } | SyncInitInput): InitOutput;

  /**
- * If `module_or_path` is {RequestInfo} or {URL}, makes a request and
- * for everything else, calls `WebAssembly.instantiate` directly.
- *
- * @param {{ module_or_path: InitInput | Promise<InitInput> }} module_or_path - Passing `InitInput` directly is deprecated.
- *
- * @returns {Promise<InitOutput>}
- */
+ * If `module_or_path` is {RequestInfo} or {URL}, makes a request and
+ * for everything else, calls `WebAssembly.instantiate` directly.
+ *
+ * @param {{ module_or_path: InitInput | Promise<InitInput> }} module_or_path - Passing `InitInput` directly is deprecated.
+ *
+ * @returns {Promise<InitOutput>}
+ */
  export default function __wbg_init (module_or_path?: { module_or_path: InitInput | Promise<InitInput> } | InitInput | Promise<InitInput>): Promise<InitOutput>;
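
The second hunk leaves the initializer signatures unchanged; its substance is the JSDoc, which flags the direct-argument forms as deprecated in favor of the object forms (`{ module: ... }` for `initSync`, `{ module_or_path: ... }` for the default export). A sketch of the accepted call shapes, assuming an ES-module context with top-level await; the `.wasm` URL below is a placeholder, not the package's actual asset path:

```typescript
import __wbg_init, { initSync, version } from "@ruvector/attention-wasm";

// Simplest: module_or_path is optional, so the module may resolve its own bytes.
await __wbg_init();

// Explicit source in the documented object form (placeholder URL):
// await __wbg_init({ module_or_path: new URL("attention.wasm", import.meta.url) });

// Passing the InitInput directly still type-checks, but the JSDoc marks it deprecated:
// await __wbg_init(fetch("attention.wasm"));

// Synchronous variant when the bytes are already in hand (bytes: BufferSource):
// initSync({ module: bytes });

console.log(version()); // confirm the module is live
```
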