@litlab/audx 0.0.1 → 0.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +96 -53
- package/dist/bin.js +1212 -0
- package/dist/cc-DgCkkqq8.js +13 -0
- package/dist/cc-he3fHS3P.js +12 -0
- package/dist/index.d.ts +723 -3
- package/dist/index.js +1534 -126
- package/dist/react.d.ts +583 -0
- package/dist/react.js +1556 -0
- package/package.json +64 -39
- package/schemas/pack.schema.json +4 -0
- package/schemas/patch.schema.json +857 -0
- package/dist/codegen/theme-codegen.d.ts +0 -12
- package/dist/codegen/theme-codegen.d.ts.map +0 -1
- package/dist/codegen/theme-codegen.js +0 -153
- package/dist/codegen/theme-codegen.js.map +0 -1
- package/dist/commands/add.d.ts +0 -2
- package/dist/commands/add.d.ts.map +0 -1
- package/dist/commands/add.js +0 -120
- package/dist/commands/add.js.map +0 -1
- package/dist/commands/diff.d.ts +0 -2
- package/dist/commands/diff.d.ts.map +0 -1
- package/dist/commands/diff.js +0 -103
- package/dist/commands/diff.js.map +0 -1
- package/dist/commands/generate.d.ts +0 -12
- package/dist/commands/generate.d.ts.map +0 -1
- package/dist/commands/generate.js +0 -96
- package/dist/commands/generate.js.map +0 -1
- package/dist/commands/init.d.ts +0 -2
- package/dist/commands/init.d.ts.map +0 -1
- package/dist/commands/init.js +0 -79
- package/dist/commands/init.js.map +0 -1
- package/dist/commands/list.d.ts +0 -14
- package/dist/commands/list.d.ts.map +0 -1
- package/dist/commands/list.js +0 -93
- package/dist/commands/list.js.map +0 -1
- package/dist/commands/remove.d.ts +0 -2
- package/dist/commands/remove.d.ts.map +0 -1
- package/dist/commands/remove.js +0 -71
- package/dist/commands/remove.js.map +0 -1
- package/dist/commands/theme.d.ts +0 -31
- package/dist/commands/theme.d.ts.map +0 -1
- package/dist/commands/theme.js +0 -142
- package/dist/commands/theme.js.map +0 -1
- package/dist/commands/update.d.ts +0 -2
- package/dist/commands/update.d.ts.map +0 -1
- package/dist/commands/update.js +0 -123
- package/dist/commands/update.js.map +0 -1
- package/dist/core/alias-resolver.d.ts +0 -24
- package/dist/core/alias-resolver.d.ts.map +0 -1
- package/dist/core/alias-resolver.js +0 -87
- package/dist/core/alias-resolver.js.map +0 -1
- package/dist/core/config.d.ts +0 -21
- package/dist/core/config.d.ts.map +0 -1
- package/dist/core/config.js +0 -43
- package/dist/core/config.js.map +0 -1
- package/dist/core/file-writer.d.ts +0 -14
- package/dist/core/file-writer.d.ts.map +0 -1
- package/dist/core/file-writer.js +0 -90
- package/dist/core/file-writer.js.map +0 -1
- package/dist/core/package-manager.d.ts +0 -3
- package/dist/core/package-manager.d.ts.map +0 -1
- package/dist/core/package-manager.js +0 -17
- package/dist/core/package-manager.js.map +0 -1
- package/dist/core/registry.d.ts +0 -18
- package/dist/core/registry.d.ts.map +0 -1
- package/dist/core/registry.js +0 -69
- package/dist/core/registry.js.map +0 -1
- package/dist/core/theme-manager.d.ts +0 -35
- package/dist/core/theme-manager.d.ts.map +0 -1
- package/dist/core/theme-manager.js +0 -94
- package/dist/core/theme-manager.js.map +0 -1
- package/dist/core/utils.d.ts +0 -22
- package/dist/core/utils.d.ts.map +0 -1
- package/dist/core/utils.js +0 -44
- package/dist/core/utils.js.map +0 -1
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js.map +0 -1
- package/dist/types.d.ts +0 -116
- package/dist/types.d.ts.map +0 -1
- package/dist/types.js +0 -43
- package/dist/types.js.map +0 -1
package/dist/react.js
ADDED
|
@@ -0,0 +1,1556 @@
|
|
|
1
|
+
'use client';
|
|
2
|
+
import { _ as _extends } from './cc-he3fHS3P.js';
|
|
3
|
+
import { jsx } from 'react/jsx-runtime';
|
|
4
|
+
import { useMemo, useRef, useState, useEffect, use, useCallback, createContext, useSyncExternalStore } from 'react';
|
|
5
|
+
|
|
6
|
+
// Shared module-level audio state: one AudioContext and one master GainNode
// per page, plus the context-creation options remembered across calls.
let ctx = null;
let masterGain = null;
let storedOptions = {};
/**
 * Returns the shared `AudioContext`, creating one if needed.
 *
 * If the context is suspended (e.g. before a user gesture), it will be
 * resumed automatically. Pass `options` on first call to configure latency
 * and sample rate.
 *
 * @param options - Context creation options (stored for future calls)
 * @returns The shared `AudioContext`
 */ function getContext(options) {
    // Fix: the doc promised options were "stored for future calls" but the
    // original never wrote them, so latencyHint/sampleRate were ignored.
    if (options) {
        storedOptions = Object.assign({}, storedOptions, options);
    }
    if (!ctx || ctx.state === "closed") {
        ctx = new AudioContext({
            latencyHint: storedOptions.latencyHint,
            sampleRate: storedOptions.sampleRate
        });
        // The old master bus belonged to the old context; force re-creation.
        masterGain = null;
    }
    if (ctx.state === "suspended") {
        // Fire-and-forget: resume() is async but callers need the context now.
        void ctx.resume();
    }
    return ctx;
}
|
|
31
|
+
/**
 * Returns the master bus `GainNode`, creating it on first access.
 *
 * The master bus sits between all sound output and `ctx.destination`,
 * providing a single point to control global volume.
 */ function getMasterBus() {
    const audio = getContext();
    // Re-create the bus if it was never built or belongs to a closed context.
    const stale = !masterGain || masterGain.context !== audio;
    if (stale) {
        const bus = audio.createGain();
        bus.connect(audio.destination);
        masterGain = bus;
    }
    return masterGain;
}
|
|
44
|
+
/**
 * Returns the appropriate destination node for sound output.
 *
 * If a master bus has been created, routes through it; otherwise falls
 * back to `ctx.destination`.
 */ function getDestination() {
    const audio = getContext();
    const busIsLive = masterGain && masterGain.context === audio;
    return busIsLive ? masterGain : audio.destination;
}
|
|
56
|
+
/**
 * Configures the 3D audio listener position and orientation.
 *
 * Orientation fields are optional and default to the Web Audio defaults:
 * forward (0, 0, -1) and up (0, 1, 0).
 *
 * @param listener - Position and orientation values
 * @see {@link getListener}
 */ function setListener(listener) {
    const audio = getContext();
    const l = audio.listener;
    const forwardX = listener.forwardX != null ? listener.forwardX : 0;
    const forwardY = listener.forwardY != null ? listener.forwardY : 0;
    const forwardZ = listener.forwardZ != null ? listener.forwardZ : -1;
    const upX = listener.upX != null ? listener.upX : 0;
    const upY = listener.upY != null ? listener.upY : 1;
    const upZ = listener.upZ != null ? listener.upZ : 0;
    if (l.positionX) {
        // AudioParam-based listener API (Chromium / WebKit).
        l.positionX.value = listener.positionX;
        l.positionY.value = listener.positionY;
        l.positionZ.value = listener.positionZ;
        l.forwardX.value = forwardX;
        l.forwardY.value = forwardY;
        l.forwardZ.value = forwardZ;
        l.upX.value = upX;
        l.upY.value = upY;
        l.upZ.value = upZ;
    } else {
        // Fix: Firefox does not implement the listener AudioParams; fall back
        // to the deprecated (but supported there) setter methods.
        l.setPosition(listener.positionX, listener.positionY, listener.positionZ);
        l.setOrientation(forwardX, forwardY, forwardZ, upX, upY, upZ);
    }
}
|
|
75
|
+
|
|
76
|
+
/**
 * Creates a standalone {@link AudioAnalyser}.
 *
 * The caller is responsible for connecting a source to `analyser.node`.
 * Call `analyser.dispose()` when finished to disconnect. The typed arrays
 * returned by the getters are reused between calls — copy them if you need
 * a snapshot.
 *
 * @param opts - FFT size, smoothing, and dB range overrides
 */ function createAnalyser(opts) {
    const audio = getContext();
    const node = audio.createAnalyser();
    node.fftSize = opts != null && opts.fftSize != null ? opts.fftSize : 2048;
    node.smoothingTimeConstant = opts != null && opts.smoothingTimeConstant != null ? opts.smoothingTimeConstant : 0.8;
    if (opts != null && opts.minDecibels !== undefined) {
        node.minDecibels = opts.minDecibels;
    }
    if (opts != null && opts.maxDecibels !== undefined) {
        node.maxDecibels = opts.maxDecibels;
    }
    // Scratch buffers, sized once for the configured FFT and reused.
    const byteFreq = new Uint8Array(node.frequencyBinCount);
    const byteTime = new Uint8Array(node.fftSize);
    const floatFreq = new Float32Array(node.frequencyBinCount);
    const floatTime = new Float32Array(node.fftSize);
    return {
        node,
        frequencyBinCount: node.frequencyBinCount,
        getFrequencyData: () => {
            node.getByteFrequencyData(byteFreq);
            return byteFreq;
        },
        getTimeDomainData: () => {
            node.getByteTimeDomainData(byteTime);
            return byteTime;
        },
        getFloatFrequencyData: () => {
            node.getFloatFrequencyData(floatFreq);
            return floatFreq;
        },
        getFloatTimeDomainData: () => {
            node.getFloatTimeDomainData(floatTime);
            return floatTime;
        },
        dispose: () => {
            try {
                node.disconnect();
            } catch (_) {}
        }
    };
}
|
121
|
+
/**
 * Creates an {@link AudioAnalyser} that is pre-connected to the master bus.
 *
 * Useful for visualising the combined output of all sounds. The returned
 * analyser automatically disconnects from the master bus on `dispose()`.
 *
 * @param opts - FFT size, smoothing, and dB range overrides
 */ function createMasterAnalyser(opts) {
    const bus = getMasterBus();
    const analyser = createAnalyser(opts);
    bus.connect(analyser.node);
    // Chain the bus disconnect in front of the analyser's own cleanup.
    const innerDispose = analyser.dispose;
    analyser.dispose = () => {
        try {
            bus.disconnect(analyser.node);
        } catch (_) {}
        innerDispose();
    };
    return analyser;
}
|
|
142
|
+
|
|
143
|
+
/**
 * Builds the standard dry/wet skeleton shared by all mix-based effects.
 *
 * Wires input -> dry(1-mix) -> output in parallel with
 * input -> wet(mix) -> [effect built by `create`] -> wetOut -> output,
 * then invokes `create(wet, wetOut)` to fill in the effect core.
 *
 * @param ctx - Audio context to build nodes in
 * @param mix - Wet fraction in [0, 1]
 * @param create - Callback that wires the wet path; may return `{ dispose }`
 * @returns `{ input, output, dispose }` (dispose undefined when none given)
 */ function withMix(ctx, mix, create) {
    const makeGain = (level) => {
        const g = ctx.createGain();
        g.gain.value = level;
        return g;
    };
    const input = ctx.createGain();
    const output = ctx.createGain();
    // Dry branch at (1 - mix).
    const dryGain = makeGain(1 - mix);
    input.connect(dryGain);
    dryGain.connect(output);
    // Wet branch at mix; the effect core sits between wetGain and wetReturn.
    const wetGain = makeGain(mix);
    input.connect(wetGain);
    const wetReturn = ctx.createGain();
    wetReturn.connect(output);
    const built = create(wetGain, wetReturn);
    return {
        input,
        output,
        dispose: built ? built.dispose : undefined
    };
}
|
|
163
|
+
/**
 * Creates an algorithmic reverb by convolving with a synthesized impulse:
 * a two-channel noise burst shaped by an exponential decay envelope.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - decay (s), mix (0..1), preDelay (s), damping (0..1),
 *               roomSize (scales the effective decay)
 */ function createReverb(ctx, opts) {
    var _opts_decay, _opts_mix, _opts_preDelay, _opts_damping, _opts_roomSize;
    const decay = (_opts_decay = opts.decay) != null ? _opts_decay : 0.5;
    const mix = (_opts_mix = opts.mix) != null ? _opts_mix : 0.3;
    const preDelay = (_opts_preDelay = opts.preDelay) != null ? _opts_preDelay : 0;
    const damping = (_opts_damping = opts.damping) != null ? _opts_damping : 0;
    const roomSize = (_opts_roomSize = opts.roomSize) != null ? _opts_roomSize : 1;
    return withMix(ctx, mix, (wet, wetOut)=>{
        const sampleRate = ctx.sampleRate;
        const effectiveDecay = decay * roomSize;
        const length = Math.ceil(sampleRate * effectiveDecay);
        // Impulse response: per-channel independent noise (stereo
        // decorrelation) under an exponential envelope; 0.28 sets the
        // envelope time constant as a fraction of the impulse length.
        const buffer = ctx.createBuffer(2, length, sampleRate);
        for(let ch = 0; ch < 2; ch++){
            const data = buffer.getChannelData(ch);
            for(let i = 0; i < length; i++){
                data[i] = (Math.random() * 2 - 1) * Math.exp(-i / (length * 0.28));
            }
        }
        if (damping > 0) {
            // One-pole low-pass over the impulse darkens the tail; the
            // coefficient is clamped below 1 to keep the filter stable.
            for(let ch = 0; ch < 2; ch++){
                const data = buffer.getChannelData(ch);
                const coeff = Math.min(damping, 0.99);
                let prev = 0;
                for(let i = 0; i < length; i++){
                    prev = data[i] * (1 - coeff) + prev * coeff;
                    data[i] = prev;
                }
            }
        }
        const convolver = ctx.createConvolver();
        convolver.buffer = buffer;
        if (preDelay > 0) {
            // createDelay's argument is maxDelayTime; padded so the value fits.
            const preDelayNode = ctx.createDelay(Math.max(preDelay + 0.01, 1));
            preDelayNode.delayTime.value = preDelay;
            wet.connect(preDelayNode);
            preDelayNode.connect(convolver);
        } else {
            wet.connect(convolver);
        }
        convolver.connect(wetOut);
    });
}
|
|
205
|
+
// Decoded impulse responses cached per URL so repeated effects share one fetch.
const irCache = new Map();
/**
 * Creates a convolution effect from a caller-provided impulse buffer or a
 * fetched impulse-response URL.
 *
 * Until a fetched impulse arrives, the convolver has no buffer and the wet
 * path is silent; the dry path is unaffected.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - `buffer` or `url` for the impulse response, plus mix (0..1)
 */ function createConvolver(ctx, opts) {
    var _opts_mix;
    const mix = (_opts_mix = opts.mix) != null ? _opts_mix : 0.5;
    return withMix(ctx, mix, (wet, wetOut)=>{
        const convolver = ctx.createConvolver();
        if (opts.buffer) {
            convolver.buffer = opts.buffer;
        } else if (opts.url) {
            const cached = irCache.get(opts.url);
            if (cached) {
                convolver.buffer = cached;
            } else {
                const url = opts.url;
                fetch(url).then((res)=>res.arrayBuffer()).then((data)=>ctx.decodeAudioData(data)).then((decoded)=>{
                    irCache.set(url, decoded);
                    convolver.buffer = decoded;
                }).catch((err)=>{
                    // Fix: log and continue — a failed IR load leaves the wet
                    // path silent instead of raising an unhandled rejection.
                    console.error(`[audx] failed to load impulse response "${url}":`, err);
                });
            }
        }
        wet.connect(convolver);
        convolver.connect(wetOut);
    });
}
|
|
229
|
+
/**
 * Creates a feedback delay (echo) effect, optionally with a filter in the
 * feedback loop to shape successive repeats.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - time (s), feedback (0..1), mix (0..1), feedbackFilter
 */ function createDelay(ctx, opts) {
    const time = opts.time != null ? opts.time : 0.25;
    const feedback = opts.feedback != null ? opts.feedback : 0.3;
    const mix = opts.mix != null ? opts.mix : 0.3;
    return withMix(ctx, mix, (wet, wetOut) => {
        // createDelay's argument is maxDelayTime; padded so `time` fits.
        const delayLine = ctx.createDelay(Math.max(time + 0.01, 1));
        delayLine.delayTime.value = time;
        const feedbackGain = ctx.createGain();
        feedbackGain.gain.value = feedback;
        wet.connect(delayLine);
        delayLine.connect(feedbackGain);
        const filterSpec = opts.feedbackFilter;
        if (filterSpec) {
            // Filter inside the loop so each repeat is shaped cumulatively.
            const loopFilter = ctx.createBiquadFilter();
            loopFilter.type = filterSpec.type;
            loopFilter.frequency.value = filterSpec.frequency;
            loopFilter.Q.value = filterSpec.Q != null ? filterSpec.Q : 1;
            feedbackGain.connect(loopFilter);
            loopFilter.connect(delayLine);
        } else {
            feedbackGain.connect(delayLine);
        }
        delayLine.connect(wetOut);
    });
}
|
|
255
|
+
/**
 * Creates a soft-clipping distortion using a tanh wave-shaping curve.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - amount (tanh drive), mix (0..1)
 */ function createDistortion(ctx, opts) {
    const amount = opts.amount != null ? opts.amount : 50;
    const mix = opts.mix != null ? opts.mix : 0.5;
    return withMix(ctx, mix, (wet, wetOut) => {
        const resolution = 44100;
        const curve = new Float32Array(resolution);
        for (let i = 0; i < resolution; i++) {
            // Map the curve index to [-1, 1) and soft-clip it.
            const x = i * 2 / resolution - 1;
            curve[i] = Math.tanh(amount * x);
        }
        const shaper = ctx.createWaveShaper();
        shaper.curve = curve;
        shaper.oversample = "4x";
        wet.connect(shaper);
        shaper.connect(wetOut);
    });
}
|
|
274
|
+
/**
 * Creates a stereo chorus: two LFO-modulated delay lines with slightly
 * different base times and LFO rates, mixed back with the dry signal.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - rate (Hz), depth (s of delay-time modulation), mix (0..1)
 */ function createChorus(ctx, opts) {
    var _opts_rate, _opts_depth, _opts_mix;
    const rate = (_opts_rate = opts.rate) != null ? _opts_rate : 1.5;
    const depth = (_opts_depth = opts.depth) != null ? _opts_depth : 0.003;
    const mix = (_opts_mix = opts.mix) != null ? _opts_mix : 0.3;
    return withMix(ctx, mix, (wet, wetOut)=>{
        // Two parallel delay lines with different base times widen the image.
        const delayL = ctx.createDelay();
        delayL.delayTime.value = 0.012;
        const delayR = ctx.createDelay();
        delayR.delayTime.value = 0.016;
        const lfoL = ctx.createOscillator();
        lfoL.type = "sine";
        lfoL.frequency.value = rate;
        const lfoR = ctx.createOscillator();
        lfoR.type = "sine";
        // The right LFO runs 10% faster so the two sides decorrelate.
        lfoR.frequency.value = rate * 1.1;
        const lfoGainL = ctx.createGain();
        lfoGainL.gain.value = depth;
        const lfoGainR = ctx.createGain();
        lfoGainR.gain.value = depth;
        // LFO -> depth gain -> delayTime AudioParam (modulation, not audio).
        lfoL.connect(lfoGainL);
        lfoGainL.connect(delayL.delayTime);
        lfoL.start();
        lfoR.connect(lfoGainR);
        lfoGainR.connect(delayR.delayTime);
        lfoR.start();
        wet.connect(delayL);
        wet.connect(delayR);
        delayL.connect(wetOut);
        delayR.connect(wetOut);
        return {
            dispose () {
                // Oscillators may already be stopped; ignore the resulting error.
                try {
                    lfoL.stop();
                } catch (_) {}
                try {
                    lfoR.stop();
                } catch (_) {}
            }
        };
    });
}
|
|
316
|
+
/**
 * Creates a flanger: a short LFO-swept delay line with feedback, mixed
 * against the dry signal.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - rate (Hz), depth (s of sweep), feedback (0..1), mix (0..1)
 */ function createFlanger(ctx, opts) {
    const rate = opts.rate != null ? opts.rate : 0.5;
    const depth = opts.depth != null ? opts.depth : 0.002;
    const feedback = opts.feedback != null ? opts.feedback : 0.5;
    const mix = opts.mix != null ? opts.mix : 0.5;
    return withMix(ctx, mix, (wet, wetOut) => {
        // Short base delay (5 ms) swept by the LFO produces the comb sweep.
        const sweptDelay = ctx.createDelay();
        sweptDelay.delayTime.value = 0.005;
        const lfo = ctx.createOscillator();
        lfo.type = "sine";
        lfo.frequency.value = rate;
        const lfoDepth = ctx.createGain();
        lfoDepth.gain.value = depth;
        // LFO -> depth gain -> delayTime AudioParam (modulation, not audio).
        lfo.connect(lfoDepth);
        lfoDepth.connect(sweptDelay.delayTime);
        lfo.start();
        const feedbackGain = ctx.createGain();
        feedbackGain.gain.value = feedback;
        sweptDelay.connect(feedbackGain);
        feedbackGain.connect(sweptDelay);
        wet.connect(sweptDelay);
        sweptDelay.connect(wetOut);
        return {
            dispose() {
                try {
                    lfo.stop();
                } catch (_) {}
            }
        };
    });
}
|
|
348
|
+
/**
 * Creates a phaser: a series chain of all-pass filters whose center
 * frequencies are swept together by one LFO, with a feedback path around
 * the whole chain.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - rate (Hz), depth (Hz of sweep), stages (filter count),
 *               feedback (0..1), mix (0..1)
 */ function createPhaser(ctx, opts) {
    var _opts_rate, _opts_depth, _opts_stages, _opts_feedback, _opts_mix;
    const rate = (_opts_rate = opts.rate) != null ? _opts_rate : 0.5;
    const depth = (_opts_depth = opts.depth) != null ? _opts_depth : 1000;
    const stages = (_opts_stages = opts.stages) != null ? _opts_stages : 4;
    const feedback = (_opts_feedback = opts.feedback) != null ? _opts_feedback : 0.5;
    const mix = (_opts_mix = opts.mix) != null ? _opts_mix : 0.5;
    return withMix(ctx, mix, (wet, wetOut)=>{
        const filters = [];
        // Stage center frequencies spread across the spectrum; the table
        // cycles if more stages are requested than entries exist.
        const baseFreqs = [
            200,
            600,
            1200,
            2400,
            4800,
            8000
        ];
        for(let i = 0; i < stages; i++){
            const f = ctx.createBiquadFilter();
            f.type = "allpass";
            f.frequency.value = baseFreqs[i % baseFreqs.length];
            f.Q.value = 0.5;
            filters.push(f);
        }
        // Chain the all-pass stages in series.
        for(let i = 0; i < filters.length - 1; i++){
            filters[i].connect(filters[i + 1]);
        }
        const lfo = ctx.createOscillator();
        lfo.type = "sine";
        lfo.frequency.value = rate;
        const lfoGain = ctx.createGain();
        lfoGain.gain.value = depth;
        lfo.connect(lfoGain);
        // One LFO sweeps every stage's frequency AudioParam in parallel.
        for (const f of filters){
            lfoGain.connect(f.frequency);
        }
        lfo.start();
        // Feedback: end of the chain back into the first stage.
        const fb = ctx.createGain();
        fb.gain.value = feedback;
        filters[filters.length - 1].connect(fb);
        fb.connect(filters[0]);
        wet.connect(filters[0]);
        filters[filters.length - 1].connect(wetOut);
        return {
            dispose () {
                try {
                    lfo.stop();
                } catch (_) {}
            }
        };
    });
}
|
|
400
|
+
/**
 * Creates a tremolo: an LFO modulating a gain node's amplitude.
 *
 * The carrier gain idles at `1 - depth/2` and the LFO adds `±depth/2`,
 * so the output swings between `1 - depth` and `1`.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - rate (Hz), depth (0..1)
 */ function createTremolo(ctx, opts) {
    const rate = opts.rate != null ? opts.rate : 4;
    const depth = opts.depth != null ? opts.depth : 0.5;
    const halfDepth = depth / 2;
    const input = ctx.createGain();
    const output = ctx.createGain();
    const amplitude = ctx.createGain();
    amplitude.gain.value = 1 - halfDepth;
    input.connect(amplitude);
    amplitude.connect(output);
    const lfo = ctx.createOscillator();
    lfo.type = "sine";
    lfo.frequency.value = rate;
    const lfoDepth = ctx.createGain();
    lfoDepth.gain.value = halfDepth;
    // LFO -> depth gain -> the amplitude gain's AudioParam.
    lfo.connect(lfoDepth);
    lfoDepth.connect(amplitude.gain);
    lfo.start();
    return {
        input,
        output,
        dispose() {
            try {
                lfo.stop();
            } catch (_) {}
        }
    };
}
|
|
428
|
+
/**
 * Creates a vibrato: the full signal runs through a single delay line whose
 * delay time is swept by an LFO (pitch modulation, no dry path).
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - rate (Hz), depth (s of delay-time sweep)
 */ function createVibrato(ctx, opts) {
    const rate = opts.rate != null ? opts.rate : 5;
    const depth = opts.depth != null ? opts.depth : 0.002;
    const input = ctx.createGain();
    const output = ctx.createGain();
    const sweptDelay = ctx.createDelay();
    sweptDelay.delayTime.value = depth;
    const lfo = ctx.createOscillator();
    lfo.type = "sine";
    lfo.frequency.value = rate;
    const lfoDepth = ctx.createGain();
    lfoDepth.gain.value = depth;
    // LFO -> depth gain -> delayTime AudioParam (modulation, not audio).
    lfo.connect(lfoDepth);
    lfoDepth.connect(sweptDelay.delayTime);
    lfo.start();
    input.connect(sweptDelay);
    sweptDelay.connect(output);
    return {
        input,
        output,
        dispose() {
            try {
                lfo.stop();
            } catch (_) {}
        }
    };
}
|
|
456
|
+
/**
 * Creates a bitcrusher built on a `WaveShaperNode` whose transfer curve
 * quantizes amplitude to 2^bits steps.
 *
 * NOTE(review): `sampleRateReduction` coarsens the transfer curve in blocks
 * along the AMPLITUDE axis — it is an amplitude-domain approximation, not
 * true time-domain downsampling (that would need an AudioWorklet). Confirm
 * this matches the documented intent.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - bits (quantization depth), mix (0..1), sampleRateReduction
 */ function createBitcrusher(ctx, opts) {
    var _opts_bits, _opts_mix, _opts_sampleRateReduction;
    const bits = (_opts_bits = opts.bits) != null ? _opts_bits : 8;
    const mix = (_opts_mix = opts.mix) != null ? _opts_mix : 1;
    const srReduction = (_opts_sampleRateReduction = opts.sampleRateReduction) != null ? _opts_sampleRateReduction : 1;
    return withMix(ctx, mix, (wet, wetOut)=>{
        const shaper = ctx.createWaveShaper();
        const steps = 2 ** bits;
        const samples = 65536;
        const curve = new Float32Array(samples);
        for(let i = 0; i < samples; i++){
            // Map the curve index to input amplitude in [-1, 1).
            const x = i * 2 / samples - 1;
            if (srReduction > 1) {
                // Hold the quantized value constant across srReduction-sized
                // blocks of the curve.
                const blockIndex = Math.floor(i / srReduction) * srReduction;
                const blockX = blockIndex * 2 / samples - 1;
                curve[i] = Math.round(blockX * steps) / steps;
            } else {
                curve[i] = Math.round(x * steps) / steps;
            }
        }
        shaper.curve = curve;
        wet.connect(shaper);
        shaper.connect(wetOut);
    });
}
|
|
481
|
+
/**
 * Creates a dynamics compressor backed by a single `DynamicsCompressorNode`.
 *
 * Defaults: -24 dB threshold, 30 dB knee, 4:1 ratio, 3 ms attack,
 * 250 ms release.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - threshold, knee, ratio, attack, release overrides
 */ function createCompressor(ctx, opts) {
    const node = ctx.createDynamicsCompressor();
    node.threshold.value = opts.threshold != null ? opts.threshold : -24;
    node.knee.value = opts.knee != null ? opts.knee : 30;
    node.ratio.value = opts.ratio != null ? opts.ratio : 4;
    node.attack.value = opts.attack != null ? opts.attack : 0.003;
    node.release.value = opts.release != null ? opts.release : 0.25;
    // The compressor node serves as both ends of the effect.
    return {
        input: node,
        output: node
    };
}
|
|
494
|
+
/**
 * Creates a multi-band EQ as a serial chain of `BiquadFilterNode`s.
 *
 * With no bands the input is wired straight to the output.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - bands: array of { type, frequency, gain, Q? } (Q defaults to 1)
 */ function createEQ(ctx, opts) {
    const input = ctx.createGain();
    const output = ctx.createGain();
    if (opts.bands.length === 0) {
        input.connect(output);
        return {
            input,
            output
        };
    }
    const chain = opts.bands.map((band) => {
        const filter = ctx.createBiquadFilter();
        filter.type = band.type;
        filter.frequency.value = band.frequency;
        filter.gain.value = band.gain;
        filter.Q.value = band.Q != null ? band.Q : 1;
        return filter;
    });
    // Fold the chain: input -> band[0] -> ... -> band[n-1] -> output.
    const tail = chain.reduce((upstream, filter) => {
        upstream.connect(filter);
        return filter;
    }, input);
    tail.connect(output);
    return {
        input,
        output
    };
}
|
|
523
|
+
/**
 * Wraps a single `GainNode` as a pass-through volume effect.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - value: linear gain factor
 */ function createGainEffect(ctx, opts) {
    const node = ctx.createGain();
    node.gain.value = opts.value;
    return {
        input: node,
        output: node
    };
}
|
|
531
|
+
/**
 * Wraps a single `StereoPannerNode` as a pass-through pan effect.
 *
 * @param ctx - Audio context to build nodes in
 * @param opts - value: pan position in [-1, 1]
 */ function createPanEffect(ctx, opts) {
    const node = ctx.createStereoPanner();
    node.pan.value = opts.value;
    return {
        input: node,
        output: node
    };
}
|
|
539
|
+
/**
 * Instantiates an {@link EffectNode} from an {@link Effect} descriptor.
 *
 * This is the main factory used by the engine to build effect chains: it
 * looks up the matching `create*` builder by `effect.type` and invokes it.
 *
 * @param ctx - The audio context to create nodes in
 * @param effect - The effect descriptor
 * @returns A connectable effect node with `input`, `output`, and optional `dispose`
 */ function createEffect(ctx, effect) {
    const builders = {
        reverb: createReverb,
        convolver: createConvolver,
        delay: createDelay,
        distortion: createDistortion,
        chorus: createChorus,
        flanger: createFlanger,
        phaser: createPhaser,
        tremolo: createTremolo,
        vibrato: createVibrato,
        bitcrusher: createBitcrusher,
        compressor: createCompressor,
        eq: createEQ,
        gain: createGainEffect,
        pan: createPanEffect
    };
    const build = builders[effect.type];
    // An unknown type yields undefined rather than throwing.
    return build ? build(ctx, effect) : undefined;
}
|
|
580
|
+
|
|
581
|
+
// Amplitude floor treated as "silent". NOTE(review): presumably used as the
// non-zero target of exponential gain ramps elsewhere in this file (an
// exponentialRampToValueAtTime target cannot be 0) — confirm against callers.
const SILENCE = 0.0001;
|
|
582
|
+
/**
 * Type guard: a sound definition is multi-layer when it carries a `layers`
 * property rather than being a single layer itself.
 */ function isMultiLayer(def) {
    return Reflect.has(def, "layers");
}
/**
 * Normalizes a sound definition to multi-layer form: a single-layer def is
 * wrapped as `{ layers: [def], effects: [] }`; multi-layer defs pass through
 * unchanged.
 */ function normalize(def) {
    return isMultiLayer(def) ? def : {
        layers: [
            def
        ],
        effects: []
    };
}
|
|
594
|
+
/** Fills `data` in place with uniform white noise in [-1, 1). */
function generateWhiteNoise(data) {
    const count = data.length;
    for (let i = 0; i < count; i++) {
        data[i] = 2 * Math.random() - 1;
    }
}
|
|
599
|
+
/**
 * Fills `data` in place with pink (1/f) noise.
 *
 * Coefficients follow Paul Kellet's well-known refined filter approximation:
 * seven one-pole filters of white noise summed with fixed weights. The
 * update order matters — `b6` is a one-sample delay of the white input and
 * must be written AFTER the output sample is computed. The 0.11 factor
 * scales the sum roughly into [-1, 1].
 */ function generatePinkNoise(data) {
    let b0 = 0;
    let b1 = 0;
    let b2 = 0;
    let b3 = 0;
    let b4 = 0;
    let b5 = 0;
    let b6 = 0;
    for(let i = 0; i < data.length; i++){
        const white = Math.random() * 2 - 1;
        b0 = 0.99886 * b0 + white * 0.0555179;
        b1 = 0.99332 * b1 + white * 0.0750759;
        b2 = 0.969 * b2 + white * 0.153852;
        b3 = 0.8665 * b3 + white * 0.3104856;
        b4 = 0.55 * b4 + white * 0.5329522;
        b5 = -0.7616 * b5 - white * 0.016898;
        data[i] = (b0 + b1 + b2 + b3 + b4 + b5 + b6 + white * 0.5362) * 0.11;
        // One-sample delayed white term; intentionally updated last.
        b6 = white * 0.115926;
    }
}
|
|
619
|
+
/**
 * Fills `data` in place with Brownian (red) noise: a leaky integration of
 * white noise, scaled by 3.5 to roughly normalize the amplitude.
 */ function generateBrownNoise(data) {
    let walk = 0;
    const count = data.length;
    for (let i = 0; i < count; i++) {
        const step = Math.random() * 2 - 1;
        // Leaky integrator keeps the random walk bounded.
        walk = (walk + 0.02 * step) / 1.02;
        data[i] = walk * 3.5;
    }
}
|
|
627
|
+
/**
 * Renders `duration` seconds of noise of the given color into a fresh
 * single-channel `AudioBuffer`.
 *
 * @param ctx - Context the buffer is created for
 * @param color - "pink" | "brown" | anything else renders white
 * @param duration - Buffer length in seconds
 */ function createNoiseBuffer(ctx, color, duration) {
    const frameCount = ctx.sampleRate * duration;
    const buffer = ctx.createBuffer(1, frameCount, ctx.sampleRate);
    const samples = buffer.getChannelData(0);
    if (color === "pink") {
        generatePinkNoise(samples);
    } else if (color === "brown") {
        generateBrownNoise(samples);
    } else {
        generateWhiteNoise(samples);
    }
    return buffer;
}
|
|
644
|
+
// Per-URL cache of in-flight/settled decode promises so concurrent callers
// share a single fetch.
const sampleCache = new Map();
/**
 * Fetches and decodes an audio sample, caching by URL.
 *
 * Fix: the in-flight promise is cached (not just the decoded result), so
 * concurrent calls for the same URL no longer trigger duplicate fetches.
 * A failed load is evicted from the cache so a later call can retry.
 *
 * @param ctx - Context used for decoding
 * @param url - Sample URL
 * @returns Promise resolving to the decoded `AudioBuffer`
 */ function loadSample(ctx, url) {
    const cached = sampleCache.get(url);
    if (cached) return cached;
    const pending = fetch(url).then((response)=>response.arrayBuffer()).then((data)=>ctx.decodeAudioData(data)).catch((err)=>{
        // Drop the failed entry so the next call retries instead of
        // replaying a cached rejection forever.
        sampleCache.delete(url);
        throw err;
    });
    sampleCache.set(url, pending);
    return pending;
}
|
|
654
|
+
/**
 * Builds a scheduled `OscillatorNode` source, optionally with a frequency
 * sweep and a sine FM modulator.
 *
 * `frequency` may be a number or `{ start, end }` for an exponential sweep
 * (the end value is clamped to >= 1 Hz — an exponential ramp target cannot
 * be 0). Oscillators are stopped 0.1 s after `duration` ends.
 *
 * @param ctx - Audio context to build nodes in
 * @param src - Oscillator layer descriptor (type, frequency, detune, fm)
 * @param t - Absolute start time on the context clock
 * @param duration - Nominal layer duration in seconds
 * @returns node/scheduled plus the frequency and detune AudioParams for
 *          later modulation
 */ function buildOscillatorSource(ctx, src, t, duration) {
    const osc = ctx.createOscillator();
    osc.type = src.type;
    if (typeof src.frequency === "number") {
        osc.frequency.setValueAtTime(src.frequency, t);
    } else {
        osc.frequency.setValueAtTime(src.frequency.start, t);
        osc.frequency.exponentialRampToValueAtTime(Math.max(src.frequency.end, 1), t + duration);
    }
    // NOTE(review): a detune of 0 is skipped by this truthiness check —
    // harmless, since 0 is the node's default.
    if (src.detune) {
        osc.detune.value = src.detune;
    }
    osc.start(t);
    osc.stop(t + duration + 0.1);
    let fmMod;
    if (src.fm) {
        // FM modulator frequency is a ratio of the carrier; a sweep uses
        // the start frequency as the carrier reference.
        const carrierFreq = typeof src.frequency === "number" ? src.frequency : src.frequency.start;
        fmMod = ctx.createOscillator();
        fmMod.type = "sine";
        fmMod.frequency.value = carrierFreq * src.fm.ratio;
        const modGain = ctx.createGain();
        modGain.gain.value = src.fm.depth;
        // Modulator -> depth gain -> carrier frequency AudioParam.
        fmMod.connect(modGain);
        modGain.connect(osc.frequency);
        fmMod.start(t);
        fmMod.stop(t + duration + 0.1);
    }
    return {
        node: osc,
        scheduled: osc,
        frequencyParam: osc.frequency,
        detuneParam: osc.detune
    };
}
|
|
688
|
+
/**
 * Builds a scheduled noise source: a fresh noise buffer (padded 0.1 s past
 * `duration`) played through an `AudioBufferSourceNode`.
 *
 * @param ctx - Audio context to build nodes in
 * @param src - Noise layer descriptor (`color` defaults to "white")
 * @param t - Absolute start time on the context clock
 * @param duration - Nominal layer duration in seconds
 */ function buildNoiseSource(ctx, src, t, duration) {
    const color = src.color != null ? src.color : "white";
    const source = ctx.createBufferSource();
    source.buffer = createNoiseBuffer(ctx, color, duration + 0.1);
    source.start(t);
    source.stop(t + duration + 0.1);
    return {
        node: source,
        scheduled: source
    };
}
|
|
701
|
+
/**
 * Builds a custom-waveform oscillator from a harmonic amplitude list via
 * `createPeriodicWave`.
 *
 * Index 0 of the real/imag arrays is the DC term and is left at 0; each
 * harmonic amplitude goes into the imaginary (sine) coefficients. Supports
 * the same fixed-or-sweep `frequency` shape as the oscillator source, with
 * the sweep end clamped to >= 1 Hz for the exponential ramp.
 *
 * @param ctx - Audio context to build nodes in
 * @param src - Wavetable layer descriptor (harmonics, frequency)
 * @param t - Absolute start time on the context clock
 * @param duration - Nominal layer duration in seconds
 */ function buildWavetableSource(ctx, src, t, duration) {
    const real = new Float32Array(src.harmonics.length + 1);
    const imag = new Float32Array(src.harmonics.length + 1);
    real[0] = 0;
    imag[0] = 0;
    for(let i = 0; i < src.harmonics.length; i++){
        real[i + 1] = 0;
        imag[i + 1] = src.harmonics[i];
    }
    const wave = ctx.createPeriodicWave(real, imag, {
        disableNormalization: false
    });
    const osc = ctx.createOscillator();
    osc.setPeriodicWave(wave);
    if (typeof src.frequency === "number") {
        osc.frequency.setValueAtTime(src.frequency, t);
    } else {
        osc.frequency.setValueAtTime(src.frequency.start, t);
        osc.frequency.exponentialRampToValueAtTime(Math.max(src.frequency.end, 1), t + duration);
    }
    osc.start(t);
    // Stop 0.1 s past the nominal duration, matching the other sources.
    osc.stop(t + duration + 0.1);
    return {
        node: osc,
        scheduled: osc,
        frequencyParam: osc.frequency,
        detuneParam: osc.detune
    };
}
|
|
730
|
+
/**
 * Creates a buffer-source voice for a pre-decoded AudioBuffer or a URL.
 *
 * URL samples are fetched/decoded asynchronously and start as soon as the
 * buffer is ready — at `t`, or immediately if `t` has already passed.
 *
 * @param ctx - BaseAudioContext to build nodes in
 * @param src - Sample source description (`buffer` or `url`, loop options)
 * @param t - Absolute start time (seconds)
 * @returns Source result exposing detune and playbackRate AudioParams
 */
function buildSampleSource(ctx, src, t) {
    const node = ctx.createBufferSource();
    if (src.playbackRate !== undefined) {
        node.playbackRate.value = src.playbackRate;
    }
    if (src.detune !== undefined) {
        node.detune.value = src.detune;
    }
    if (src.loop) {
        node.loop = true;
        if (src.loopStart !== undefined) node.loopStart = src.loopStart;
        if (src.loopEnd !== undefined) node.loopEnd = src.loopEnd;
    }
    if (src.buffer) {
        node.buffer = src.buffer;
        node.start(t);
    } else if (src.url) {
        loadSample(ctx, src.url).then((buf) => {
            node.buffer = buf;
            node.start(Math.max(t, ctx.currentTime));
        }).catch((err) => {
            // Fix: previously this promise had no rejection handler, so a failed
            // fetch/decode surfaced as an unhandled rejection. Keep the voice
            // silent and report the failure instead.
            console.error(`[audx] failed to load sample "${src.url}"`, err);
        });
    }
    return {
        node,
        scheduled: node,
        detuneParam: node.detune,
        playbackRateParam: node.playbackRate
    };
}
|
|
759
|
+
/** Wraps a live MediaStream (e.g. microphone) as an un-scheduled source node. */
function buildStreamSource(ctx, src) {
    const node = ctx.createMediaStreamSource(src.stream);
    return { node };
}
|
|
765
|
+
/** Creates a ConstantSourceNode (DC offset), defaulting to an offset of 1. */
function buildConstantSource(ctx, src, t, duration) {
    const node = ctx.createConstantSource();
    const offset = src.offset != null ? src.offset : 1;
    node.offset.value = offset;
    node.start(t);
    node.stop(t + duration + 0.1);
    return {
        node,
        scheduled: node
    };
}
|
|
776
|
+
/**
 * Dispatches a source description to the matching builder.
 *
 * @param ctx - BaseAudioContext to build nodes in
 * @param src - Discriminated-union source description (`src.type` selects)
 * @param t - Absolute start time (seconds)
 * @param duration - Envelope duration passed to time-bounded sources
 * @returns The built source result
 * @throws {Error} On an unrecognized source type. Previously an unknown type
 * silently returned `undefined`, which crashed callers later with an opaque
 * TypeError; failing fast here names the actual problem.
 */
function buildSource(ctx, src, t, duration) {
    switch(src.type){
        case "sine":
        case "triangle":
        case "square":
        case "sawtooth":
            return buildOscillatorSource(ctx, src, t, duration);
        case "noise":
            return buildNoiseSource(ctx, src, t, duration);
        case "wavetable":
            return buildWavetableSource(ctx, src, t, duration);
        case "sample":
            return buildSampleSource(ctx, src, t);
        case "stream":
            return buildStreamSource(ctx, src);
        case "constant":
            return buildConstantSource(ctx, src, t, duration);
        default:
            throw new Error(`Unknown source type: ${src.type}`);
    }
}
|
|
795
|
+
/**
 * Creates a BiquadFilterNode from a filter description.
 *
 * Optionally applies a frequency envelope: linear attack up to `envelope.peak`,
 * then an exponential decay back toward the base frequency.
 */
function buildBiquadFilter(ctx, filter, t) {
    const node = ctx.createBiquadFilter();
    node.type = filter.type;
    node.frequency.setValueAtTime(filter.frequency, t);
    node.Q.value = filter.resonance != null ? filter.resonance : 1;
    if (filter.gain !== undefined) {
        node.gain.value = filter.gain;
    }
    const env = filter.envelope;
    if (env) {
        const attack = env.attack != null ? env.attack : 0;
        const attackEnd = t + attack;
        node.frequency.setValueAtTime(filter.frequency, t);
        node.frequency.linearRampToValueAtTime(env.peak, attackEnd);
        // Exponential ramps require a strictly positive target value.
        node.frequency.exponentialRampToValueAtTime(Math.max(filter.frequency, 1), attackEnd + env.decay);
    }
    return {
        node,
        frequencyParam: node.frequency
    };
}
|
|
817
|
+
/** Wraps an IIRFilterNode built from explicit feedforward/feedback coefficients. */
function buildIIRFilter(ctx, filter) {
    const { feedforward, feedback } = filter;
    return {
        node: ctx.createIIRFilter(feedforward, feedback)
    };
}
|
|
823
|
+
/**
 * Builds one filter node. IIR filters expose no modulatable params; biquads
 * expose frequency/detune/Q/gain AudioParams for LFO routing.
 */
function buildSingleFilter(ctx, filter, t) {
    if (filter.type === "iir") {
        return {
            node: buildIIRFilter(ctx, filter).node
        };
    }
    const built = buildBiquadFilter(ctx, filter, t);
    return {
        node: built.node,
        frequencyParam: built.frequencyParam,
        detuneParam: built.node.detune,
        QParam: built.node.Q,
        gainParam: built.node.gain
    };
}
|
|
839
|
+
/** Normalizes a single filter or an array into a list of built filter results. */
function buildFilters(ctx, filters, t) {
    const list = Array.isArray(filters) ? filters : [filters];
    const results = [];
    for (const f of list) {
        results.push(buildSingleFilter(ctx, f, t));
    }
    return results;
}
|
|
845
|
+
// Builds the per-layer amplitude envelope as a GainNode.
//
// Without an explicit envelope: the gain starts at `gain` and decays toward
// silence with a fixed 0.15 s time constant; the voice is treated as 0.5 s long.
// With an envelope: linear attack to `gain`, exponential-style decay
// (setTargetAtTime) to the sustain level, then release toward silence.
//
// Returns { node, duration } — `duration` is the nominal envelope length used
// by callers to schedule source stop times.
function buildEnvelope(ctx, envelope, gain, t) {
    var _envelope_attack, _envelope_sustain, _envelope_release;
    const node = ctx.createGain();
    if (!envelope) {
        node.gain.setValueAtTime(gain, t);
        node.gain.setTargetAtTime(SILENCE, t, 0.15);
        return {
            node,
            duration: 0.5
        };
    }
    const attack = (_envelope_attack = envelope.attack) != null ? _envelope_attack : 0;
    const decay = envelope.decay;
    const sustain = (_envelope_sustain = envelope.sustain) != null ? _envelope_sustain : 0;
    const release = (_envelope_release = envelope.release) != null ? _envelope_release : 0;
    // Never target exactly 0 — exponential-style automation cannot reach it.
    const sustainLevel = Math.max(sustain * gain, SILENCE);
    // Time constant ≈ decay/3 so the curve settles within roughly `decay` seconds.
    const decayTC = decay / 3;
    node.gain.setValueAtTime(SILENCE, t);
    if (attack > 0) {
        node.gain.linearRampToValueAtTime(gain, t + attack);
    } else {
        node.gain.setValueAtTime(gain, t);
    }
    if (sustain > 0) {
        node.gain.setTargetAtTime(sustainLevel, t + attack, decayTC);
        if (release > 0) {
            const releaseTC = release / 3;
            node.gain.setTargetAtTime(SILENCE, t + attack + decay, releaseTC);
        }
        // NOTE(review): with sustain > 0 and release == 0 the gain holds at the
        // sustain level indefinitely — presumably intended for held notes; confirm.
    } else {
        node.gain.setTargetAtTime(SILENCE, t + attack, decayTC);
    }
    return {
        node,
        duration: attack + decay + release
    };
}
|
|
882
|
+
/**
 * Creates a low-frequency oscillator and routes it (through a depth gain)
 * into one modulatable AudioParam of the voice.
 *
 * If the requested target param is unavailable (e.g. a noise source has no
 * frequency param, or there is no filter/panner), nothing is connected and
 * null is returned.
 *
 * @returns The started LFO oscillator, or null when the target is missing
 */
function buildLFO(ctx, lfo, t, duration, targets) {
    const osc = ctx.createOscillator();
    osc.type = lfo.type;
    osc.frequency.value = lfo.frequency;
    const depthGain = ctx.createGain();
    depthGain.gain.value = lfo.depth;
    osc.connect(depthGain);
    const { source, filters, envNode, panner } = targets;
    const firstFilter = filters[0];
    let target = null;
    switch (lfo.target) {
        case "frequency":
            target = source.frequencyParam != null ? source.frequencyParam : null;
            break;
        case "detune":
            target = source.detuneParam != null ? source.detuneParam : null;
            break;
        case "gain":
            target = envNode.gain;
            break;
        case "pan":
            target = panner != null && panner.pan != null ? panner.pan : null;
            break;
        case "playbackRate":
            target = source.playbackRateParam != null ? source.playbackRateParam : null;
            break;
        case "filter.frequency":
            target = firstFilter != null && firstFilter.frequencyParam != null ? firstFilter.frequencyParam : null;
            break;
        case "filter.detune":
            target = firstFilter != null && firstFilter.detuneParam != null ? firstFilter.detuneParam : null;
            break;
        case "filter.Q":
            target = firstFilter != null && firstFilter.QParam != null ? firstFilter.QParam : null;
            break;
        case "filter.gain":
            target = firstFilter != null && firstFilter.gainParam != null ? firstFilter.gainParam : null;
            break;
    }
    if (!target) {
        return null;
    }
    depthGain.connect(target);
    osc.start(t);
    osc.stop(t + duration + 0.1);
    return osc;
}
|
|
940
|
+
/**
 * Creates a PannerNode for 3D spatialization from a declarative config.
 * Position is required; orientation, distance, and cone settings are optional.
 */
function buildPanner3D(ctx, config) {
    const panner = ctx.createPanner();
    panner.panningModel = config.panningModel != null ? config.panningModel : "HRTF";
    panner.distanceModel = config.distanceModel != null ? config.distanceModel : "inverse";
    panner.positionX.value = config.positionX;
    panner.positionY.value = config.positionY;
    panner.positionZ.value = config.positionZ;
    // Orientation components are AudioParams; copy only when provided.
    for (const axis of ["orientationX", "orientationY", "orientationZ"]) {
        if (config[axis] !== undefined) {
            panner[axis].value = config[axis];
        }
    }
    // The remaining distance/cone settings are plain numeric attributes.
    const numericKeys = [
        "maxDistance",
        "refDistance",
        "rolloffFactor",
        "coneInnerAngle",
        "coneOuterAngle",
        "coneOuterGain"
    ];
    for (const key of numericKeys) {
        if (config[key] !== undefined) {
            panner[key] = config[key];
        }
    }
    return panner;
}
|
|
959
|
+
/**
 * Connects a list of effect descriptions into a serial chain ending at
 * `destination`. With no effects, input === output === destination and
 * dispose is a no-op.
 */
function buildEffectsChain(ctx, effects, destination) {
    if (effects.length === 0) {
        return {
            input: destination,
            output: destination,
            dispose () {}
        };
    }
    const built = effects.map((e) => createEffect(ctx, e));
    // Wire each effect's output into the next effect's input.
    built.forEach((fx, i) => {
        const next = built[i + 1];
        if (next) {
            fx.output.connect(next.input);
        }
    });
    const last = built[built.length - 1];
    last.output.connect(destination);
    return {
        input: built[0].input,
        output: last.output,
        dispose () {
            for (const fx of built) {
                if (fx.dispose) fx.dispose();
            }
        }
    };
}
|
|
980
|
+
/**
 * Renders a {@link SoundDefinition} into the Web Audio graph and starts playback.
 *
 * Builds sources, filters, envelopes, LFOs, panners, and effects for every
 * layer, connects them to `destination`, and returns a {@link VoiceHandle}
 * that can stop the sound mid-flight.
 *
 * @param ctx - The `BaseAudioContext` to build nodes in
 * @param definition - A single-layer or multi-layer sound definition
 * @param opts - Runtime overrides (volume, pan, detune, velocity, etc.)
 * @param baseTime - Scheduled start time in seconds (`ctx.currentTime` if omitted)
 * @param destination - Target node to connect to (`ctx.destination` if omitted)
 * @returns A handle with a `stop()` method for cancelling the voice
 */ function render(ctx, definition, opts, baseTime, destination) {
    var _ref;
    const { layers, effects } = normalize(definition);
    const dest = destination != null ? destination : ctx.destination;
    // Shared (whole-sound) effects chain; every layer ultimately feeds chain.input.
    const chain = buildEffectsChain(ctx, effects != null ? effects : [], dest);
    const t0 = baseTime != null ? baseTime : ctx.currentTime;
    const velocity = (_ref = opts == null ? void 0 : opts.velocity) != null ? _ref : 1;
    const jitter = opts == null ? void 0 : opts.jitter;
    // Per-call random variation: detune is additive; volume and playback rate
    // are multipliers centered on 1.
    const detuneJitter = (jitter == null ? void 0 : jitter.detune) ? (Math.random() * 2 - 1) * jitter.detune : 0;
    const volumeJitter = (jitter == null ? void 0 : jitter.volume) ? 1 + (Math.random() * 2 - 1) * jitter.volume : 1;
    const rateJitter = (jitter == null ? void 0 : jitter.playbackRate) ? 1 + (Math.random() * 2 - 1) * jitter.playbackRate : 1;
    const allDisposers = [
        chain.dispose
    ];
    const allSourceNodes = [];
    const allEnvNodes = [];
    for (const layer of layers){
        var _layer_delay, _layer_gain, _ref1, _ref2;
        const layerStart = t0 + ((_layer_delay = layer.delay) != null ? _layer_delay : 0);
        // Layer gain = layer.gain (default 0.5) × caller volume × velocity × jitter.
        const baseGain = ((_layer_gain = layer.gain) != null ? _layer_gain : 0.5) * ((_ref1 = opts == null ? void 0 : opts.volume) != null ? _ref1 : 1) * velocity * volumeJitter;
        const { node: envNode, duration: envDuration } = buildEnvelope(ctx, layer.envelope, baseGain, layerStart);
        allEnvNodes.push(envNode);
        const sourceResult = buildSource(ctx, layer.source, layerStart, envDuration);
        // Apply caller detune/playbackRate overrides plus jitter where supported.
        if (sourceResult.detuneParam && ((opts == null ? void 0 : opts.detune) || detuneJitter !== 0)) {
            var _ref3;
            sourceResult.detuneParam.value += ((_ref3 = opts == null ? void 0 : opts.detune) != null ? _ref3 : 0) + detuneJitter;
        }
        if (sourceResult.playbackRateParam && ((opts == null ? void 0 : opts.playbackRate) || rateJitter !== 1)) {
            var _ref4;
            sourceResult.playbackRateParam.value *= ((_ref4 = opts == null ? void 0 : opts.playbackRate) != null ? _ref4 : 1) * rateJitter;
        }
        // Signal path so far: source -> [filters...] -> envelope gain.
        let tail = sourceResult.node;
        const filterResults = [];
        if (layer.filter) {
            const builtFilters = buildFilters(ctx, layer.filter, layerStart);
            for (const f of builtFilters){
                tail.connect(f.node);
                tail = f.node;
                filterResults.push(f);
                // Velocity-to-brightness mapping: lower velocity closes the filter.
                if (velocity < 1 && f.frequencyParam) {
                    const baseFreq = f.frequencyParam.value;
                    f.frequencyParam.setValueAtTime(baseFreq * (0.5 + 0.5 * velocity), layerStart);
                }
            }
        }
        tail.connect(envNode);
        let cursor = envNode;
        const layerDisposers = [];
        // Optional per-layer effects, chained in declaration order.
        if (layer.effects && layer.effects.length > 0) {
            const layerFxNodes = layer.effects.map((e)=>createEffect(ctx, e));
            for(let i = 0; i < layerFxNodes.length - 1; i++){
                layerFxNodes[i].output.connect(layerFxNodes[i + 1].input);
            }
            cursor.connect(layerFxNodes[0].input);
            cursor = layerFxNodes[layerFxNodes.length - 1].output;
            for (const n of layerFxNodes){
                if (n.dispose) layerDisposers.push(n.dispose);
            }
        }
        let stereoPanner;
        // opts.pan overrides layer.pan; a 3D panner config takes precedence over both.
        const effectivePan = (_ref2 = opts == null ? void 0 : opts.pan) != null ? _ref2 : layer.pan;
        if (layer.panner) {
            const panner3d = buildPanner3D(ctx, layer.panner);
            cursor.connect(panner3d);
            cursor = panner3d;
        } else if (effectivePan !== undefined && effectivePan !== 0) {
            stereoPanner = ctx.createStereoPanner();
            stereoPanner.pan.value = effectivePan;
            cursor.connect(stereoPanner);
            cursor = stereoPanner;
        }
        cursor.connect(chain.input);
        if (layer.lfo) {
            const lfos = Array.isArray(layer.lfo) ? layer.lfo : [
                layer.lfo
            ];
            for (const l of lfos){
                buildLFO(ctx, l, layerStart, envDuration, {
                    source: sourceResult,
                    filters: filterResults,
                    envNode,
                    panner: stereoPanner
                });
            }
        }
        if (sourceResult.scheduled) {
            allSourceNodes.push(sourceResult.scheduled);
            // Tear down the per-layer graph once the source finishes to avoid leaks.
            const nodesToDisconnect = [
                sourceResult.node,
                envNode,
                ...filterResults.map((f)=>f.node),
                ...stereoPanner ? [
                    stereoPanner
                ] : []
            ];
            sourceResult.scheduled.onended = ()=>{
                for (const n of nodesToDisconnect){
                    try {
                        n.disconnect();
                    } catch (_) {}
                }
                for (const d of layerDisposers)d();
            };
        }
        allDisposers.push(...layerDisposers);
    }
    return {
        // Fades every layer's envelope to silence, then stops all scheduled sources.
        stop (releaseTime) {
            const now = ctx.currentTime;
            const fade = releaseTime != null ? releaseTime : 0.015;
            for (const env of allEnvNodes){
                env.gain.cancelScheduledValues(now);
                env.gain.setValueAtTime(env.gain.value, now);
                env.gain.setTargetAtTime(SILENCE, now, fade / 3);
            }
            for (const src of allSourceNodes){
                try {
                    src.stop(now + fade + 0.05);
                } catch (_) {}
            }
        }
    };
}
|
|
1116
|
+
|
|
1117
|
+
/**
 * Wraps decoded patch data in the public AudioPatch interface.
 *
 * `play(name, opts)` renders the named sound on the shared context/master bus,
 * `get(name)` returns the raw definition, and `toJSON()` deep-clones the
 * underlying patch data.
 */
function createPatchInstance(data) {
    return {
        ready: true,
        name: data.name,
        author: data.author,
        version: data.version,
        description: data.description,
        tags: data.tags,
        sounds: Object.keys(data.sounds),
        play (name, opts) {
            const def = data.sounds[name];
            if (!def) throw new Error(`Sound "${name}" not found in patch "${data.name}"`);
            return render(getContext(), def, opts, undefined, getDestination());
        },
        get (name) {
            return data.sounds[name];
        },
        toJSON () {
            return structuredClone(data);
        }
    };
}
|
|
1141
|
+
/**
 * Loads a sound patch from a URL or an in-memory object.
 *
 * When `source` is a string, it is fetched as JSON and decoded into a
 * {@link SoundPatch}. When it is already a `SoundPatch`, it is used directly.
 *
 * @param source - URL string or `SoundPatch` object
 * @returns A promise that resolves to a ready-to-play {@link AudioPatch}
 * @throws {Error} If the network request fails
 */ async function loadPatch(source) {
    if (typeof source !== "string") {
        return createPatchInstance(source);
    }
    const response = await fetch(source);
    if (!response.ok) {
        throw new Error(`Failed to load patch from ${source}: ${response.status}`);
    }
    const data = await response.json();
    return createPatchInstance(data);
}
|
|
1159
|
+
|
|
1160
|
+
/**
 * A sequence step's `sound` is either a declarative definition or a callback;
 * only functions are treated as callbacks.
 */
function isDefinition(sound) {
    const isCallback = typeof sound === "function";
    return !isCallback;
}
|
|
1163
|
+
/**
 * Resolves each sequence step to an offset (seconds) from the sequence start.
 *
 * `at` sets an absolute offset, `wait` advances relative to the previous step,
 * and a step with neither shares the previous step's time (the first step
 * defaults to 0).
 */
function resolveStepTimes(steps) {
    let offset = 0;
    return steps.map((step, index) => {
        if (step.at !== undefined) {
            offset = step.at;
        } else if (step.wait !== undefined) {
            offset += step.wait;
        } else if (index === 0) {
            offset = 0;
        }
        return offset;
    });
}
|
|
1179
|
+
// Interval (ms) between scheduler timer ticks (used by playSequence's loops).
const LOOKAHEAD_MS = 25;
// Window (seconds) ahead of ctx.currentTime within which steps are committed
// to the audio clock (see scheduleOnce).
const SCHEDULE_AHEAD = 0.1;
|
|
1181
|
+
// Walks the step list once and starts every step whose time falls inside the
// lookahead window (SCHEDULE_AHEAD past ctx.currentTime). Already-started
// indices are tracked in `scheduled`, so repeated calls are idempotent.
// Returns the VoiceHandles created during this pass.
function scheduleOnce(ctx, steps, times, opts, baseTime, scheduled) {
    const handles = [];
    for(let i = 0; i < steps.length; i++){
        var _step_volume;
        if (scheduled.has(i)) continue;
        const stepTime = baseTime + times[i];
        // Not yet inside the scheduling window; retry on a later tick.
        if (stepTime > ctx.currentTime + SCHEDULE_AHEAD) continue;
        scheduled.add(i);
        const step = steps[i];
        // Per-step volume overrides the sequence-wide volume option.
        const volume = (_step_volume = step.volume) != null ? _step_volume : opts == null ? void 0 : opts.volume;
        if (isDefinition(step.sound)) {
            // Declarative sound: scheduled sample-accurately on the audio clock.
            const handle = render(ctx, step.sound, volume !== undefined ? {
                volume
            } : opts, stepTime, getDestination());
            handles.push(handle);
        } else {
            // Callback sound: start time is approximated with setTimeout.
            const fn = step.sound;
            const delay = (stepTime - ctx.currentTime) * 1000;
            if (delay <= 0) {
                const result = fn(volume !== undefined ? {
                    volume
                } : opts);
                if (result) handles.push(result);
            } else {
                // NOTE(review): handles returned by delayed callbacks are not
                // captured here, so a sequence stop() cannot cancel them —
                // confirm whether that is intended.
                setTimeout(()=>fn(volume !== undefined ? {
                    volume
                } : opts), delay);
            }
        }
    }
    return handles;
}
|
|
1213
|
+
/**
 * Schedules and plays a sequence of sounds using a lookahead timer.
 *
 * Steps are positioned in time via `at` (absolute) or `wait` (relative)
 * fields. When `options.loop` is true the sequence repeats indefinitely
 * using `options.duration` as the loop length.
 *
 * @param ctx - The real-time `AudioContext`
 * @param steps - Ordered list of {@link SequenceStep}s
 * @param options - Loop and duration settings
 * @param opts - Runtime overrides applied to every step
 * @returns A stop function that halts playback, or `undefined` if empty
 */ function playSequence(ctx, steps, options, opts) {
    var _options_duration;
    const times = resolveStepTimes(steps);
    if (!(options == null ? void 0 : options.loop)) {
        // One-shot mode: keep ticking with setTimeout until every step has
        // been committed, then stop scheduling.
        const scheduled = new Set();
        const handles = [];
        const tick = ()=>{
            const h = scheduleOnce(ctx, steps, times, opts, ctx.currentTime, scheduled);
            handles.push(...h);
            if (scheduled.size < steps.length) {
                timerId = setTimeout(tick, LOOKAHEAD_MS);
            }
        };
        let timerId = null;
        tick();
        return ()=>{
            if (timerId !== null) clearTimeout(timerId);
            for (const h of handles)h.stop();
        };
    }
    // Loop mode: re-run the schedule every `duration` seconds (default 1 s).
    const duration = (_options_duration = options.duration) != null ? _options_duration : 1;
    let stopped = false;
    let timerId = null;
    let loopBase = ctx.currentTime;
    let scheduled = new Set();
    // NOTE(review): `handles` grows for the lifetime of the loop — potential
    // unbounded growth for long-running loops; confirm acceptable.
    const handles = [];
    const tick = ()=>{
        if (stopped) return;
        const h = scheduleOnce(ctx, steps, times, opts, loopBase, scheduled);
        handles.push(...h);
        if (scheduled.size >= steps.length) {
            // Every step of this iteration is committed; roll over just before
            // the loop boundary so the next iteration schedules seamlessly.
            if (ctx.currentTime >= loopBase + duration - SCHEDULE_AHEAD) {
                loopBase += duration;
                scheduled = new Set();
            }
        }
    };
    timerId = setInterval(tick, LOOKAHEAD_MS);
    tick();
    return ()=>{
        stopped = true;
        if (timerId !== null) clearInterval(timerId);
        for (const h of handles)h.stop();
    };
}
|
|
1270
|
+
|
|
1271
|
+
/**
 * Subscribes `cb` to changes of the `prefers-reduced-motion` media query.
 * @returns An unsubscribe function.
 */
function subscribeToReducedMotion(cb) {
    const query = window.matchMedia("(prefers-reduced-motion: reduce)");
    query.addEventListener("change", cb);
    return function unsubscribe() {
        query.removeEventListener("change", cb);
    };
}
|
|
1276
|
+
/** Client snapshot for useSyncExternalStore: current reduced-motion preference. */
function getReducedMotionSnapshot() {
    const { matches } = window.matchMedia("(prefers-reduced-motion: reduce)");
    return matches;
}
|
|
1279
|
+
/** Server snapshot for useSyncExternalStore: assume motion is allowed during SSR. */
function getReducedMotionServerSnapshot() {
    const SSR_DEFAULT = false;
    return SSR_DEFAULT;
}
|
|
1282
|
+
/**
 * React hook returning whether the user prefers reduced motion.
 * Uses useSyncExternalStore so SSR renders with `false` and the client stays
 * in sync with live media-query changes.
 */ function usePrefersReducedMotion() {
    return useSyncExternalStore(subscribeToReducedMotion, getReducedMotionSnapshot, getReducedMotionServerSnapshot);
}
|
|
1285
|
+
// Fallback sound state used when no <SoundProvider> is mounted.
const DEFAULT_STATE = {
    enabled: true,
    volume: 1
};
// No-op actions for the default context value; real setters are supplied by
// SoundProvider via its onEnabledChange/onVolumeChange props.
const NOOP_ACTIONS = {
    setEnabled () {},
    setVolume () {}
};
// Shared React context carrying { state, actions } for all sound hooks.
const SoundContext = /*#__PURE__*/ createContext({
    state: DEFAULT_STATE,
    actions: NOOP_ACTIONS
});
|
|
1297
|
+
/**
 * Context provider that controls global sound state for all descendant hooks.
 *
 * Wrap your app (or a subtree) with `<SoundProvider>` to enable
 * {@link useSound}, {@link useSequence}, and {@link usePatch} to respect
 * a shared enabled/volume state.
 *
 * @param props.enabled - Whether sounds are allowed to play. @defaultValue `true`
 * @param props.volume - Master volume multiplier (0 – 1). @defaultValue `1`
 * @param props.onEnabledChange - Called when a child requests an enabled change
 * @param props.onVolumeChange - Called when a child requests a volume change
 *
 * @example
 * ```tsx
 * <SoundProvider enabled={soundsOn} volume={0.8}>
 *   <App />
 * </SoundProvider>
 * ```
 */ function SoundProvider({ children, enabled = true, volume = 1, onEnabledChange, onVolumeChange }) {
    const state = useMemo(()=>({
            enabled,
            volume
        }), [
        enabled,
        volume
    ]);
    // Latest change callbacks live in refs so `actions` can stay referentially
    // stable across renders (empty useMemo deps below).
    const onEnabledChangeRef = useRef(onEnabledChange);
    onEnabledChangeRef.current = onEnabledChange;
    const onVolumeChangeRef = useRef(onVolumeChange);
    onVolumeChangeRef.current = onVolumeChange;
    const actions = useMemo(()=>({
            setEnabled: (v)=>onEnabledChangeRef.current == null ? void 0 : onEnabledChangeRef.current.call(onEnabledChangeRef, v),
            setVolume: (v)=>onVolumeChangeRef.current == null ? void 0 : onVolumeChangeRef.current.call(onVolumeChangeRef, v)
        }), []);
    const value = useMemo(()=>({
            state,
            actions
        }), [
        state,
        actions
    ]);
    return /*#__PURE__*/ jsx(SoundContext, {
        value: value,
        children: children
    });
}
|
|
1343
|
+
/**
 * Returns a stable callback that plays the given sound definition.
 *
 * Respects the nearest {@link SoundProvider}'s enabled/volume state and
 * the user's `prefers-reduced-motion` preference. The callback reference
 * never changes between renders (values are read from refs).
 *
 * @param definition - The sound to play
 * @param opts - Default play options (can be overridden at call time)
 * @returns A function that triggers the sound and returns a {@link VoiceHandle}, or `undefined` if muted
 *
 * @example
 * ```tsx
 * const play = useSound({
 *   source: { type: "sine", frequency: 440 },
 *   envelope: { decay: 0.1 },
 * });
 *
 * <button onClick={play}>Beep</button>
 * ```
 */ function useSound(definition, opts) {
    const { state } = use(SoundContext);
    const reducedMotion = usePrefersReducedMotion();
    // Latest values live in refs so the returned callback can stay stable.
    const stateRef = useRef(state);
    stateRef.current = state;
    const reducedMotionRef = useRef(reducedMotion);
    reducedMotionRef.current = reducedMotion;
    const defRef = useRef(definition);
    defRef.current = definition;
    const optsRef = useRef(opts);
    optsRef.current = opts;
    return useCallback(()=>{
        var _ref;
        var _optsRef_current;
        const { enabled, volume } = stateRef.current;
        // Globally muted or the user prefers reduced motion: play nothing.
        if (!enabled || reducedMotionRef.current) return undefined;
        const audio = getContext();
        // Effective volume = per-call volume (default 1) scaled by provider volume.
        const v = ((_ref = (_optsRef_current = optsRef.current) == null ? void 0 : _optsRef_current.volume) != null ? _ref : 1) * volume;
        return render(audio, defRef.current, _extends({}, optsRef.current, {
            volume: v
        }), undefined, getDestination());
    }, []);
}
|
|
1386
|
+
/**
 * Returns stable `play` and `stop` callbacks for a sound sequence.
 *
 * Calling `play()` starts the sequence; calling `stop()` halts it.
 * Both callbacks are referentially stable across renders.
 *
 * @param steps - Ordered list of {@link SequenceStep}s
 * @param options - Loop and duration settings
 * @returns An object with `play` and `stop` functions
 */ function useSequence(steps, options) {
    const { state } = use(SoundContext);
    const reducedMotion = usePrefersReducedMotion();
    // Holds the stop function of the currently-playing sequence, if any.
    const stopRef = useRef(null);
    // Latest props/state live in refs so play/stop can stay stable.
    const stateRef = useRef(state);
    stateRef.current = state;
    const reducedMotionRef = useRef(reducedMotion);
    reducedMotionRef.current = reducedMotion;
    const stepsRef = useRef(steps);
    stepsRef.current = steps;
    const optionsRef = useRef(options);
    optionsRef.current = options;
    const play = useCallback(()=>{
        const { enabled, volume } = stateRef.current;
        if (!enabled || reducedMotionRef.current) return;
        // Restart semantics: stop any sequence already in flight.
        stopRef.current == null ? void 0 : stopRef.current.call(stopRef);
        const audio = getContext();
        const result = playSequence(audio, stepsRef.current, optionsRef.current, {
            volume
        });
        if (typeof result === "function") {
            stopRef.current = result;
        }
    }, []);
    const stop = useCallback(()=>{
        stopRef.current == null ? void 0 : stopRef.current.call(stopRef);
        stopRef.current = null;
    }, []);
    return useMemo(()=>({
            play,
            stop
        }), [
        play,
        stop
    ]);
}
|
|
1431
|
+
/**
 * Creates and returns an {@link AudioAnalyser} connected to the master bus.
 *
 * The analyser is initialized once (lazy state) and automatically disposed
 * when the component unmounts.
 *
 * @param opts - FFT size, smoothing, and dB range overrides
 */ function useAnalyser(opts) {
    // opts is captured once; later changes intentionally do not recreate the analyser.
    const optsRef = useRef(opts);
    const [analyser] = useState(()=>createMasterAnalyser(optsRef.current));
    useEffect(()=>{
        // Disconnect from the master bus on unmount.
        return ()=>analyser.dispose();
    }, [
        analyser
    ]);
    return analyser;
}
|
|
1448
|
+
// Inert placeholder returned by usePatch() while the real patch is still
// loading: every member is a safe no-op so callers never need a null check.
const emptyPatch = {
    ready: false,
    name: "",
    sounds: [],
    // Playing on the placeholder yields a handle whose stop() does nothing.
    play: () => ({
        stop () {}
    }),
    get: () => undefined,
    toJSON: () => ({
        name: "",
        sounds: {}
    })
};
|
|
1467
|
+
/**
 * Loads a sound patch and returns a context-aware {@link AudioPatch}.
 *
 * The returned patch's `play()` method automatically respects the nearest
 * {@link SoundProvider}'s enabled/volume state and reduced-motion preference.
 * While the patch is loading, an empty no-op patch is returned (`ready: false`).
 *
 * @param source - URL string or in-memory {@link SoundPatch} object
 * @returns An `AudioPatch` (initially empty until loaded)
 *
 * @example
 * ```tsx
 * const patch = usePatch("https://example.com/ui.json");
 *
 * <button onClick={() => patch.play("click")}>Click</button>
 * ```
 */ function usePatch(source) {
    const { state } = use(SoundContext);
    const reducedMotion = usePrefersReducedMotion();
    // In-memory patches are usable immediately; URL sources begin as null and
    // are resolved asynchronously by the effect below.
    const [patch, setPatch] = useState(() => (typeof source === "string" ? null : createPatchInstance(source)));
    // Mirror the latest provider state and motion preference into refs so the
    // memoized play() wrapper reads fresh values without invalidating the memo.
    const stateRef = useRef(state);
    stateRef.current = state;
    const reducedMotionRef = useRef(reducedMotion);
    reducedMotionRef.current = reducedMotion;
    useEffect(() => {
        if (typeof source !== "string") return;
        let cancelled = false;
        loadPatch(source)
            .then((loaded) => {
                if (!cancelled) setPatch(loaded);
            })
            .catch(() => {
                // Load failures are deliberately swallowed; the empty patch stays in place.
            });
        return () => {
            cancelled = true;
        };
    }, [source]);
    return useMemo(() => {
        if (!patch) return emptyPatch;
        return _extends({}, patch, {
            play (name, opts) {
                const { enabled, volume } = stateRef.current;
                // Muted or reduced-motion: hand back an inert handle.
                if (!enabled || reducedMotionRef.current) {
                    return { stop () {} };
                }
                // Scale the per-call volume by the provider's master volume.
                const effectiveVolume = (opts?.volume ?? 1) * volume;
                return patch.play(name, _extends({}, opts, { volume: effectiveVolume }));
            }
        });
    }, [patch]);
}
|
|
1522
|
+
/**
 * Synchronizes the 3D audio listener with the given position and orientation.
 *
 * The effect's dependencies are the nine primitive coordinate values, so it
 * only re-runs when one of them actually changes — a fresh `listener` object
 * reference carrying identical values triggers nothing.
 *
 * @param listener - Listener position and orientation
 */ function useListener(listener) {
    const {
        positionX, positionY, positionZ,
        forwardX, forwardY, forwardZ,
        upX, upY, upZ
    } = listener;
    useEffect(() => {
        setListener({ positionX, positionY, positionZ, forwardX, forwardY, forwardZ, upX, upY, upZ });
    }, [positionX, positionY, positionZ, forwardX, forwardY, forwardZ, upX, upY, upZ]);
}
|
|
1555
|
+
|
|
1556
|
+
export { SoundProvider, useAnalyser, useListener, usePatch, useSequence, useSound };
|