@marmooo/midy 0.4.9 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -1
- package/esm/midy-GM1.d.ts +63 -9
- package/esm/midy-GM1.d.ts.map +1 -1
- package/esm/midy-GM1.js +1094 -94
- package/esm/midy-GM2.d.ts +74 -24
- package/esm/midy-GM2.d.ts.map +1 -1
- package/esm/midy-GM2.js +1298 -234
- package/esm/midy-GMLite.d.ts +63 -8
- package/esm/midy-GMLite.d.ts.map +1 -1
- package/esm/midy-GMLite.js +1099 -92
- package/esm/midy.d.ts +49 -30
- package/esm/midy.d.ts.map +1 -1
- package/esm/midy.js +1310 -248
- package/esm/reverb.d.ts +58 -0
- package/esm/reverb.d.ts.map +1 -0
- package/esm/reverb.js +389 -0
- package/package.json +1 -1
- package/script/midy-GM1.d.ts +63 -9
- package/script/midy-GM1.d.ts.map +1 -1
- package/script/midy-GM1.js +1094 -94
- package/script/midy-GM2.d.ts +74 -24
- package/script/midy-GM2.d.ts.map +1 -1
- package/script/midy-GM2.js +1298 -234
- package/script/midy-GMLite.d.ts +63 -8
- package/script/midy-GMLite.d.ts.map +1 -1
- package/script/midy-GMLite.js +1099 -92
- package/script/midy.d.ts +49 -30
- package/script/midy.d.ts.map +1 -1
- package/script/midy.js +1310 -248
- package/script/reverb.d.ts +58 -0
- package/script/reverb.d.ts.map +1 -0
- package/script/reverb.js +405 -0
package/esm/reverb.d.ts
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
// Declarations for the reverb builders in reverb.js.
// Most builders return an { input, output } pair of AudioNodes for graph wiring.

// Generates a stereo white-noise impulse response (an AudioBuffer).
export function createConvolutionReverbImpulse(audioContext: any, decay: any, preDecay: any): any;
// Wraps a ConvolverNode around the given impulse buffer.
export function createConvolutionReverb(audioContext: any, impulse: any): {
    input: any;
    output: any;
};
// Feedback comb filter; returns the recirculating DelayNode.
export function createCombFilter(audioContext: any, input: any, delay: any, feedback: any): any;
// Schroeder allpass approximation; returns the output GainNode.
export function createAllpassFilter(audioContext: any, input: any, delay: any, feedback: any): any;
// Comb filter with a one-pole lowpass in the feedback loop (air absorption).
export function createLPFCombFilter(audioContext: any, input: any, delayTime: any, feedback: any, damping: any): any;
// Schroeder (1962): parallel combs summed, then series allpass diffusers.
export function createSchroederReverb(audioContext: any, combFeedbacks: any, combDelays: any, allpassFeedbacks: any, allpassDelays: any): {
    input: any;
    output: any;
};
// Moorer (1979): early-reflection FIR taps plus LPF-comb late reverb.
export function createMoorerReverb(audioContext: any, earlyTaps: any, earlyGains: any, combDelays: any, combFeedbacks: any, damping: any, allpassDelays: any, allpassFeedbacks: any): {
    input: any;
    output: any;
};
// Moorer with built-in default tap/comb/allpass parameters.
export function createMoorerReverbDefault(audioContext: any, { rt60, damping, }?: {
    rt60?: number | undefined;
    damping?: number | undefined;
}): {
    input: any;
    output: any;
};
// Feedback Delay Network: four delay lines mixed by a Hadamard matrix.
export function createFDN(audioContext: any, delayTimes: any, gains: any, damping?: number, modulation?: number): {
    input: any;
    output: any;
};
// FDN with built-in default delay lengths and RT60-derived gains.
export function createFDNDefault(audioContext: any, { rt60, damping, modulation }?: {
    rt60?: number | undefined;
    damping?: number | undefined;
    modulation?: number | undefined;
}): {
    input: any;
    output: any;
};
// Dattorro (1997): pre-diffusion stage feeding two cross-coupled tank loops.
export function createDattorroReverb(audioContext: any, { decay, damping, bandwidth, }?: {
    decay?: number | undefined;
    damping?: number | undefined;
    bandwidth?: number | undefined;
}): {
    input: any;
    output: any;
};
// Freeverb (1999): stereo pair of LPF-comb/allpass channels.
export function createFreeverb(audioContext: any, { roomSize, damping }?: {
    roomSize?: number | undefined;
    damping?: number | undefined;
}): {
    inputL: any;
    inputR: any;
    outputL: any;
    outputR: any;
};
// Sparse ±1 pulse impulse response (an AudioBuffer).
export function createVelvetNoiseImpulse(audioContext: any, decay: any, density?: number): any;
// Velvet-noise impulse fed into a convolution reverb.
export function createVelvetNoiseReverb(audioContext: any, decay: any, density: any): {
    input: any;
    output: any;
};
//# sourceMappingURL=reverb.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"reverb.d.ts","sourceRoot":"","sources":["../src/reverb.js"],"names":[],"mappings":"AAWA,kGAiBC;AAED;;;EAGC;AAED,gGAUC;AAMD,mGAYC;AAKD,qHAuBC;AAOD;;;EA8BC;AAWD;;;EAmDC;AAGD;;;;;;EA0BC;AAcD;;;EA+EC;AAGD;;;;;;;EAWC;AAcD;;;;;;;EAoFC;AAoBD;;;;;;;;EA2CC;AAOD,+FAkBC;AAED;;;EAGC"}
|
package/esm/reverb.js
ADDED
|
@@ -0,0 +1,389 @@
|
|
|
1
|
+
// Reverb algorithms for Web Audio API
|
|
2
|
+
// - Convolution Reverb
|
|
3
|
+
// - Schroeder (1962)
|
|
4
|
+
// - Moorer (1979)
|
|
5
|
+
// - FDN (1992)
|
|
6
|
+
// - Dattorro (1997)
|
|
7
|
+
// - Freeverb (1999)
|
|
8
|
+
// - Velvet Noise Reverb (2012)
|
|
9
|
+
// Convolution Reverb
|
|
10
|
+
// Generates a stereo white-noise impulse response for convolution reverb.
// decay: total tail length in seconds; preDecay: initial unattenuated
// portion in seconds before the exponential fade begins.
export function createConvolutionReverbImpulse(audioContext, decay, preDecay) {
  const sampleRate = audioContext.sampleRate;
  // Round up so a fractional (sampleRate * decay) still yields a valid
  // integer AudioBuffer frame count (matches createVelvetNoiseImpulse).
  const length = Math.ceil(sampleRate * decay);
  const impulse = new AudioBuffer({ numberOfChannels: 2, length, sampleRate });
  const preDecayLength = Math.min(Math.ceil(sampleRate * preDecay), length);
  for (let channel = 0; channel < impulse.numberOfChannels; channel++) {
    const channelData = impulse.getChannelData(channel);
    // Full-level noise during the pre-decay window.
    for (let i = 0; i < preDecayLength; i++) {
      channelData[i] = Math.random() * 2 - 1;
    }
    // Exponentially decaying noise tail after the pre-decay window.
    const attenuationFactor = 1 / (sampleRate * decay);
    for (let i = preDecayLength; i < length; i++) {
      const attenuation = Math.exp(-(i - preDecayLength) * attenuationFactor);
      channelData[i] = (Math.random() * 2 - 1) * attenuation;
    }
  }
  return impulse;
}
|
|
28
|
+
// Wraps a ConvolverNode so it matches the { input, output } shape returned
// by the other reverb builders in this module.
export function createConvolutionReverb(audioContext, impulse) {
  const convolver = new ConvolverNode(audioContext, { buffer: impulse });
  return { input: convolver, output: convolver };
}
|
|
32
|
+
// Feedback comb filter: input -> delay -> (scaled feedback back into delay).
// Returns the delay node, which carries the recirculating signal.
export function createCombFilter(audioContext, input, delay, feedback) {
  const line = new DelayNode(audioContext, {
    maxDelayTime: delay,
    delayTime: delay,
  });
  const loopGain = new GainNode(audioContext, { gain: feedback });
  input.connect(line);
  line.connect(loopGain).connect(line);
  return line;
}
|
|
43
|
+
// Schroeder allpass approximation for Web Audio API.
// The exact transfer function H(z) = (-g + z^-d) / (1 - g·z^-d) requires a
// feedforward path that would create a zero-delay loop in the Web Audio
// graph, which is unstable. This version drops the feedforward term and
// returns only the gain-compensated delay output.
export function createAllpassFilter(audioContext, input, delay, feedback) {
  const line = new DelayNode(audioContext, {
    maxDelayTime: delay,
    delayTime: delay,
  });
  const loopGain = new GainNode(audioContext, { gain: feedback });
  const outGain = new GainNode(audioContext, { gain: 1 - feedback });
  input.connect(line);
  line.connect(loopGain).connect(line);
  line.connect(outGain);
  return outGain;
}
|
|
60
|
+
// LPF comb filter (Freeverb / Moorer):
// feedback loop contains a one-pole lowpass to simulate air absorption.
// damping=0: bright tail, damping near 1: dark tail
export function createLPFCombFilter(audioContext, input, delayTime, feedback, damping) {
  const delayNode = new DelayNode(audioContext, {
    maxDelayTime: delayTime,
    delayTime,
  });
  const feedbackGain = new GainNode(audioContext, { gain: feedback });
  // Clamp just below 1: IIRFilterNode throws NotSupportedError on an
  // all-zero feedforward array, which damping=1 would produce ([1 - 1]).
  const damp = Math.max(0, Math.min(0.9999, damping));
  // One-pole lowpass: y[n] = (1-d)*x[n] + d*y[n-1]
  const lpf = new IIRFilterNode(audioContext, {
    feedforward: [1 - damp],
    feedback: [1, -damp],
  });
  input.connect(delayNode);
  delayNode.connect(lpf);
  lpf.connect(feedbackGain);
  feedbackGain.connect(delayNode);
  return delayNode;
}
|
|
81
|
+
// Schroeder Reverb (1962)
// https://hajim.rochester.edu/ece/sites/zduan/teaching/ece472/reading/Schroeder_1962.pdf
// M.R.Schroeder, "Natural Sounding Artificial Reverberation",
// J. Audio Eng. Soc., vol.10, p.219, 1962
//
// Parallel comb filters summed, then series allpass diffusers.
export function createSchroederReverb(audioContext, combFeedbacks, combDelays, allpassFeedbacks, allpassDelays) {
  const input = new GainNode(audioContext);
  const mergerGain = new GainNode(audioContext);
  for (let i = 0; i < combDelays.length; i++) {
    const comb = createCombFilter(audioContext, input, combDelays[i], combFeedbacks[i]);
    comb.connect(mergerGain);
  }
  const allpasses = [];
  for (let i = 0; i < allpassDelays.length; i++) {
    const src = i === 0 ? mergerGain : allpasses.at(-1);
    const allpass = createAllpassFilter(audioContext, src, allpassDelays[i], allpassFeedbacks[i]);
    allpasses.push(allpass);
  }
  // Fall back to the comb sum when no allpass stages were requested so the
  // returned output is never undefined.
  return { input, output: allpasses.at(-1) ?? mergerGain };
}
|
|
100
|
+
// Moorer Reverb (1979)
// http://articles.ircam.fr/textes/Moorer78b/
// J.A.Moorer, "About this Reverberation Business",
// Computer Music Journal, vol.3, no.2, 1979
//
// Adds two things over Schroeder:
// 1. Early reflections as a tapped delay line (FIR)
// 2. LPF-comb filters instead of plain combs
export function createMoorerReverb(audioContext, earlyTaps, earlyGains, combDelays, combFeedbacks, damping, allpassDelays, allpassFeedbacks) {
  const input = new GainNode(audioContext);
  // Early reflections: tapped delay line (FIR)
  const earlySum = new GainNode(audioContext);
  for (let i = 0; i < earlyTaps.length; i++) {
    const tapDelay = new DelayNode(audioContext, {
      maxDelayTime: earlyTaps[i],
      delayTime: earlyTaps[i],
    });
    const tapGain = new GainNode(audioContext, { gain: earlyGains[i] });
    input.connect(tapDelay);
    tapDelay.connect(tapGain);
    tapGain.connect(earlySum);
  }
  // Late reverberation: LPF-comb filters
  const lateSum = new GainNode(audioContext);
  for (let i = 0; i < combDelays.length; i++) {
    const comb = createLPFCombFilter(audioContext, earlySum, combDelays[i], combFeedbacks[i], damping);
    comb.connect(lateSum);
  }
  // Allpass diffusers
  const allpasses = [];
  for (let i = 0; i < allpassDelays.length; i++) {
    const src = i === 0 ? lateSum : allpasses.at(-1);
    const allpass = createAllpassFilter(audioContext, src, allpassDelays[i], allpassFeedbacks[i]);
    allpasses.push(allpass);
  }
  // Mix early + late to output. When no allpass stages are configured,
  // fall back to the comb sum so connect() is never called on undefined.
  const output = new GainNode(audioContext);
  earlySum.connect(output);
  (allpasses.at(-1) ?? lateSum).connect(output);
  return { input, output };
}
|
|
140
|
+
// Sensible defaults for Moorer at 44100 Hz.
export function createMoorerReverbDefault(audioContext, { rt60 = 2.0, damping = 0.3, } = {}) {
  const sr = audioContext.sampleRate;
  // Early reflection tap times (seconds) and gains chosen to sound natural.
  const earlyTaps = [0.0043, 0.0215, 0.0225, 0.0268, 0.0270, 0.0298, 0.0458];
  const earlyGains = [0.841, 0.504, 0.491, 0.379, 0.380, 0.346, 0.289];
  // RT60 -> comb feedback: g = 10^(-3 * delay / rt60)
  const combSamples = [1309, 1635, 1811, 1926, 2053, 2667];
  const combDelays = combSamples.map((samples) => samples / sr);
  const combFeedbacks = combDelays.map((d) => 10 ** (-3 * d / rt60));
  const allpassDelays = [0.005, 0.0017];
  const allpassFeedbacks = [0.7, 0.7];
  return createMoorerReverb(
    audioContext,
    earlyTaps,
    earlyGains,
    combDelays,
    combFeedbacks,
    damping,
    allpassDelays,
    allpassFeedbacks,
  );
}
|
|
154
|
+
// FDN - Feedback Delay Network (1992)
// https://ccrma.stanford.edu/~jos/Reverb/Reverb.pdf
// J.-M.Jot, A.Chaigne, "Digital Delay Networks for Designing
// Artificial Reverberators", AES 90th Convention, 1991
//
// N delay lines connected via a unitary feedback matrix.
// Using the normalized 4×4 Hadamard matrix for energy preservation.
//
// modulation: each delay line is driven by a low-frequency oscillator at a
// slightly different frequency and phase, breaking up the periodic ringing
// that fixed delay lengths produce. Set to 0 to disable.
export function createFDN(audioContext, delayTimes, gains, damping = 0.2, modulation = 0.0005) {
  const N = delayTimes.length;
  // Normalized 4×4 Hadamard matrix (only N=4 supported here)
  // H4 = (1/2) * [[1,1,1,1],[1,-1,1,-1],[1,1,-1,-1],[1,-1,-1,1]]
  // Any unitary matrix works; Hadamard is convenient and mixes all lines equally.
  if (N !== 4) {
    throw new Error("createFDN: only N=4 is supported (4x4 Hadamard)");
  }
  const H = [
    [0.5, 0.5, 0.5, 0.5],
    [0.5, -0.5, 0.5, -0.5],
    [0.5, 0.5, -0.5, -0.5],
    [0.5, -0.5, -0.5, 0.5],
  ];
  const input = new GainNode(audioContext);
  const output = new GainNode(audioContext);
  // Create delay lines with headroom for modulation depth
  const delays = delayTimes.map((t) => new DelayNode(audioContext, {
    maxDelayTime: t + modulation,
    delayTime: t,
  }));
  // Per-line LPF for damping (air absorption).
  // The clamp is hoisted out of the map (identical for every line) and
  // capped just below 1: IIRFilterNode throws NotSupportedError on an
  // all-zero feedforward array, which damping=1 would produce ([1 - 1]).
  const damp = Math.max(0, Math.min(0.9999, damping));
  const lpfs = delays.map(() => new IIRFilterNode(audioContext, {
    feedforward: [1 - damp],
    feedback: [1, -damp],
  }));
  // Per-line attenuation gains (RT60 control)
  const attenuations = gains.map((g) => new GainNode(audioContext, { gain: g }));
  // Delay modulation: slightly different LFO per line to avoid coherent artifacts
  if (modulation > 0) {
    delays.forEach((delayNode, i) => {
      const osc = new OscillatorNode(audioContext, {
        frequency: 0.3 + i * 0.07, // 0.30, 0.37, 0.44, 0.51 Hz
      });
      const oscGain = new GainNode(audioContext, { gain: modulation });
      osc.connect(oscGain);
      oscGain.connect(delayNode.delayTime);
      osc.start();
    });
  }
  // Input injection: feed input into all delay lines equally
  const inputScale = new GainNode(audioContext, { gain: 1 / N });
  input.connect(inputScale);
  delays.forEach((d) => inputScale.connect(d));
  // Feedback matrix: for each output delay line j,
  // sum over all input lines i: H[j][i] * attenuation[i] * lpf[i]
  // Signal flow per line i:
  //   delay[i] -> lpf[i] -> attenuation[i] -> (distributed via H to all delay[j] inputs)
  // We implement H as N×N individual GainNodes (N^2 = 16 for N=4).
  for (let i = 0; i < N; i++) {
    delays[i].connect(lpfs[i]);
    lpfs[i].connect(attenuations[i]);
  }
  for (let j = 0; j < N; j++) {
    for (let i = 0; i < N; i++) {
      if (H[j][i] === 0) continue;
      const matrixGain = new GainNode(audioContext, { gain: H[j][i] });
      attenuations[i].connect(matrixGain);
      matrixGain.connect(delays[j]);
    }
    delays[j].connect(output);
  }
  return { input, output };
}
|
|
233
|
+
// Sensible defaults for FDN.
export function createFDNDefault(audioContext, { rt60 = 2.0, damping = 0.2, modulation = 0.0005 } = {}) {
  const sr = audioContext.sampleRate;
  // Mutually prime delay lengths (samples) avoid periodicity artifacts.
  const delayTimes = [1049, 1327, 1601, 1873].map((samples) => samples / sr);
  // Attenuation from RT60: g = 10^(-3 * delayTime / rt60)
  const gains = delayTimes.map((t) => 10 ** (-3 * t / rt60));
  return createFDN(audioContext, delayTimes, gains, damping, modulation);
}
|
|
243
|
+
// Dattorro Reverb (1997)
// https://ccrma.stanford.edu/~dattorro/EffectDesignPart1.pdf
// J.Dattorro, "Effect Design Part 1: Reverberator and Other Filters",
// J. Audio Eng. Soc., vol.45, no.9, 1997
//
// Figure-of-eight allpass loop with pre-diffusion stage.
// Topology:
//   input -> pre-LPF -> 4×allpass (pre-diffusion)
//   -> split into two "tank" loops (left / right)
//   each loop: allpass -> delay1 -> LPF -> delay2 -> decayGain -> cross-feed
//   output tapped at multiple points from both loops
export function createDattorroReverb(audioContext, { decay = 0.7, damping = 0.0005, bandwidth = 0.9995, } = {}) {
  const sr = audioContext.sampleRate;
  // Pre-filter (bandwidth)
  // One-pole LPF on input: y[n] = (1-bw)*x[n] + bw*y[n-1]
  // Clamp just below 1: IIRFilterNode throws NotSupportedError on an
  // all-zero feedforward array, which bandwidth=1 would produce ([1 - 1]).
  const bw = Math.max(0, Math.min(0.9999, bandwidth));
  const preLPF = new IIRFilterNode(audioContext, {
    feedforward: [1 - bw],
    feedback: [1, -bw],
  });
  // Pre-diffusion: 4 allpass filters in series
  // Delay lengths from Dattorro Table 1 (normalized to 29761 Hz, rescaled)
  const scale = sr / 29761;
  const preDiffSamples = [142, 107, 379, 277];
  const preDiffFeedbacks = [0.75, 0.75, 0.625, 0.625];
  const input = new GainNode(audioContext);
  input.connect(preLPF);
  const preDiffs = [];
  for (let i = 0; i < preDiffSamples.length; i++) {
    const src = i === 0 ? preLPF : preDiffs.at(-1);
    const allpass = createAllpassFilter(audioContext, src, (preDiffSamples[i] * scale) / sr, preDiffFeedbacks[i]);
    preDiffs.push(allpass);
  }
  const preDiffOut = preDiffs.at(-1);
  // Tank: two cross-coupled loops
  // Each tank: allpass -> delay1 -> LPF -> delay2 -> decayGain -> cross-feed
  // Sample counts from Dattorro Table 1
  const tankAllpassSamples = [672, 908];
  const tankAllpassFeedbacks = [0.5, 0.5];
  const tankDelay1Samples = [4453, 4217];
  const tankDelay2Samples = [3720, 3163];
  // Same clamp rationale as `bw` above: keep the feedforward array nonzero.
  const damp = Math.max(0, Math.min(0.9999, damping));
  const loopInput = [new GainNode(audioContext), new GainNode(audioContext)];
  preDiffOut.connect(loopInput[0]);
  preDiffOut.connect(loopInput[1]);
  const loopOutput = [];
  for (let t = 0; t < 2; t++) {
    // allpass -> delay1 -> LPF -> delay2 -> decayGain
    const allpass1 = createAllpassFilter(audioContext, loopInput[t], (tankAllpassSamples[t] * scale) / sr, tankAllpassFeedbacks[t]);
    const delay1 = new DelayNode(audioContext, {
      maxDelayTime: (tankDelay1Samples[t] * scale) / sr,
      delayTime: (tankDelay1Samples[t] * scale) / sr,
    });
    const tankLPF = new IIRFilterNode(audioContext, {
      feedforward: [1 - damp],
      feedback: [1, -damp],
    });
    const delay2 = new DelayNode(audioContext, {
      maxDelayTime: (tankDelay2Samples[t] * scale) / sr,
      delayTime: (tankDelay2Samples[t] * scale) / sr,
    });
    const decayGain = new GainNode(audioContext, { gain: decay });
    allpass1.connect(delay1);
    delay1.connect(tankLPF);
    tankLPF.connect(delay2);
    delay2.connect(decayGain);
    loopOutput.push(decayGain);
  }
  // Cross-feed: decayGain of each tank feeds the other tank's loopInput
  loopOutput[0].connect(loopInput[1]);
  loopOutput[1].connect(loopInput[0]);
  // Output: mix both tanks
  const output = new GainNode(audioContext, { gain: 0.5 });
  loopOutput[0].connect(output);
  loopOutput[1].connect(output);
  return { input, output };
}
|
|
320
|
+
// Freeverb (1999)
// https://github.com/sinshu/freeverb
// Jezar at Dreampoint, 1999
// Comb delay lengths in samples for the left channel; the right channel
// adds FREEVERB_STEREO_SPREAD samples to each (see createFreeverb).
const FREEVERB_COMB_SAMPLES_L = [
  1116,
  1188,
  1277,
  1356,
  1422,
  1491,
  1557,
  1617,
];
// Per-comb sample offset applied to the right channel for stereo width.
const FREEVERB_STEREO_SPREAD = 23; // samples
// Series allpass diffuser delay lengths (samples) and their shared feedback.
const FREEVERB_ALLPASS_SAMPLES = [225, 341, 441, 556];
const FREEVERB_ALLPASS_FEEDBACK = 0.5;
|
|
336
|
+
// Freeverb: two independent mono channels (left/right), each built as
// 8 parallel LPF-comb filters followed by 4 series allpass diffusers.
export function createFreeverb(audioContext, { roomSize = 0.84, damping = 0.2 } = {}) {
  const sr = audioContext.sampleRate;
  const feedback = roomSize * 0.28 + 0.7; // maps [0,1] -> [0.7, 0.98]
  const buildChannel = (sampleOffset) => {
    const inputGain = new GainNode(audioContext);
    const sumGain = new GainNode(audioContext);
    FREEVERB_COMB_SAMPLES_L.forEach((samples) => {
      const delayTime = (samples + sampleOffset) / sr;
      createLPFCombFilter(audioContext, inputGain, delayTime, feedback, damping)
        .connect(sumGain);
    });
    let stage = sumGain;
    for (const samples of FREEVERB_ALLPASS_SAMPLES) {
      stage = createAllpassFilter(audioContext, stage, samples / sr, FREEVERB_ALLPASS_FEEDBACK);
    }
    return { input: inputGain, output: stage };
  };
  const left = buildChannel(0);
  const right = buildChannel(FREEVERB_STEREO_SPREAD);
  return {
    inputL: left.input,
    inputR: right.input,
    outputL: left.output,
    outputR: right.output,
  };
}
|
|
364
|
+
// Velvet Noise Reverb (2012)
// https://aaltodoc.aalto.fi/server/api/core/bitstreams/97ed04a8-cb88-461f-b1a3-e72da5129256/content
// V.Välimäki et al., "Fifty Years of Artificial Reverberation",
// IEEE Trans. Audio Speech Lang. Process., vol.20, no.5, 2012
export function createVelvetNoiseImpulse(audioContext, decay, density = 2000) {
  const sampleRate = audioContext.sampleRate;
  const length = Math.ceil(sampleRate * decay);
  const impulse = new AudioBuffer({ numberOfChannels: 2, length, sampleRate });
  const interval = Math.max(1, Math.round(sampleRate / density));
  const envelopeRate = sampleRate * decay * 0.3;
  for (let channel = 0; channel < 2; channel++) {
    const data = impulse.getChannelData(channel);
    for (let frame = 0; frame < length; frame += interval) {
      // Velvet noise: a single ±1 pulse at a random position inside each
      // interval, shaped by an exponential decay envelope.
      const idx = frame + Math.floor(Math.random() * interval);
      if (length <= idx) continue;
      const env = Math.exp(-idx / envelopeRate);
      data[idx] = (Math.random() > 0.5 ? 1 : -1) * env;
    }
  }
  return impulse;
}
|
|
386
|
+
// Convenience wrapper: velvet-noise impulse fed into a convolution reverb.
export function createVelvetNoiseReverb(audioContext, decay, density) {
  return createConvolutionReverb(
    audioContext,
    createVelvetNoiseImpulse(audioContext, decay, density),
  );
}
|
package/package.json
CHANGED
package/script/midy-GM1.d.ts
CHANGED
|
@@ -11,7 +11,7 @@ export class MidyGM1 extends EventTarget {
|
|
|
11
11
|
fineTuning: number;
|
|
12
12
|
coarseTuning: number;
|
|
13
13
|
};
|
|
14
|
-
constructor(audioContext: any);
|
|
14
|
+
constructor(audioContext: any, options?: {});
|
|
15
15
|
perceptualSmoothingTime: number;
|
|
16
16
|
mode: string;
|
|
17
17
|
numChannels: number;
|
|
@@ -28,7 +28,6 @@ export class MidyGM1 extends EventTarget {
|
|
|
28
28
|
voiceCache: Map<any, any>;
|
|
29
29
|
realtimeVoiceCache: Map<any, any>;
|
|
30
30
|
decodeMethod: string;
|
|
31
|
-
decoderQueue: Promise<void>;
|
|
32
31
|
isPlaying: boolean;
|
|
33
32
|
isPausing: boolean;
|
|
34
33
|
isPaused: boolean;
|
|
@@ -42,7 +41,15 @@ export class MidyGM1 extends EventTarget {
|
|
|
42
41
|
notePromises: any[];
|
|
43
42
|
instruments: Set<any>;
|
|
44
43
|
exclusiveClassNotes: any[];
|
|
44
|
+
adsrVoiceCache: Map<any, any>;
|
|
45
|
+
noteOnDurations: Map<any, any>;
|
|
46
|
+
noteOnEvents: Map<any, any>;
|
|
47
|
+
fullVoiceCache: Map<any, any>;
|
|
48
|
+
renderedAudioBuffer: null;
|
|
49
|
+
isRendering: boolean;
|
|
50
|
+
audioModeBufferSource: null;
|
|
45
51
|
audioContext: any;
|
|
52
|
+
cacheMode: any;
|
|
46
53
|
masterVolume: any;
|
|
47
54
|
scheduler: any;
|
|
48
55
|
schedulerBuffer: any;
|
|
@@ -54,7 +61,7 @@ export class MidyGM1 extends EventTarget {
|
|
|
54
61
|
modLfoToVolume: (channel: any, note: any, scheduleTime: any) => void;
|
|
55
62
|
chorusEffectsSend: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
56
63
|
reverbEffectsSend: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
57
|
-
delayModLFO: (
|
|
64
|
+
delayModLFO: (channel: any, note: any, scheduleTime: any) => void;
|
|
58
65
|
freqModLFO: (_channel: any, note: any, scheduleTime: any) => void;
|
|
59
66
|
delayVibLFO: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
60
67
|
freqVibLFO: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
@@ -66,8 +73,9 @@ export class MidyGM1 extends EventTarget {
|
|
|
66
73
|
toUint8Array(input: any): Promise<Uint8Array<ArrayBuffer>>;
|
|
67
74
|
loadSoundFont(input: any): Promise<void>;
|
|
68
75
|
loadMIDI(input: any): Promise<void>;
|
|
76
|
+
buildNoteOnDurations(): void;
|
|
69
77
|
cacheVoiceIds(): void;
|
|
70
|
-
getVoiceId(channel: any, noteNumber: any, velocity: any):
|
|
78
|
+
getVoiceId(channel: any, noteNumber: any, velocity: any): number | undefined;
|
|
71
79
|
createChannelAudioNodes(audioContext: any): {
|
|
72
80
|
gainL: any;
|
|
73
81
|
gainR: any;
|
|
@@ -76,11 +84,12 @@ export class MidyGM1 extends EventTarget {
|
|
|
76
84
|
createChannels(audioContext: any): Channel[];
|
|
77
85
|
decodeOggVorbis(sample: any): Promise<any>;
|
|
78
86
|
createAudioBuffer(voiceParams: any): Promise<any>;
|
|
79
|
-
createBufferSource(voiceParams: any,
|
|
87
|
+
createBufferSource(voiceParams: any, renderedOrRaw: any): any;
|
|
80
88
|
scheduleTimelineEvents(scheduleTime: any, queueIndex: any): any;
|
|
81
89
|
getQueueIndex(second: any): number;
|
|
82
90
|
resetAllStates(): void;
|
|
83
91
|
updateStates(queueIndex: any, nextQueueIndex: any): void;
|
|
92
|
+
playAudioBuffer(): Promise<void>;
|
|
84
93
|
playNotes(): Promise<void>;
|
|
85
94
|
ticksToSecond(ticks: any, secondsPerBeat: any): number;
|
|
86
95
|
secondToTicks(second: any, secondsPerBeat: any): number;
|
|
@@ -92,6 +101,7 @@ export class MidyGM1 extends EventTarget {
|
|
|
92
101
|
stopActiveNotes(channelNumber: any, velocity: any, force: any, scheduleTime: any): Promise<any[]>;
|
|
93
102
|
stopChannelNotes(channelNumber: any, velocity: any, force: any, scheduleTime: any): Promise<any[]>;
|
|
94
103
|
stopNotes(velocity: any, force: any, scheduleTime: any): Promise<any[]>;
|
|
104
|
+
render(): Promise<null | undefined>;
|
|
95
105
|
start(): Promise<void>;
|
|
96
106
|
stop(): Promise<void>;
|
|
97
107
|
pause(): Promise<void>;
|
|
@@ -114,12 +124,21 @@ export class MidyGM1 extends EventTarget {
|
|
|
114
124
|
clampCutoffFrequency(frequency: any): number;
|
|
115
125
|
setFilterEnvelope(note: any, scheduleTime: any): void;
|
|
116
126
|
startModulation(channel: any, note: any, scheduleTime: any): void;
|
|
117
|
-
|
|
127
|
+
createAdsRenderedBuffer(note: any, voiceParams: any, audioBuffer: any, isDrum?: boolean): Promise<RenderedBuffer>;
|
|
128
|
+
createAdsrRenderedBuffer(note: any, voiceParams: any, audioBuffer: any, noteDuration: any): Promise<RenderedBuffer>;
|
|
129
|
+
createFullRenderedBuffer(channel: any, note: any, voiceParams: any, noteDuration: any, noteEvent?: {}): Promise<RenderedBuffer>;
|
|
130
|
+
getAudioBuffer(channel: any, note: any, realtime: any): Promise<any>;
|
|
131
|
+
getAdsCachedBuffer(channel: any, note: any, audioBufferId: any, realtime: any): Promise<any>;
|
|
132
|
+
getAdsrCachedBuffer(note: any, audioBufferId: any): Promise<any>;
|
|
133
|
+
getFullCachedBuffer(channel: any, note: any, audioBufferId: any): Promise<any>;
|
|
118
134
|
setNoteAudioNode(channel: any, note: any, realtime: any): Promise<any>;
|
|
119
135
|
handleExclusiveClass(note: any, channelNumber: any, startTime: any): void;
|
|
120
136
|
setNoteRouting(channelNumber: any, note: any, startTime: any): void;
|
|
121
|
-
noteOn(channelNumber: any, noteNumber: any, velocity: any, startTime: any): Promise<
|
|
137
|
+
noteOn(channelNumber: any, noteNumber: any, velocity: any, startTime: any): Promise<any>;
|
|
138
|
+
createNote(channelNumber: any, noteNumber: any, velocity: any, startTime: any): Note;
|
|
139
|
+
setupNote(channelNumber: any, note: any, startTime: any): Promise<any>;
|
|
122
140
|
disconnectNote(note: any): void;
|
|
141
|
+
releaseFullCache(note: any): void;
|
|
123
142
|
releaseNote(channel: any, note: any, endTime: any): Promise<any>;
|
|
124
143
|
noteOff(channelNumber: any, noteNumber: any, _velocity: any, endTime: any, force: any): any;
|
|
125
144
|
setNoteIndex(channel: any, index: any): void;
|
|
@@ -143,7 +162,7 @@ export class MidyGM1 extends EventTarget {
|
|
|
143
162
|
modLfoToVolume: (channel: any, note: any, scheduleTime: any) => void;
|
|
144
163
|
chorusEffectsSend: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
145
164
|
reverbEffectsSend: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
146
|
-
delayModLFO: (
|
|
165
|
+
delayModLFO: (channel: any, note: any, scheduleTime: any) => void;
|
|
147
166
|
freqModLFO: (_channel: any, note: any, scheduleTime: any) => void;
|
|
148
167
|
delayVibLFO: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
149
168
|
freqVibLFO: (_channel: any, _note: any, _scheduleTime: any) => void;
|
|
@@ -190,7 +209,8 @@ export class MidyGM1 extends EventTarget {
|
|
|
190
209
|
scheduleTask(callback: any, scheduleTime: any): Promise<any>;
|
|
191
210
|
}
|
|
192
211
|
declare class Channel {
|
|
193
|
-
constructor(audioNodes: any, settings: any);
|
|
212
|
+
constructor(channelNumber: any, audioNodes: any, settings: any);
|
|
213
|
+
channelNumber: number;
|
|
194
214
|
isDrum: boolean;
|
|
195
215
|
programNumber: number;
|
|
196
216
|
scheduleIndex: number;
|
|
@@ -208,6 +228,40 @@ declare class Channel {
|
|
|
208
228
|
state: ControllerState;
|
|
209
229
|
resetSettings(settings: any): void;
|
|
210
230
|
}
|
|
231
|
+
declare class RenderedBuffer {
|
|
232
|
+
constructor(buffer: any, meta?: {});
|
|
233
|
+
buffer: any;
|
|
234
|
+
isLoop: any;
|
|
235
|
+
isFull: any;
|
|
236
|
+
adsDuration: any;
|
|
237
|
+
loopStart: any;
|
|
238
|
+
loopDuration: any;
|
|
239
|
+
noteDuration: any;
|
|
240
|
+
releaseDuration: any;
|
|
241
|
+
}
|
|
242
|
+
declare class Note {
|
|
243
|
+
constructor(noteNumber: any, velocity: any, startTime: any);
|
|
244
|
+
voice: any;
|
|
245
|
+
voiceParams: any;
|
|
246
|
+
adjustedBaseFreq: number;
|
|
247
|
+
index: number;
|
|
248
|
+
ending: boolean;
|
|
249
|
+
bufferSource: any;
|
|
250
|
+
timelineIndex: null;
|
|
251
|
+
renderedBuffer: null;
|
|
252
|
+
fullCacheVoiceId: null;
|
|
253
|
+
filterEnvelopeNode: any;
|
|
254
|
+
volumeEnvelopeNode: any;
|
|
255
|
+
modLfo: any;
|
|
256
|
+
modLfoToPitch: any;
|
|
257
|
+
modLfoToFilterFc: any;
|
|
258
|
+
modLfoToVolume: any;
|
|
259
|
+
noteNumber: any;
|
|
260
|
+
velocity: any;
|
|
261
|
+
startTime: any;
|
|
262
|
+
ready: Promise<any>;
|
|
263
|
+
resolveReady: (value: any) => void;
|
|
264
|
+
}
|
|
211
265
|
declare class ControllerState {
|
|
212
266
|
array: Float32Array<ArrayBuffer>;
|
|
213
267
|
}
|
package/script/midy-GM1.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"midy-GM1.d.ts","sourceRoot":"","sources":["../src/midy-GM1.js"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"midy-GM1.d.ts","sourceRoot":"","sources":["../src/midy-GM1.js"],"names":[],"mappings":"AA4NA;IA6CE;;;;;;;;;;;MAWE;IAEF,6CAiBC;IAtED,gCAAgC;IAChC,aAAa;IACb,oBAAiB;IACjB,qBAAmB;IACnB,kBAAc;IACd,0BAAwB;IACxB,kBAAc;IACd,mBAAiB;IACjB,kBAAc;IACd,mBAAe;IACf,kBAAgB;IAChB,0BAAuD;IACvD,4BAAyB;IACzB,0BAAuB;IACvB,kCAA+B;IAC/B,qBAAqC;IACrC,mBAAkB;IAClB,mBAAkB;IAClB,kBAAiB;IACjB,oBAAmB;IACnB,mBAAkB;IAClB,iCAA2C;IAC3C,cAAU;IACV,cAAa;IACb,iBAAY;IACZ,gBAAc;IACd,oBAAkB;IAClB,sBAAwB;IACxB,2BAAqC;IAErC,8BAA2B;IAE3B,+BAA4B;IAC5B,4BAAyB;IACzB,8BAA2B;IAE3B,0BAA2B;IAC3B,qBAAoB;IACpB,4BAA6B;IAiB3B,kBAAgC;IAChC,eAAwD;IACxD,kBAA8C;IAC9C,eAAwD;IACxD,qBAGE;IACF,uBAAmD;IACnD;;;;;;;;;;;;MAA2D;IAC3D,6BAA+D;IAC/D,oBAAiD;IAMnD,mCASC;IAED,2DAYC;IAED,yCAmBC;IAED,oCAYC;IAED,6BAiIC;IAED,sBAgCC;IAED,6EAiBC;IAED;;;;MAWC;IAED,6CAOC;IAED,2CAsBC;IAED,kDA6BC;IAED,8DAoBC;IAED,gEAwDC;IAED,mCASC;IAED,uBAUC;IAED,yDAqCC;IAED,iCA4EC;IAED,2BA6EC;IAED,uDAEC;IAED,wDAEC;IAED,qCAKC;IAED;;;MAwDC;IAED,kGAeC;IAED,mGAeC;IAED,wEAQC;IAED,oCAkJC;IAED,uBAMC;IAED,sBAIC;IAED,uBAMC;IAED,wBAIC;IAED,0BAKC;IAED,8BAoBC;IAED,wBAYC;IAED,sBAOC;IAED,kEAWC;IAED,kFAYC;IAED,8BAEC;IAED,8BAEC;IAED,4BAEC;IAED,qCAMC;IAED,2DAKC;IAED,6CAEC;IAED,sDAgBC;IAED,4DAMC;IAED,qDAkBC;IAED,6CAIC;IAED,sDA8BC;IAED,kEAwBC;IAED,kHAwEC;IAED,oHAoGC;IAED,gIA0DC;IAED,qEAwBC;IAED,6FAqCC;IAED,iEA+CC;IAED,+EA0CC;IAED,uEAwDC;IAED,0EAiBC;IAED,oEAUC;IAED,yFAQC;IAED,qFAKC;IAED,uEA8BC;IAED,gCAUC;IAED,kCAWC;IAED,iEAkFC;IAED,4FAaC;IAED,6CAUC;IAED,qDAUC;IAED,qFAeC;IAED,+BAmBC;IAED,kDAOC;IAED,sFAsBC;IAED,mFAGC;IAED,wFAGC;IAED,sEAUC;IAED,mEAYC;IAED,wDAKC;IAED,sDAOC;IAED,mDAMC;IAED,kDAKC;IAED;;;;;;;;;;;;MAoCC;IAED,oFAMC;IAED,6EA4BC;IAED,qCAeC;IAED,+FAYC;IAED,wDAWC;IAED,4EAKC;IAED,mEAKC;IAED;;;MAMC;IAED,gEAKC;IAED,uEAKC;IAED,sEAGC;IAED,2DAUC;IAED,yEAeC;IAED,kFAeC;IAED,2DAMC;IAED,uDAoBC;IAED,gDAEC;IAED,gDAEC;IAED,sEAGC;IAED,qEAKC;IAED,2EAUC;IAED,iEAMC;IAED,uEAQC;IAED,mEAKC;IAED,yEAQC;IAED,gFAGC;IAED,6CAmBC;IAGD,8EAgCC;IAED,gFAGC;IAED,+EAgBC;IAED,qCAUC;IAED,4EAa
C;IAED,4DAGC;IAED,qDAMC;IAED,gDAYC;IAGD,6DAgBC;CACF;AAvjFD;IAiBE,gEAKC;IArBD,sBAAkB;IAClB,gBAAe;IACf,sBAAkB;IAClB,sBAAkB;IAClB,eAAW;IACX,gBAAY;IACZ,gBAAY;IACZ,eAAa;IACb,eAAa;IACb,6BAA0B;IAC1B,mBAAe;IACf,qBAAiB;IACjB,sBAAoB;IACpB,oBAAkB;IAClB,0BAA2B;IAMzB,uBAAkC;IAGpC,mCAEC;CACF;AAuED;IAUE,oCASC;IAlBD,YAAO;IACP,YAAO;IACP,YAAO;IACP,iBAAY;IACZ,eAAU;IACV,kBAAa;IACb,kBAAa;IACb,qBAAgB;CAYjB;AAjJD;IAiBE,4DAOC;IAvBD,WAAM;IACN,iBAAY;IACZ,yBAAyB;IACzB,cAAW;IACX,gBAAe;IACf,kBAAa;IACb,oBAAqB;IACrB,qBAAsB;IACtB,uBAAwB;IACxB,wBAAmB;IACnB,wBAAmB;IACnB,YAAO;IACP,mBAAc;IACd,sBAAiB;IACjB,oBAAe;IAGb,gBAA4B;IAC5B,cAAwB;IACxB,eAA0B;IAC1B,oBAEE;IADA,mCAA2B;CAGhC;AAoDD;IACE,iCAA8B;CAa/B"}
|