@marmooo/midy 0.5.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/midy.js CHANGED
@@ -1,6 +1,7 @@
  import { parseMidi } from "midi-file";
  import { parse, SoundFont } from "@marmooo/soundfont-parser";
  import { OggVorbisDecoderWebWorker } from "@wasm-audio-decoders/ogg-vorbis";
+ import { createConvolutionReverb, createConvolutionReverbImpulse, createDattorroReverb, createFDNDefault, createFreeverb, createMoorerReverbDefault, createSchroederReverb, createVelvetNoiseReverb, } from "./reverb.js";
  // Cache mode
  // - "none" for full real-time control (dynamic CC, LFO, pitch)
  // - "ads" for real-time playback with higher cache hit rate
@@ -202,7 +203,13 @@ class Note {
  }
  }
  class Channel {
- constructor(audioNodes, settings) {
+ constructor(channelNumber, audioNodes, settings) {
+ Object.defineProperty(this, "channelNumber", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 0
+ });
  Object.defineProperty(this, "isDrum", {
  enumerable: true,
  configurable: true,
@@ -353,6 +360,7 @@ class Channel {
  writable: true,
  value: null
  });
+ this.channelNumber = channelNumber;
  Object.assign(this, audioNodes);
  Object.assign(this, settings);
  this.state = new ControllerState();
@@ -618,7 +626,7 @@ export class Midy extends EventTarget {
  configurable: true,
  writable: true,
  value: {
- algorithm: "SchroederReverb",
+ algorithm: "Schroeder",
  time: this.getReverbTime(64),
  feedback: 0.8,
  }
@@ -866,6 +874,7 @@ export class Midy extends EventTarget {
  writable: true,
  value: null
  });
+ // MPE
  Object.defineProperty(this, "mpeEnabled", {
  enumerable: true,
  configurable: true,
@@ -906,9 +915,9 @@ export class Midy extends EventTarget {
  this.controlChangeHandlers = this.createControlChangeHandlers();
  this.keyBasedControllerHandlers = this.createKeyBasedControllerHandlers();
  this.effectHandlers = this.createEffectHandlers();
- this.channels = this.createChannels(audioContext);
- this.reverbEffect = this.createReverbEffect(audioContext);
- this.chorusEffect = this.createChorusEffect(audioContext);
+ this.channels = this.createChannels();
+ this.reverbEffect = this.createReverbEffect(this.reverb.algorithm);
+ this.chorusEffect = this.createChorusEffect();
  this.chorusEffect.output.connect(this.masterVolume);
  this.reverbEffect.output.connect(this.masterVolume);
  this.masterVolume.connect(audioContext.destination);
@@ -1187,6 +1196,8 @@ export class Midy extends EventTarget {
  return;
  const soundFont = this.soundFonts[soundFontIndex];
  const voice = soundFont.getVoice(bank, programNumber, noteNumber, velocity);
+ if (!voice)
+ return;
  const { instrument, sampleID } = voice.generators;
  return soundFontIndex * (2 ** 31) + instrument * (2 ** 24) +
  (sampleID << 8);
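
The new guard returns early when no voice matches, instead of throwing on voice.generators. The return value above packs three fields into disjoint numeric ranges; a sketch of the inferred layout (field widths deduced from the multipliers, not documented by the package):

    // Inferred layout: soundFontIndex above bit 31, instrument in bits 24-30,
    // sampleID in bits 8-23. The high field uses multiplication rather than <<
    // because 2 ** 31 exceeds the signed 32-bit range of JavaScript's bitwise operators.
    const voiceId = soundFontIndex * 2 ** 31 + instrument * 2 ** 24 + (sampleID << 8);
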
@@ -1201,9 +1212,10 @@ export class Midy extends EventTarget {
  merger.connect(this.masterVolume);
  return { gainL, gainR, merger };
  }
- createChannels(audioContext) {
+ createChannels() {
  const settings = this.constructor.channelSettings;
- return Array.from({ length: this.numChannels }, () => new Channel(this.createChannelAudioNodes(audioContext), settings));
+ const audioContext = this.audioContext;
+ return Array.from({ length: this.numChannels }, (_, ch) => new Channel(ch, this.createChannelAudioNodes(audioContext), settings));
  }
  decodeOggVorbis(sample) {
  const task = decoderQueue.then(async () => {
@@ -1739,6 +1751,7 @@ export class Midy extends EventTarget {
  const soundFont = this.soundFonts[soundFontIndex];
  const pressure = renderNoteAftertouch[ch * 128 + noteNumber];
  const fakeChannel = {
+ channelNumber: ch,
  state: { array: renderControllerStates[ch].slice() },
  programNumber,
  isDrum,
@@ -1990,62 +2003,6 @@ export class Midy extends EventTarget {
  }
  }
  }
- createConvolutionReverbImpulse(audioContext, decay, preDecay) {
- const sampleRate = audioContext.sampleRate;
- const length = sampleRate * decay;
- const impulse = new AudioBuffer({
- numberOfChannels: 2,
- length,
- sampleRate,
- });
- const preDecayLength = Math.min(sampleRate * preDecay, length);
- for (let channel = 0; channel < impulse.numberOfChannels; channel++) {
- const channelData = impulse.getChannelData(channel);
- for (let i = 0; i < preDecayLength; i++) {
- channelData[i] = Math.random() * 2 - 1;
- }
- const attenuationFactor = 1 / (sampleRate * decay);
- for (let i = preDecayLength; i < length; i++) {
- const attenuation = Math.exp(-(i - preDecayLength) * attenuationFactor);
- channelData[i] = (Math.random() * 2 - 1) * attenuation;
- }
- }
- return impulse;
- }
- createConvolutionReverb(audioContext, impulse) {
- const convolverNode = new ConvolverNode(audioContext, {
- buffer: impulse,
- });
- return {
- input: convolverNode,
- output: convolverNode,
- convolverNode,
- };
- }
- createCombFilter(audioContext, input, delay, feedback) {
- const delayNode = new DelayNode(audioContext, {
- maxDelayTime: delay,
- delayTime: delay,
- });
- const feedbackGain = new GainNode(audioContext, { gain: feedback });
- input.connect(delayNode);
- delayNode.connect(feedbackGain);
- feedbackGain.connect(delayNode);
- return delayNode;
- }
- createAllpassFilter(audioContext, input, delay, feedback) {
- const delayNode = new DelayNode(audioContext, {
- maxDelayTime: delay,
- delayTime: delay,
- });
- const feedbackGain = new GainNode(audioContext, { gain: feedback });
- const passGain = new GainNode(audioContext, { gain: 1 - feedback });
- input.connect(delayNode);
- delayNode.connect(feedbackGain);
- feedbackGain.connect(delayNode);
- delayNode.connect(passGain);
- return passGain;
- }
  generateDistributedArray(center, count, varianceRatio = 0.1, randomness = 0.05) {
  const variance = center * varianceRatio;
  const array = new Array(count);
@@ -2056,40 +2013,60 @@ export class Midy extends EventTarget {
  }
  return array;
  }
- // https://hajim.rochester.edu/ece/sites/zduan/teaching/ece472/reading/Schroeder_1962.pdf
- // M.R.Schroeder, "Natural Sounding Artificial Reverberation", J.Audio Eng. Soc., vol.10, p.219, 1962
- createSchroederReverb(audioContext, combFeedbacks, combDelays, allpassFeedbacks, allpassDelays) {
- const input = new GainNode(audioContext);
- const mergerGain = new GainNode(audioContext);
- for (let i = 0; i < combDelays.length; i++) {
- const comb = this.createCombFilter(audioContext, input, combDelays[i], combFeedbacks[i]);
- comb.connect(mergerGain);
- }
- const allpasses = [];
- for (let i = 0; i < allpassDelays.length; i++) {
- const allpass = this.createAllpassFilter(audioContext, (i === 0) ? mergerGain : allpasses.at(-1), allpassDelays[i], allpassFeedbacks[i]);
- allpasses.push(allpass);
- }
- const output = allpasses.at(-1);
- return { input, output };
+ setReverbEffect(algorithm) {
+ if (this.reverbEffect)
+ this.reverbEffect.output.disconnect();
+ this.reverbEffect = this.createReverbEffect(algorithm);
+ this.reverb.algorithm = algorithm;
  }
- createReverbEffect(audioContext) {
- const { algorithm, time: rt60, feedback } = this.reverb;
+ createReverbEffect(algorithm) {
+ const { audioContext, reverb } = this;
+ const { time: rt60, feedback } = reverb;
  switch (algorithm) {
- case "ConvolutionReverb": {
- const impulse = this.createConvolutionReverbImpulse(audioContext, rt60, this.calcDelay(rt60, feedback));
- return this.createConvolutionReverb(audioContext, impulse);
+ case "Convolution": {
+ const impulse = createConvolutionReverbImpulse(audioContext, rt60, this.calcDelay(rt60, feedback));
+ return createConvolutionReverb(audioContext, impulse);
  }
- case "SchroederReverb": {
+ case "Schroeder": {
  const combFeedbacks = this.generateDistributedArray(feedback, 4);
- const combDelays = combFeedbacks.map((feedback) => this.calcDelay(rt60, feedback));
+ const combDelays = combFeedbacks.map((fb) => this.calcDelay(rt60, fb));
  const allpassFeedbacks = this.generateDistributedArray(feedback, 4);
- const allpassDelays = allpassFeedbacks.map((feedback) => this.calcDelay(rt60, feedback));
- return this.createSchroederReverb(audioContext, combFeedbacks, combDelays, allpassFeedbacks, allpassDelays);
+ const allpassDelays = allpassFeedbacks.map((fb) => this.calcDelay(rt60, fb));
+ return createSchroederReverb(audioContext, combFeedbacks, combDelays, allpassFeedbacks, allpassDelays);
+ }
+ case "Moorer":
+ return createMoorerReverbDefault(audioContext, {
+ rt60,
+ damping: 1 - feedback,
+ });
+ case "FDN":
+ return createFDNDefault(audioContext, { rt60, damping: 1 - feedback });
+ case "Dattorro": {
+ const decay = feedback * 0.28 + 0.7;
+ return createDattorroReverb(audioContext, {
+ decay,
+ damping: 1 - feedback,
+ });
  }
+ case "Freeverb": {
+ const damping = 1 - feedback;
+ const { inputL, inputR, outputL, outputR } = createFreeverb(audioContext, { roomSize: feedback, damping });
+ const inputMerger = new GainNode(audioContext);
+ const outputMerger = new GainNode(audioContext, { gain: 0.5 });
+ inputMerger.connect(inputL);
+ inputMerger.connect(inputR);
+ outputL.connect(outputMerger);
+ outputR.connect(outputMerger);
+ return { input: inputMerger, output: outputMerger };
+ }
+ case "VelvetNoise":
+ return createVelvetNoiseReverb(audioContext, rt60);
+ default:
+ throw new Error(`Unknown reverb algorithm: ${algorithm}`);
  }
  }
- createChorusEffect(audioContext) {
+ createChorusEffect() {
+ const audioContext = this.audioContext;
  const input = new GainNode(audioContext);
  const output = new GainNode(audioContext);
  const sendGain = new GainNode(audioContext);
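
Reverb construction has moved out of the Midy class into standalone factories in reverb.js, selected by short algorithm names, and the new setReverbEffect lets callers swap algorithms after construction. A minimal sketch of the intended call, assuming an initialized Midy instance named midy (whether the caller must reconnect the new effect's output is not shown in this hunk):

    // Algorithm names accepted by the switch above:
    // "Convolution" | "Schroeder" | "Moorer" | "FDN" | "Dattorro" | "Freeverb" | "VelvetNoise"
    midy.setReverbEffect("Freeverb"); // disconnects the old effect, rebuilds, records the name
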
@@ -2272,9 +2249,10 @@ export class Midy extends EventTarget {
  }
  setVolumeNode(channel, note, scheduleTime) {
  const depth = 1 + this.getNoteAmplitudeControl(channel, note);
+ const timeConstant = this.perceptualSmoothingTime / 5; // 99.3% (5 * tau)
  note.volumeNode.gain
- .cancelScheduledValues(scheduleTime)
- .setValueAtTime(depth, scheduleTime);
+ .cancelAndHoldAtTime(scheduleTime)
+ .setTargetAtTime(depth, scheduleTime, timeConstant);
  }
  setPortamentoDetune(channel, note, scheduleTime) {
  if (channel.portamentoControl) {
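
setTargetAtTime approaches its target exponentially, covering 1 - e^(-t/tau) of the change after t seconds, so dividing the smoothing window by 5 leaves the gain within about 0.7% of the target at the end of the window; that is what the "99.3% (5 * tau)" comment refers to:

    // 1 - Math.exp(-5) ≈ 0.9933, i.e. ~99.3% settled after five time constants.
    // Illustrative numbers, not from the package: a 20 ms window gives tau = 4 ms.
    const perceptualSmoothingTime = 0.02;
    const timeConstant = perceptualSmoothingTime / 5;
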
@@ -2414,15 +2392,16 @@ export class Midy extends EventTarget {
  note.modLfoToVolume.connect(volumeTarget.gain);
  }
  startVibrato(channel, note, scheduleTime) {
+ const audioContext = this.audioContext;
  const { voiceParams, noteNumber } = note;
  const vibratoRate = this.getRelativeKeyBasedValue(channel, noteNumber, 76) *
  2;
  const vibratoDelay = this.getRelativeKeyBasedValue(channel, noteNumber, 78) * 2;
- note.vibLfo = new OscillatorNode(this.audioContext, {
+ note.vibLfo = new OscillatorNode(audioContext, {
  frequency: this.centToHz(voiceParams.freqVibLFO) * vibratoRate,
  });
  note.vibLfo.start(note.startTime + voiceParams.delayVibLFO * vibratoDelay);
- note.vibLfoToPitch = new GainNode(this.audioContext);
+ note.vibLfoToPitch = new GainNode(audioContext);
  this.setVibLfoToPitch(channel, note, scheduleTime);
  note.vibLfo.connect(note.vibLfoToPitch);
  note.vibLfoToPitch.connect(note.bufferSource.detune);
@@ -2433,25 +2412,29 @@ export class Midy extends EventTarget {
  const volHold = volAttack + voiceParams.volHold;
  const decayDuration = voiceParams.volDecay;
  const adsDuration = volHold + decayDuration * decayCurve * 5;
- const loopStartTime = voiceParams.loopStart / voiceParams.sampleRate;
- const loopDuration = isLoop
+ const sampleLoopStart = voiceParams.loopStart / voiceParams.sampleRate;
+ const sampleLoopDuration = isLoop
  ? (voiceParams.loopEnd - voiceParams.loopStart) / voiceParams.sampleRate
  : 0;
- const loopCount = isLoop && adsDuration > loopStartTime
- ? Math.ceil((adsDuration - loopStartTime) / loopDuration)
+ const playbackRate = voiceParams.playbackRate;
+ const outputLoopStart = sampleLoopStart / playbackRate;
+ const outputLoopDuration = sampleLoopDuration / playbackRate;
+ const loopCount = isLoop && adsDuration > outputLoopStart
+ ? Math.ceil((adsDuration - outputLoopStart) / outputLoopDuration)
  : 0;
- const alignedLoopStart = loopStartTime + loopCount * loopDuration;
+ const alignedLoopStart = outputLoopStart + loopCount * outputLoopDuration;
  const renderDuration = isLoop
- ? alignedLoopStart + loopDuration
- : audioBuffer.duration;
- const offlineContext = new OfflineAudioContext(audioBuffer.numberOfChannels, Math.ceil(renderDuration * this.audioContext.sampleRate), this.audioContext.sampleRate);
+ ? alignedLoopStart + outputLoopDuration
+ : audioBuffer.duration / playbackRate;
+ const sampleRate = this.audioContext.sampleRate;
+ const offlineContext = new OfflineAudioContext(audioBuffer.numberOfChannels, Math.ceil(renderDuration * sampleRate), sampleRate);
  const bufferSource = new AudioBufferSourceNode(offlineContext);
  bufferSource.buffer = audioBuffer;
- bufferSource.playbackRate.value = voiceParams.playbackRate;
+ bufferSource.playbackRate.value = playbackRate;
  bufferSource.loop = isLoop;
  if (isLoop) {
- bufferSource.loopStart = loopStartTime;
- bufferSource.loopEnd = loopStartTime + loopDuration;
+ bufferSource.loopStart = sampleLoopStart;
+ bufferSource.loopEnd = sampleLoopStart + sampleLoopDuration;
  }
  const initialFreq = this.clampCutoffFrequency(this.centToHz(voiceParams.initialFilterFc));
  const filterEnvelopeNode = new BiquadFilterNode(offlineContext, {
@@ -2483,7 +2466,7 @@ export class Midy extends EventTarget {
  isLoop,
  adsDuration,
  loopStart: alignedLoopStart,
- loopDuration,
+ loopDuration: outputLoopDuration,
  });
  }
  async createAdsrRenderedBuffer(channel, note, voiceParams, audioBuffer, noteDuration) {
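
These two hunks fix loop scheduling when a voice is resampled: loopStart and loopEnd on the AudioBufferSourceNode stay in source-buffer seconds, but everything compared against the envelope's adsDuration is now divided by playbackRate to convert into output time. A worked example with assumed numbers:

    // Assumed illustrative values, not taken from any SoundFont.
    const sampleRate = 44100;
    const loopStart = 22050, loopEnd = 44100;      // sample frames
    const playbackRate = 2;                        // one octave up
    const sampleLoopDuration = (loopEnd - loopStart) / sampleRate; // 0.5 s of source audio
    const outputLoopDuration = sampleLoopDuration / playbackRate;  // plays back in 0.25 s
    // Before the fix, loopCount divided by the 0.5 s source figure, mis-counting
    // how many loop passes fit inside the envelope's output-time duration.
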
@@ -2581,7 +2564,7 @@ export class Midy extends EventTarget {
  }
  async createFullRenderedBuffer(channel, note, voiceParams, noteDuration, noteEvent = {}) {
  const { startTime: noteStartTime = 0, events: noteEvents = [] } = noteEvent;
- const ch = note.channel ?? 0;
+ const ch = channel.channelNumber;
  const releaseEndDuration = voiceParams.volRelease * releaseCurve * 5;
  const totalDuration = noteDuration + releaseEndDuration;
  const sampleRate = this.audioContext.sampleRate;
@@ -2636,7 +2619,7 @@ export class Midy extends EventTarget {
  const audioBufferId = this.getVoiceId(channel, noteNumber, velocity);
  if (!realtime) {
  if (cacheMode === "note") {
- return await this.getFullCachedBuffer(note, audioBufferId);
+ return await this.getFullCachedBuffer(channel, note, audioBufferId);
  }
  else if (cacheMode === "adsr") {
  return await this.getAdsrCachedBuffer(channel, note, audioBufferId);
@@ -2727,7 +2710,7 @@ export class Midy extends EventTarget {
  durationMap.set(cacheKey, renderPromise);
  return await renderPromise;
  }
- async getFullCachedBuffer(note, audioBufferId) {
+ async getFullCachedBuffer(channel, note, audioBufferId) {
  const voiceParams = note.voiceParams;
  const timelineIndex = note.timelineIndex;
  const noteEvent = this.noteOnEvents.get(timelineIndex);
@@ -2752,8 +2735,7 @@ export class Midy extends EventTarget {
  }
  const renderPromise = (async () => {
  try {
- const rawBuffer = await this.createAudioBuffer(voiceParams);
- const rendered = await this.createFullRenderedBuffer(note, voiceParams, rawBuffer, noteDuration, noteEvent);
+ const rendered = await this.createFullRenderedBuffer(channel, note, voiceParams, noteDuration, noteEvent);
  durationMap.set(cacheKey, rendered);
  return rendered;
  }
@@ -2780,7 +2762,6 @@ export class Midy extends EventTarget {
  note.renderedBuffer = isRendered ? audioBuffer : null;
  note.bufferSource = this.createBufferSource(channel, noteNumber, voiceParams, audioBuffer);
  note.volumeNode = new GainNode(audioContext);
- note.volumeNode.gain.setValueAtTime(1, now);
  const cacheMode = this.cacheMode;
  const isFullCached = isRendered && audioBuffer.isFull === true;
  if (cacheMode === "none") {
@@ -2926,9 +2907,6 @@ export class Midy extends EventTarget {
  startTime = this.audioContext.currentTime;
  const note = new Note(noteNumber, velocity, startTime);
  note.channel = channelNumber;
- const channel = this.channels[channelNumber];
- note.index = channel.scheduledNotes.length;
- channel.scheduledNotes.push(note);
  return note;
  }
  async setupNote(channelNumber, note, startTime) {
@@ -2951,6 +2929,8 @@ export class Midy extends EventTarget {
  note.voice = soundFont.getVoice(bank, programNumber, note.noteNumber, note.velocity);
  if (!note.voice)
  return;
+ note.index = channel.scheduledNotes.length;
+ channel.scheduledNotes.push(note);
  await this.setNoteAudioNode(channel, note, realtime);
  this.setNoteRouting(channelNumber, note, startTime);
  note.resolveReady();
@@ -3010,18 +2990,8 @@ export class Midy extends EventTarget {
  const volRelease = endTime + volDuration;
  note.volumeNode.gain
  .cancelScheduledValues(endTime)
- .setValueAtTime(1, endTime)
  .setTargetAtTime(0, endTime, volDuration * releaseCurve);
- return new Promise((resolve) => {
- this.scheduleTask(() => {
- note.bufferSource.loop = false;
- note.bufferSource.stop(volRelease);
- this.disconnectNote(note);
- channel.scheduledNotes[note.index] = undefined;
- this.releaseFullCache(note);
- resolve();
- }, volRelease);
- });
+ note.bufferSource.stop(volRelease);
  }
  else {
  const now = this.audioContext.currentTime;
@@ -3031,15 +3001,16 @@ export class Midy extends EventTarget {
  this.releaseFullCache(note);
  return Promise.resolve();
  }
- return new Promise((resolve) => {
- this.scheduleTask(() => {
- this.disconnectNote(note);
- channel.scheduledNotes[note.index] = undefined;
- this.releaseFullCache(note);
- resolve();
- }, naturalEndTime);
- });
+ note.bufferSource.stop(naturalEndTime);
  }
+ return new Promise((resolve) => {
+ note.bufferSource.onended = () => {
+ this.disconnectNote(note);
+ channel.scheduledNotes[note.index] = undefined;
+ this.releaseFullCache(note);
+ resolve();
+ };
+ });
  }
  const releaseTime = this.getRelativeKeyBasedValue(channel, note.noteNumber, 72) * 2;
  const volDuration = note.voiceParams.volRelease * releaseTime;
@@ -3061,45 +3032,33 @@ export class Midy extends EventTarget {
  const noteOffTime = note.startTime + (rb.noteDuration ?? 0);
  const isEarlyCut = endTime < noteOffTime;
  if (isEarlyCut) {
- const volRelease = endTime + volDuration;
  note.volumeNode.gain
  .cancelScheduledValues(endTime)
- .setValueAtTime(1, endTime)
  .setTargetAtTime(0, endTime, volDuration * releaseCurve);
- return new Promise((resolve) => {
- this.scheduleTask(() => {
- note.bufferSource.stop(volRelease);
- this.disconnectNote(note);
- channel.scheduledNotes[note.index] = undefined;
- resolve();
- }, volRelease);
- });
+ note.bufferSource.stop(volRelease);
  }
  else {
- return new Promise((resolve) => {
- this.scheduleTask(() => {
- note.bufferSource.stop();
- this.disconnectNote(note);
- channel.scheduledNotes[note.index] = undefined;
- resolve();
- }, naturalEndTime);
- });
+ note.bufferSource.stop(naturalEndTime);
  }
+ return new Promise((resolve) => {
+ note.bufferSource.onended = () => {
+ this.disconnectNote(note);
+ channel.scheduledNotes[note.index] = undefined;
+ resolve();
+ };
+ });
  }
  note.volumeNode.gain
  .cancelScheduledValues(endTime)
- .setValueAtTime(1, endTime)
  .setTargetAtTime(0, endTime, volDuration * releaseCurve);
  }
+ note.bufferSource.stop(volRelease);
  return new Promise((resolve) => {
- this.scheduleTask(() => {
- const bufferSource = note.bufferSource;
- bufferSource.loop = false;
- bufferSource.stop(volRelease);
+ note.bufferSource.onended = () => {
  this.disconnectNote(note);
  channel.scheduledNotes[note.index] = undefined;
  resolve();
- }, volRelease);
+ };
  });
  }
  noteOff(channelNumber, noteNumber, velocity, endTime, force) {
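
Note teardown now hangs off the AudioBufferSourceNode's onended event rather than a separately scheduled timer task, so disconnection and cache release happen exactly when playback ends, even if the stop fires earlier or later than estimated. The pattern, reduced to a standalone sketch:

    // Generic sketch of the stop-then-cleanup pattern adopted above.
    function stopAndCleanup(bufferSource, when, cleanup) {
      return new Promise((resolve) => {
        bufferSource.onended = () => { // fires once playback has actually ended
          cleanup();
          resolve();
        };
        bufferSource.stop(when);       // schedule the stop; onended follows
      });
    }
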
@@ -4401,9 +4360,10 @@ export class Midy extends EventTarget {
  setMasterVolume(value, scheduleTime) {
  if (!(0 <= scheduleTime))
  scheduleTime = this.audioContext.currentTime;
+ const timeConstant = this.perceptualSmoothingTime / 5; // 99.3% (5 * tau)
  this.masterVolume.gain
- .cancelScheduledValues(scheduleTime)
- .setValueAtTime(value * value, scheduleTime);
+ .cancelAndHoldAtTime(scheduleTime)
+ .setTargetAtTime(value * value, scheduleTime, timeConstant);
  }
  handleMasterFineTuningSysEx(data, scheduleTime) {
  const value = (data[5] * 128 + data[4]) / 16383;
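
The master gain applies value * value, a common square-law approximation of perceived loudness, and reuses the same 5-tau smoothing to avoid zipper noise:

    // Illustrative mapping, not from the package docs:
    // value 0.5 -> gain 0.25, value 0.9 -> gain 0.81.
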
@@ -4468,7 +4428,7 @@ export class Midy extends EventTarget {
  setReverbType(type) {
  this.reverb.time = this.getReverbTimeFromType(type);
  this.reverb.feedback = (type === 8) ? 0.9 : 0.8;
- this.reverbEffect = this.createReverbEffect(this.audioContext);
+ this.reverbEffect = this.setReverbEffect(this.reverb.algorithm);
  }
  getReverbTimeFromType(type) {
  switch (type) {
@@ -4490,7 +4450,7 @@ export class Midy extends EventTarget {
  }
  setReverbTime(value) {
  this.reverb.time = this.getReverbTime(value);
- this.reverbEffect = this.createReverbEffect(this.audioContext);
+ this.reverbEffect = this.setReverbEffect(this.reverb.algorithm);
  }
  getReverbTime(value) {
  return Math.exp((value - 40) * 0.025);
package/esm/reverb.d.ts ADDED
@@ -0,0 +1,58 @@
+ export function createConvolutionReverbImpulse(audioContext: any, decay: any, preDecay: any): any;
+ export function createConvolutionReverb(audioContext: any, impulse: any): {
+ input: any;
+ output: any;
+ };
+ export function createCombFilter(audioContext: any, input: any, delay: any, feedback: any): any;
+ export function createAllpassFilter(audioContext: any, input: any, delay: any, feedback: any): any;
+ export function createLPFCombFilter(audioContext: any, input: any, delayTime: any, feedback: any, damping: any): any;
+ export function createSchroederReverb(audioContext: any, combFeedbacks: any, combDelays: any, allpassFeedbacks: any, allpassDelays: any): {
+ input: any;
+ output: any;
+ };
+ export function createMoorerReverb(audioContext: any, earlyTaps: any, earlyGains: any, combDelays: any, combFeedbacks: any, damping: any, allpassDelays: any, allpassFeedbacks: any): {
+ input: any;
+ output: any;
+ };
+ export function createMoorerReverbDefault(audioContext: any, { rt60, damping, }?: {
+ rt60?: number | undefined;
+ damping?: number | undefined;
+ }): {
+ input: any;
+ output: any;
+ };
+ export function createFDN(audioContext: any, delayTimes: any, gains: any, damping?: number, modulation?: number): {
+ input: any;
+ output: any;
+ };
+ export function createFDNDefault(audioContext: any, { rt60, damping, modulation }?: {
+ rt60?: number | undefined;
+ damping?: number | undefined;
+ modulation?: number | undefined;
+ }): {
+ input: any;
+ output: any;
+ };
+ export function createDattorroReverb(audioContext: any, { decay, damping, bandwidth, }?: {
+ decay?: number | undefined;
+ damping?: number | undefined;
+ bandwidth?: number | undefined;
+ }): {
+ input: any;
+ output: any;
+ };
+ export function createFreeverb(audioContext: any, { roomSize, damping }?: {
+ roomSize?: number | undefined;
+ damping?: number | undefined;
+ }): {
+ inputL: any;
+ inputR: any;
+ outputL: any;
+ outputR: any;
+ };
+ export function createVelvetNoiseImpulse(audioContext: any, decay: any, density?: number): any;
+ export function createVelvetNoiseReverb(audioContext: any, decay: any, density: any): {
+ input: any;
+ output: any;
+ };
+ //# sourceMappingURL=reverb.d.ts.map
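
The declarations show the extracted builders as plain factories over an AudioContext, usable outside Midy. A hedged usage sketch (the subpath import and parameter values are assumptions, not documented by the package):

    // Assumed import path and illustrative parameters.
    import { createVelvetNoiseReverb } from "@marmooo/midy/esm/reverb.js";

    const ctx = new AudioContext();
    const source = new OscillatorNode(ctx);
    const velvet = createVelvetNoiseReverb(ctx, 2.0); // decay in seconds (assumption)
    source.connect(velvet.input);
    velvet.output.connect(ctx.destination);
    source.start();
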
package/esm/reverb.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"reverb.d.ts","sourceRoot":"","sources":["../src/reverb.js"],"names":[],"mappings":"AAWA,kGAiBC;AAED;;;EAGC;AAED,gGAUC;AAMD,mGAYC;AAKD,qHAuBC;AAOD;;;EA8BC;AAWD;;;EAmDC;AAGD;;;;;;EA0BC;AAcD;;;EA+EC;AAGD;;;;;;;EAWC;AAcD;;;;;;;EAoFC;AAoBD;;;;;;;;EA2CC;AAOD,+FAkBC;AAED;;;EAGC"}