mercury-engine 1.5.0 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/mercury.js +206 -46
- package/dist/mercury.min.js +1 -1
- package/examples/interface/index.html +8 -3
- package/package.json +1 -1
package/dist/mercury.js
CHANGED
|
@@ -15457,17 +15457,20 @@ const TL = require('total-serialism').Translate;
|
|
|
15457
15457
|
// all the available effects
|
|
15458
15458
|
const fxMap = {
|
|
15459
15459
|
'drive' : (params) => {
|
|
15460
|
-
return new
|
|
15460
|
+
return new Overdrive(params);
|
|
15461
15461
|
},
|
|
15462
15462
|
'distort' : (params) => {
|
|
15463
|
-
return new
|
|
15463
|
+
return new Overdrive(params);
|
|
15464
15464
|
},
|
|
15465
15465
|
'overdrive' : (params) => {
|
|
15466
|
-
return new
|
|
15466
|
+
return new Overdrive(params);
|
|
15467
15467
|
},
|
|
15468
15468
|
'squash' : (params) => {
|
|
15469
15469
|
return new Squash(params);
|
|
15470
15470
|
},
|
|
15471
|
+
'fuzz' : (params) => {
|
|
15472
|
+
return new Fuzz(params);
|
|
15473
|
+
},
|
|
15471
15474
|
'compress' : (params) => {
|
|
15472
15475
|
return new Compressor(params);
|
|
15473
15476
|
},
|
|
@@ -15558,6 +15561,14 @@ const fxMap = {
|
|
|
15558
15561
|
}
|
|
15559
15562
|
module.exports = fxMap;
|
|
15560
15563
|
|
|
15564
|
+
// Dispose a array of nodes
|
|
15565
|
+
//
|
|
15566
|
+
function disposeNodes(nodes=[]) {
|
|
15567
|
+
nodes.forEach((n) => {
|
|
15568
|
+
n?.disconnect();
|
|
15569
|
+
n?.dispose();
|
|
15570
|
+
});
|
|
15571
|
+
}
|
|
15561
15572
|
|
|
15562
15573
|
// A formant/vowel filter. With this filter you can imitate the vowels of human
|
|
15563
15574
|
// speech.
|
|
@@ -15714,11 +15725,11 @@ const DownSampler = function(_params){
|
|
|
15714
15725
|
}
|
|
15715
15726
|
}
|
|
15716
15727
|
|
|
15717
|
-
//
|
|
15728
|
+
// An overdrive/saturation algorithm using the arctan function as a
|
|
15718
15729
|
// waveshaping technique. Some mapping to apply a more equal loudness
|
|
15719
|
-
//
|
|
15730
|
+
// on the overdrive parameter when increasing the amount
|
|
15720
15731
|
//
|
|
15721
|
-
const
|
|
15732
|
+
const Overdrive = function(_params){
|
|
15722
15733
|
_params = Util.mapDefaults(_params, [ 2, 1 ]);
|
|
15723
15734
|
// apply the default values and convert to arrays where necessary
|
|
15724
15735
|
this._drive = Util.toArray(_params[0]);
|
|
@@ -15737,7 +15748,7 @@ const TanhDistortion = function(_params){
|
|
|
15737
15748
|
this._fx.output = new Tone.Gain(1).connect(this._mixWet);
|
|
15738
15749
|
|
|
15739
15750
|
// the fx processor
|
|
15740
|
-
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('
|
|
15751
|
+
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('arctan-distortion-processor');
|
|
15741
15752
|
|
|
15742
15753
|
// connect input, fx, output and wetdry
|
|
15743
15754
|
this._fx.input.chain(this._fx.workletNode, this._fx.output);
|
|
@@ -15746,17 +15757,9 @@ const TanhDistortion = function(_params){
|
|
|
15746
15757
|
// drive amount, minimum drive of 1
|
|
15747
15758
|
const d = Util.assureNum(Math.max(0, Util.getParam(this._drive, c)) + 1);
|
|
15748
15759
|
|
|
15749
|
-
// preamp gain reduction for linear at drive = 1
|
|
15750
|
-
const p = 0.8;
|
|
15751
|
-
// makeup gain
|
|
15752
|
-
const m = 1.0 / (p * (d ** 1.1));
|
|
15753
|
-
|
|
15754
15760
|
// set the parameters in the workletNode
|
|
15755
15761
|
const amount = this._fx.workletNode.parameters.get('amount');
|
|
15756
|
-
amount.setValueAtTime(
|
|
15757
|
-
|
|
15758
|
-
const makeup = this._fx.workletNode.parameters.get('makeup');
|
|
15759
|
-
makeup.setValueAtTime(m, time);
|
|
15762
|
+
amount.setValueAtTime(d, time);
|
|
15760
15763
|
|
|
15761
15764
|
const wet = Util.clip(Util.getParam(this._wet, c), 0, 1);
|
|
15762
15765
|
this._mixWet.gain.setValueAtTime(wet);
|
|
@@ -15768,12 +15771,57 @@ const TanhDistortion = function(_params){
|
|
|
15768
15771
|
}
|
|
15769
15772
|
|
|
15770
15773
|
this.delete = function(){
|
|
15771
|
-
|
|
15774
|
+
disposeNodes([ this._fx, this._fx.input, this._fx.output, this._mix, this._mixDry, this._mixWet ]);
|
|
15775
|
+
}
|
|
15776
|
+
}
|
|
15772
15777
|
|
|
15773
|
-
|
|
15774
|
-
|
|
15775
|
-
|
|
15776
|
-
|
|
15778
|
+
// A fuzz distortion effect in modelled after the Big Muff Pi pedal
|
|
15779
|
+
// by Electro Harmonics. Using three stages of distortion:
|
|
15780
|
+
// 1 soft-clipping stage, 2 half-wave rectifier, 3 hard-clipping stage
|
|
15781
|
+
//
|
|
15782
|
+
const Fuzz = function(_params){
|
|
15783
|
+
_params = Util.mapDefaults(_params, [ 10, 1 ]);
|
|
15784
|
+
// apply the default values and convert to arrays where necessary
|
|
15785
|
+
this._drive = Util.toArray(_params[0]);
|
|
15786
|
+
this._wet = Util.toArray(_params[1]);
|
|
15787
|
+
|
|
15788
|
+
// The crossfader for wet-dry (originally implemented with CrossFade)
|
|
15789
|
+
// this._mix = new Tone.CrossFade();
|
|
15790
|
+
this._mix = new Tone.Add();
|
|
15791
|
+
this._mixWet = new Tone.Gain(0).connect(this._mix.input);
|
|
15792
|
+
this._mixDry = new Tone.Gain(1).connect(this._mix.addend);
|
|
15793
|
+
|
|
15794
|
+
// ToneAudioNode has all the tone effect parameters
|
|
15795
|
+
this._fx = new Tone.ToneAudioNode();
|
|
15796
|
+
// A gain node for connecting with input and output
|
|
15797
|
+
this._fx.input = new Tone.Gain(1).connect(this._mixDry);
|
|
15798
|
+
this._fx.output = new Tone.Gain(1).connect(this._mixWet);
|
|
15799
|
+
|
|
15800
|
+
// the fx processor
|
|
15801
|
+
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('fuzz-processor');
|
|
15802
|
+
|
|
15803
|
+
// connect input, fx, output to wetdry
|
|
15804
|
+
this._fx.input.chain(this._fx.workletNode, this._fx.output);
|
|
15805
|
+
|
|
15806
|
+
this.set = function(c, time, bpm){
|
|
15807
|
+
// drive amount, minimum drive of 1
|
|
15808
|
+
const d = Util.assureNum(Math.max(1, Util.getParam(this._drive, c)) + 1);
|
|
15809
|
+
|
|
15810
|
+
// set the parameters in the workletNode
|
|
15811
|
+
const amount = this._fx.workletNode.parameters.get('amount');
|
|
15812
|
+
amount.setValueAtTime(d, time);
|
|
15813
|
+
|
|
15814
|
+
const wet = Util.clip(Util.getParam(this._wet, c), 0, 1);
|
|
15815
|
+
this._mixWet.gain.setValueAtTime(wet);
|
|
15816
|
+
this._mixDry.gain.setValueAtTime(1 - wet);
|
|
15817
|
+
}
|
|
15818
|
+
|
|
15819
|
+
this.chain = function(){
|
|
15820
|
+
return { 'send' : this._fx, 'return' : this._mix }
|
|
15821
|
+
}
|
|
15822
|
+
|
|
15823
|
+
this.delete = function(){
|
|
15824
|
+
disposeNodes([ this._fx, this._fx.input, this._fx.output, this._mix, this._mixDry, this._mixWet ]);
|
|
15777
15825
|
}
|
|
15778
15826
|
}
|
|
15779
15827
|
|
|
@@ -16572,7 +16620,7 @@ const Delay = function(_params){
|
|
|
16572
16620
|
// this._fx.dispose();
|
|
16573
16621
|
// }
|
|
16574
16622
|
// }
|
|
16575
|
-
},{"./Util.js":
|
|
16623
|
+
},{"./Util.js":67,"tone":44,"total-serialism":47}],57:[function(require,module,exports){
|
|
16576
16624
|
const Tone = require('tone');
|
|
16577
16625
|
const Util = require('./Util.js');
|
|
16578
16626
|
const fxMap = require('./Effects.js');
|
|
@@ -16595,6 +16643,7 @@ class Instrument extends Sequencer {
|
|
|
16595
16643
|
this.adsr;
|
|
16596
16644
|
this.panner;
|
|
16597
16645
|
this.gain;
|
|
16646
|
+
this.post;
|
|
16598
16647
|
this._fx;
|
|
16599
16648
|
|
|
16600
16649
|
// The source to be defined by inheriting class
|
|
@@ -16606,8 +16655,10 @@ class Instrument extends Sequencer {
|
|
|
16606
16655
|
channelStrip(){
|
|
16607
16656
|
// gain => output
|
|
16608
16657
|
this.gain = new Tone.Gain(0, "normalRange").toDestination();
|
|
16658
|
+
// postfx-gain => gain (for gain() function in instrument)
|
|
16659
|
+
this.post = new Tone.Gain(1, "gain").connect(this.gain);
|
|
16609
16660
|
// panning => gain
|
|
16610
|
-
this.panner = new Tone.Panner(0).connect(this.
|
|
16661
|
+
this.panner = new Tone.Panner(0).connect(this.post);
|
|
16611
16662
|
// adsr => panning
|
|
16612
16663
|
this.adsr = this.envelope(this.panner);
|
|
16613
16664
|
// return Node to connect source => adsr
|
|
@@ -16646,9 +16697,12 @@ class Instrument extends Sequencer {
|
|
|
16646
16697
|
this.panner.pan.setValueAtTime(p, time);
|
|
16647
16698
|
|
|
16648
16699
|
// ramp volume
|
|
16649
|
-
let g = Util.atodb(Util.getParam(this._gain[0], c) * 0.707);
|
|
16700
|
+
// let g = Util.atodb(Util.getParam(this._gain[0], c) * 0.707);
|
|
16701
|
+
let g = Util.getParam(this._gain[0], c) * 0.707;
|
|
16650
16702
|
let r = Util.msToS(Math.max(0, Util.getParam(this._gain[1], c)));
|
|
16651
|
-
this.source.volume.rampTo(g, r, time);
|
|
16703
|
+
// this.source.volume.rampTo(g, r, time);
|
|
16704
|
+
this.source.volume.setValueAtTime(1, time);
|
|
16705
|
+
this.post.gain.rampTo(g, r, time);
|
|
16652
16706
|
|
|
16653
16707
|
this.sourceEvent(c, e, time);
|
|
16654
16708
|
|
|
@@ -16729,6 +16783,9 @@ class Instrument extends Sequencer {
|
|
|
16729
16783
|
this.gain.disconnect();
|
|
16730
16784
|
this.gain.dispose();
|
|
16731
16785
|
|
|
16786
|
+
this.post.disconnect();
|
|
16787
|
+
this.post.dispose();
|
|
16788
|
+
|
|
16732
16789
|
this.panner.disconnect();
|
|
16733
16790
|
this.panner.dispose();
|
|
16734
16791
|
|
|
@@ -16739,7 +16796,7 @@ class Instrument extends Sequencer {
|
|
|
16739
16796
|
this.source?.stop();
|
|
16740
16797
|
this.source?.disconnect();
|
|
16741
16798
|
this.source?.dispose();
|
|
16742
|
-
|
|
16799
|
+
|
|
16743
16800
|
// remove all fx
|
|
16744
16801
|
this._fx.map((f) => f.delete());
|
|
16745
16802
|
console.log('=> disposed Instrument() with FX:', this._fx);
|
|
@@ -16789,7 +16846,7 @@ class Instrument extends Sequencer {
|
|
|
16789
16846
|
add_fx(...fx){
|
|
16790
16847
|
// the effects chain for the sound
|
|
16791
16848
|
this._fx = [];
|
|
16792
|
-
|
|
16849
|
+
|
|
16793
16850
|
fx.forEach((f) => {
|
|
16794
16851
|
if (fxMap[f[0]]){
|
|
16795
16852
|
let tmpF = fxMap[f[0]](f.slice(1));
|
|
@@ -16811,19 +16868,20 @@ class Instrument extends Sequencer {
|
|
|
16811
16868
|
// allowing to chain multiple effects within one process
|
|
16812
16869
|
let pfx = this._ch[0];
|
|
16813
16870
|
this.panner.connect(pfx.send);
|
|
16814
|
-
for (let f=1; f<this._ch.length; f++){
|
|
16871
|
+
for (let f = 1; f < this._ch.length; f++){
|
|
16815
16872
|
if (pfx){
|
|
16816
16873
|
pfx.return.connect(this._ch[f].send);
|
|
16817
16874
|
}
|
|
16818
16875
|
pfx = this._ch[f];
|
|
16819
16876
|
}
|
|
16820
16877
|
// pfx.return.connect(Tone.Destination);
|
|
16821
|
-
pfx.return.connect(this.gain);
|
|
16878
|
+
// pfx.return.connect(this.gain);
|
|
16879
|
+
pfx.return.connect(this.post);
|
|
16822
16880
|
}
|
|
16823
16881
|
}
|
|
16824
16882
|
}
|
|
16825
16883
|
module.exports = Instrument;
|
|
16826
|
-
},{"./Effects.js":56,"./Sequencer.js":
|
|
16884
|
+
},{"./Effects.js":56,"./Sequencer.js":66,"./Util.js":67,"tone":44}],58:[function(require,module,exports){
|
|
16827
16885
|
const Tone = require('tone');
|
|
16828
16886
|
const Instrument = require('./Instrument.js');
|
|
16829
16887
|
const Util = require('./Util.js');
|
|
@@ -16875,7 +16933,7 @@ class MonoInput extends Instrument {
|
|
|
16875
16933
|
}
|
|
16876
16934
|
}
|
|
16877
16935
|
module.exports = MonoInput;
|
|
16878
|
-
},{"./Instrument.js":57,"./Util.js":
|
|
16936
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],59:[function(require,module,exports){
|
|
16879
16937
|
const Tone = require('tone');
|
|
16880
16938
|
const Util = require('./Util.js');
|
|
16881
16939
|
const Sequencer = require('./Sequencer.js');
|
|
@@ -17056,7 +17114,101 @@ class MonoMidi extends Sequencer {
|
|
|
17056
17114
|
}
|
|
17057
17115
|
}
|
|
17058
17116
|
module.exports = MonoMidi;
|
|
17059
|
-
},{"./Sequencer.js":
|
|
17117
|
+
},{"./Sequencer.js":66,"./Util.js":67,"tone":44,"webmidi":55}],60:[function(require,module,exports){
|
|
17118
|
+
const Tone = require('tone');
|
|
17119
|
+
const Instrument = require('./Instrument.js');
|
|
17120
|
+
const { toArray, getParam, clip, log } = require('./Util.js');
|
|
17121
|
+
|
|
17122
|
+
class MonoNoise extends Instrument {
|
|
17123
|
+
constructor(engine, t='white', canvas){
|
|
17124
|
+
// Inherit from Instrument
|
|
17125
|
+
super(engine, canvas);
|
|
17126
|
+
|
|
17127
|
+
// synth specific variables;
|
|
17128
|
+
this._type = toArray(t);
|
|
17129
|
+
this._typeMap = {
|
|
17130
|
+
'white' : 0,
|
|
17131
|
+
'pink' : 1,
|
|
17132
|
+
'brownian' : 2,
|
|
17133
|
+
'brown' : 2,
|
|
17134
|
+
'browny' : 2,
|
|
17135
|
+
'red' : 2,
|
|
17136
|
+
'lofi' : 3,
|
|
17137
|
+
'dust' : 4,
|
|
17138
|
+
'crackle' : 5
|
|
17139
|
+
}
|
|
17140
|
+
this._density = [ 0.25 ];
|
|
17141
|
+
this.started = false;
|
|
17142
|
+
this.createSource();
|
|
17143
|
+
|
|
17144
|
+
console.log('=> MonoNoise()', this);
|
|
17145
|
+
}
|
|
17146
|
+
|
|
17147
|
+
createSource(){
|
|
17148
|
+
// create a noise source from an audioWorkletNode, containing many
|
|
17149
|
+
// types of noises
|
|
17150
|
+
this.source = new Tone.ToneAudioNode();
|
|
17151
|
+
this.source.workletNode = Tone.getContext().createAudioWorkletNode('noise-processor');
|
|
17152
|
+
this.source.input = new Tone.Gain();
|
|
17153
|
+
this.source.output = new Tone.Gain(0, 'decibels');
|
|
17154
|
+
this.source.volume = this.source.output.gain;
|
|
17155
|
+
this.source.input.chain(this.source.workletNode, this.source.output);
|
|
17156
|
+
|
|
17157
|
+
this.source.connect(this.channelStrip());
|
|
17158
|
+
|
|
17159
|
+
// empty method to get rid of stop error
|
|
17160
|
+
this.source.stop = () => {};
|
|
17161
|
+
|
|
17162
|
+
// a pink noise source based on a buffer noise
|
|
17163
|
+
// to reduce complex calculation
|
|
17164
|
+
this.pink = new Tone.Noise('pink').connect(this.source);
|
|
17165
|
+
}
|
|
17166
|
+
|
|
17167
|
+
sourceEvent(c, e, time){
|
|
17168
|
+
// set noise type for the generator
|
|
17169
|
+
let t = getParam(this._type, c);
|
|
17170
|
+
if (Object.hasOwn(this._typeMap, t)){
|
|
17171
|
+
t = this._typeMap[t];
|
|
17172
|
+
} else {
|
|
17173
|
+
log(`${t} is not a valid noise type`);
|
|
17174
|
+
// default wave if wave does not exist
|
|
17175
|
+
t = 0;
|
|
17176
|
+
}
|
|
17177
|
+
let type = this.source.workletNode.parameters.get('type');
|
|
17178
|
+
type.setValueAtTime(t, time);
|
|
17179
|
+
|
|
17180
|
+
// set the density amount (only valid for brownian, lofi, dust, crackle)
|
|
17181
|
+
let d = clip(getParam(this._density, c), 0.01, 1);
|
|
17182
|
+
let density = this.source.workletNode.parameters.get('density');
|
|
17183
|
+
density.setValueAtTime(d, time);
|
|
17184
|
+
|
|
17185
|
+
// start the pink noise source also
|
|
17186
|
+
if (!this.started){
|
|
17187
|
+
this.pink.start(time);
|
|
17188
|
+
this.started = true;
|
|
17189
|
+
}
|
|
17190
|
+
}
|
|
17191
|
+
|
|
17192
|
+
density(d){
|
|
17193
|
+
this._density = toArray(d);
|
|
17194
|
+
}
|
|
17195
|
+
|
|
17196
|
+
delete(){
|
|
17197
|
+
// delete super class
|
|
17198
|
+
super.delete();
|
|
17199
|
+
|
|
17200
|
+
this.source.input.disconnect();
|
|
17201
|
+
this.source.input.dispose();
|
|
17202
|
+
this.source.output.disconnect();
|
|
17203
|
+
this.source.output.dispose();
|
|
17204
|
+
this.pink.disconnect();
|
|
17205
|
+
this.pink.dispose();
|
|
17206
|
+
|
|
17207
|
+
console.log('disposed MonoNoise()');
|
|
17208
|
+
}
|
|
17209
|
+
}
|
|
17210
|
+
module.exports = MonoNoise;
|
|
17211
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],61:[function(require,module,exports){
|
|
17060
17212
|
const Tone = require('tone');
|
|
17061
17213
|
const Util = require('./Util.js');
|
|
17062
17214
|
// const fxMap = require('./Effects.js');
|
|
@@ -17220,7 +17372,7 @@ class MonoSample extends Instrument {
|
|
|
17220
17372
|
}
|
|
17221
17373
|
}
|
|
17222
17374
|
module.exports = MonoSample;
|
|
17223
|
-
},{"./Instrument.js":57,"./Util.js":
|
|
17375
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],62:[function(require,module,exports){
|
|
17224
17376
|
const Tone = require('tone');
|
|
17225
17377
|
const Util = require('./Util.js');
|
|
17226
17378
|
// const fxMap = require('./Effects.js');
|
|
@@ -17340,7 +17492,7 @@ class MonoSynth extends Instrument {
|
|
|
17340
17492
|
}
|
|
17341
17493
|
}
|
|
17342
17494
|
module.exports = MonoSynth;
|
|
17343
|
-
},{"./Instrument":57,"./Util.js":
|
|
17495
|
+
},{"./Instrument":57,"./Util.js":67,"tone":44,"total-serialism":47}],63:[function(require,module,exports){
|
|
17344
17496
|
const Tone = require('tone');
|
|
17345
17497
|
const Util = require('./Util.js');
|
|
17346
17498
|
const Instrument = require('./Instrument.js');
|
|
@@ -17373,9 +17525,12 @@ class PolyInstrument extends Instrument {
|
|
|
17373
17525
|
channelStrip(){
|
|
17374
17526
|
// gain => output
|
|
17375
17527
|
this.gain = new Tone.Gain(0).toDestination();
|
|
17528
|
+
// postfx-gain => gain (for gain() function in instrument)
|
|
17529
|
+
this.post = new Tone.Gain(1, "gain").connect(this.gain);
|
|
17376
17530
|
// panning => gain
|
|
17377
|
-
this.panner = new Tone.Panner(0).connect(this.
|
|
17531
|
+
this.panner = new Tone.Panner(0).connect(this.post);
|
|
17378
17532
|
// adsr => panning
|
|
17533
|
+
// done through createVoices
|
|
17379
17534
|
}
|
|
17380
17535
|
|
|
17381
17536
|
createVoices(){
|
|
@@ -17524,7 +17679,7 @@ class PolyInstrument extends Instrument {
|
|
|
17524
17679
|
}
|
|
17525
17680
|
}
|
|
17526
17681
|
module.exports = PolyInstrument;
|
|
17527
|
-
},{"./Instrument.js":57,"./Util.js":
|
|
17682
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],64:[function(require,module,exports){
|
|
17528
17683
|
const Tone = require('tone');
|
|
17529
17684
|
const Util = require('./Util.js');
|
|
17530
17685
|
const PolyInstrument = require('./PolyInstrument.js');
|
|
@@ -17698,7 +17853,7 @@ class PolySample extends PolyInstrument {
|
|
|
17698
17853
|
}
|
|
17699
17854
|
}
|
|
17700
17855
|
module.exports = PolySample;
|
|
17701
|
-
},{"./PolyInstrument.js":
|
|
17856
|
+
},{"./PolyInstrument.js":63,"./Util.js":67,"tone":44}],65:[function(require,module,exports){
|
|
17702
17857
|
const Tone = require('tone');
|
|
17703
17858
|
const Util = require('./Util.js');
|
|
17704
17859
|
const PolyInstrument = require('./PolyInstrument');
|
|
@@ -17804,7 +17959,7 @@ class PolySynth extends PolyInstrument {
|
|
|
17804
17959
|
}
|
|
17805
17960
|
}
|
|
17806
17961
|
module.exports = PolySynth;
|
|
17807
|
-
},{"./PolyInstrument":
|
|
17962
|
+
},{"./PolyInstrument":63,"./Util.js":67,"tone":44}],66:[function(require,module,exports){
|
|
17808
17963
|
const Tone = require('tone');
|
|
17809
17964
|
const Util = require('./Util.js');
|
|
17810
17965
|
// const WebMidi = require("webmidi");
|
|
@@ -18078,7 +18233,7 @@ class Sequencer {
|
|
|
18078
18233
|
}
|
|
18079
18234
|
}
|
|
18080
18235
|
module.exports = Sequencer;
|
|
18081
|
-
},{"./Util.js":
|
|
18236
|
+
},{"./Util.js":67,"tone":44}],67:[function(require,module,exports){
|
|
18082
18237
|
const Tone = require('tone');
|
|
18083
18238
|
const { noteToMidi, toScale, mtof } = require('total-serialism').Translate;
|
|
18084
18239
|
|
|
@@ -18310,7 +18465,7 @@ function log(msg){
|
|
|
18310
18465
|
}
|
|
18311
18466
|
|
|
18312
18467
|
module.exports = { mapDefaults, atTime, atodb, dbtoa, clip, remap,assureNum, lookup, randLookup, isRandom, getParam, toArray, msToS, formatRatio, divToS, divToF, toMidi, mtof, noteToMidi, noteToFreq, assureWave, log }
|
|
18313
|
-
},{"tone":44,"total-serialism":47}],
|
|
18468
|
+
},{"tone":44,"total-serialism":47}],68:[function(require,module,exports){
|
|
18314
18469
|
module.exports={
|
|
18315
18470
|
"uptempo" : 10,
|
|
18316
18471
|
"downtempo" : 10,
|
|
@@ -18331,9 +18486,9 @@ module.exports={
|
|
|
18331
18486
|
"dnb" : 170,
|
|
18332
18487
|
"neurofunk" : 180
|
|
18333
18488
|
}
|
|
18334
|
-
},{}],
|
|
18489
|
+
},{}],69:[function(require,module,exports){
|
|
18335
18490
|
|
|
18336
|
-
const Tone = require('tone');
|
|
18491
|
+
// const Tone = require('tone');
|
|
18337
18492
|
const Mercury = require('mercury-lang');
|
|
18338
18493
|
const TL = require('total-serialism').Translate;
|
|
18339
18494
|
const { normalize, multiply } = require('total-serialism').Utility;
|
|
@@ -18341,13 +18496,13 @@ const { normalize, multiply } = require('total-serialism').Utility;
|
|
|
18341
18496
|
const MonoSample = require('./core/MonoSample.js');
|
|
18342
18497
|
const MonoMidi = require('./core/MonoMidi.js');
|
|
18343
18498
|
const MonoSynth = require('./core/MonoSynth.js');
|
|
18499
|
+
const MonoNoise = require('./core/MonoNoise.js');
|
|
18344
18500
|
const MonoInput = require('./core/MonoInput.js');
|
|
18345
18501
|
const PolySynth = require('./core/PolySynth.js');
|
|
18346
18502
|
const PolySample = require('./core/PolySample.js');
|
|
18347
18503
|
const Tempos = require('./data/genre-tempos.json');
|
|
18348
18504
|
const Util = require('./core/Util.js');
|
|
18349
18505
|
const { divToS } = require('./core/Util.js');
|
|
18350
|
-
const { count } = require('total-serialism/src/gen-basic.js');
|
|
18351
18506
|
|
|
18352
18507
|
class MercuryInterpreter {
|
|
18353
18508
|
constructor({ hydra, p5canvas } = {}){
|
|
@@ -18577,6 +18732,11 @@ class MercuryInterpreter {
|
|
|
18577
18732
|
objectMap.applyFunctions(obj.functions, inst, obj.type);
|
|
18578
18733
|
return inst;
|
|
18579
18734
|
},
|
|
18735
|
+
'noise' : (obj) => {
|
|
18736
|
+
let inst = new MonoNoise(this, obj.type, this.canvas);
|
|
18737
|
+
objectMap.applyFunctions(obj.functions, inst, obj.type);
|
|
18738
|
+
return inst;
|
|
18739
|
+
},
|
|
18580
18740
|
'polySynth' : (obj) => {
|
|
18581
18741
|
let inst = new PolySynth(this, obj.type, this.canvas);
|
|
18582
18742
|
objectMap.applyFunctions(obj.functions, inst, obj.type);
|
|
@@ -18690,7 +18850,7 @@ class MercuryInterpreter {
|
|
|
18690
18850
|
}
|
|
18691
18851
|
}
|
|
18692
18852
|
module.exports = { MercuryInterpreter }
|
|
18693
|
-
},{"./core/MonoInput.js":58,"./core/MonoMidi.js":59,"./core/
|
|
18853
|
+
},{"./core/MonoInput.js":58,"./core/MonoMidi.js":59,"./core/MonoNoise.js":60,"./core/MonoSample.js":61,"./core/MonoSynth.js":62,"./core/PolySample.js":64,"./core/PolySynth.js":65,"./core/Util.js":67,"./data/genre-tempos.json":68,"mercury-lang":27,"total-serialism":47}],70:[function(require,module,exports){
|
|
18694
18854
|
|
|
18695
18855
|
console.log(`
|
|
18696
18856
|
Mercury Engine by Timo Hoogland (c) 2018-2025
|
|
@@ -18709,7 +18869,7 @@ const { WebMidi } = require("webmidi");
|
|
|
18709
18869
|
// load extra AudioWorkletProcessors from file
|
|
18710
18870
|
// transformed to inline with browserify brfs
|
|
18711
18871
|
|
|
18712
|
-
const fxExtensions = "\n// A white noise generator at -6dBFS to test AudioWorkletProcessor\n//\n// class NoiseProcessor extends AudioWorkletProcessor {\n// \tprocess(inputs, outputs, parameters){\n// \t\tconst output = outputs[0];\n\n// \t\toutput.forEach((channel) => {\n// \t\t\tfor (let i=0; i<channel.length; i++) {\n// \t\t\t\tchannel[i] = Math.random() - 0.5;\n// \t\t\t}\n// \t\t});\n// \t\treturn true;\n// \t}\n// }\n// registerProcessor('noise-processor', NoiseProcessor);\n\n// A Downsampling Chiptune effect. Downsamples the signal by a specified amount\n// Resulting in a lower samplerate, making it sound more like 8bit/chiptune\n// Programmed with a custom AudioWorkletProcessor, see effects/Processors.js\n//\nclass DownSampleProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [{\n\t\t\tname: 'down',\n\t\t\tdefaultValue: 8,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 2048\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t\t// the frame counter\n\t\tthis.count = 0;\n\t\t// sample and hold variable array\n\t\tthis.sah = [];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\t// if there is anything to process\n\t\tif (input.length > 0){\n\t\t\t// for the length of the sample array (generally 128)\n\t\t\tfor (let i=0; i<input[0].length; i++){\n\t\t\t\tconst d = (parameters.down.length > 1) ? 
parameters.down[i] : parameters.down[0];\n\t\t\t\t// for every channel\n\t\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\t\t// if counter equals 0, sample and hold\n\t\t\t\t\tif (this.count % d === 0){\n\t\t\t\t\t\tthis.sah[channel] = input[channel][i];\n\t\t\t\t\t}\n\t\t\t\t\t// output the currently held sample\n\t\t\t\t\toutput[channel][i] = this.sah[channel];\n\t\t\t\t}\n\t\t\t\t// increment sample counter\n\t\t\t\tthis.count++;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('downsampler-processor', DownSampleProcessor);\n\n// A distortion algorithm using the tanh (hyperbolic-tangent) as a \n// waveshaping technique. Some mapping to apply a more equal loudness \n// distortion is applied on the overdrive parameter\n//\nclass TanhDistortionProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}]\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? 
parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// simple waveshaping with tanh\n\t\t\t\t\toutput[channel][i] = Math.tanh(input[channel][i] * a) * m;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('tanh-distortion-processor', TanhDistortionProcessor);\n\n// A distortion/compression effect of an incoming signal\n// Based on an algorithm by Peter McCulloch\n// \nclass SquashProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 1024\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\t\t\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\t// (s * a) / ((s * a)^2 * 0.28 + 1) / √a\n\t\t\t\t\t// drive amount, minimum of 1\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\t// makeup gain\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? 
parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// set the waveshaper effect\n\t\t\t\t\tconst s = input[channel][i];\n\t\t\t\t\tconst x = s * a * 1.412;\n\t\t\t\t\toutput[channel][i] = (x / (x * x * 0.28 + 1.0)) * m * 0.708;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('squash-processor', SquashProcessor);\n\n// Dattorro Reverberator\n// Thanks to port by khoin, taken from:\n// https://github.com/khoin/DattorroReverbNode\n// based on the paper from Jon Dattorro:\n// https://ccrma.stanford.edu/~dattorro/EffectDesignPart1.pdf\n// with small modifications to work in Mercury\n//\n// In jurisdictions that recognize copyright laws, this software is to\n// be released into the public domain.\n\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.\n// THE AUTHOR(S) SHALL NOT BE LIABLE FOR ANYTHING, ARISING FROM, OR IN\n// CONNECTION WITH THE SOFTWARE OR THE DISTRIBUTION OF THE SOFTWARE.\n// \nclass DattorroReverb extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [\n\t\t\t[\"preDelay\", 0, 0, sampleRate - 1, \"k-rate\"],\n\t\t\t// [\"bandwidth\", 0.9999, 0, 1, \"k-rate\"],\t\n\t\t\t[\"inputDiffusion1\", 0.75, 0, 1, \"k-rate\"],\n\t\t\t[\"inputDiffusion2\", 0.625, 0, 1, \"k-rate\"],\n\t\t\t[\"decay\", 0.5, 0, 1, \"k-rate\"],\n\t\t\t[\"decayDiffusion1\", 0.7, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"decayDiffusion2\", 0.5, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"damping\", 0.005, 0, 1, \"k-rate\"],\n\t\t\t[\"excursionRate\", 0.5, 0, 2, \"k-rate\"],\n\t\t\t[\"excursionDepth\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t[\"wet\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t// [\"dry\", 0.7, 0, 2, \"k-rate\"]\n\t\t].map(x => new Object({\n\t\t\tname: x[0],\n\t\t\tdefaultValue: x[1],\n\t\t\tminValue: x[2],\n\t\t\tmaxValue: x[3],\n\t\t\tautomationRate: x[4]\n\t\t}));\n\t}\n\n\tconstructor(options) {\n\t\tsuper(options);\n\n\t\tthis._Delays = [];\n\t\t// Pre-delay is always one-second long, rounded to the nearest 
128-chunk\n\t\tthis._pDLength = sampleRate + (128 - sampleRate % 128);\n\t\tthis._preDelay = new Float32Array(this._pDLength);\n\t\tthis._pDWrite = 0;\n\t\tthis._lp1 = 0.0;\n\t\tthis._lp2 = 0.0;\n\t\tthis._lp3 = 0.0;\n\t\tthis._excPhase = 0.0;\n\n\t\t[\n\t\t\t0.004771345, 0.003595309, 0.012734787, 0.009307483, // pre-tank\n\t\t\t0.022579886, 0.149625349, 0.060481839, 0.1249958, // left-loop\n\t\t\t0.030509727, 0.141695508, 0.089244313, 0.106280031 // right-loop\n\t\t].forEach(x => this.makeDelay(x));\n\n\t\tthis._taps = Int16Array.from([\n\t\t\t0.008937872, 0.099929438, 0.064278754, 0.067067639, \n\t\t\t0.066866033, 0.006283391, 0.035818689, // left-output\n\t\t\t0.011861161, 0.121870905, 0.041262054, 0.08981553, \n\t\t\t0.070931756, 0.011256342, 0.004065724 // right-output\n\t\t], x => Math.round(x * sampleRate));\n\t}\n\n\tmakeDelay(length) {\n\t\t// len, array, write, read, mask\n\t\tlet len = Math.round(length * sampleRate);\n\t\tlet nextPow2 = 2 ** Math.ceil(Math.log2((len)));\n\t\tthis._Delays.push([\n\t\t\tnew Float32Array(nextPow2), len - 1, 0 | 0, nextPow2 - 1\n\t\t]);\n\t}\n\n\twriteDelay(index, data) {\n\t\treturn this._Delays[index][0][this._Delays[index][1]] = data;\n\t}\n\n\treadDelay(index) {\n\t\treturn this._Delays[index][0][this._Delays[index][2]];\n\t}\n\n\treadDelayAt(index, i) {\n\t\tlet d = this._Delays[index];\n\t\treturn d[0][(d[2] + i) & d[3]];\n\t}\n\n\t// cubic interpolation\n\t// O. 
Niemitalo: \n\t// https://www.musicdsp.org/en/latest/Other/49-cubic-interpollation.html\n\treadDelayCAt(index, i) {\n\t\tlet d = this._Delays[index],\n\t\t\tfrac = i - ~~i,\n\t\t\tint = ~~i + d[2] - 1,\n\t\t\tmask = d[3];\n\n\t\tlet x0 = d[0][int++ & mask],\n\t\t\tx1 = d[0][int++ & mask],\n\t\t\tx2 = d[0][int++ & mask],\n\t\t\tx3 = d[0][int & mask];\n\n\t\tlet a = (3 * (x1 - x2) - x0 + x3) / 2,\n\t\t\tb = 2 * x2 + x0 - (5 * x1 + x3) / 2,\n\t\t\tc = (x2 - x0) / 2;\n\n\t\treturn (((a * frac) + b) * frac + c) * frac + x1;\n\t}\n\n\t// First input will be downmixed to mono if number of channels is not 2\n\t// Outputs Stereo.\n\tprocess(inputs, outputs, parameters) {\n\t\tconst pd = ~~parameters.preDelay[0],\n\t\t\t// bw = parameters.bandwidth[0], // replaced by using damping\n\t\t\tfi = parameters.inputDiffusion1[0],\n\t\t\tsi = parameters.inputDiffusion2[0],\n\t\t\tdc = parameters.decay[0],\n\t\t\tft = parameters.decayDiffusion1[0],\n\t\t\tst = parameters.decayDiffusion2[0],\n\t\t\tdp = 1 - parameters.damping[0],\n\t\t\tex = parameters.excursionRate[0] / sampleRate,\n\t\t\ted = parameters.excursionDepth[0] * sampleRate / 1000,\n\t\t\twe = parameters.wet[0]; //* 0.6, // lo & ro both mult. 
by 0.6 anyways\n\t\t\t// dr = parameters.dry[0];\n\n\t\t// write to predelay and dry output\n\t\tif (inputs[0].length == 2) {\n\t\t\tfor (let i = 127; i >= 0; i--) {\n\t\t\t\tthis._preDelay[this._pDWrite + i] = (inputs[0][0][i] + inputs[0][1][i]) * 0.5;\n\n\t\t\t\t// removed the dry parameter, this is handled in the Tone Node\n\t\t\t\t// outputs[0][0][i] = inputs[0][0][i] * dr;\n\t\t\t\t// outputs[0][1][i] = inputs[0][1][i] * dr;\n\t\t\t}\n\t\t} else if (inputs[0].length > 0) {\n\t\t\tthis._preDelay.set(\n\t\t\t\tinputs[0][0],\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t\t// for (let i = 127; i >= 0; i--)\n\t\t\t// \toutputs[0][0][i] = outputs[0][1][i] = inputs[0][0][i] * dr;\n\t\t} else {\n\t\t\tthis._preDelay.set(\n\t\t\t\tnew Float32Array(128),\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t}\n\n\t\tlet i = 0 | 0;\n\t\twhile (i < 128) {\n\t\t\tlet lo = 0.0,\n\t\t\t\tro = 0.0;\n\n\t\t\t// input damping (formerly known as bandwidth bw, now uses dp)\n\t\t\tthis._lp1 += dp * (this._preDelay[(this._pDLength + this._pDWrite - pd + i) % this._pDLength] - this._lp1);\n\n\t\t\t// pre-tank\n\t\t\tlet pre = this.writeDelay(0, this._lp1 - fi * this.readDelay(0));\n\t\t\tpre = this.writeDelay(1, fi * (pre - this.readDelay(1)) + this.readDelay(0));\n\t\t\tpre = this.writeDelay(2, fi * pre + this.readDelay(1) - si * this.readDelay(2));\n\t\t\tpre = this.writeDelay(3, si * (pre - this.readDelay(3)) + this.readDelay(2));\n\n\t\t\tlet split = si * pre + this.readDelay(3);\n\n\t\t\t// excursions\n\t\t\t// could be optimized?\n\t\t\tlet exc = ed * (1 + Math.cos(this._excPhase * 6.2800));\n\t\t\tlet exc2 = ed * (1 + Math.sin(this._excPhase * 6.2847));\n\n\t\t\t// left loop\n\t\t\t// tank diffuse 1\n\t\t\tlet temp = this.writeDelay(4, split + dc * this.readDelay(11) + ft * this.readDelayCAt(4, exc));\n\t\t\t// long delay 1\n\t\t\tthis.writeDelay(5, this.readDelayCAt(4, exc) - ft * temp);\n\t\t\t// damp 1\n\t\t\tthis._lp2 += dp * (this.readDelay(5) - this._lp2);\n\t\t\ttemp = this.writeDelay(6, dc * 
this._lp2 - st * this.readDelay(6)); // tank diffuse 2\n\t\t\t// long delay 2\n\t\t\tthis.writeDelay(7, this.readDelay(6) + st * temp);\n\n\t\t\t// right loop \n\t\t\t// tank diffuse 3\n\t\t\ttemp = this.writeDelay(8, split + dc * this.readDelay(7) + ft * this.readDelayCAt(8, exc2));\n\t\t\t// long delay 3\n\t\t\tthis.writeDelay(9, this.readDelayCAt(8, exc2) - ft * temp);\n\t\t\t// damp 2\n\t\t\tthis._lp3 += dp * (this.readDelay(9) - this._lp3);\n\t\t\t// tank diffuse 4\n\t\t\ttemp = this.writeDelay(10, dc * this._lp3 - st * this.readDelay(10));\n\t\t\t// long delay 4\n\t\t\tthis.writeDelay(11, this.readDelay(10) + st * temp);\n\n\t\t\tlo = this.readDelayAt(9, this._taps[0]) +\n\t\t\t\tthis.readDelayAt(9, this._taps[1]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[2]) +\n\t\t\t\tthis.readDelayAt(11, this._taps[3]) -\n\t\t\t\tthis.readDelayAt(5, this._taps[4]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[5]) -\n\t\t\t\tthis.readDelayAt(7, this._taps[6]);\n\n\t\t\tro = this.readDelayAt(5, this._taps[7]) +\n\t\t\t\tthis.readDelayAt(5, this._taps[8]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[9]) +\n\t\t\t\tthis.readDelayAt(7, this._taps[10]) -\n\t\t\t\tthis.readDelayAt(9, this._taps[11]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[12]) -\n\t\t\t\tthis.readDelayAt(11, this._taps[13]);\n\n\t\t\toutputs[0][0][i] += lo * we;\n\t\t\toutputs[0][1][i] += ro * we;\n\n\t\t\tthis._excPhase += ex;\n\n\t\t\ti++;\n\n\t\t\tfor (let j = 0, d = this._Delays[0]; j < this._Delays.length; d = this._Delays[++j]) {\n\t\t\t\td[1] = (d[1] + 1) & d[3];\n\t\t\t\td[2] = (d[2] + 1) & d[3];\n\t\t\t}\n\t\t}\n\n\t\t// Update preDelay index\n\t\tthis._pDWrite = (this._pDWrite + 128) % this._pDLength;\n\n\t\treturn true;\n\t}\n}\nregisterProcessor('dattorro-reverb', DattorroReverb);\n";
|
|
18872
|
+
// Source code for Mercury's custom AudioWorkletProcessors, shipped as a
// single JavaScript string so it can be handed to the AudioContext as a
// module via a Blob URL (see the addAudioWorkletModule call below).
// Registers: 'noise-processor', 'downsampler-processor',
// 'tanh-distortion-processor', 'arctan-distortion-processor',
// 'fuzz-processor', 'squash-processor' and 'dattorro-reverb'
// (public-domain Dattorro reverb port by khoin, after Jon Dattorro's
// effect-design paper).
// NOTE(review): this string is executable worklet code, not display text —
// do not edit it in this generated bundle; change the worklet sources in
// the package's source tree instead.
const fxExtensions = "\n// Various noise type processors for the MonoNoise source\n// Type 2 is Pink noise, used from Tone.Noise('pink') instead of calc\n//\nclass NoiseProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'type',\n\t\t\tdefaultValue: 5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 5\n\t\t},{\n\t\t\tname: 'density',\n\t\t\tdefaultValue: 0.125,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 1\n\t\t}];\n\t}\n\t\n\tconstructor(){\n\t\tsuper();\n\t\t// sample previous value\n\t\tthis.prev = 0;\n\t\t// latch to a sample \n\t\tthis.latch = 0;\n\t\t// phasor ramp\n\t\tthis.phasor = 0;\n\t\tthis.delta = 0;\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\t// input is not used because this is a source\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\t\tconst HALF_PI = Math.PI/2;\n\n\t\t// for one output channel generate some noise\t\n\t\tif (input.length > 0){\n\t\t\tfor (let i = 0; i < input[0].length; i++){\n\t\t\t\tconst t = (parameters.type.length > 1) ? parameters.type[i] : parameters.type[0];\n\t\t\t\tconst d = (parameters.density.length > 1) ? 
parameters.density[i] : parameters.density[0];\n\t\t\t\n\t\t\t\t// some bipolar white noise -1 to 1\n\t\t\t\tconst biNoise = Math.random() * 2 - 1;\n\t\t\t\t// empty output\n\t\t\t\tlet out = 0;\n\n\t\t\t\t// White noise, Use for every other choice\n\t\t\t\tif (t < 1){\n\t\t\t\t\tout = biNoise * 0.707;\n\t\t\t\t}\n\t\t\t\t// Pink noise, use Tone.Noise('pink') object for simplicity\n\t\t\t\telse if (t < 2){\n\t\t\t\t\tout = input[0][i] * 1.413;\n\t\t\t\t}\n\t\t\t\t// Brownian noise\n\t\t\t\t// calculate a random next value in \"step size\" and add to \n\t\t\t\t// the previous noise signal value creating a \"drunk walk\" \n\t\t\t\t// or brownian motion\n\t\t\t\telse if (t < 3){\t\t\n\t\t\t\t\tthis.prev += biNoise * d*d;\n\t\t\t\t\tthis.prev = Math.asin(Math.sin(this.prev * HALF_PI)) / HALF_PI;\n\t\t\t\t\tout = this.prev * 0.707;\n\t\t\t\t}\n\t\t\t\t// Lo-Fi (sampled) noise\n\t\t\t\t// creates random values at a specified frequency and slowly \n\t\t\t\t// ramps to that new value\n\t\t\t\telse if (t < 4){\n\t\t\t\t\t// create a ramp from 0-1 at specific frequency/density\n\t\t\t\t\tthis.phasor = (this.phasor + d * d * 0.5) % 1;\n\t\t\t\t\t// calculate the delta\n\t\t\t\t\tlet dlt = this.phasor - this.delta;\n\t\t\t\t\tthis.delta = this.phasor;\n\t\t\t\t\t// when ramp resets, latch a new noise value\n\t\t\t\t\tif (dlt < 0){\n\t\t\t\t\t\tthis.prev = this.latch;\n\t\t\t\t\t\tthis.latch = biNoise;\n\t\t\t\t\t}\n\t\t\t\t\t// linear interpolation from previous to next point\n\t\t\t\t\tout = this.prev + this.phasor * (this.latch - this.prev);\n\t\t\t\t\tout *= 0.707;\n\t\t\t\t}\n\t\t\t\t// Dust noise\n\t\t\t\t// randomly generate an impulse/click of value 1 depending \n\t\t\t\t// on the density, average amount of impulses per second\n\t\t\t\telse if (t < 5){\n\t\t\t\t\tout = Math.random() > (1 - d*d*d * 0.5);\n\t\t\t\t}\n\t\t\t\t// Crackle noise\n\t\t\t\t// Pink generator with \"wave-loss\" leaving gaps\n\t\t\t\telse {\n\t\t\t\t\tlet delta = input[0][i] - 
this.prev;\n\t\t\t\t\tthis.prev = input[0][i];\n\t\t\t\t\tif (delta > 0){\n\t\t\t\t\t\tthis.latch = Math.random();\n\t\t\t\t\t}\n\t\t\t\t\tout = (this.latch < (1 - d*d*d)) ? 0 : input[0][i] * 1.413;\n\t\t\t\t}\n\t\t\t\t// send to output whichever noise type was chosen\n\t\t\t\toutput[0][i] = out;\n\t\t\t}\n\t\t}\t\t\n\t\treturn true;\n\t}\n}\nregisterProcessor('noise-processor', NoiseProcessor);\n\n// A Downsampling Chiptune effect. Downsamples the signal by a specified amount\n// Resulting in a lower samplerate, making it sound more like 8bit/chiptune\n// Programmed with a custom AudioWorkletProcessor, see effects/Processors.js\n//\nclass DownSampleProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [{\n\t\t\tname: 'down',\n\t\t\tdefaultValue: 8,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 2048\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t\t// the frame counter\n\t\tthis.count = 0;\n\t\t// sample and hold variable array\n\t\tthis.sah = [];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\t// if there is anything to process\n\t\tif (input.length > 0){\n\t\t\t// for the length of the sample array (generally 128)\n\t\t\tfor (let i=0; i<input[0].length; i++){\n\t\t\t\tconst d = (parameters.down.length > 1) ? parameters.down[i] : parameters.down[0];\n\t\t\t\t// for every channel\n\t\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\t\t// if counter equals 0, sample and hold\n\t\t\t\t\tif (this.count % d === 0){\n\t\t\t\t\t\tthis.sah[channel] = input[channel][i];\n\t\t\t\t\t}\n\t\t\t\t\t// output the currently held sample\n\t\t\t\t\toutput[channel][i] = this.sah[channel];\n\t\t\t\t}\n\t\t\t\t// increment sample counter\n\t\t\t\tthis.count++;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('downsampler-processor', DownSampleProcessor);\n\n// A distortion algorithm using the tanh (hyperbolic-tangent) as a \n// waveshaping technique. 
Some mapping to apply a more equal loudness \n// distortion is applied on the overdrive parameter\n//\nclass TanhDistortionProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}]\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// simple waveshaping with tanh\n\t\t\t\t\toutput[channel][i] = Math.tanh(input[channel][i] * a) * m;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('tanh-distortion-processor', TanhDistortionProcessor);\n\n// A distortion algorithm using the arctan function as a \n// waveshaping technique. 
Some mapping to apply a more equal loudness \n// distortion is applied on the overdrive parameter\n//\nclass ArctanDistortionProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 5,\n\t\t\tminValue: 1\n\t\t}]\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\n\t\t// quarter pi constant and inverse\n\t\tthis.Q_PI = 0.7853981633974483; // 0.25 * Math.PI;\n\t\tthis.INVQ_PI = 1.2732395447351628; //1.0 / this.Q_PI;\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tconst gain = parameters.amount[0];\n\t\tconst makeup = Math.min(1, Math.max(0, 1 - ((Math.atan(gain) - this.Q_PI) * this.INVQ_PI * 0.823)));\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; channel++){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\toutput[channel][i] = Math.atan(input[channel][i] * gain) * makeup;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('arctan-distortion-processor', ArctanDistortionProcessor);\n\n\n// A fuzz distortion effect in modelled after the Big Muff Pi pedal \n// by Electro Harmonics. 
Using three stages of distortion: \n// 1 soft-clipping stage, 2 half-wave rectifier, 3 hard-clipping stage\n// Based on: https://github.com/hazza-music/EHX-Big-Muff-Pi-Emulation/blob/main/Technical%20Essay.pdf\n// \nclass FuzzProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 5,\n\t\t\tminValue: 1\n\t\t}]\n\t}\n\n\tconstructor(){ \n\t\tsuper(); \n\t\t// history for onepole filter for dcblocking\n\t\tthis.history = [0, 0];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tconst gain = parameters.amount[0];\n\t\tconst makeup = Math.max((1 - Math.pow((gain-1) / 63, 0.13)) * 0.395 + 0.605, 0.605);\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel = 0; channel < input.length; channel++){\n\t\t\t\tfor (let i = 0; i < input[channel].length; i++){\n\t\t\t\t\t// soft-clipping\n\t\t\t\t\tconst sc = Math.atan(input[channel][i] * gain * 2) * 0.6;\n\t\t\t\t\t// half-wave rectification and add for \n\t\t\t\t\t// asymmetric distortion\n\t\t\t\t\tconst hw = ((sc > 0) ? 
sc : 0) + input[channel][i];\n\t\t\t\t\t// hard-clipping\n\t\t\t\t\tconst hc = Math.max(-0.707, Math.min(0.707, hw));\n\t\t\t\t\t// onepole lowpass filter for dc-block\n\t\t\t\t\tthis.history[channel] = (hc - this.history[channel]) * 0.0015 + this.history[channel];\n\t\t\t\t\t// dc-block and gain compensation and output\n\t\t\t\t\toutput[channel][i] = (hc - this.history[channel]) * makeup;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('fuzz-processor', FuzzProcessor);\n\n// A distortion/compression effect of an incoming signal\n// Based on an algorithm by Peter McCulloch\n// \nclass SquashProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 1024\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\t\t\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\t// (s * a) / ((s * a)^2 * 0.28 + 1) / √a\n\t\t\t\t\t// drive amount, minimum of 1\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\t// makeup gain\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? 
parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// set the waveshaper effect\n\t\t\t\t\tconst s = input[channel][i];\n\t\t\t\t\tconst x = s * a * 1.412;\n\t\t\t\t\toutput[channel][i] = (x / (x * x * 0.28 + 1.0)) * m * 0.708;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('squash-processor', SquashProcessor);\n\n// Dattorro Reverberator\n// Thanks to port by khoin, taken from:\n// https://github.com/khoin/DattorroReverbNode\n// based on the paper from Jon Dattorro:\n// https://ccrma.stanford.edu/~dattorro/EffectDesignPart1.pdf\n// with small modifications to work in Mercury\n//\n// In jurisdictions that recognize copyright laws, this software is to\n// be released into the public domain.\n\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.\n// THE AUTHOR(S) SHALL NOT BE LIABLE FOR ANYTHING, ARISING FROM, OR IN\n// CONNECTION WITH THE SOFTWARE OR THE DISTRIBUTION OF THE SOFTWARE.\n// \nclass DattorroReverb extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [\n\t\t\t[\"preDelay\", 0, 0, sampleRate - 1, \"k-rate\"],\n\t\t\t// [\"bandwidth\", 0.9999, 0, 1, \"k-rate\"],\t\n\t\t\t[\"inputDiffusion1\", 0.75, 0, 1, \"k-rate\"],\n\t\t\t[\"inputDiffusion2\", 0.625, 0, 1, \"k-rate\"],\n\t\t\t[\"decay\", 0.5, 0, 1, \"k-rate\"],\n\t\t\t[\"decayDiffusion1\", 0.7, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"decayDiffusion2\", 0.5, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"damping\", 0.005, 0, 1, \"k-rate\"],\n\t\t\t[\"excursionRate\", 0.5, 0, 2, \"k-rate\"],\n\t\t\t[\"excursionDepth\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t[\"wet\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t// [\"dry\", 0.7, 0, 2, \"k-rate\"]\n\t\t].map(x => new Object({\n\t\t\tname: x[0],\n\t\t\tdefaultValue: x[1],\n\t\t\tminValue: x[2],\n\t\t\tmaxValue: x[3],\n\t\t\tautomationRate: x[4]\n\t\t}));\n\t}\n\n\tconstructor(options) {\n\t\tsuper(options);\n\n\t\tthis._Delays = [];\n\t\t// Pre-delay is always one-second long, rounded to the nearest 
128-chunk\n\t\tthis._pDLength = sampleRate + (128 - sampleRate % 128);\n\t\tthis._preDelay = new Float32Array(this._pDLength);\n\t\tthis._pDWrite = 0;\n\t\tthis._lp1 = 0.0;\n\t\tthis._lp2 = 0.0;\n\t\tthis._lp3 = 0.0;\n\t\tthis._excPhase = 0.0;\n\n\t\t[\n\t\t\t0.004771345, 0.003595309, 0.012734787, 0.009307483, // pre-tank\n\t\t\t0.022579886, 0.149625349, 0.060481839, 0.1249958, // left-loop\n\t\t\t0.030509727, 0.141695508, 0.089244313, 0.106280031 // right-loop\n\t\t].forEach(x => this.makeDelay(x));\n\n\t\tthis._taps = Int16Array.from([\n\t\t\t0.008937872, 0.099929438, 0.064278754, 0.067067639, \n\t\t\t0.066866033, 0.006283391, 0.035818689, // left-output\n\t\t\t0.011861161, 0.121870905, 0.041262054, 0.08981553, \n\t\t\t0.070931756, 0.011256342, 0.004065724 // right-output\n\t\t], x => Math.round(x * sampleRate));\n\t}\n\n\tmakeDelay(length) {\n\t\t// len, array, write, read, mask\n\t\tlet len = Math.round(length * sampleRate);\n\t\tlet nextPow2 = 2 ** Math.ceil(Math.log2((len)));\n\t\tthis._Delays.push([\n\t\t\tnew Float32Array(nextPow2), len - 1, 0 | 0, nextPow2 - 1\n\t\t]);\n\t}\n\n\twriteDelay(index, data) {\n\t\treturn this._Delays[index][0][this._Delays[index][1]] = data;\n\t}\n\n\treadDelay(index) {\n\t\treturn this._Delays[index][0][this._Delays[index][2]];\n\t}\n\n\treadDelayAt(index, i) {\n\t\tlet d = this._Delays[index];\n\t\treturn d[0][(d[2] + i) & d[3]];\n\t}\n\n\t// cubic interpolation\n\t// O. 
Niemitalo: \n\t// https://www.musicdsp.org/en/latest/Other/49-cubic-interpollation.html\n\treadDelayCAt(index, i) {\n\t\tlet d = this._Delays[index],\n\t\t\tfrac = i - ~~i,\n\t\t\tint = ~~i + d[2] - 1,\n\t\t\tmask = d[3];\n\n\t\tlet x0 = d[0][int++ & mask],\n\t\t\tx1 = d[0][int++ & mask],\n\t\t\tx2 = d[0][int++ & mask],\n\t\t\tx3 = d[0][int & mask];\n\n\t\tlet a = (3 * (x1 - x2) - x0 + x3) / 2,\n\t\t\tb = 2 * x2 + x0 - (5 * x1 + x3) / 2,\n\t\t\tc = (x2 - x0) / 2;\n\n\t\treturn (((a * frac) + b) * frac + c) * frac + x1;\n\t}\n\n\t// First input will be downmixed to mono if number of channels is not 2\n\t// Outputs Stereo.\n\tprocess(inputs, outputs, parameters) {\n\t\tconst pd = ~~parameters.preDelay[0],\n\t\t\t// bw = parameters.bandwidth[0], // replaced by using damping\n\t\t\tfi = parameters.inputDiffusion1[0],\n\t\t\tsi = parameters.inputDiffusion2[0],\n\t\t\tdc = parameters.decay[0],\n\t\t\tft = parameters.decayDiffusion1[0],\n\t\t\tst = parameters.decayDiffusion2[0],\n\t\t\tdp = 1 - parameters.damping[0],\n\t\t\tex = parameters.excursionRate[0] / sampleRate,\n\t\t\ted = parameters.excursionDepth[0] * sampleRate / 1000,\n\t\t\twe = parameters.wet[0]; //* 0.6, // lo & ro both mult. 
by 0.6 anyways\n\t\t\t// dr = parameters.dry[0];\n\n\t\t// write to predelay and dry output\n\t\tif (inputs[0].length == 2) {\n\t\t\tfor (let i = 127; i >= 0; i--) {\n\t\t\t\tthis._preDelay[this._pDWrite + i] = (inputs[0][0][i] + inputs[0][1][i]) * 0.5;\n\n\t\t\t\t// removed the dry parameter, this is handled in the Tone Node\n\t\t\t\t// outputs[0][0][i] = inputs[0][0][i] * dr;\n\t\t\t\t// outputs[0][1][i] = inputs[0][1][i] * dr;\n\t\t\t}\n\t\t} else if (inputs[0].length > 0) {\n\t\t\tthis._preDelay.set(\n\t\t\t\tinputs[0][0],\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t\t// for (let i = 127; i >= 0; i--)\n\t\t\t// \toutputs[0][0][i] = outputs[0][1][i] = inputs[0][0][i] * dr;\n\t\t} else {\n\t\t\tthis._preDelay.set(\n\t\t\t\tnew Float32Array(128),\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t}\n\n\t\tlet i = 0 | 0;\n\t\twhile (i < 128) {\n\t\t\tlet lo = 0.0,\n\t\t\t\tro = 0.0;\n\n\t\t\t// input damping (formerly known as bandwidth bw, now uses dp)\n\t\t\tthis._lp1 += dp * (this._preDelay[(this._pDLength + this._pDWrite - pd + i) % this._pDLength] - this._lp1);\n\n\t\t\t// pre-tank\n\t\t\tlet pre = this.writeDelay(0, this._lp1 - fi * this.readDelay(0));\n\t\t\tpre = this.writeDelay(1, fi * (pre - this.readDelay(1)) + this.readDelay(0));\n\t\t\tpre = this.writeDelay(2, fi * pre + this.readDelay(1) - si * this.readDelay(2));\n\t\t\tpre = this.writeDelay(3, si * (pre - this.readDelay(3)) + this.readDelay(2));\n\n\t\t\tlet split = si * pre + this.readDelay(3);\n\n\t\t\t// excursions\n\t\t\t// could be optimized?\n\t\t\tlet exc = ed * (1 + Math.cos(this._excPhase * 6.2800));\n\t\t\tlet exc2 = ed * (1 + Math.sin(this._excPhase * 6.2847));\n\n\t\t\t// left loop\n\t\t\t// tank diffuse 1\n\t\t\tlet temp = this.writeDelay(4, split + dc * this.readDelay(11) + ft * this.readDelayCAt(4, exc));\n\t\t\t// long delay 1\n\t\t\tthis.writeDelay(5, this.readDelayCAt(4, exc) - ft * temp);\n\t\t\t// damp 1\n\t\t\tthis._lp2 += dp * (this.readDelay(5) - this._lp2);\n\t\t\ttemp = this.writeDelay(6, dc * 
this._lp2 - st * this.readDelay(6)); // tank diffuse 2\n\t\t\t// long delay 2\n\t\t\tthis.writeDelay(7, this.readDelay(6) + st * temp);\n\n\t\t\t// right loop \n\t\t\t// tank diffuse 3\n\t\t\ttemp = this.writeDelay(8, split + dc * this.readDelay(7) + ft * this.readDelayCAt(8, exc2));\n\t\t\t// long delay 3\n\t\t\tthis.writeDelay(9, this.readDelayCAt(8, exc2) - ft * temp);\n\t\t\t// damp 2\n\t\t\tthis._lp3 += dp * (this.readDelay(9) - this._lp3);\n\t\t\t// tank diffuse 4\n\t\t\ttemp = this.writeDelay(10, dc * this._lp3 - st * this.readDelay(10));\n\t\t\t// long delay 4\n\t\t\tthis.writeDelay(11, this.readDelay(10) + st * temp);\n\n\t\t\tlo = this.readDelayAt(9, this._taps[0]) +\n\t\t\t\tthis.readDelayAt(9, this._taps[1]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[2]) +\n\t\t\t\tthis.readDelayAt(11, this._taps[3]) -\n\t\t\t\tthis.readDelayAt(5, this._taps[4]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[5]) -\n\t\t\t\tthis.readDelayAt(7, this._taps[6]);\n\n\t\t\tro = this.readDelayAt(5, this._taps[7]) +\n\t\t\t\tthis.readDelayAt(5, this._taps[8]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[9]) +\n\t\t\t\tthis.readDelayAt(7, this._taps[10]) -\n\t\t\t\tthis.readDelayAt(9, this._taps[11]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[12]) -\n\t\t\t\tthis.readDelayAt(11, this._taps[13]);\n\n\t\t\toutputs[0][0][i] += lo * we;\n\t\t\toutputs[0][1][i] += ro * we;\n\n\t\t\tthis._excPhase += ex;\n\n\t\t\ti++;\n\n\t\t\tfor (let j = 0, d = this._Delays[0]; j < this._Delays.length; d = this._Delays[++j]) {\n\t\t\t\td[1] = (d[1] + 1) & d[3];\n\t\t\t\td[2] = (d[2] + 1) & d[3];\n\t\t\t}\n\t\t}\n\n\t\t// Update preDelay index\n\t\tthis._pDWrite = (this._pDWrite + 128) % this._pDLength;\n\n\t\treturn true;\n\t}\n}\nregisterProcessor('dattorro-reverb', DattorroReverb);\n";
|
|
18713
18873
|
// Register Mercury's custom AudioWorkletProcessors (noise, downsampler,
// distortions, fuzz, squash, dattorro-reverb) with Tone's AudioContext.
// The processor source ships as a JS string (fxExtensions) and is loaded
// as a worklet module through a temporary Blob URL.
// Fixes: the Blob object URL is now revoked once loading settles (it was
// previously leaked), and a failed module load is reported instead of
// leaving the returned promise floating with a silent rejection.
const fxWorkletUrl = URL.createObjectURL(new Blob([ fxExtensions ], { type: 'text/javascript' }));
Tone.getContext().addAudioWorkletModule(fxWorkletUrl)
	.catch((err) => console.error('Error loading Mercury AudioWorklet processors:', err))
	.finally(() => URL.revokeObjectURL(fxWorkletUrl));
|
|
18714
18874
|
|
|
18715
18875
|
// Mercury main class controls Tone and loads samples
|
|
@@ -19050,5 +19210,5 @@ class Mercury extends MercuryInterpreter {
|
|
|
19050
19210
|
// }
|
|
19051
19211
|
}
|
|
19052
19212
|
module.exports = { Mercury };
|
|
19053
|
-
},{"./core/Util.js":
|
|
19213
|
+
},{"./core/Util.js":67,"./interpreter":69,"tone":44,"webmidi":55}]},{},[70])(70)
|
|
19054
19214
|
});
|