mercury-engine 1.5.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/mercury.js +270 -48
- package/dist/mercury.min.es5.js +2 -2
- package/dist/mercury.min.js +1 -1
- package/examples/interface/index.html +8 -3
- package/package.json +1 -1
package/dist/mercury.js
CHANGED
|
@@ -15457,17 +15457,20 @@ const TL = require('total-serialism').Translate;
|
|
|
15457
15457
|
// all the available effects
|
|
15458
15458
|
const fxMap = {
|
|
15459
15459
|
'drive' : (params) => {
|
|
15460
|
-
return new
|
|
15460
|
+
return new Overdrive(params);
|
|
15461
15461
|
},
|
|
15462
15462
|
'distort' : (params) => {
|
|
15463
|
-
return new
|
|
15463
|
+
return new Overdrive(params);
|
|
15464
15464
|
},
|
|
15465
15465
|
'overdrive' : (params) => {
|
|
15466
|
-
return new
|
|
15466
|
+
return new Overdrive(params);
|
|
15467
15467
|
},
|
|
15468
15468
|
'squash' : (params) => {
|
|
15469
15469
|
return new Squash(params);
|
|
15470
15470
|
},
|
|
15471
|
+
'fuzz' : (params) => {
|
|
15472
|
+
return new Fuzz(params);
|
|
15473
|
+
},
|
|
15471
15474
|
'compress' : (params) => {
|
|
15472
15475
|
return new Compressor(params);
|
|
15473
15476
|
},
|
|
@@ -15477,6 +15480,12 @@ const fxMap = {
|
|
|
15477
15480
|
'comp' : (params) => {
|
|
15478
15481
|
return new Compressor(params);
|
|
15479
15482
|
},
|
|
15483
|
+
'comb' : (params) => {
|
|
15484
|
+
return new CombFilter(params);
|
|
15485
|
+
},
|
|
15486
|
+
'karplus' : (params) => {
|
|
15487
|
+
return new CombFilter(params);
|
|
15488
|
+
},
|
|
15480
15489
|
'lfo' : (params) => {
|
|
15481
15490
|
return new LFO(params);
|
|
15482
15491
|
},
|
|
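For context, a compact sketch of how a name-to-constructor table like fxMap is consumed (the real consumer is the add_fx() change further down in this diff); makeEffect is a hypothetical helper, not part of the package:

// hypothetical helper illustrating the fxMap dispatch pattern;
// 'comb' and 'karplus' resolve to the same CombFilter class,
// just as 'drive'/'distort'/'overdrive' all construct an Overdrive
function makeEffect(name, params = []) {
	if (!fxMap[name]) {
		console.log(`${name} is not a valid effect`);
		return undefined;
	}
	return fxMap[name](params);
}

// e.g. makeEffect('karplus', params) and makeEffect('comb', params)
// both return a CombFilter instance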
@@ -15558,6 +15567,71 @@ const fxMap = {
|
|
|
15558
15567
|
}
|
|
15559
15568
|
module.exports = fxMap;
|
|
15560
15569
|
|
|
15570
|
+
// Dispose an array of nodes
|
|
15571
|
+
//
|
|
15572
|
+
function disposeNodes(nodes=[]) {
|
|
15573
|
+
nodes.forEach((n) => {
|
|
15574
|
+
n?.disconnect();
|
|
15575
|
+
n?.dispose();
|
|
15576
|
+
});
|
|
15577
|
+
}
|
|
15578
|
+
|
|
15579
|
+
// A Lowpass Feedback Comb Filter (LBCF) effect
|
|
15580
|
+
// Adds a short feedback delay to the sound based on a specific note
|
|
15581
|
+
// resulting in a tonal output, like the resonating sound of a string
|
|
15582
|
+
// sometimes also called Karplus-Strong String Synthesis.
|
|
15583
|
+
// Negative feedback is possible for generating odd harmonics
|
|
15584
|
+
//
|
|
15585
|
+
const CombFilter = function(_params) {
|
|
15586
|
+
// the default parameters
|
|
15587
|
+
_params = Util.mapDefaults(_params, [0, 0.8, 0.5, 0.5]);
|
|
15588
|
+
this._pitch = Util.toArray(_params[0]);
|
|
15589
|
+
this._fback = Util.toArray(_params[1]);
|
|
15590
|
+
this._damp = Util.toArray(_params[2]);
|
|
15591
|
+
this._wet = Util.toArray(_params[3]);
|
|
15592
|
+
|
|
15593
|
+
// ToneAudioNode has all the tone effect parameters
|
|
15594
|
+
this._fx = new Tone.ToneAudioNode();
|
|
15595
|
+
|
|
15596
|
+
// A gain node for connecting with input and output
|
|
15597
|
+
this._fx.input = new Tone.Gain(1);
|
|
15598
|
+
this._fx.output = new Tone.Gain(1);
|
|
15599
|
+
// the fx processor
|
|
15600
|
+
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('combfilter-processor');
|
|
15601
|
+
// connect input, fx and output
|
|
15602
|
+
this._fx.input.chain(this._fx.workletNode, this._fx.output);
|
|
15603
|
+
|
|
15604
|
+
this.set = (count, time, bpm) => {
|
|
15605
|
+
const pitch = Util.toMidi(Util.getParam(this._pitch, count));
|
|
15606
|
+
const _dt = 1000 / Util.mtof(pitch);
|
|
15607
|
+
|
|
15608
|
+
// some mapping for the feedback to make it logarithmic in length
|
|
15609
|
+
let _fb = Util.getParam(this._fback, count);
|
|
15610
|
+
let sign = _fb < 0 ? -1 : 1;
|
|
15611
|
+
_fb = Util.clip(Math.pow(Math.abs(_fb), 0.1) * sign, -0.999, 0.999);
|
|
15612
|
+
|
|
15613
|
+
const _dm = Util.clip(Util.getParam(this._damp, count));
|
|
15614
|
+
const _dw = Util.clip(Util.getParam(this._wet, count));
|
|
15615
|
+
|
|
15616
|
+
// get parameters from workletprocessor
|
|
15617
|
+
const dt = this._fx.workletNode.parameters.get('time');
|
|
15618
|
+
dt.setValueAtTime(_dt, time);
|
|
15619
|
+
const fb = this._fx.workletNode.parameters.get('feedback');
|
|
15620
|
+
fb.setValueAtTime(_fb, time);
|
|
15621
|
+
const dm = this._fx.workletNode.parameters.get('damping');
|
|
15622
|
+
dm.setValueAtTime(_dm, time);
|
|
15623
|
+
const dw = this._fx.workletNode.parameters.get('drywet');
|
|
15624
|
+
dw.setValueAtTime(_dw, time);
|
|
15625
|
+
}
|
|
15626
|
+
|
|
15627
|
+
this.chain = () => {
|
|
15628
|
+
return { 'send' : this._fx, 'return' : this._fx }
|
|
15629
|
+
}
|
|
15630
|
+
|
|
15631
|
+
this.delete = () => {
|
|
15632
|
+
disposeNodes([this._fx.input, this._fx.output, this._fx]);
|
|
15633
|
+
}
|
|
15634
|
+
}
|
|
15561
15635
|
|
|
15562
15636
|
// A formant/vowel filter. With this filter you can imitate the vowels of human
|
|
15563
15637
|
// speech.
|
|
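For readers skimming the diff: the CombFilter class above only schedules parameters; the DSP itself lives in the 'combfilter-processor' worklet included in the fxExtensions string near the end of this file. A minimal standalone sketch of the lowpass-feedback comb filter it implements (sampleRate and mtof are assumed here; the real code gets them from the worklet scope and Util):

// minimal LBCF sketch, per sample and mono, under the assumptions above
const sampleRate = 44100;
const mtof = (m) => 440 * Math.pow(2, (m - 69) / 12); // MIDI note -> frequency

function makeCombFilter(midiPitch, feedback = 0.8, damping = 0.5, drywet = 0.5) {
	// delay of one period of the target pitch (set() passes 1000 / mtof(pitch) ms)
	const delaySamples = Math.max(1, Math.round(sampleRate / mtof(midiPitch)));
	const buffer = new Float32Array(delaySamples);
	let write = 0;
	let lpf = 0; // onepole lowpass state used for damping

	return function process(input) {
		const delayed = buffer[write]; // sample written delaySamples ago
		// damp the delayed signal with a onepole lowpass
		lpf = delayed * (1 - damping) + lpf * damping;
		// feed back into the delayline; negative feedback gives odd harmonics
		buffer[write] = input + lpf * feedback;
		write = (write + 1) % delaySamples;
		// dry/wet mix
		return lpf * drywet + input * (1 - drywet);
	};
}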
@@ -15714,11 +15788,11 @@ const DownSampler = function(_params){
|
|
|
15714
15788
|
}
|
|
15715
15789
|
}
|
|
15716
15790
|
|
|
15717
|
-
//
|
|
15791
|
+
// An overdrive/saturation algorithm using the arctan function as a
|
|
15718
15792
|
// waveshaping technique. Some mapping to apply a more equal loudness
|
|
15719
|
-
//
|
|
15793
|
+
// is applied on the overdrive parameter when increasing the amount
|
|
15720
15794
|
//
|
|
15721
|
-
const
|
|
15795
|
+
const Overdrive = function(_params){
|
|
15722
15796
|
_params = Util.mapDefaults(_params, [ 2, 1 ]);
|
|
15723
15797
|
// apply the default values and convert to arrays where necessary
|
|
15724
15798
|
this._drive = Util.toArray(_params[0]);
|
|
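The Overdrive wrapper defined here drives the 'arctan-distortion-processor' worklet created in the next hunk (its source sits in the fxExtensions string near the end of this diff). A standalone sketch of the waveshaper that processor applies, including its drive-dependent makeup gain; the constants mirror the processor source:

// minimal sketch of arctan waveshaping with equal-loudness makeup gain
const Q_PI = Math.PI / 4;   // 0.7853981633974483
const INVQ_PI = 1 / Q_PI;   // 1.2732395447351628

function arctanDrive(sample, gain = 5) {
	// more drive -> less makeup, keeping perceived loudness roughly constant
	const makeup = Math.min(1, Math.max(0, 1 - (Math.atan(gain) - Q_PI) * INVQ_PI * 0.823));
	return Math.atan(sample * gain) * makeup;
}

// e.g. arctanDrive(0.5, 1) ≈ 0.46 while arctanDrive(0.5, 10) ≈ 0.39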
@@ -15737,7 +15811,7 @@ const TanhDistortion = function(_params){
|
|
|
15737
15811
|
this._fx.output = new Tone.Gain(1).connect(this._mixWet);
|
|
15738
15812
|
|
|
15739
15813
|
// the fx processor
|
|
15740
|
-
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('
|
|
15814
|
+
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('arctan-distortion-processor');
|
|
15741
15815
|
|
|
15742
15816
|
// connect input, fx, output and wetdry
|
|
15743
15817
|
this._fx.input.chain(this._fx.workletNode, this._fx.output);
|
|
@@ -15746,17 +15820,9 @@ const TanhDistortion = function(_params){
|
|
|
15746
15820
|
// drive amount, minimum drive of 1
|
|
15747
15821
|
const d = Util.assureNum(Math.max(0, Util.getParam(this._drive, c)) + 1);
|
|
15748
15822
|
|
|
15749
|
-
// preamp gain reduction for linear at drive = 1
|
|
15750
|
-
const p = 0.8;
|
|
15751
|
-
// makeup gain
|
|
15752
|
-
const m = 1.0 / (p * (d ** 1.1));
|
|
15753
|
-
|
|
15754
15823
|
// set the parameters in the workletNode
|
|
15755
15824
|
const amount = this._fx.workletNode.parameters.get('amount');
|
|
15756
|
-
amount.setValueAtTime(
|
|
15757
|
-
|
|
15758
|
-
const makeup = this._fx.workletNode.parameters.get('makeup');
|
|
15759
|
-
makeup.setValueAtTime(m, time);
|
|
15825
|
+
amount.setValueAtTime(d, time);
|
|
15760
15826
|
|
|
15761
15827
|
const wet = Util.clip(Util.getParam(this._wet, c), 0, 1);
|
|
15762
15828
|
this._mixWet.gain.setValueAtTime(wet);
|
|
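With the makeup gain now computed inside the worklet, set() only schedules the drive amount; the wet/dry balance stays a linear crossfade made from two Gain nodes feeding a Tone.Add (the full pattern is visible in the new Fuzz constructor below). A small sketch of that crossfade, assuming Tone.js is available:

// minimal wet/dry crossfade sketch using Tone.Gain + Tone.Add, as in these wrappers
const Tone = require('tone');

const mix = new Tone.Add();
const mixWet = new Tone.Gain(0).connect(mix.input);   // processed signal
const mixDry = new Tone.Gain(1).connect(mix.addend);  // untouched signal

function setWetDry(wet, time) {
	const w = Math.min(1, Math.max(0, wet));
	mixWet.gain.setValueAtTime(w, time);
	mixDry.gain.setValueAtTime(1 - w, time);
}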
@@ -15768,12 +15834,57 @@ const TanhDistortion = function(_params){
|
|
|
15768
15834
|
}
|
|
15769
15835
|
|
|
15770
15836
|
this.delete = function(){
|
|
15771
|
-
|
|
15837
|
+
disposeNodes([ this._fx, this._fx.input, this._fx.output, this._mix, this._mixDry, this._mixWet ]);
|
|
15838
|
+
}
|
|
15839
|
+
}
|
|
15772
15840
|
|
|
15773
|
-
|
|
15774
|
-
|
|
15775
|
-
|
|
15776
|
-
|
|
15841
|
+
// A fuzz distortion effect modelled after the Big Muff Pi pedal
|
|
15842
|
+
// by Electro-Harmonix. Using three stages of distortion:
|
|
15843
|
+
// 1 soft-clipping stage, 2 half-wave rectifier, 3 hard-clipping stage
|
|
15844
|
+
//
|
|
15845
|
+
const Fuzz = function(_params){
|
|
15846
|
+
_params = Util.mapDefaults(_params, [ 10, 1 ]);
|
|
15847
|
+
// apply the default values and convert to arrays where necessary
|
|
15848
|
+
this._drive = Util.toArray(_params[0]);
|
|
15849
|
+
this._wet = Util.toArray(_params[1]);
|
|
15850
|
+
|
|
15851
|
+
// The crossfader for wet-dry (originally implemented with CrossFade)
|
|
15852
|
+
// this._mix = new Tone.CrossFade();
|
|
15853
|
+
this._mix = new Tone.Add();
|
|
15854
|
+
this._mixWet = new Tone.Gain(0).connect(this._mix.input);
|
|
15855
|
+
this._mixDry = new Tone.Gain(1).connect(this._mix.addend);
|
|
15856
|
+
|
|
15857
|
+
// ToneAudioNode has all the tone effect parameters
|
|
15858
|
+
this._fx = new Tone.ToneAudioNode();
|
|
15859
|
+
// A gain node for connecting with input and output
|
|
15860
|
+
this._fx.input = new Tone.Gain(1).connect(this._mixDry);
|
|
15861
|
+
this._fx.output = new Tone.Gain(1).connect(this._mixWet);
|
|
15862
|
+
|
|
15863
|
+
// the fx processor
|
|
15864
|
+
this._fx.workletNode = Tone.getContext().createAudioWorkletNode('fuzz-processor');
|
|
15865
|
+
|
|
15866
|
+
// connect input, fx, output to wetdry
|
|
15867
|
+
this._fx.input.chain(this._fx.workletNode, this._fx.output);
|
|
15868
|
+
|
|
15869
|
+
this.set = function(c, time, bpm){
|
|
15870
|
+
// drive amount, minimum drive of 1
|
|
15871
|
+
const d = Util.assureNum(Math.max(1, Util.getParam(this._drive, c)) + 1);
|
|
15872
|
+
|
|
15873
|
+
// set the parameters in the workletNode
|
|
15874
|
+
const amount = this._fx.workletNode.parameters.get('amount');
|
|
15875
|
+
amount.setValueAtTime(d, time);
|
|
15876
|
+
|
|
15877
|
+
const wet = Util.clip(Util.getParam(this._wet, c), 0, 1);
|
|
15878
|
+
this._mixWet.gain.setValueAtTime(wet);
|
|
15879
|
+
this._mixDry.gain.setValueAtTime(1 - wet);
|
|
15880
|
+
}
|
|
15881
|
+
|
|
15882
|
+
this.chain = function(){
|
|
15883
|
+
return { 'send' : this._fx, 'return' : this._mix }
|
|
15884
|
+
}
|
|
15885
|
+
|
|
15886
|
+
this.delete = function(){
|
|
15887
|
+
disposeNodes([ this._fx, this._fx.input, this._fx.output, this._mix, this._mixDry, this._mixWet ]);
|
|
15777
15888
|
}
|
|
15778
15889
|
}
|
|
15779
15890
|
|
|
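The Fuzz wrapper above schedules the 'fuzz-processor' worklet (see the fxExtensions string near the end of this diff). A standalone per-sample sketch of its three distortion stages and the onepole DC blocker, following that processor's source:

// minimal sketch of the three-stage fuzz: soft clip, half-wave rectify, hard clip,
// then a onepole lowpass used as a DC blocker
function makeFuzz(gain = 5) {
	let history = 0; // onepole state tracking the DC offset
	// stronger drive gets less makeup gain, floored at 0.605
	const makeup = Math.max((1 - Math.pow((gain - 1) / 63, 0.13)) * 0.395 + 0.605, 0.605);

	return function process(x) {
		// 1: soft clipping with arctan
		const sc = Math.atan(x * gain * 2) * 0.6;
		// 2: half-wave rectify and add the input for asymmetric distortion
		const hw = (sc > 0 ? sc : 0) + x;
		// 3: hard clipping at ±0.707
		const hc = Math.max(-0.707, Math.min(0.707, hw));
		// subtracting the lowpassed signal removes the DC offset
		history = (hc - history) * 0.0015 + history;
		return (hc - history) * makeup;
	};
}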
@@ -15991,8 +16102,7 @@ const DattorroReverb = function(_params){
|
|
|
15991
16102
|
}
|
|
15992
16103
|
|
|
15993
16104
|
this.delete = () => {
|
|
15994
|
-
|
|
15995
|
-
nodes.forEach(n => { n.disconnect(); n.dispose() });
|
|
16105
|
+
disposeNodes([ this._fx, this._mix, this._mixDry, this._mixWet, this._fx.input, this._fx.output ]);
|
|
15996
16106
|
}
|
|
15997
16107
|
}
|
|
15998
16108
|
|
|
@@ -16572,7 +16682,7 @@ const Delay = function(_params){
|
|
|
16572
16682
|
// this._fx.dispose();
|
|
16573
16683
|
// }
|
|
16574
16684
|
// }
|
|
16575
|
-
},{"./Util.js":
|
|
16685
|
+
},{"./Util.js":67,"tone":44,"total-serialism":47}],57:[function(require,module,exports){
|
|
16576
16686
|
const Tone = require('tone');
|
|
16577
16687
|
const Util = require('./Util.js');
|
|
16578
16688
|
const fxMap = require('./Effects.js');
|
|
@@ -16595,6 +16705,7 @@ class Instrument extends Sequencer {
|
|
|
16595
16705
|
this.adsr;
|
|
16596
16706
|
this.panner;
|
|
16597
16707
|
this.gain;
|
|
16708
|
+
this.post;
|
|
16598
16709
|
this._fx;
|
|
16599
16710
|
|
|
16600
16711
|
// The source to be defined by inheriting class
|
|
@@ -16606,8 +16717,10 @@ class Instrument extends Sequencer {
|
|
|
16606
16717
|
channelStrip(){
|
|
16607
16718
|
// gain => output
|
|
16608
16719
|
this.gain = new Tone.Gain(0, "normalRange").toDestination();
|
|
16720
|
+
// postfx-gain => gain (for gain() function in instrument)
|
|
16721
|
+
this.post = new Tone.Gain(1, "gain").connect(this.gain);
|
|
16609
16722
|
// panning => gain
|
|
16610
|
-
this.panner = new Tone.Panner(0).connect(this.
|
|
16723
|
+
this.panner = new Tone.Panner(0).connect(this.post);
|
|
16611
16724
|
// adsr => panning
|
|
16612
16725
|
this.adsr = this.envelope(this.panner);
|
|
16613
16726
|
// return Node to connect source => adsr
|
|
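The new post node sits between the panner (or, once effects are added, the tail of the effects chain; see add_fx() below) and the per-instrument output gain, so gain() now scales the signal after the effects. A rough Tone.js sketch of the resulting channel strip, using the same node names as this diff:

// rough sketch of the channel strip built by channelStrip(), assuming Tone.js
const Tone = require('tone');

const gain = new Tone.Gain(0, 'normalRange').toDestination(); // per-instrument fader
const post = new Tone.Gain(1, 'gain').connect(gain);          // post-fx gain, driven by gain()
const panner = new Tone.Panner(0).connect(post);              // stereo placement
// the ADSR envelope feeds the panner, and the instrument source feeds the envelope;
// effects added via add_fx() are inserted between the panner and post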
@@ -16646,9 +16759,12 @@ class Instrument extends Sequencer {
|
|
|
16646
16759
|
this.panner.pan.setValueAtTime(p, time);
|
|
16647
16760
|
|
|
16648
16761
|
// ramp volume
|
|
16649
|
-
let g = Util.atodb(Util.getParam(this._gain[0], c) * 0.707);
|
|
16762
|
+
// let g = Util.atodb(Util.getParam(this._gain[0], c) * 0.707);
|
|
16763
|
+
let g = Util.getParam(this._gain[0], c) * 0.707;
|
|
16650
16764
|
let r = Util.msToS(Math.max(0, Util.getParam(this._gain[1], c)));
|
|
16651
|
-
this.source.volume.rampTo(g, r, time);
|
|
16765
|
+
// this.source.volume.rampTo(g, r, time);
|
|
16766
|
+
this.source.volume.setValueAtTime(1, time);
|
|
16767
|
+
this.post.gain.rampTo(g, r, time);
|
|
16652
16768
|
|
|
16653
16769
|
this.sourceEvent(c, e, time);
|
|
16654
16770
|
|
|
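Since the ramp now targets the linear post gain instead of the source's decibel volume, the Util.atodb conversion is no longer needed at this point. For reference, the standard amplitude/decibel conversions (written out here as a sketch, not copied from Util.js):

// standard conversions between linear amplitude and decibels
const atodb = (a) => 20 * Math.log10(a);       // amplitude -> dB
const dbtoa = (db) => Math.pow(10, db / 20);   // dB -> amplitude

// e.g. atodb(0.707) ≈ -3 dB, dbtoa(-6) ≈ 0.5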
@@ -16729,6 +16845,9 @@ class Instrument extends Sequencer {
|
|
|
16729
16845
|
this.gain.disconnect();
|
|
16730
16846
|
this.gain.dispose();
|
|
16731
16847
|
|
|
16848
|
+
this.post.disconnect();
|
|
16849
|
+
this.post.dispose();
|
|
16850
|
+
|
|
16732
16851
|
this.panner.disconnect();
|
|
16733
16852
|
this.panner.dispose();
|
|
16734
16853
|
|
|
@@ -16739,7 +16858,7 @@ class Instrument extends Sequencer {
|
|
|
16739
16858
|
this.source?.stop();
|
|
16740
16859
|
this.source?.disconnect();
|
|
16741
16860
|
this.source?.dispose();
|
|
16742
|
-
|
|
16861
|
+
|
|
16743
16862
|
// remove all fx
|
|
16744
16863
|
this._fx.map((f) => f.delete());
|
|
16745
16864
|
console.log('=> disposed Instrument() with FX:', this._fx);
|
|
@@ -16789,7 +16908,7 @@ class Instrument extends Sequencer {
|
|
|
16789
16908
|
add_fx(...fx){
|
|
16790
16909
|
// the effects chain for the sound
|
|
16791
16910
|
this._fx = [];
|
|
16792
|
-
|
|
16911
|
+
|
|
16793
16912
|
fx.forEach((f) => {
|
|
16794
16913
|
if (fxMap[f[0]]){
|
|
16795
16914
|
let tmpF = fxMap[f[0]](f.slice(1));
|
|
@@ -16811,19 +16930,20 @@ class Instrument extends Sequencer {
|
|
|
16811
16930
|
// allowing to chain multiple effects within one process
|
|
16812
16931
|
let pfx = this._ch[0];
|
|
16813
16932
|
this.panner.connect(pfx.send);
|
|
16814
|
-
for (let f=1; f<this._ch.length; f++){
|
|
16933
|
+
for (let f = 1; f < this._ch.length; f++){
|
|
16815
16934
|
if (pfx){
|
|
16816
16935
|
pfx.return.connect(this._ch[f].send);
|
|
16817
16936
|
}
|
|
16818
16937
|
pfx = this._ch[f];
|
|
16819
16938
|
}
|
|
16820
16939
|
// pfx.return.connect(Tone.Destination);
|
|
16821
|
-
pfx.return.connect(this.gain);
|
|
16940
|
+
// pfx.return.connect(this.gain);
|
|
16941
|
+
pfx.return.connect(this.post);
|
|
16822
16942
|
}
|
|
16823
16943
|
}
|
|
16824
16944
|
}
|
|
16825
16945
|
module.exports = Instrument;
|
|
16826
|
-
},{"./Effects.js":56,"./Sequencer.js":
|
|
16946
|
+
},{"./Effects.js":56,"./Sequencer.js":66,"./Util.js":67,"tone":44}],58:[function(require,module,exports){
|
|
16827
16947
|
const Tone = require('tone');
|
|
16828
16948
|
const Instrument = require('./Instrument.js');
|
|
16829
16949
|
const Util = require('./Util.js');
|
|
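As a condensed view of the add_fx() change above: every effect exposes a { send, return } pair via chain(), the pairs are daisy-chained, and the last return now feeds the post-fx gain rather than the output gain. Sketch (chain stands in for this._ch; panner and post as in this diff):

// condensed sketch of the effect chaining in add_fx()
let pfx = chain[0];                 // chain = the list of { send, return } pairs
panner.connect(pfx.send);           // panner feeds the first effect
for (let f = 1; f < chain.length; f++) {
	pfx.return.connect(chain[f].send);  // previous return -> next send
	pfx = chain[f];
}
pfx.return.connect(post);           // last effect returns into the post-fx gain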
@@ -16875,7 +16995,7 @@ class MonoInput extends Instrument {
|
|
|
16875
16995
|
}
|
|
16876
16996
|
}
|
|
16877
16997
|
module.exports = MonoInput;
|
|
16878
|
-
},{"./Instrument.js":57,"./Util.js":
|
|
16998
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],59:[function(require,module,exports){
|
|
16879
16999
|
const Tone = require('tone');
|
|
16880
17000
|
const Util = require('./Util.js');
|
|
16881
17001
|
const Sequencer = require('./Sequencer.js');
|
|
@@ -17056,7 +17176,101 @@ class MonoMidi extends Sequencer {
|
|
|
17056
17176
|
}
|
|
17057
17177
|
}
|
|
17058
17178
|
module.exports = MonoMidi;
|
|
17059
|
-
},{"./Sequencer.js":
|
|
17179
|
+
},{"./Sequencer.js":66,"./Util.js":67,"tone":44,"webmidi":55}],60:[function(require,module,exports){
|
|
17180
|
+
const Tone = require('tone');
|
|
17181
|
+
const Instrument = require('./Instrument.js');
|
|
17182
|
+
const { toArray, getParam, clip, log } = require('./Util.js');
|
|
17183
|
+
|
|
17184
|
+
class MonoNoise extends Instrument {
|
|
17185
|
+
constructor(engine, t='white', canvas){
|
|
17186
|
+
// Inherit from Instrument
|
|
17187
|
+
super(engine, canvas);
|
|
17188
|
+
|
|
17189
|
+
// synth specific variables;
|
|
17190
|
+
this._type = toArray(t);
|
|
17191
|
+
this._typeMap = {
|
|
17192
|
+
'white' : 0,
|
|
17193
|
+
'pink' : 1,
|
|
17194
|
+
'brownian' : 2,
|
|
17195
|
+
'brown' : 2,
|
|
17196
|
+
'browny' : 2,
|
|
17197
|
+
'red' : 2,
|
|
17198
|
+
'lofi' : 3,
|
|
17199
|
+
'dust' : 4,
|
|
17200
|
+
'crackle' : 5
|
|
17201
|
+
}
|
|
17202
|
+
this._density = [ 0.25 ];
|
|
17203
|
+
this.started = false;
|
|
17204
|
+
this.createSource();
|
|
17205
|
+
|
|
17206
|
+
console.log('=> MonoNoise()', this);
|
|
17207
|
+
}
|
|
17208
|
+
|
|
17209
|
+
createSource(){
|
|
17210
|
+
// create a noise source from an audioWorkletNode, containing many
|
|
17211
|
+
// types of noises
|
|
17212
|
+
this.source = new Tone.ToneAudioNode();
|
|
17213
|
+
this.source.workletNode = Tone.getContext().createAudioWorkletNode('noise-processor');
|
|
17214
|
+
this.source.input = new Tone.Gain();
|
|
17215
|
+
this.source.output = new Tone.Gain(0, 'decibels');
|
|
17216
|
+
this.source.volume = this.source.output.gain;
|
|
17217
|
+
this.source.input.chain(this.source.workletNode, this.source.output);
|
|
17218
|
+
|
|
17219
|
+
this.source.connect(this.channelStrip());
|
|
17220
|
+
|
|
17221
|
+
// empty method to get rid of stop error
|
|
17222
|
+
this.source.stop = () => {};
|
|
17223
|
+
|
|
17224
|
+
// a pink noise source based on a noise buffer
|
|
17225
|
+
// to reduce complex calculations
|
|
17226
|
+
this.pink = new Tone.Noise('pink').connect(this.source);
|
|
17227
|
+
}
|
|
17228
|
+
|
|
17229
|
+
sourceEvent(c, e, time){
|
|
17230
|
+
// set noise type for the generator
|
|
17231
|
+
let t = getParam(this._type, c);
|
|
17232
|
+
if (Object.hasOwn(this._typeMap, t)){
|
|
17233
|
+
t = this._typeMap[t];
|
|
17234
|
+
} else {
|
|
17235
|
+
log(`${t} is not a valid noise type`);
|
|
17236
|
+
// default noise type if the type does not exist
|
|
17237
|
+
t = 0;
|
|
17238
|
+
}
|
|
17239
|
+
let type = this.source.workletNode.parameters.get('type');
|
|
17240
|
+
type.setValueAtTime(t, time);
|
|
17241
|
+
|
|
17242
|
+
// set the density amount (only valid for brownian, lofi, dust, crackle)
|
|
17243
|
+
let d = clip(getParam(this._density, c), 0.01, 1);
|
|
17244
|
+
let density = this.source.workletNode.parameters.get('density');
|
|
17245
|
+
density.setValueAtTime(d, time);
|
|
17246
|
+
|
|
17247
|
+
// start the pink noise source also
|
|
17248
|
+
if (!this.started){
|
|
17249
|
+
this.pink.start(time);
|
|
17250
|
+
this.started = true;
|
|
17251
|
+
}
|
|
17252
|
+
}
|
|
17253
|
+
|
|
17254
|
+
density(d){
|
|
17255
|
+
this._density = toArray(d);
|
|
17256
|
+
}
|
|
17257
|
+
|
|
17258
|
+
delete(){
|
|
17259
|
+
// delete super class
|
|
17260
|
+
super.delete();
|
|
17261
|
+
|
|
17262
|
+
this.source.input.disconnect();
|
|
17263
|
+
this.source.input.dispose();
|
|
17264
|
+
this.source.output.disconnect();
|
|
17265
|
+
this.source.output.dispose();
|
|
17266
|
+
this.pink.disconnect();
|
|
17267
|
+
this.pink.dispose();
|
|
17268
|
+
|
|
17269
|
+
console.log('disposed MonoNoise()');
|
|
17270
|
+
}
|
|
17271
|
+
}
|
|
17272
|
+
module.exports = MonoNoise;
|
|
17273
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],61:[function(require,module,exports){
|
|
17060
17274
|
const Tone = require('tone');
|
|
17061
17275
|
const Util = require('./Util.js');
|
|
17062
17276
|
// const fxMap = require('./Effects.js');
|
|
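MonoNoise above sends its signal through the 'noise-processor' worklet registered in the fxExtensions string near the end of this diff; only pink noise comes from a buffered Tone.Noise to avoid the heavier calculation. A standalone sketch of three of the generators that processor implements, with density d in the range 0..1:

// minimal per-sample sketches of three noise types from the noise-processor worklet
const HALF_PI = Math.PI / 2;
let prev = 0; // brownian walk state

// white noise: bipolar noise scaled to roughly -3 dB
const white = () => (Math.random() * 2 - 1) * 0.707;

// brownian noise: random steps of size d*d folded back into -1..1 (a "drunk walk")
function brownian(d) {
	prev += (Math.random() * 2 - 1) * d * d;
	prev = Math.asin(Math.sin(prev * HALF_PI)) / HALF_PI;
	return prev * 0.707;
}

// dust: sparse unit impulses, denser for higher d
const dust = (d) => (Math.random() > 1 - d * d * d * 0.5 ? 1 : 0);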
@@ -17220,7 +17434,7 @@ class MonoSample extends Instrument {
|
|
|
17220
17434
|
}
|
|
17221
17435
|
}
|
|
17222
17436
|
module.exports = MonoSample;
|
|
17223
|
-
},{"./Instrument.js":57,"./Util.js":
|
|
17437
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],62:[function(require,module,exports){
|
|
17224
17438
|
const Tone = require('tone');
|
|
17225
17439
|
const Util = require('./Util.js');
|
|
17226
17440
|
// const fxMap = require('./Effects.js');
|
|
@@ -17340,7 +17554,7 @@ class MonoSynth extends Instrument {
|
|
|
17340
17554
|
}
|
|
17341
17555
|
}
|
|
17342
17556
|
module.exports = MonoSynth;
|
|
17343
|
-
},{"./Instrument":57,"./Util.js":
|
|
17557
|
+
},{"./Instrument":57,"./Util.js":67,"tone":44,"total-serialism":47}],63:[function(require,module,exports){
|
|
17344
17558
|
const Tone = require('tone');
|
|
17345
17559
|
const Util = require('./Util.js');
|
|
17346
17560
|
const Instrument = require('./Instrument.js');
|
|
@@ -17373,9 +17587,12 @@ class PolyInstrument extends Instrument {
|
|
|
17373
17587
|
channelStrip(){
|
|
17374
17588
|
// gain => output
|
|
17375
17589
|
this.gain = new Tone.Gain(0).toDestination();
|
|
17590
|
+
// postfx-gain => gain (for gain() function in instrument)
|
|
17591
|
+
this.post = new Tone.Gain(1, "gain").connect(this.gain);
|
|
17376
17592
|
// panning => gain
|
|
17377
|
-
this.panner = new Tone.Panner(0).connect(this.
|
|
17593
|
+
this.panner = new Tone.Panner(0).connect(this.post);
|
|
17378
17594
|
// adsr => panning
|
|
17595
|
+
// done through createVoices
|
|
17379
17596
|
}
|
|
17380
17597
|
|
|
17381
17598
|
createVoices(){
|
|
@@ -17524,7 +17741,7 @@ class PolyInstrument extends Instrument {
|
|
|
17524
17741
|
}
|
|
17525
17742
|
}
|
|
17526
17743
|
module.exports = PolyInstrument;
|
|
17527
|
-
},{"./Instrument.js":57,"./Util.js":
|
|
17744
|
+
},{"./Instrument.js":57,"./Util.js":67,"tone":44}],64:[function(require,module,exports){
|
|
17528
17745
|
const Tone = require('tone');
|
|
17529
17746
|
const Util = require('./Util.js');
|
|
17530
17747
|
const PolyInstrument = require('./PolyInstrument.js');
|
|
@@ -17698,7 +17915,7 @@ class PolySample extends PolyInstrument {
|
|
|
17698
17915
|
}
|
|
17699
17916
|
}
|
|
17700
17917
|
module.exports = PolySample;
|
|
17701
|
-
},{"./PolyInstrument.js":
|
|
17918
|
+
},{"./PolyInstrument.js":63,"./Util.js":67,"tone":44}],65:[function(require,module,exports){
|
|
17702
17919
|
const Tone = require('tone');
|
|
17703
17920
|
const Util = require('./Util.js');
|
|
17704
17921
|
const PolyInstrument = require('./PolyInstrument');
|
|
@@ -17804,7 +18021,7 @@ class PolySynth extends PolyInstrument {
|
|
|
17804
18021
|
}
|
|
17805
18022
|
}
|
|
17806
18023
|
module.exports = PolySynth;
|
|
17807
|
-
},{"./PolyInstrument":
|
|
18024
|
+
},{"./PolyInstrument":63,"./Util.js":67,"tone":44}],66:[function(require,module,exports){
|
|
17808
18025
|
const Tone = require('tone');
|
|
17809
18026
|
const Util = require('./Util.js');
|
|
17810
18027
|
// const WebMidi = require("webmidi");
|
|
@@ -18078,7 +18295,7 @@ class Sequencer {
|
|
|
18078
18295
|
}
|
|
18079
18296
|
}
|
|
18080
18297
|
module.exports = Sequencer;
|
|
18081
|
-
},{"./Util.js":
|
|
18298
|
+
},{"./Util.js":67,"tone":44}],67:[function(require,module,exports){
|
|
18082
18299
|
const Tone = require('tone');
|
|
18083
18300
|
const { noteToMidi, toScale, mtof } = require('total-serialism').Translate;
|
|
18084
18301
|
|
|
@@ -18310,7 +18527,7 @@ function log(msg){
|
|
|
18310
18527
|
}
|
|
18311
18528
|
|
|
18312
18529
|
module.exports = { mapDefaults, atTime, atodb, dbtoa, clip, remap,assureNum, lookup, randLookup, isRandom, getParam, toArray, msToS, formatRatio, divToS, divToF, toMidi, mtof, noteToMidi, noteToFreq, assureWave, log }
|
|
18313
|
-
},{"tone":44,"total-serialism":47}],
|
|
18530
|
+
},{"tone":44,"total-serialism":47}],68:[function(require,module,exports){
|
|
18314
18531
|
module.exports={
|
|
18315
18532
|
"uptempo" : 10,
|
|
18316
18533
|
"downtempo" : 10,
|
|
@@ -18331,9 +18548,9 @@ module.exports={
|
|
|
18331
18548
|
"dnb" : 170,
|
|
18332
18549
|
"neurofunk" : 180
|
|
18333
18550
|
}
|
|
18334
|
-
},{}],
|
|
18551
|
+
},{}],69:[function(require,module,exports){
|
|
18335
18552
|
|
|
18336
|
-
const Tone = require('tone');
|
|
18553
|
+
// const Tone = require('tone');
|
|
18337
18554
|
const Mercury = require('mercury-lang');
|
|
18338
18555
|
const TL = require('total-serialism').Translate;
|
|
18339
18556
|
const { normalize, multiply } = require('total-serialism').Utility;
|
|
@@ -18341,13 +18558,13 @@ const { normalize, multiply } = require('total-serialism').Utility;
|
|
|
18341
18558
|
const MonoSample = require('./core/MonoSample.js');
|
|
18342
18559
|
const MonoMidi = require('./core/MonoMidi.js');
|
|
18343
18560
|
const MonoSynth = require('./core/MonoSynth.js');
|
|
18561
|
+
const MonoNoise = require('./core/MonoNoise.js');
|
|
18344
18562
|
const MonoInput = require('./core/MonoInput.js');
|
|
18345
18563
|
const PolySynth = require('./core/PolySynth.js');
|
|
18346
18564
|
const PolySample = require('./core/PolySample.js');
|
|
18347
18565
|
const Tempos = require('./data/genre-tempos.json');
|
|
18348
18566
|
const Util = require('./core/Util.js');
|
|
18349
18567
|
const { divToS } = require('./core/Util.js');
|
|
18350
|
-
const { count } = require('total-serialism/src/gen-basic.js');
|
|
18351
18568
|
|
|
18352
18569
|
class MercuryInterpreter {
|
|
18353
18570
|
constructor({ hydra, p5canvas } = {}){
|
|
@@ -18577,6 +18794,11 @@ class MercuryInterpreter {
|
|
|
18577
18794
|
objectMap.applyFunctions(obj.functions, inst, obj.type);
|
|
18578
18795
|
return inst;
|
|
18579
18796
|
},
|
|
18797
|
+
'noise' : (obj) => {
|
|
18798
|
+
let inst = new MonoNoise(this, obj.type, this.canvas);
|
|
18799
|
+
objectMap.applyFunctions(obj.functions, inst, obj.type);
|
|
18800
|
+
return inst;
|
|
18801
|
+
},
|
|
18580
18802
|
'polySynth' : (obj) => {
|
|
18581
18803
|
let inst = new PolySynth(this, obj.type, this.canvas);
|
|
18582
18804
|
objectMap.applyFunctions(obj.functions, inst, obj.type);
|
|
@@ -18690,7 +18912,7 @@ class MercuryInterpreter {
|
|
|
18690
18912
|
}
|
|
18691
18913
|
}
|
|
18692
18914
|
module.exports = { MercuryInterpreter }
|
|
18693
|
-
},{"./core/MonoInput.js":58,"./core/MonoMidi.js":59,"./core/
|
|
18915
|
+
},{"./core/MonoInput.js":58,"./core/MonoMidi.js":59,"./core/MonoNoise.js":60,"./core/MonoSample.js":61,"./core/MonoSynth.js":62,"./core/PolySample.js":64,"./core/PolySynth.js":65,"./core/Util.js":67,"./data/genre-tempos.json":68,"mercury-lang":27,"total-serialism":47}],70:[function(require,module,exports){
|
|
18694
18916
|
|
|
18695
18917
|
console.log(`
|
|
18696
18918
|
Mercury Engine by Timo Hoogland (c) 2018-2025
|
|
@@ -18709,7 +18931,7 @@ const { WebMidi } = require("webmidi");
|
|
|
18709
18931
|
// load extra AudioWorkletProcessors from file
|
|
18710
18932
|
// transformed to inline with browserify brfs
|
|
18711
18933
|
|
|
18712
|
-
const fxExtensions = "\n// A white noise generator at -6dBFS to test AudioWorkletProcessor\n//\n// class NoiseProcessor extends AudioWorkletProcessor {\n// \tprocess(inputs, outputs, parameters){\n// \t\tconst output = outputs[0];\n\n// \t\toutput.forEach((channel) => {\n// \t\t\tfor (let i=0; i<channel.length; i++) {\n// \t\t\t\tchannel[i] = Math.random() - 0.5;\n// \t\t\t}\n// \t\t});\n// \t\treturn true;\n// \t}\n// }\n// registerProcessor('noise-processor', NoiseProcessor);\n\n// A Downsampling Chiptune effect. Downsamples the signal by a specified amount\n// Resulting in a lower samplerate, making it sound more like 8bit/chiptune\n// Programmed with a custom AudioWorkletProcessor, see effects/Processors.js\n//\nclass DownSampleProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [{\n\t\t\tname: 'down',\n\t\t\tdefaultValue: 8,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 2048\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t\t// the frame counter\n\t\tthis.count = 0;\n\t\t// sample and hold variable array\n\t\tthis.sah = [];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\t// if there is anything to process\n\t\tif (input.length > 0){\n\t\t\t// for the length of the sample array (generally 128)\n\t\t\tfor (let i=0; i<input[0].length; i++){\n\t\t\t\tconst d = (parameters.down.length > 1) ? parameters.down[i] : parameters.down[0];\n\t\t\t\t// for every channel\n\t\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\t\t// if counter equals 0, sample and hold\n\t\t\t\t\tif (this.count % d === 0){\n\t\t\t\t\t\tthis.sah[channel] = input[channel][i];\n\t\t\t\t\t}\n\t\t\t\t\t// output the currently held sample\n\t\t\t\t\toutput[channel][i] = this.sah[channel];\n\t\t\t\t}\n\t\t\t\t// increment sample counter\n\t\t\t\tthis.count++;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('downsampler-processor', DownSampleProcessor);\n\n// A distortion algorithm using the tanh (hyperbolic-tangent) as a \n// waveshaping technique. Some mapping to apply a more equal loudness \n// distortion is applied on the overdrive parameter\n//\nclass TanhDistortionProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}]\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? 
parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// simple waveshaping with tanh\n\t\t\t\t\toutput[channel][i] = Math.tanh(input[channel][i] * a) * m;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('tanh-distortion-processor', TanhDistortionProcessor);\n\n// A distortion/compression effect of an incoming signal\n// Based on an algorithm by Peter McCulloch\n// \nclass SquashProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 1024\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\t\t\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\t// (s * a) / ((s * a)^2 * 0.28 + 1) / √a\n\t\t\t\t\t// drive amount, minimum of 1\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\t// makeup gain\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// set the waveshaper effect\n\t\t\t\t\tconst s = input[channel][i];\n\t\t\t\t\tconst x = s * a * 1.412;\n\t\t\t\t\toutput[channel][i] = (x / (x * x * 0.28 + 1.0)) * m * 0.708;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('squash-processor', SquashProcessor);\n\n// Dattorro Reverberator\n// Thanks to port by khoin, taken from:\n// https://github.com/khoin/DattorroReverbNode\n// based on the paper from Jon Dattorro:\n// https://ccrma.stanford.edu/~dattorro/EffectDesignPart1.pdf\n// with small modifications to work in Mercury\n//\n// In jurisdictions that recognize copyright laws, this software is to\n// be released into the public domain.\n\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.\n// THE AUTHOR(S) SHALL NOT BE LIABLE FOR ANYTHING, ARISING FROM, OR IN\n// CONNECTION WITH THE SOFTWARE OR THE DISTRIBUTION OF THE SOFTWARE.\n// \nclass DattorroReverb extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [\n\t\t\t[\"preDelay\", 0, 0, sampleRate - 1, \"k-rate\"],\n\t\t\t// [\"bandwidth\", 0.9999, 0, 1, \"k-rate\"],\t\n\t\t\t[\"inputDiffusion1\", 0.75, 0, 1, \"k-rate\"],\n\t\t\t[\"inputDiffusion2\", 0.625, 0, 1, \"k-rate\"],\n\t\t\t[\"decay\", 0.5, 0, 1, \"k-rate\"],\n\t\t\t[\"decayDiffusion1\", 0.7, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"decayDiffusion2\", 0.5, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"damping\", 0.005, 0, 1, \"k-rate\"],\n\t\t\t[\"excursionRate\", 0.5, 0, 2, \"k-rate\"],\n\t\t\t[\"excursionDepth\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t[\"wet\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t// [\"dry\", 0.7, 0, 2, \"k-rate\"]\n\t\t].map(x => new Object({\n\t\t\tname: x[0],\n\t\t\tdefaultValue: x[1],\n\t\t\tminValue: x[2],\n\t\t\tmaxValue: x[3],\n\t\t\tautomationRate: x[4]\n\t\t}));\n\t}\n\n\tconstructor(options) {\n\t\tsuper(options);\n\n\t\tthis._Delays = [];\n\t\t// Pre-delay is always one-second long, rounded to the nearest 128-chunk\n\t\tthis._pDLength = sampleRate + (128 - sampleRate % 128);\n\t\tthis._preDelay = new Float32Array(this._pDLength);\n\t\tthis._pDWrite = 0;\n\t\tthis._lp1 = 0.0;\n\t\tthis._lp2 = 0.0;\n\t\tthis._lp3 = 0.0;\n\t\tthis._excPhase = 0.0;\n\n\t\t[\n\t\t\t0.004771345, 0.003595309, 0.012734787, 
0.009307483, // pre-tank\n\t\t\t0.022579886, 0.149625349, 0.060481839, 0.1249958, // left-loop\n\t\t\t0.030509727, 0.141695508, 0.089244313, 0.106280031 // right-loop\n\t\t].forEach(x => this.makeDelay(x));\n\n\t\tthis._taps = Int16Array.from([\n\t\t\t0.008937872, 0.099929438, 0.064278754, 0.067067639, \n\t\t\t0.066866033, 0.006283391, 0.035818689, // left-output\n\t\t\t0.011861161, 0.121870905, 0.041262054, 0.08981553, \n\t\t\t0.070931756, 0.011256342, 0.004065724 // right-output\n\t\t], x => Math.round(x * sampleRate));\n\t}\n\n\tmakeDelay(length) {\n\t\t// len, array, write, read, mask\n\t\tlet len = Math.round(length * sampleRate);\n\t\tlet nextPow2 = 2 ** Math.ceil(Math.log2((len)));\n\t\tthis._Delays.push([\n\t\t\tnew Float32Array(nextPow2), len - 1, 0 | 0, nextPow2 - 1\n\t\t]);\n\t}\n\n\twriteDelay(index, data) {\n\t\treturn this._Delays[index][0][this._Delays[index][1]] = data;\n\t}\n\n\treadDelay(index) {\n\t\treturn this._Delays[index][0][this._Delays[index][2]];\n\t}\n\n\treadDelayAt(index, i) {\n\t\tlet d = this._Delays[index];\n\t\treturn d[0][(d[2] + i) & d[3]];\n\t}\n\n\t// cubic interpolation\n\t// O. Niemitalo: \n\t// https://www.musicdsp.org/en/latest/Other/49-cubic-interpollation.html\n\treadDelayCAt(index, i) {\n\t\tlet d = this._Delays[index],\n\t\t\tfrac = i - ~~i,\n\t\t\tint = ~~i + d[2] - 1,\n\t\t\tmask = d[3];\n\n\t\tlet x0 = d[0][int++ & mask],\n\t\t\tx1 = d[0][int++ & mask],\n\t\t\tx2 = d[0][int++ & mask],\n\t\t\tx3 = d[0][int & mask];\n\n\t\tlet a = (3 * (x1 - x2) - x0 + x3) / 2,\n\t\t\tb = 2 * x2 + x0 - (5 * x1 + x3) / 2,\n\t\t\tc = (x2 - x0) / 2;\n\n\t\treturn (((a * frac) + b) * frac + c) * frac + x1;\n\t}\n\n\t// First input will be downmixed to mono if number of channels is not 2\n\t// Outputs Stereo.\n\tprocess(inputs, outputs, parameters) {\n\t\tconst pd = ~~parameters.preDelay[0],\n\t\t\t// bw = parameters.bandwidth[0], // replaced by using damping\n\t\t\tfi = parameters.inputDiffusion1[0],\n\t\t\tsi = parameters.inputDiffusion2[0],\n\t\t\tdc = parameters.decay[0],\n\t\t\tft = parameters.decayDiffusion1[0],\n\t\t\tst = parameters.decayDiffusion2[0],\n\t\t\tdp = 1 - parameters.damping[0],\n\t\t\tex = parameters.excursionRate[0] / sampleRate,\n\t\t\ted = parameters.excursionDepth[0] * sampleRate / 1000,\n\t\t\twe = parameters.wet[0]; //* 0.6, // lo & ro both mult. 
by 0.6 anyways\n\t\t\t// dr = parameters.dry[0];\n\n\t\t// write to predelay and dry output\n\t\tif (inputs[0].length == 2) {\n\t\t\tfor (let i = 127; i >= 0; i--) {\n\t\t\t\tthis._preDelay[this._pDWrite + i] = (inputs[0][0][i] + inputs[0][1][i]) * 0.5;\n\n\t\t\t\t// removed the dry parameter, this is handled in the Tone Node\n\t\t\t\t// outputs[0][0][i] = inputs[0][0][i] * dr;\n\t\t\t\t// outputs[0][1][i] = inputs[0][1][i] * dr;\n\t\t\t}\n\t\t} else if (inputs[0].length > 0) {\n\t\t\tthis._preDelay.set(\n\t\t\t\tinputs[0][0],\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t\t// for (let i = 127; i >= 0; i--)\n\t\t\t// \toutputs[0][0][i] = outputs[0][1][i] = inputs[0][0][i] * dr;\n\t\t} else {\n\t\t\tthis._preDelay.set(\n\t\t\t\tnew Float32Array(128),\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t}\n\n\t\tlet i = 0 | 0;\n\t\twhile (i < 128) {\n\t\t\tlet lo = 0.0,\n\t\t\t\tro = 0.0;\n\n\t\t\t// input damping (formerly known as bandwidth bw, now uses dp)\n\t\t\tthis._lp1 += dp * (this._preDelay[(this._pDLength + this._pDWrite - pd + i) % this._pDLength] - this._lp1);\n\n\t\t\t// pre-tank\n\t\t\tlet pre = this.writeDelay(0, this._lp1 - fi * this.readDelay(0));\n\t\t\tpre = this.writeDelay(1, fi * (pre - this.readDelay(1)) + this.readDelay(0));\n\t\t\tpre = this.writeDelay(2, fi * pre + this.readDelay(1) - si * this.readDelay(2));\n\t\t\tpre = this.writeDelay(3, si * (pre - this.readDelay(3)) + this.readDelay(2));\n\n\t\t\tlet split = si * pre + this.readDelay(3);\n\n\t\t\t// excursions\n\t\t\t// could be optimized?\n\t\t\tlet exc = ed * (1 + Math.cos(this._excPhase * 6.2800));\n\t\t\tlet exc2 = ed * (1 + Math.sin(this._excPhase * 6.2847));\n\n\t\t\t// left loop\n\t\t\t// tank diffuse 1\n\t\t\tlet temp = this.writeDelay(4, split + dc * this.readDelay(11) + ft * this.readDelayCAt(4, exc));\n\t\t\t// long delay 1\n\t\t\tthis.writeDelay(5, this.readDelayCAt(4, exc) - ft * temp);\n\t\t\t// damp 1\n\t\t\tthis._lp2 += dp * (this.readDelay(5) - this._lp2);\n\t\t\ttemp = this.writeDelay(6, dc * this._lp2 - st * this.readDelay(6)); // tank diffuse 2\n\t\t\t// long delay 2\n\t\t\tthis.writeDelay(7, this.readDelay(6) + st * temp);\n\n\t\t\t// right loop \n\t\t\t// tank diffuse 3\n\t\t\ttemp = this.writeDelay(8, split + dc * this.readDelay(7) + ft * this.readDelayCAt(8, exc2));\n\t\t\t// long delay 3\n\t\t\tthis.writeDelay(9, this.readDelayCAt(8, exc2) - ft * temp);\n\t\t\t// damp 2\n\t\t\tthis._lp3 += dp * (this.readDelay(9) - this._lp3);\n\t\t\t// tank diffuse 4\n\t\t\ttemp = this.writeDelay(10, dc * this._lp3 - st * this.readDelay(10));\n\t\t\t// long delay 4\n\t\t\tthis.writeDelay(11, this.readDelay(10) + st * temp);\n\n\t\t\tlo = this.readDelayAt(9, this._taps[0]) +\n\t\t\t\tthis.readDelayAt(9, this._taps[1]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[2]) +\n\t\t\t\tthis.readDelayAt(11, this._taps[3]) -\n\t\t\t\tthis.readDelayAt(5, this._taps[4]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[5]) -\n\t\t\t\tthis.readDelayAt(7, this._taps[6]);\n\n\t\t\tro = this.readDelayAt(5, this._taps[7]) +\n\t\t\t\tthis.readDelayAt(5, this._taps[8]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[9]) +\n\t\t\t\tthis.readDelayAt(7, this._taps[10]) -\n\t\t\t\tthis.readDelayAt(9, this._taps[11]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[12]) -\n\t\t\t\tthis.readDelayAt(11, this._taps[13]);\n\n\t\t\toutputs[0][0][i] += lo * we;\n\t\t\toutputs[0][1][i] += ro * we;\n\n\t\t\tthis._excPhase += ex;\n\n\t\t\ti++;\n\n\t\t\tfor (let j = 0, d = this._Delays[0]; j < this._Delays.length; d = this._Delays[++j]) {\n\t\t\t\td[1] = (d[1] + 1) & 
d[3];\n\t\t\t\td[2] = (d[2] + 1) & d[3];\n\t\t\t}\n\t\t}\n\n\t\t// Update preDelay index\n\t\tthis._pDWrite = (this._pDWrite + 128) % this._pDLength;\n\n\t\treturn true;\n\t}\n}\nregisterProcessor('dattorro-reverb', DattorroReverb);\n";
|
|
18934
|
+
const fxExtensions = "\n// Various noise type processors for the MonoNoise source\n// Type 2 is Pink noise, used from Tone.Noise('pink') instead of calc\n//\nclass NoiseProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'type',\n\t\t\tdefaultValue: 5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 5\n\t\t},{\n\t\t\tname: 'density',\n\t\t\tdefaultValue: 0.125,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 1\n\t\t}];\n\t}\n\t\n\tconstructor(){\n\t\tsuper();\n\t\t// sample previous value\n\t\tthis.prev = 0;\n\t\t// latch to a sample \n\t\tthis.latch = 0;\n\t\t// phasor ramp\n\t\tthis.phasor = 0;\n\t\tthis.delta = 0;\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\t// input is not used because this is a source\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\t\tconst HALF_PI = Math.PI/2;\n\n\t\t// for one output channel generate some noise\t\n\t\tif (input.length > 0){\n\t\t\tfor (let i = 0; i < input[0].length; i++){\n\t\t\t\tconst t = (parameters.type.length > 1) ? parameters.type[i] : parameters.type[0];\n\t\t\t\tconst d = (parameters.density.length > 1) ? parameters.density[i] : parameters.density[0];\n\t\t\t\n\t\t\t\t// some bipolar white noise -1 to 1\n\t\t\t\tconst biNoise = Math.random() * 2 - 1;\n\t\t\t\t// empty output\n\t\t\t\tlet out = 0;\n\n\t\t\t\t// White noise, Use for every other choice\n\t\t\t\tif (t < 1){\n\t\t\t\t\tout = biNoise * 0.707;\n\t\t\t\t}\n\t\t\t\t// Pink noise, use Tone.Noise('pink') object for simplicity\n\t\t\t\telse if (t < 2){\n\t\t\t\t\tout = input[0][i] * 1.413;\n\t\t\t\t}\n\t\t\t\t// Brownian noise\n\t\t\t\t// calculate a random next value in \"step size\" and add to \n\t\t\t\t// the previous noise signal value creating a \"drunk walk\" \n\t\t\t\t// or brownian motion\n\t\t\t\telse if (t < 3){\t\t\n\t\t\t\t\tthis.prev += biNoise * d*d;\n\t\t\t\t\tthis.prev = Math.asin(Math.sin(this.prev * HALF_PI)) / HALF_PI;\n\t\t\t\t\tout = this.prev * 0.707;\n\t\t\t\t}\n\t\t\t\t// Lo-Fi (sampled) noise\n\t\t\t\t// creates random values at a specified frequency and slowly \n\t\t\t\t// ramps to that new value\n\t\t\t\telse if (t < 4){\n\t\t\t\t\t// create a ramp from 0-1 at specific frequency/density\n\t\t\t\t\tthis.phasor = (this.phasor + d * d * 0.5) % 1;\n\t\t\t\t\t// calculate the delta\n\t\t\t\t\tlet dlt = this.phasor - this.delta;\n\t\t\t\t\tthis.delta = this.phasor;\n\t\t\t\t\t// when ramp resets, latch a new noise value\n\t\t\t\t\tif (dlt < 0){\n\t\t\t\t\t\tthis.prev = this.latch;\n\t\t\t\t\t\tthis.latch = biNoise;\n\t\t\t\t\t}\n\t\t\t\t\t// linear interpolation from previous to next point\n\t\t\t\t\tout = this.prev + this.phasor * (this.latch - this.prev);\n\t\t\t\t\tout *= 0.707;\n\t\t\t\t}\n\t\t\t\t// Dust noise\n\t\t\t\t// randomly generate an impulse/click of value 1 depending \n\t\t\t\t// on the density, average amount of impulses per second\n\t\t\t\telse if (t < 5){\n\t\t\t\t\tout = Math.random() > (1 - d*d*d * 0.5);\n\t\t\t\t}\n\t\t\t\t// Crackle noise\n\t\t\t\t// Pink generator with \"wave-loss\" leaving gaps\n\t\t\t\telse {\n\t\t\t\t\tlet delta = input[0][i] - this.prev;\n\t\t\t\t\tthis.prev = input[0][i];\n\t\t\t\t\tif (delta > 0){\n\t\t\t\t\t\tthis.latch = Math.random();\n\t\t\t\t\t}\n\t\t\t\t\tout = (this.latch < (1 - d*d*d)) ? 0 : input[0][i] * 1.413;\n\t\t\t\t}\n\t\t\t\t// send to output whichever noise type was chosen\n\t\t\t\toutput[0][i] = out;\n\t\t\t}\n\t\t}\t\t\n\t\treturn true;\n\t}\n}\nregisterProcessor('noise-processor', NoiseProcessor);\n\n// A Downsampling Chiptune effect. 
Downsamples the signal by a specified amount\n// Resulting in a lower samplerate, making it sound more like 8bit/chiptune\n// Programmed with a custom AudioWorkletProcessor, see effects/Processors.js\n//\nclass DownSampleProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [{\n\t\t\tname: 'down',\n\t\t\tdefaultValue: 8,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 2048\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t\t// the frame counter\n\t\tthis.count = 0;\n\t\t// sample and hold variable array\n\t\tthis.sah = [];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\t// if there is anything to process\n\t\tif (input.length > 0){\n\t\t\t// for the length of the sample array (generally 128)\n\t\t\tfor (let i=0; i<input[0].length; i++){\n\t\t\t\tconst d = (parameters.down.length > 1) ? parameters.down[i] : parameters.down[0];\n\t\t\t\t// for every channel\n\t\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\t\t// if counter equals 0, sample and hold\n\t\t\t\t\tif (this.count % d === 0){\n\t\t\t\t\t\tthis.sah[channel] = input[channel][i];\n\t\t\t\t\t}\n\t\t\t\t\t// output the currently held sample\n\t\t\t\t\toutput[channel][i] = this.sah[channel];\n\t\t\t\t}\n\t\t\t\t// increment sample counter\n\t\t\t\tthis.count++;\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('downsampler-processor', DownSampleProcessor);\n\n// A distortion algorithm using the tanh (hyperbolic-tangent) as a \n// waveshaping technique. Some mapping to apply a more equal loudness \n// distortion is applied on the overdrive parameter\n//\nclass TanhDistortionProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}]\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// simple waveshaping with tanh\n\t\t\t\t\toutput[channel][i] = Math.tanh(input[channel][i] * a) * m;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('tanh-distortion-processor', TanhDistortionProcessor);\n\n// A distortion algorithm using the arctan function as a \n// waveshaping technique. 
Some mapping to apply a more equal loudness \n// distortion is applied on the overdrive parameter\n//\nclass ArctanDistortionProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 5,\n\t\t\tminValue: 1\n\t\t}]\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\n\t\t// quarter pi constant and inverse\n\t\tthis.Q_PI = 0.7853981633974483; // 0.25 * Math.PI;\n\t\tthis.INVQ_PI = 1.2732395447351628; //1.0 / this.Q_PI;\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tconst gain = parameters.amount[0];\n\t\tconst makeup = Math.min(1, Math.max(0, 1 - ((Math.atan(gain) - this.Q_PI) * this.INVQ_PI * 0.823)));\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; channel++){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\toutput[channel][i] = Math.atan(input[channel][i] * gain) * makeup;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('arctan-distortion-processor', ArctanDistortionProcessor);\n\n\n// A fuzz distortion effect in modelled after the Big Muff Pi pedal \n// by Electro Harmonics. Using three stages of distortion: \n// 1 soft-clipping stage, 2 half-wave rectifier, 3 hard-clipping stage\n// Based on: https://github.com/hazza-music/EHX-Big-Muff-Pi-Emulation/blob/main/Technical%20Essay.pdf\n// \nclass FuzzProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 5,\n\t\t\tminValue: 1\n\t\t}]\n\t}\n\n\tconstructor(){ \n\t\tsuper(); \n\t\t// history for onepole filter for dcblocking\n\t\tthis.history = [0, 0];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tconst gain = parameters.amount[0];\n\t\tconst makeup = Math.max((1 - Math.pow((gain-1) / 63, 0.13)) * 0.395 + 0.605, 0.605);\n\n\t\tif (input.length > 0){\n\t\t\tfor (let channel = 0; channel < input.length; channel++){\n\t\t\t\tfor (let i = 0; i < input[channel].length; i++){\n\t\t\t\t\t// soft-clipping\n\t\t\t\t\tconst sc = Math.atan(input[channel][i] * gain * 2) * 0.6;\n\t\t\t\t\t// half-wave rectification and add for \n\t\t\t\t\t// asymmetric distortion\n\t\t\t\t\tconst hw = ((sc > 0) ? 
sc : 0) + input[channel][i];\n\t\t\t\t\t// hard-clipping\n\t\t\t\t\tconst hc = Math.max(-0.707, Math.min(0.707, hw));\n\t\t\t\t\t// onepole lowpass filter for dc-block\n\t\t\t\t\tthis.history[channel] = (hc - this.history[channel]) * 0.0015 + this.history[channel];\n\t\t\t\t\t// dc-block and gain compensation and output\n\t\t\t\t\toutput[channel][i] = (hc - this.history[channel]) * makeup;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('fuzz-processor', FuzzProcessor);\n\n// A distortion/compression effect of an incoming signal\n// Based on an algorithm by Peter McCulloch\n// \nclass SquashProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors(){\n\t\treturn [{\n\t\t\tname: 'amount',\n\t\t\tdefaultValue: 4,\n\t\t\tminValue: 1,\n\t\t\tmaxValue: 1024\n\t\t}, {\n\t\t\tname: 'makeup',\n\t\t\tdefaultValue: 0.5,\n\t\t\tminValue: 0,\n\t\t\tmaxValue: 2\n\t\t}];\n\t}\n\n\tconstructor(){\n\t\tsuper();\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\t\t\n\t\tif (input.length > 0){\n\t\t\tfor (let channel=0; channel<input.length; ++channel){\n\t\t\t\tfor (let i=0; i<input[channel].length; i++){\n\t\t\t\t\t// (s * a) / ((s * a)^2 * 0.28 + 1) / √a\n\t\t\t\t\t// drive amount, minimum of 1\n\t\t\t\t\tconst a = (parameters.amount.length > 1)? parameters.amount[i] : parameters.amount[0];\n\t\t\t\t\t// makeup gain\n\t\t\t\t\tconst m = (parameters.makeup.length > 1)? parameters.makeup[i] : parameters.makeup[0];\n\t\t\t\t\t// set the waveshaper effect\n\t\t\t\t\tconst s = input[channel][i];\n\t\t\t\t\tconst x = s * a * 1.412;\n\t\t\t\t\toutput[channel][i] = (x / (x * x * 0.28 + 1.0)) * m * 0.708;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('squash-processor', SquashProcessor);\n\n// Comb Filter processor\n// A LowPass FeedBack CombFilter effect (LBCF)\n// Uses a onepole lowpass filter in the feedback delay for damping\n// Feedback amount can be positive or negative \n// (negative creates odd harmonics one octave lower)\n// \nclass CombFilterProcessor extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [\n\t\t\t[ 'time', 5, 0, 120, \"k-rate\" ],\n\t\t\t[ 'feedback', 0.8, -0.999, 0.999, \"k-rate\" ],\n\t\t\t[ 'damping', 0.5, 0, 1, \"k-rate\" ],\n\t\t\t[ 'drywet', 0.8, 0, 1, \"k-rate\" ]\n\t\t].map(x => new Object({\n\t\t\tname: x[0],\n\t\t\tdefaultValue: x[1],\n\t\t\tminValue: x[2],\n\t\t\tmaxValue: x[3],\n\t\t\tautomationRate: x[4]\n\t\t}));\n\t}\n\t\n\tconstructor(info) {\n\t\tsuper();\n\n\t\tconst numChannels = info.channelCount;\n\t\tconst delaySize = 120;\n\t\t// make delays for amount of channels and\n\t\t// initialize history values for lowpass\n\t\tthis.delays = [];\n\t\tthis.lpf = [];\n\t\tfor (let i = 0; i < numChannels; i++){\n\t\t\tthis.delays[i] = this.makeDelay(delaySize);\n\t\t\tthis.lpf[i] = 0;\n\t\t}\n\t}\n\n\t// makeDelay code based on Dattorro Reverberator delays\n\t// Thanks to khoin: https://github.com/khoin\n\tmakeDelay(length) {\n\t\tlet size = Math.round(length * 0.001 * sampleRate);\n\t\tlet nextPow2 = 2 ** Math.ceil(Math.log2((size)));\n\t\treturn [\n\t\t\tnew Float32Array(nextPow2), nextPow2-1, 0, nextPow2 - 1\n\t\t];\n\t}\n\t// write to specific delayline at delaysize\n\twriteDelay(i, data) {\n\t\treturn this.delays[i][0][this.delays[i][1]] = data;\n\t}\n\n\t// read from delayline at specified time\n\treadDelayAt(i, ms) {\n\t\tlet s = Math.round(ms * 0.001 * sampleRate);\n\t\treturn 
this.delays[i][0][(this.delays[i][2] - s) & this.delays[i][3]];\n\t}\n\n\t// move the read and writeheads of the delayline\n\tupdateReadWriteHeads(i){\n\t\t// increment read and write heads in delay and wrap at delaysize\n\t\tthis.delays[i][1] = (this.delays[i][1] + 1) & this.delays[i][3];\n\t\tthis.delays[i][2] = (this.delays[i][2] + 1) & this.delays[i][3];\n\t}\n\n\tprocess(inputs, outputs, parameters){\n\t\tconst input = inputs[0];\n\t\tconst output = outputs[0];\n\n\t\tconst dt = parameters.time[0];\n\t\tconst fb = parameters.feedback[0];\n\t\tconst dm = Math.max(0, parameters.damping[0]);\n\t\tconst dw = parameters.drywet[0];\n\n\t\t// process for every channel and every sample in the channel\n\t\tif (input.length > 0){\n\t\t\tfor (let channel = 0; channel < input.length; channel++){\n\t\t\t\tfor (let i = 0; i < input[0].length; i++){\n\t\t\t\t\t// a onepole lowpass filter after delay\n\t\t\t\t\tthis.lpf[channel] = this.readDelayAt(channel, dt) * (1 - dm) + this.lpf[channel] * dm;\n\t\t\t\t\t// write to the delayline \n\t\t\t\t\tthis.writeDelay(channel, input[channel][i] + this.lpf[channel] * fb);\n\t\t\t\t\t// apply drywet and send output from the filter\n\t\t\t\t\toutput[channel][i] = this.lpf[channel] * dw + input[channel][i] * (1-dw);\n\t\t\t\t\t// update the read and write heads of the delaylines\n\t\t\t\t\tthis.updateReadWriteHeads(channel);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true;\n\t}\n}\nregisterProcessor('combfilter-processor', CombFilterProcessor);\n\n// Dattorro Reverberator\n// Thanks to port by khoin, taken from:\n// https://github.com/khoin/DattorroReverbNode\n// based on the paper from Jon Dattorro:\n// https://ccrma.stanford.edu/~dattorro/EffectDesignPart1.pdf\n// with small modifications to work in Mercury\n//\n// In jurisdictions that recognize copyright laws, this software is to\n// be released into the public domain.\n\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.\n// THE AUTHOR(S) SHALL NOT BE LIABLE FOR ANYTHING, ARISING FROM, OR IN\n// CONNECTION WITH THE SOFTWARE OR THE DISTRIBUTION OF THE SOFTWARE.\n// \nclass DattorroReverb extends AudioWorkletProcessor {\n\tstatic get parameterDescriptors() {\n\t\treturn [\n\t\t\t[\"preDelay\", 0, 0, sampleRate - 1, \"k-rate\"],\n\t\t\t// [\"bandwidth\", 0.9999, 0, 1, \"k-rate\"],\t\n\t\t\t[\"inputDiffusion1\", 0.75, 0, 1, \"k-rate\"],\n\t\t\t[\"inputDiffusion2\", 0.625, 0, 1, \"k-rate\"],\n\t\t\t[\"decay\", 0.5, 0, 1, \"k-rate\"],\n\t\t\t[\"decayDiffusion1\", 0.7, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"decayDiffusion2\", 0.5, 0, 0.999999, \"k-rate\"],\n\t\t\t[\"damping\", 0.005, 0, 1, \"k-rate\"],\n\t\t\t[\"excursionRate\", 0.5, 0, 2, \"k-rate\"],\n\t\t\t[\"excursionDepth\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t[\"wet\", 0.7, 0, 2, \"k-rate\"],\n\t\t\t// [\"dry\", 0.7, 0, 2, \"k-rate\"]\n\t\t].map(x => new Object({\n\t\t\tname: x[0],\n\t\t\tdefaultValue: x[1],\n\t\t\tminValue: x[2],\n\t\t\tmaxValue: x[3],\n\t\t\tautomationRate: x[4]\n\t\t}));\n\t}\n\n\tconstructor(options) {\n\t\tsuper(options);\n\n\t\tthis._Delays = [];\n\t\t// Pre-delay is always one-second long, rounded to the nearest 128-chunk\n\t\tthis._pDLength = sampleRate + (128 - sampleRate % 128);\n\t\tthis._preDelay = new Float32Array(this._pDLength);\n\t\tthis._pDWrite = 0;\n\t\tthis._lp1 = 0.0;\n\t\tthis._lp2 = 0.0;\n\t\tthis._lp3 = 0.0;\n\t\tthis._excPhase = 0.0;\n\n\t\t[\n\t\t\t0.004771345, 0.003595309, 0.012734787, 0.009307483, // pre-tank\n\t\t\t0.022579886, 0.149625349, 0.060481839, 0.1249958, // left-loop\n\t\t\t0.030509727, 
0.141695508, 0.089244313, 0.106280031 // right-loop\n\t\t].forEach(x => this.makeDelay(x));\n\n\t\tthis._taps = Int16Array.from([\n\t\t\t0.008937872, 0.099929438, 0.064278754, 0.067067639, \n\t\t\t0.066866033, 0.006283391, 0.035818689, // left-output\n\t\t\t0.011861161, 0.121870905, 0.041262054, 0.08981553, \n\t\t\t0.070931756, 0.011256342, 0.004065724 // right-output\n\t\t], x => Math.round(x * sampleRate));\n\t}\n\n\tmakeDelay(length) {\n\t\t// len, array, write, read, mask\n\t\tlet len = Math.round(length * sampleRate);\n\t\tlet nextPow2 = 2 ** Math.ceil(Math.log2((len)));\n\t\tthis._Delays.push([\n\t\t\tnew Float32Array(nextPow2), len - 1, 0 | 0, nextPow2 - 1\n\t\t]);\n\t}\n\n\twriteDelay(index, data) {\n\t\treturn this._Delays[index][0][this._Delays[index][1]] = data;\n\t}\n\n\treadDelay(index) {\n\t\treturn this._Delays[index][0][this._Delays[index][2]];\n\t}\n\n\treadDelayAt(index, i) {\n\t\tlet d = this._Delays[index];\n\t\treturn d[0][(d[2] + i) & d[3]];\n\t}\n\n\t// cubic interpolation\n\t// O. Niemitalo: \n\t// https://www.musicdsp.org/en/latest/Other/49-cubic-interpollation.html\n\treadDelayCAt(index, i) {\n\t\tlet d = this._Delays[index],\n\t\t\tfrac = i - ~~i,\n\t\t\tint = ~~i + d[2] - 1,\n\t\t\tmask = d[3];\n\n\t\tlet x0 = d[0][int++ & mask],\n\t\t\tx1 = d[0][int++ & mask],\n\t\t\tx2 = d[0][int++ & mask],\n\t\t\tx3 = d[0][int & mask];\n\n\t\tlet a = (3 * (x1 - x2) - x0 + x3) / 2,\n\t\t\tb = 2 * x2 + x0 - (5 * x1 + x3) / 2,\n\t\t\tc = (x2 - x0) / 2;\n\n\t\treturn (((a * frac) + b) * frac + c) * frac + x1;\n\t}\n\n\t// First input will be downmixed to mono if number of channels is not 2\n\t// Outputs Stereo.\n\tprocess(inputs, outputs, parameters) {\n\t\tconst pd = ~~parameters.preDelay[0],\n\t\t\t// bw = parameters.bandwidth[0], // replaced by using damping\n\t\t\tfi = parameters.inputDiffusion1[0],\n\t\t\tsi = parameters.inputDiffusion2[0],\n\t\t\tdc = parameters.decay[0],\n\t\t\tft = parameters.decayDiffusion1[0],\n\t\t\tst = parameters.decayDiffusion2[0],\n\t\t\tdp = 1 - parameters.damping[0],\n\t\t\tex = parameters.excursionRate[0] / sampleRate,\n\t\t\ted = parameters.excursionDepth[0] * sampleRate / 1000,\n\t\t\twe = parameters.wet[0]; //* 0.6, // lo & ro both mult. 
by 0.6 anyways\n\t\t\t// dr = parameters.dry[0];\n\n\t\t// write to predelay and dry output\n\t\tif (inputs[0].length == 2) {\n\t\t\tfor (let i = 127; i >= 0; i--) {\n\t\t\t\tthis._preDelay[this._pDWrite + i] = (inputs[0][0][i] + inputs[0][1][i]) * 0.5;\n\n\t\t\t\t// removed the dry parameter, this is handled in the Tone Node\n\t\t\t\t// outputs[0][0][i] = inputs[0][0][i] * dr;\n\t\t\t\t// outputs[0][1][i] = inputs[0][1][i] * dr;\n\t\t\t}\n\t\t} else if (inputs[0].length > 0) {\n\t\t\tthis._preDelay.set(\n\t\t\t\tinputs[0][0],\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t\t// for (let i = 127; i >= 0; i--)\n\t\t\t// \toutputs[0][0][i] = outputs[0][1][i] = inputs[0][0][i] * dr;\n\t\t} else {\n\t\t\tthis._preDelay.set(\n\t\t\t\tnew Float32Array(128),\n\t\t\t\tthis._pDWrite\n\t\t\t);\n\t\t}\n\n\t\tlet i = 0 | 0;\n\t\twhile (i < 128) {\n\t\t\tlet lo = 0.0,\n\t\t\t\tro = 0.0;\n\n\t\t\t// input damping (formerly known as bandwidth bw, now uses dp)\n\t\t\tthis._lp1 += dp * (this._preDelay[(this._pDLength + this._pDWrite - pd + i) % this._pDLength] - this._lp1);\n\n\t\t\t// pre-tank\n\t\t\tlet pre = this.writeDelay(0, this._lp1 - fi * this.readDelay(0));\n\t\t\tpre = this.writeDelay(1, fi * (pre - this.readDelay(1)) + this.readDelay(0));\n\t\t\tpre = this.writeDelay(2, fi * pre + this.readDelay(1) - si * this.readDelay(2));\n\t\t\tpre = this.writeDelay(3, si * (pre - this.readDelay(3)) + this.readDelay(2));\n\n\t\t\tlet split = si * pre + this.readDelay(3);\n\n\t\t\t// excursions\n\t\t\t// could be optimized?\n\t\t\tlet exc = ed * (1 + Math.cos(this._excPhase * 6.2800));\n\t\t\tlet exc2 = ed * (1 + Math.sin(this._excPhase * 6.2847));\n\n\t\t\t// left loop\n\t\t\t// tank diffuse 1\n\t\t\tlet temp = this.writeDelay(4, split + dc * this.readDelay(11) + ft * this.readDelayCAt(4, exc));\n\t\t\t// long delay 1\n\t\t\tthis.writeDelay(5, this.readDelayCAt(4, exc) - ft * temp);\n\t\t\t// damp 1\n\t\t\tthis._lp2 += dp * (this.readDelay(5) - this._lp2);\n\t\t\ttemp = this.writeDelay(6, dc * this._lp2 - st * this.readDelay(6)); // tank diffuse 2\n\t\t\t// long delay 2\n\t\t\tthis.writeDelay(7, this.readDelay(6) + st * temp);\n\n\t\t\t// right loop \n\t\t\t// tank diffuse 3\n\t\t\ttemp = this.writeDelay(8, split + dc * this.readDelay(7) + ft * this.readDelayCAt(8, exc2));\n\t\t\t// long delay 3\n\t\t\tthis.writeDelay(9, this.readDelayCAt(8, exc2) - ft * temp);\n\t\t\t// damp 2\n\t\t\tthis._lp3 += dp * (this.readDelay(9) - this._lp3);\n\t\t\t// tank diffuse 4\n\t\t\ttemp = this.writeDelay(10, dc * this._lp3 - st * this.readDelay(10));\n\t\t\t// long delay 4\n\t\t\tthis.writeDelay(11, this.readDelay(10) + st * temp);\n\n\t\t\tlo = this.readDelayAt(9, this._taps[0]) +\n\t\t\t\tthis.readDelayAt(9, this._taps[1]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[2]) +\n\t\t\t\tthis.readDelayAt(11, this._taps[3]) -\n\t\t\t\tthis.readDelayAt(5, this._taps[4]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[5]) -\n\t\t\t\tthis.readDelayAt(7, this._taps[6]);\n\n\t\t\tro = this.readDelayAt(5, this._taps[7]) +\n\t\t\t\tthis.readDelayAt(5, this._taps[8]) -\n\t\t\t\tthis.readDelayAt(6, this._taps[9]) +\n\t\t\t\tthis.readDelayAt(7, this._taps[10]) -\n\t\t\t\tthis.readDelayAt(9, this._taps[11]) -\n\t\t\t\tthis.readDelayAt(10, this._taps[12]) -\n\t\t\t\tthis.readDelayAt(11, this._taps[13]);\n\n\t\t\toutputs[0][0][i] += lo * we;\n\t\t\toutputs[0][1][i] += ro * we;\n\n\t\t\tthis._excPhase += ex;\n\n\t\t\ti++;\n\n\t\t\tfor (let j = 0, d = this._Delays[0]; j < this._Delays.length; d = this._Delays[++j]) {\n\t\t\t\td[1] = (d[1] + 1) & 
d[3];\n\t\t\t\td[2] = (d[2] + 1) & d[3];\n\t\t\t}\n\t\t}\n\n\t\t// Update preDelay index\n\t\tthis._pDWrite = (this._pDWrite + 128) % this._pDLength;\n\n\t\treturn true;\n\t}\n}\nregisterProcessor('dattorro-reverb', DattorroReverb);\n";
|
|
18713
18935
|
Tone.getContext().addAudioWorkletModule(URL.createObjectURL(new Blob([ fxExtensions ], { type: 'text/javascript' })));
|
|
18714
18936
|
|
|
18715
18937
|
// Mercury main class controls Tone and loads samples
|
|
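The fxExtensions string above holds the source of every custom AudioWorkletProcessor (noise, downsampler, tanh and arctan distortion, fuzz, squash, comb filter, Dattorro reverb); it is registered by turning the string into a Blob URL and loading that as a worklet module, which is what lets the wrappers earlier in this diff call createAudioWorkletNode('…-processor'). A minimal, self-contained sketch of that pattern (PassThroughProcessor is a made-up example, not one of Mercury's processors):

// minimal sketch: register an AudioWorkletProcessor from an inline source string
// (browser only; assumes a running Tone.js context)
const Tone = require('tone');

const processorSource = `
class PassThroughProcessor extends AudioWorkletProcessor {
	process(inputs, outputs) {
		const input = inputs[0], output = outputs[0];
		for (let ch = 0; ch < input.length; ch++) {
			output[ch].set(input[ch]); // copy input straight to output
		}
		return true;
	}
}
registerProcessor('passthrough-processor', PassThroughProcessor);
`;

const url = URL.createObjectURL(new Blob([ processorSource ], { type: 'text/javascript' }));
// loading is asynchronous; nodes for 'passthrough-processor' can be created once it resolves
Tone.getContext().addAudioWorkletModule(url);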
@@ -19050,5 +19272,5 @@ class Mercury extends MercuryInterpreter {
|
|
|
19050
19272
|
// }
|
|
19051
19273
|
}
|
|
19052
19274
|
module.exports = { Mercury };
|
|
19053
|
-
},{"./core/Util.js":
|
|
19275
|
+
},{"./core/Util.js":67,"./interpreter":69,"tone":44,"webmidi":55}]},{},[70])(70)
|
|
19054
19276
|
});
|