4track 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +674 -0
- package/README.md +80 -0
- package/dist/assets/btn_fwd.svg +30 -0
- package/dist/assets/btn_normal.png +0 -0
- package/dist/assets/btn_pause.svg +30 -0
- package/dist/assets/btn_play.svg +25 -0
- package/dist/assets/btn_pressed.png +0 -0
- package/dist/assets/btn_rec.svg +25 -0
- package/dist/assets/btn_rew.svg +30 -0
- package/dist/assets/btn_stop.svg +25 -0
- package/dist/assets/casette_hiss.mp3 +0 -0
- package/dist/assets/casette_hiss_compressed.mp3 +0 -0
- package/dist/assets/cassette.jpg +0 -0
- package/dist/assets/counter_bg.png +0 -0
- package/dist/assets/fx/counter.wav +0 -0
- package/dist/assets/fx/ffwd.wav +0 -0
- package/dist/assets/fx/pause.wav +0 -0
- package/dist/assets/fx/play.wav +0 -0
- package/dist/assets/fx/record.wav +0 -0
- package/dist/assets/fx/stop.wav +0 -0
- package/dist/assets/fx/track.wav +0 -0
- package/dist/assets/logo.svg +51 -0
- package/dist/assets/noise_50.jpg +0 -0
- package/dist/assets/openstudio.svg +38 -0
- package/dist/assets/recorder-worklet.d.ts +8 -0
- package/dist/assets/recorder-worklet.js +30 -0
- package/dist/assets/rotator.png +0 -0
- package/dist/assets/slider-indicator.svg +139 -0
- package/dist/assets/slider.png +0 -0
- package/dist/assets/slideselect-indicator.svg +64 -0
- package/dist/assets/slideselect-thumb.png +0 -0
- package/dist/assets/svg-icons.d.ts +6 -0
- package/dist/assets/svg-icons.js +8 -0
- package/dist/assets.d.ts +34 -0
- package/dist/audio/constants.d.ts +4 -0
- package/dist/audio/constants.js +27 -0
- package/dist/audio/engine.svelte.d.ts +90 -0
- package/dist/audio/engine.svelte.js +604 -0
- package/dist/audio/input-fx.d.ts +8 -0
- package/dist/audio/input-fx.js +44 -0
- package/dist/audio/metering.d.ts +3 -0
- package/dist/audio/metering.js +20 -0
- package/dist/audio/pcm.d.ts +2 -0
- package/dist/audio/pcm.js +43 -0
- package/dist/audio/project-io.d.ts +6 -0
- package/dist/audio/project-io.js +85 -0
- package/dist/audio/recording.d.ts +2 -0
- package/dist/audio/recording.js +80 -0
- package/dist/audio/track.svelte.d.ts +13 -0
- package/dist/audio/track.svelte.js +17 -0
- package/dist/components/Cassette.svelte +179 -0
- package/dist/components/Cassette.svelte.d.ts +9 -0
- package/dist/components/FourTrack.svelte +443 -0
- package/dist/components/FourTrack.svelte.d.ts +16 -0
- package/dist/components/Mixer.svelte +105 -0
- package/dist/components/Mixer.svelte.d.ts +7 -0
- package/dist/components/TransportButtons.svelte +299 -0
- package/dist/components/TransportButtons.svelte.d.ts +10 -0
- package/dist/components/els/DigitRoller.svelte +82 -0
- package/dist/components/els/DigitRoller.svelte.d.ts +5 -0
- package/dist/components/els/Knob.svelte +267 -0
- package/dist/components/els/Knob.svelte.d.ts +12 -0
- package/dist/components/els/Light.svelte +104 -0
- package/dist/components/els/Light.svelte.d.ts +8 -0
- package/dist/components/els/Lights.svelte +101 -0
- package/dist/components/els/Lights.svelte.d.ts +11 -0
- package/dist/components/els/SlideSelect.svelte +159 -0
- package/dist/components/els/SlideSelect.svelte.d.ts +15 -0
- package/dist/components/els/Slider.svelte +139 -0
- package/dist/components/els/Slider.svelte.d.ts +21 -0
- package/dist/components/els/Timestamp.svelte +92 -0
- package/dist/components/els/Timestamp.svelte.d.ts +5 -0
- package/dist/fx/soundfx.d.ts +14 -0
- package/dist/fx/soundfx.js +65 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +3 -0
- package/dist/types.d.ts +40 -0
- package/dist/types.js +1 -0
- package/package.json +48 -0
|
@@ -0,0 +1,604 @@
|
|
|
1
|
+
// The audio engine: owns reactive state, the Web Audio context,
|
|
2
|
+
// and the transport state machine (play/pause/stop/record).
|
|
3
|
+
// Delegates DSP, metering, and I/O to focused submodules.
|
|
4
|
+
import { Track } from "./track.svelte.js";
|
|
5
|
+
import { DEFAULT_CONFIG, AUDIO_CONSTRAINTS, PLAYBACK_TICK_MS, } from "./constants.js";
|
|
6
|
+
import { buildInputFxChain, applyTrim } from "./input-fx.js";
|
|
7
|
+
import { measureRecordLatency, mergeRecordingIntoBuffer } from "./recording.js";
|
|
8
|
+
import { updateMeterLevels } from "./metering.js";
|
|
9
|
+
import { exportProject as _exportProject, importProject as _importProject, } from "./project-io.js";
|
|
10
|
+
/**
 * Four-track audio engine. Owns the reactive transport state
 * (play/pause/stop/record), the Web Audio graph (per-track channel strips
 * routed into a master bus), the mic input/monitoring/recording chain, and
 * project save/load. DSP, latency measurement, metering, and binary I/O are
 * delegated to the imported submodules.
 *
 * NOTE: `$state` / `$derived` are Svelte 5 runes — this class is reactive and
 * must be processed by the Svelte compiler (hence the `.svelte.js` filename).
 */
export class AudioEngine {
    // ─── Reactive state (read by UI) ────────────────────────────────────
    // One of: "stopped" | "paused" | "playing" | "recording" (the values
    // assigned throughout this class).
    playState = $state("stopped");
    // Mic permission/availability; values assigned in this class:
    // "unsupported" | "denied" | "inactive" | "prompt" | "active" |
    // "no-device" | "error".
    micStatus = $state("unsupported");
    // Playhead in seconds, rounded to 0.1 s for display.
    position = $state(0);
    masterVolume = $state(1.5);
    latencyInfo = $state("");
    // Trim slider value in the range -1..1 (see applyTrim in input-fx.js).
    trimValue = $state(-1);
    recordingVolume = $state(0.75);
    tracks;
    // ─── Private state ──────────────────────────────────────────────────
    config;
    // Web Audio graph
    audioContext = null;
    masterGainNode = null;
    // Playback
    activePlaybackSources = [];
    monitoringSources = [];
    playbackStartTime = 0;
    playbackOffset = 0;
    playbackTickId = null;
    // Recording
    recorderWorkletNode = null;
    recorderSourceNode = null;
    recordedChunks = [];
    recordingTrackIndex = null;
    recordingLatencySeconds = 0;
    punchInOffset = 0;
    timerId = null;
    // Input FX chain (created per recording, torn down on stop)
    inputGainNode = null;
    trimGainNode = null;
    waveShaperNode = null;
    recVolNode = null;
    inputFxNodes = [];
    // Metering
    meterRafId = null;
    // Initialization guard
    audioContextInitialized = false;
    // ─── Constructor ────────────────────────────────────────────────────
    /**
     * @param config - partial engine config merged over DEFAULT_CONFIG.
     * Hidden tracks (e.g. a tape-hiss bed) are appended after the
     * user-visible tracks; their audio is fetched in initAudioContext().
     */
    constructor(config = {}) {
        this.config = { ...DEFAULT_CONFIG, ...config };
        // Caller-supplied inputFx replaces (not merges with) the default chain.
        if (config.inputFx)
            this.config.inputFx = config.inputFx;
        const trimCfg = this.config.inputFx.find((fx) => fx.type === "trim");
        if (trimCfg)
            this.trimValue = trimCfg.default;
        this.tracks = Array.from({ length: this.config.trackCount }, () => new Track());
        for (const ht of this.config.hiddenTracks ?? []) {
            const track = new Track(true);
            track.volume = ht.volume;
            track.pan = ht.pan ?? 0;
            this.tracks.push(track);
        }
    }
    // ─── Context / Channel Strips ───────────────────────────────────────
    /** Lazily creates the AudioContext and wires up channel strips. Requires a user gesture. */
    ensureContext() {
        if (!this.audioContext) {
            this.audioContext = new AudioContext({
                latencyHint: "interactive",
                sampleRate: this.config.sampleRate,
            });
        }
        this.ensureChannelStrips();
        return this.audioContext;
    }
    /** Creates per-track gain, analyser, and panner nodes routed to the master bus. Runs once. */
    ensureChannelStrips() {
        // First track already wired — assume all strips exist.
        if (this.tracks[0]?.gainNode)
            return;
        const ctx = this.audioContext;
        this.masterGainNode = ctx.createGain();
        this.masterGainNode.gain.value = this.masterVolume;
        this.masterGainNode.connect(ctx.destination);
        for (let i = 0; i < this.tracks.length; i++) {
            const track = this.tracks[i];
            // Per-track strip: source → gain → analyser (metering) → pan → master.
            const gain = ctx.createGain();
            gain.gain.value = track.volume;
            const analyser = ctx.createAnalyser();
            analyser.fftSize = 256;
            const pan = ctx.createStereoPanner();
            pan.pan.value = track.pan;
            gain.connect(analyser);
            analyser.connect(pan);
            pan.connect(this.masterGainNode);
            track.gainNode = gain;
            track.analyserNode = analyser;
            track.panNode = pan;
        }
        this.startMeters();
    }
    // ─── Audio Context Initialization ─────────────────────────────────
    /** Initializes the AudioContext and fetches hidden track audio (e.g. cassette hiss). Idempotent. */
    async initAudioContext() {
        if (this.audioContextInitialized)
            return;
        this.audioContextInitialized = true;
        const ctx = this.ensureContext();
        // Determine initial mic status
        if (!navigator.mediaDevices?.getUserMedia) {
            this.micStatus = "unsupported";
        }
        else {
            try {
                const perm = await navigator.permissions.query({ name: "microphone" });
                if (perm.state === "denied")
                    this.micStatus = "denied";
                else if (perm.state === "granted")
                    this.micStatus = "inactive";
                else
                    this.micStatus = "prompt";
                // Keep micStatus in sync if the user changes permission later.
                perm.addEventListener("change", () => {
                    if (perm.state === "denied")
                        this.micStatus = "denied";
                    else if (perm.state === "granted" && this.micStatus !== "active")
                        this.micStatus = "inactive";
                    else if (perm.state === "prompt")
                        this.micStatus = "prompt";
                });
            }
            catch {
                // Permissions API not supported for microphone — assume prompt
                this.micStatus = "prompt";
            }
        }
        // Fetch and decode hidden-track audio. configs[i] is assumed to
        // correspond to the i-th hidden track pushed by the constructor.
        const configs = this.config.hiddenTracks ?? [];
        const hiddenTracks = this.tracks.filter((t) => t.hidden);
        await Promise.all(configs.map(async (ht, i) => {
            try {
                const response = await fetch(ht.url);
                const data = await response.arrayBuffer();
                const audioBuffer = await ctx.decodeAudioData(data);
                hiddenTracks[i].buffer = audioBuffer;
                hiddenTracks[i].hasContent = true;
            }
            catch (e) {
                console.warn(`Failed to load hidden track "${ht.url}":`, e);
            }
        }));
    }
    // ─── Transport ──────────────────────────────────────────────────────
    /** Returns the longest track buffer duration in seconds. */
    getMaxDuration() {
        let max = 0;
        for (const track of this.tracks) {
            if (track.buffer &&
                Number.isFinite(track.buffer.duration) &&
                track.buffer.duration > max) {
                max = track.buffer.duration;
            }
        }
        return max;
    }
    // True when any track (including hidden ones) holds audio.
    hasContent = $derived(this.tracks.some((t) => t.hasContent));
    get duration() {
        return this.getMaxDuration();
    }
    /** Jumps the playhead to a position in seconds. Restarts playback if currently playing. */
    seek(seconds) {
        if (this.playState === "recording")
            return;
        const max = this.getMaxDuration();
        const clamped = Math.max(0, Math.min(seconds, max));
        this.playbackOffset = clamped;
        this.position = Math.round(clamped * 10) / 10;
        if (this.playState === "playing") {
            this.stopSources(this.activePlaybackSources);
            this.clearPlaybackTick();
            this.startPlayback(clamped);
        }
    }
    /** Starts playback from the current position. No-op if already playing or recording. */
    async play() {
        if (this.playState === "playing" || this.playState === "recording")
            return;
        await this.initAudioContext();
        const maxDuration = this.getMaxDuration();
        if (maxDuration <= 0)
            return;
        this.startPlayback(this.playbackOffset);
    }
    /** Schedules all track buffers for playback and starts the position tick timer. */
    startPlayback(offsetSeconds) {
        const ctx = this.ensureContext();
        ctx.resume();
        // Small scheduling headroom so all sources start in sync.
        const startTime = ctx.currentTime + 0.02;
        const maxDuration = this.getMaxDuration();
        if (maxDuration <= 0)
            return;
        const effectiveDuration = maxDuration - offsetSeconds;
        this.activePlaybackSources = [];
        for (let i = 0; i < this.tracks.length; i++) {
            const track = this.tracks[i];
            const buf = track.buffer;
            if (!buf)
                continue;
            // trimStart shifts the track's content relative to the timeline
            // (used for record-latency alignment, see stopRecording).
            const trim = track.trimStart;
            const startOffset = offsetSeconds + trim;
            if (startOffset >= buf.duration)
                continue;
            const playDuration = Math.min(buf.duration - startOffset, effectiveDuration);
            if (playDuration <= 0)
                continue;
            const src = ctx.createBufferSource();
            src.buffer = buf;
            src.connect(track.gainNode);
            src.start(startTime, startOffset, playDuration);
            this.activePlaybackSources.push(src);
        }
        this.playbackStartTime = startTime;
        this.playbackOffset = offsetSeconds;
        this.position = Math.round(offsetSeconds * 10) / 10;
        this.playState = "playing";
        this.playbackTickId = window.setInterval(() => {
            const elapsed = ctx.currentTime - this.playbackStartTime;
            this.position = Math.round((this.playbackOffset + elapsed) * 10) / 10;
            // Reached the end of the longest track — auto-stop.
            if (elapsed >= effectiveDuration) {
                this.playbackOffset = maxDuration;
                this.position = Math.round(maxDuration * 10) / 10;
                this.clearPlaybackTick();
                this.playState = "stopped";
            }
        }, PLAYBACK_TICK_MS);
    }
    /** Pauses playback, preserving the current position for resuming. */
    pause() {
        if (this.playState !== "playing")
            return;
        const ctx = this.audioContext;
        if (ctx && this.activePlaybackSources.length > 0) {
            this.playbackOffset = Math.min(this.playbackOffset + (ctx.currentTime - this.playbackStartTime), this.getMaxDuration());
            this.stopSources(this.activePlaybackSources);
        }
        this.clearPlaybackTick();
        this.position = Math.round(this.playbackOffset * 10) / 10;
        this.playState = "paused";
    }
    /** Stops playback or recording. If recording, finalizes and merges the recorded audio. */
    stop() {
        if (this.playState === "recording") {
            this.stopRecording();
            return;
        }
        if (this.playState === "playing") {
            const ctx = this.audioContext;
            if (ctx && this.activePlaybackSources.length > 0) {
                this.playbackOffset = Math.min(this.playbackOffset + (ctx.currentTime - this.playbackStartTime), this.getMaxDuration());
                this.stopSources(this.activePlaybackSources);
            }
            this.clearPlaybackTick();
            this.position = Math.round(this.playbackOffset * 10) / 10;
        }
        this.playState = "stopped";
    }
    /** Resets the playhead to the beginning and stops all playback. */
    rewind() {
        this.stopSources(this.activePlaybackSources);
        this.playbackStartTime = 0;
        this.playbackOffset = 0;
        this.position = 0;
        this.clearPlaybackTick();
        this.playState = "stopped";
    }
    // ─── Playback helpers ──────────────────────────────────────────────
    /** Plays all tracks except the one being recorded, so the musician can hear the mix. */
    playOtherTracksForMonitoring(excludeIndex, offsetSeconds = 0) {
        const ctx = this.ensureContext();
        ctx.resume();
        const startTime = ctx.currentTime + 0.02;
        this.monitoringSources = [];
        for (let i = 0; i < this.tracks.length; i++) {
            if (i === excludeIndex)
                continue;
            const track = this.tracks[i];
            const buf = track.buffer;
            if (!buf)
                continue;
            const trim = track.trimStart;
            const startOffset = offsetSeconds + trim;
            if (startOffset >= buf.duration)
                continue;
            const playDuration = buf.duration - startOffset;
            if (playDuration <= 0)
                continue;
            const src = ctx.createBufferSource();
            src.buffer = buf;
            src.connect(track.gainNode);
            src.start(startTime, startOffset, playDuration);
            this.monitoringSources.push(src);
        }
    }
    /** Stops and clears an array of active AudioBufferSourceNodes. */
    stopSources(sources) {
        const ctx = this.audioContext;
        if (!ctx)
            return;
        const when = ctx.currentTime;
        for (const src of sources) {
            try {
                src.stop(when);
            }
            catch {
                /* already stopped */
            }
        }
        // Empty the caller's array in place.
        sources.length = 0;
    }
    /** Stops both active playback and monitoring sources, resets playback position. */
    stopAllPlayback() {
        this.stopSources(this.activePlaybackSources);
        this.stopSources(this.monitoringSources);
        this.clearPlaybackTick();
        // this.playbackStartTime = 0;
        // this.playbackOffset = 0;
    }
    /** Cancels the playback position tick interval, if running. */
    clearPlaybackTick() {
        if (this.playbackTickId !== null) {
            clearInterval(this.playbackTickId);
            this.playbackTickId = null;
        }
    }
    /** Cancels the recording position timer, if running. */
    clearTimer() {
        if (this.timerId !== null) {
            clearInterval(this.timerId);
            this.timerId = null;
        }
    }
    // ─── Monitoring ─────────────────────────────────────────────────────
    /** Sets up mic input chain for live monitoring (pass-through to speakers).
     * Can be called independently of recording (e.g. record armed + paused). */
    async startMonitoring(trackIndex) {
        // Already monitoring — nothing to do
        if (this.recorderSourceNode)
            return;
        // trackIndex === -1 means "no track": monitor straight to master.
        if (trackIndex < -1 || trackIndex >= this.tracks.length)
            return;
        if (trackIndex >= 0 && this.tracks[trackIndex].hidden)
            return;
        if (!navigator.mediaDevices?.getUserMedia) {
            this.micStatus = "unsupported";
            throw new Error("Recording requires a secure context (HTTPS). Please access this app over HTTPS.");
        }
        // Acquire mic — must be the FIRST await to stay within the
        // user-gesture window that iOS Safari enforces for getUserMedia.
        let stream;
        try {
            stream = await navigator.mediaDevices.getUserMedia({
                audio: {
                    ...AUDIO_CONSTRAINTS,
                    sampleRate: { ideal: this.config.sampleRate },
                },
            });
        }
        catch (err) {
            if (err instanceof DOMException) {
                if (err.name === "NotAllowedError")
                    this.micStatus = "denied";
                else if (err.name === "NotFoundError")
                    this.micStatus = "no-device";
                else
                    this.micStatus = "error";
            }
            else {
                this.micStatus = "error";
            }
            throw err;
        }
        this.micStatus = "active";
        await this.initAudioContext();
        const ctx = this.ensureContext();
        await ctx.resume();
        // Build input chain: mic → inputGain → [FX] → recVol → worklet → destination
        // The worklet passes audio through to destination for live monitoring.
        await ctx.audioWorklet.addModule(this.config.workletUrl);
        const source = ctx.createMediaStreamSource(stream);
        const worklet = new AudioWorkletNode(ctx, "recorder");
        this.inputGainNode = ctx.createGain();
        this.inputGainNode.gain.value = 1.0;
        this.recVolNode = ctx.createGain();
        this.recVolNode.gain.value = this.recordingVolume;
        const { nodes, trimGainNode, waveShaperNode } = buildInputFxChain(ctx, this.config.inputFx, this.trimValue);
        this.inputFxNodes = nodes;
        this.trimGainNode = trimGainNode;
        this.waveShaperNode = waveShaperNode;
        source.connect(this.inputGainNode);
        let prev = this.inputGainNode;
        for (const node of this.inputFxNodes) {
            prev.connect(node);
            prev = node;
        }
        prev.connect(this.recVolNode);
        this.recVolNode.connect(worklet);
        if (trackIndex >= 0) {
            // Route through the track's strip so monitoring respects its
            // volume/pan and shows up on its meter.
            const track = this.tracks[trackIndex];
            worklet.connect(track.gainNode);
        }
        else {
            worklet.connect(this.masterGainNode);
        }
        this.recorderSourceNode = source;
        this.recorderWorkletNode = worklet;
    }
    /** Tears down the mic input chain and releases the microphone. */
    stopMonitoring() {
        // Disconnect input processing chain
        this.inputGainNode?.disconnect();
        for (const node of this.inputFxNodes)
            node.disconnect();
        this.inputFxNodes = [];
        this.recVolNode?.disconnect();
        this.inputGainNode = null;
        this.trimGainNode = null;
        this.waveShaperNode = null;
        this.recVolNode = null;
        const source = this.recorderSourceNode;
        const worklet = this.recorderWorkletNode;
        this.recorderSourceNode = null;
        this.recorderWorkletNode = null;
        if (source)
            source.disconnect();
        if (worklet)
            worklet.port.onmessage = null;
        worklet?.disconnect();
        // Stop the media stream's tracks to release the mic (turns off the
        // browser's recording indicator).
        if (source?.mediaStream)
            source.mediaStream.getTracks().forEach((t) => t.stop());
        this.micStatus = "inactive";
    }
    // ─── Recording ──────────────────────────────────────────────────────
    /** Arms and starts recording on the given track. Reuses monitoring if already active. */
    async record(trackIndex) {
        if (this.playState === "recording")
            return;
        // Ensure mic input chain is set up (no-op if already monitoring)
        await this.startMonitoring(trackIndex);
        const ctx = this.ensureContext();
        const stream = this.recorderSourceNode.mediaStream;
        // Measure round-trip latency for alignment when merging
        const recordLatencySeconds = measureRecordLatency(stream, ctx);
        this.updateLatencyDisplay(recordLatencySeconds);
        // Start collecting PCM chunks from the worklet
        this.recordedChunks = [];
        this.recorderWorkletNode.port.onmessage = (e) => {
            if (e.data?.type === "pcm" && e.data.data)
                this.recordedChunks.push(e.data.data);
        };
        // Store state needed by stopRecording to merge audio into the track
        this.recordingTrackIndex = trackIndex;
        this.recordingLatencySeconds = recordLatencySeconds;
        this.punchInOffset = this.playbackOffset;
        this.position = Math.round(this.punchInOffset * 10) / 10;
        this.playState = "recording";
        // Play other tracks for overdub monitoring, start meters and position timer
        this.playOtherTracksForMonitoring(trackIndex, this.punchInOffset);
        // Coarse 1 s position timer; auto-stops at the end of existing content.
        this.timerId = window.setInterval(() => {
            const next = this.position + 1;
            if (next >= this.getMaxDuration()) {
                this.stop();
                return;
            }
            this.position = next;
        }, 1000);
    }
    /** Stops recording and merges captured audio. Keeps mic monitoring alive. */
    stopRecording() {
        const selectedTrackIndex = this.recordingTrackIndex ?? -1;
        const recordLatencySeconds = this.recordingLatencySeconds;
        this.recordingTrackIndex = null;
        const ctx = this.audioContext;
        // Stop PCM collection (but keep worklet connected for pass-through monitoring)
        if (this.recorderWorkletNode) {
            this.recorderWorkletNode.port.onmessage = null;
        }
        // Stop all tracks from playing
        if (ctx && this.activePlaybackSources.length > 0) {
            this.stopSources(this.activePlaybackSources);
        }
        // Store the playback position so playback can resume from here later.
        // TODO: currently limited to the 1 s timer granularity; finer
        // resolution is planned.
        this.playbackOffset = this.position;
        this.stopAllPlayback();
        this.clearTimer();
        // Merge recorded audio into the track buffer with latency compensation
        const trimSamples = Math.max(0, Math.round((ctx?.sampleRate ?? this.config.sampleRate) * recordLatencySeconds));
        if (selectedTrackIndex >= 0 && this.recordedChunks.length && ctx) {
            const track = this.tracks[selectedTrackIndex];
            const hadExistingBuffer = track.buffer !== null;
            const newBuffer = mergeRecordingIntoBuffer(ctx, this.recordedChunks, trimSamples, track.buffer, track.trimStart, this.punchInOffset);
            if (newBuffer) {
                track.buffer = newBuffer;
                track.hasContent = true;
            }
            // First take on this track: shift playback by the measured
            // latency so it lines up with the other tracks.
            if (!hadExistingBuffer) {
                track.trimStart = recordLatencySeconds;
            }
            this.latencyInfo = `Latency: ~${Math.round(recordLatencySeconds * 1000)} ms (compensated)`;
        }
        this.recordedChunks = [];
        this.playState = "stopped";
    }
    /** Updates the latencyInfo reactive state with a human-readable breakdown. */
    updateLatencyDisplay(recordLatencySeconds) {
        const ctx = this.audioContext;
        const baseMs = ctx && typeof ctx.baseLatency === "number" ? ctx.baseLatency * 1000 : 0;
        // outputLatency is not implemented in all browsers — fall back to 0.
        const outMs = ctx && typeof ctx.outputLatency === "number"
            ? ctx.outputLatency * 1000
            : 0;
        const totalMs = Math.round(recordLatencySeconds * 1000);
        this.latencyInfo = `Target <20ms \u2022 Round-trip ~${totalMs} ms (base ${Math.round(baseMs)} ms, out ${Math.round(outMs)} ms)`;
    }
    // ─── Mixer Controls ─────────────────────────────────────────────────
    /** Sets a track's volume, updating its gain node if the graph exists. */
    setTrackVolume(index, value) {
        const track = this.tracks[index];
        if (!track)
            return;
        track.volume = value;
        if (track.gainNode)
            track.gainNode.gain.value = value;
    }
    /** Sets a track's stereo pan (-1..1), updating its panner node if present. */
    setTrackPan(index, value) {
        const track = this.tracks[index];
        if (!track)
            return;
        track.pan = value;
        if (track.panNode)
            track.panNode.pan.value = value;
    }
    /** Sets the master bus volume. */
    setMasterVolume(value) {
        this.masterVolume = value;
        if (this.masterGainNode)
            this.masterGainNode.gain.value = value;
    }
    /** Sets the input trim slider value and re-applies gain/saturation. */
    setTrim(value) {
        this.trimValue = value;
        const cfg = this.config.inputFx.find((fx) => fx.type === "trim");
        if (cfg)
            applyTrim(value, cfg, this.trimGainNode, this.waveShaperNode);
    }
    /** Sets the recording (input bus) volume. */
    setRecordingVolume(value) {
        this.recordingVolume = value;
        if (this.recVolNode)
            this.recVolNode.gain.value = value;
    }
    /** Sets the raw mic input gain. No-op unless the input chain exists. */
    setInputGain(value) {
        if (this.inputGainNode)
            this.inputGainNode.gain.value = value;
    }
    // ─── Metering ───────────────────────────────────────────────────────
    /** Starts the rAF loop that updates per-track meter levels. Idempotent. */
    startMeters() {
        if (this.meterRafId !== null)
            return;
        const tick = () => {
            updateMeterLevels(this.tracks, this.masterGainNode?.gain.value ?? 1);
            this.meterRafId = requestAnimationFrame(tick);
        };
        this.meterRafId = requestAnimationFrame(tick);
    }
    /** Cancels the metering rAF loop. */
    stopMeters() {
        if (this.meterRafId !== null) {
            cancelAnimationFrame(this.meterRafId);
            this.meterRafId = null;
        }
    }
    // ─── Save / Load ────────────────────────────────────────────────────
    /** Serializes all tracks and settings into a .4trk binary blob. */
    exportProject() {
        return _exportProject(this.tracks, this.config, this.masterVolume);
    }
    /** Loads a .4trk file, restoring all track buffers, mixer settings, and master volume. */
    async importProject(file) {
        const { masterVolume } = await _importProject(file, this.tracks, () => this.ensureContext());
        this.setMasterVolume(masterVolume);
        this.rewind();
    }
    // ─── Cleanup ────────────────────────────────────────────────────────
    /** Disconnects all audio nodes, closes the AudioContext, and releases media streams. */
    dispose() {
        this.stopAllPlayback();
        this.clearTimer();
        this.stopMeters();
        this.stopMonitoring();
        for (const track of this.tracks) {
            track.gainNode?.disconnect();
            track.analyserNode?.disconnect();
            track.panNode?.disconnect();
        }
        this.masterGainNode?.disconnect();
        if (this.audioContext) {
            this.audioContext.close();
            this.audioContext = null;
        }
        this.playState = "stopped";
        this.position = 0;
    }
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { TrimFxConfig } from '../types.js';
/** Result of assembling the input effects chain. */
export interface InputFxChainResult {
    /** All created nodes in signal-flow order (empty if no FX enabled). */
    nodes: AudioNode[];
    /** Handle to the trim gain node for live updates, or null if trim is disabled. */
    trimGainNode: GainNode | null;
    /** Handle to the saturation waveshaper for live updates, or null if trim is disabled. */
    waveShaperNode: WaveShaperNode | null;
}
/** Builds the input-FX node chain (trim gain + saturation) from config. */
export declare function buildInputFxChain(ctx: AudioContext, config: TrimFxConfig[], trimValue: number): InputFxChainResult;
/** Applies a bipolar slider value (-1..1) to the trim gain and saturation curve. */
export declare function applyTrim(sliderValue: number, cfg: TrimFxConfig, trimGainNode: GainNode | null, waveShaperNode: WaveShaperNode | null): void;
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
// Input effects chain: trim gain and waveshaper saturation.
|
|
2
|
+
// Built fresh for each recording session, torn down on stop.
|
|
3
|
+
// Build a tanh-shaped transfer curve for a WaveShaper node.
// `intensity` (0..1) blends between the identity line (no coloring) and
// full tanh saturation; the drive factor grows linearly from curveBase
// across curveRange as intensity rises.
function makeSaturationCurve(intensity, curveBase, curveRange) {
    const SIZE = 8192;
    const drive = curveBase + intensity * curveRange;
    return Float32Array.from({ length: SIZE }, (_, idx) => {
        const x = (idx / (SIZE - 1)) * 2 - 1; // map index onto [-1, 1]
        return x + (Math.tanh(drive * x) - x) * intensity;
    });
}
|
|
15
|
+
// Assemble the input effects node chain from the FX config.
// Disabled entries are skipped. Returns the created nodes in
// signal-flow order, plus direct handles to the trim gain and
// saturation waveshaper so their parameters can be updated live.
export function buildInputFxChain(ctx, config, trimValue) {
    const nodes = [];
    let trimGainNode = null;
    let waveShaperNode = null;
    for (const fx of config.filter((entry) => entry.enabled)) {
        if (fx.type !== 'trim')
            continue;
        trimGainNode = ctx.createGain();
        waveShaperNode = ctx.createWaveShaper();
        waveShaperNode.oversample = '4x'; // reduce aliasing from the nonlinearity
        applyTrim(trimValue, fx, trimGainNode, waveShaperNode);
        nodes.push(trimGainNode, waveShaperNode);
    }
    return { nodes, trimGainNode, waveShaperNode };
}
|
|
35
|
+
// Apply a bipolar slider value (-1..1) to the trim chain: linear gain on
// the trim node and a regenerated saturation curve on the waveshaper.
// Either node may be null (chain not built yet), in which case it is skipped.
export function applyTrim(sliderValue, cfg, trimGainNode, waveShaperNode) {
    const amount = 0.5 * (sliderValue + 1); // rescale -1..1 onto 0..1
    if (trimGainNode) {
        trimGainNode.gain.value = cfg.gainMin + amount * cfg.gainRange;
    }
    if (waveShaperNode) {
        waveShaperNode.curve = makeSaturationCurve(amount, cfg.curveBase, cfg.curveRange);
    }
}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
// Peak metering: reads analyser nodes and updates track levels for the UI.
|
|
2
|
+
/**
 * Reads each track's analyser and updates `track.level` with the current
 * peak, scaled by the master gain and clamped to 0..100.
 *
 * Fix: the original allocated one buffer sized from the FIRST track's
 * analyser `fftSize` and reused it for every track. When analysers have
 * differing `fftSize`, `getFloatTimeDomainData` only fills the first
 * `fftSize` elements, so stale samples from the previous track leaked into
 * the peak scan. The buffer is now (re)allocated to match each analyser.
 *
 * @param tracks - tracks whose `analyserNode` (if any) is read and whose
 *   `level` is written; tracks without an analyser are left untouched.
 * @param masterGain - master bus gain used to scale the displayed peak.
 */
export function updateMeterLevels(tracks, masterGain) {
    let buf = null;
    for (const track of tracks) {
        const analyser = track.analyserNode;
        if (!analyser)
            continue;
        // Reuse the buffer across tracks, but only when sizes match.
        if (!buf || buf.length !== analyser.fftSize)
            buf = new Float32Array(analyser.fftSize);
        analyser.getFloatTimeDomainData(buf);
        let peak = 0;
        for (let j = 0; j < buf.length; j++) {
            const abs = Math.abs(buf[j]);
            if (abs > peak)
                peak = abs;
        }
        track.level = Math.min(100, Math.round(peak * masterGain * 100));
    }
}
|
|
17
|
+
// Zero every track's meter reading (e.g. when metering is stopped).
export function resetMeterLevels(tracks) {
    tracks.forEach((track) => {
        track.level = 0;
    });
}
|