@dawcore/components 0.0.3 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1083,6 +1083,13 @@ var PeakPipeline = class {
1083
1083
  this._baseScale = baseScale;
1084
1084
  this._bits = bits;
1085
1085
  }
1086
+ /**
1087
+ * Inject externally-loaded WaveformData (e.g., from a .dat file) into the cache.
1088
+ * Prevents worker generation for this AudioBuffer on all subsequent calls.
1089
+ */
1090
+ cacheWaveformData(audioBuffer, waveformData) {
1091
+ this._cache.set(audioBuffer, waveformData);
1092
+ }
1086
1093
  /**
1087
1094
  * Generate PeakData for a clip from its AudioBuffer.
1088
1095
  * Uses cached WaveformData when available; otherwise generates via worker.
@@ -1090,8 +1097,9 @@ var PeakPipeline = class {
1090
1097
  */
1091
1098
  async generatePeaks(audioBuffer, samplesPerPixel, isMono, offsetSamples, durationSamples) {
1092
1099
  const waveformData = await this._getWaveformData(audioBuffer);
1100
+ const effectiveScale = this._clampScale(waveformData, samplesPerPixel);
1093
1101
  try {
1094
- return extractPeaks(waveformData, samplesPerPixel, isMono, offsetSamples, durationSamples);
1102
+ return extractPeaks(waveformData, effectiveScale, isMono, offsetSamples, durationSamples);
1095
1103
  } catch (err) {
1096
1104
  console.warn("[dawcore] extractPeaks failed: " + String(err));
1097
1105
  throw err;
@@ -1099,23 +1107,29 @@ var PeakPipeline = class {
1099
1107
  }
1100
1108
  /**
1101
1109
  * Re-extract peaks for all clips at a new zoom level using cached WaveformData.
1102
- * Only works for zoom levels coarser than (or equal to) the cached base scale.
1103
- * Returns a new Map of clipId → PeakData. Clips without cached data or where
1104
- * the target scale is finer than the cached base are skipped.
1110
+ * Returns a new Map of clipId → PeakData. Clips without cached data are skipped.
1111
+ * When the requested scale is finer than cached data, peaks are clamped to the
1112
+ * cached scale and a single summary warning is logged.
1105
1113
  */
1106
1114
  reextractPeaks(clipBuffers, samplesPerPixel, isMono, clipOffsets) {
1107
1115
  const result = /* @__PURE__ */ new Map();
1116
+ let clampedCount = 0;
1117
+ let clampedScale = 0;
1108
1118
  for (const [clipId, audioBuffer] of clipBuffers) {
1109
1119
  const cached = this._cache.get(audioBuffer);
1110
1120
  if (cached) {
1111
- if (samplesPerPixel < cached.scale) continue;
1121
+ const effectiveScale = this._clampScale(cached, samplesPerPixel, false);
1122
+ if (effectiveScale !== samplesPerPixel) {
1123
+ clampedCount++;
1124
+ clampedScale = effectiveScale;
1125
+ }
1112
1126
  try {
1113
1127
  const offsets = clipOffsets?.get(clipId);
1114
1128
  result.set(
1115
1129
  clipId,
1116
1130
  extractPeaks(
1117
1131
  cached,
1118
- samplesPerPixel,
1132
+ effectiveScale,
1119
1133
  isMono,
1120
1134
  offsets?.offsetSamples,
1121
1135
  offsets?.durationSamples
@@ -1126,8 +1140,42 @@ var PeakPipeline = class {
1126
1140
  }
1127
1141
  }
1128
1142
  }
1143
+ if (clampedCount > 0) {
1144
+ console.warn(
1145
+ "[dawcore] Requested zoom " + samplesPerPixel + " spp is finer than pre-computed peaks (" + clampedScale + " spp) \u2014 " + clampedCount + " clip(s) using available resolution"
1146
+ );
1147
+ }
1129
1148
  return result;
1130
1149
  }
1150
+ /**
1151
+ * Clamp requested scale to cached WaveformData scale.
1152
+ * WaveformData.resample() can only go coarser — if the requested zoom is
1153
+ * finer than the cached data, use the cached scale. Set warn=true to log
1154
+ * (default); reextractPeaks passes false and logs a single summary instead.
1155
+ */
1156
+ _clampScale(waveformData, requestedScale, warn = true) {
1157
+ if (requestedScale < waveformData.scale) {
1158
+ if (warn) {
1159
+ console.warn(
1160
+ "[dawcore] Requested zoom " + requestedScale + " spp is finer than pre-computed peaks (" + waveformData.scale + " spp) \u2014 using available resolution"
1161
+ );
1162
+ }
1163
+ return waveformData.scale;
1164
+ }
1165
+ return requestedScale;
1166
+ }
1167
+ /**
1168
+ * Return the coarsest (largest) scale among cached WaveformData entries
1169
+ * that correspond to the given clip buffers. Returns 0 if none are cached.
1170
+ */
1171
+ getMaxCachedScale(clipBuffers) {
1172
+ let max = 0;
1173
+ for (const audioBuffer of clipBuffers.values()) {
1174
+ const cached = this._cache.get(audioBuffer);
1175
+ if (cached && cached.scale > max) max = cached.scale;
1176
+ }
1177
+ return max;
1178
+ }
1131
1179
  terminate() {
1132
1180
  this._worker?.terminate();
1133
1181
  this._worker = null;
@@ -2436,6 +2484,7 @@ async function loadFiles(host, files) {
2436
2484
  clips: [
2437
2485
  {
2438
2486
  src: "",
2487
+ peaksSrc: "",
2439
2488
  start: 0,
2440
2489
  duration: audioBuffer.duration,
2441
2490
  offset: 0,
@@ -2521,6 +2570,7 @@ function addRecordedClip(host, trackId, buf, startSample, durSamples, offsetSamp
2521
2570
  const sr = host.effectiveSampleRate;
2522
2571
  const clipDesc = {
2523
2572
  src: "",
2573
+ peaksSrc: "",
2524
2574
  start: startSample / sr,
2525
2575
  duration: durSamples / sr,
2526
2576
  offset: 0,
@@ -2722,11 +2772,29 @@ function findAudioBufferForClip(host, clip, track) {
2722
2772
  return null;
2723
2773
  }
2724
2774
 
2775
+ // src/interactions/peaks-loader.ts
2776
+ import WaveformData2 from "waveform-data";
2777
+ async function loadWaveformDataFromUrl(src) {
2778
+ const response = await fetch(src);
2779
+ if (!response.ok) {
2780
+ throw new Error("[dawcore] Failed to fetch peaks data: " + response.statusText);
2781
+ }
2782
+ const { pathname } = new URL(src, globalThis.location?.href ?? "http://localhost");
2783
+ const isBinary = pathname.toLowerCase().endsWith(".dat");
2784
+ if (isBinary) {
2785
+ const arrayBuffer = await response.arrayBuffer();
2786
+ return WaveformData2.create(arrayBuffer);
2787
+ } else {
2788
+ const json = await response.json();
2789
+ return WaveformData2.create(json);
2790
+ }
2791
+ }
2792
+
2725
2793
  // src/elements/daw-editor.ts
2726
2794
  var DawEditorElement = class extends LitElement8 {
2727
2795
  constructor() {
2728
2796
  super(...arguments);
2729
- this.samplesPerPixel = 1024;
2797
+ this._samplesPerPixel = 1024;
2730
2798
  this.waveHeight = 128;
2731
2799
  this.timescale = false;
2732
2800
  this.mono = false;
@@ -2753,9 +2821,12 @@ var DawEditorElement = class extends LitElement8 {
2753
2821
  this._engine = null;
2754
2822
  this._enginePromise = null;
2755
2823
  this._audioCache = /* @__PURE__ */ new Map();
2824
+ this._peaksCache = /* @__PURE__ */ new Map();
2756
2825
  this._clipBuffers = /* @__PURE__ */ new Map();
2757
2826
  this._clipOffsets = /* @__PURE__ */ new Map();
2758
2827
  this._peakPipeline = new PeakPipeline();
2828
+ /** Coarsest scale from pre-computed peaks — zoom cannot go finer than this. 0 = no limit. */
2829
+ this._minSamplesPerPixel = 0;
2759
2830
  this._trackElements = /* @__PURE__ */ new Map();
2760
2831
  this._childObserver = null;
2761
2832
  this._audioResume = new AudioResumeController(this);
@@ -2877,6 +2948,21 @@ var DawEditorElement = class extends LitElement8 {
2877
2948
  // --- Recording ---
2878
2949
  this.recordingStream = null;
2879
2950
  }
2951
+ get samplesPerPixel() {
2952
+ return this._samplesPerPixel;
2953
+ }
2954
+ set samplesPerPixel(value) {
2955
+ const old = this._samplesPerPixel;
2956
+ if (!Number.isFinite(value) || value <= 0) return;
2957
+ const clamped = this._minSamplesPerPixel > 0 && value < this._minSamplesPerPixel ? this._minSamplesPerPixel : value;
2958
+ if (clamped !== value) {
2959
+ console.warn(
2960
+ "[dawcore] Zoom " + value + " spp rejected \u2014 pre-computed peaks limit is " + this._minSamplesPerPixel + " spp"
2961
+ );
2962
+ }
2963
+ this._samplesPerPixel = clamped;
2964
+ this.requestUpdate("samplesPerPixel", old);
2965
+ }
2880
2966
  get _clipHandler() {
2881
2967
  return this.interactiveClips ? this._clipPointer : null;
2882
2968
  }
@@ -2972,9 +3058,11 @@ var DawEditorElement = class extends LitElement8 {
2972
3058
  this._childObserver = null;
2973
3059
  this._trackElements.clear();
2974
3060
  this._audioCache.clear();
3061
+ this._peaksCache.clear();
2975
3062
  this._clipBuffers.clear();
2976
3063
  this._clipOffsets.clear();
2977
3064
  this._peakPipeline.terminate();
3065
+ this._minSamplesPerPixel = 0;
2978
3066
  try {
2979
3067
  this._disposeEngine();
2980
3068
  } catch (err) {
@@ -3024,6 +3112,7 @@ var DawEditorElement = class extends LitElement8 {
3024
3112
  if (this._engine) {
3025
3113
  this._engine.removeTrack(trackId);
3026
3114
  }
3115
+ this._minSamplesPerPixel = this._peakPipeline.getMaxCachedScale(this._clipBuffers);
3027
3116
  if (nextEngine.size === 0) {
3028
3117
  this._currentTime = 0;
3029
3118
  this._stopPlayhead();
@@ -3035,6 +3124,7 @@ var DawEditorElement = class extends LitElement8 {
3035
3124
  if (clipEls.length === 0 && trackEl.src) {
3036
3125
  clips.push({
3037
3126
  src: trackEl.src,
3127
+ peaksSrc: "",
3038
3128
  start: 0,
3039
3129
  duration: 0,
3040
3130
  offset: 0,
@@ -3048,6 +3138,7 @@ var DawEditorElement = class extends LitElement8 {
3048
3138
  for (const clipEl of clipEls) {
3049
3139
  clips.push({
3050
3140
  src: clipEl.src,
3141
+ peaksSrc: clipEl.peaksSrc,
3051
3142
  start: clipEl.start,
3052
3143
  duration: clipEl.duration,
3053
3144
  offset: clipEl.offset,
@@ -3075,7 +3166,77 @@ var DawEditorElement = class extends LitElement8 {
3075
3166
  const clips = [];
3076
3167
  for (const clipDesc of descriptor.clips) {
3077
3168
  if (!clipDesc.src) continue;
3078
- const audioBuffer = await this._fetchAndDecode(clipDesc.src);
3169
+ const waveformDataPromise = clipDesc.peaksSrc ? this._fetchPeaks(clipDesc.peaksSrc) : null;
3170
+ const audioPromise = this._fetchAndDecode(clipDesc.src);
3171
+ let waveformData = null;
3172
+ if (waveformDataPromise) {
3173
+ try {
3174
+ waveformData = await waveformDataPromise;
3175
+ } catch (err) {
3176
+ console.warn(
3177
+ "[dawcore] Failed to load peaks from " + clipDesc.peaksSrc + ": " + String(err) + " \u2014 falling back to AudioBuffer generation"
3178
+ );
3179
+ }
3180
+ }
3181
+ if (waveformData) {
3182
+ const clip2 = createClipFromSeconds2({
3183
+ waveformData,
3184
+ startTime: clipDesc.start,
3185
+ duration: clipDesc.duration || waveformData.duration,
3186
+ offset: clipDesc.offset,
3187
+ gain: clipDesc.gain,
3188
+ name: clipDesc.name,
3189
+ sampleRate: waveformData.sample_rate,
3190
+ sourceDuration: waveformData.duration
3191
+ });
3192
+ const effectiveScale = Math.max(this.samplesPerPixel, waveformData.scale);
3193
+ const peakData2 = extractPeaks(
3194
+ waveformData,
3195
+ effectiveScale,
3196
+ this.mono,
3197
+ clip2.offsetSamples,
3198
+ clip2.durationSamples
3199
+ );
3200
+ this._clipOffsets.set(clip2.id, {
3201
+ offsetSamples: clip2.offsetSamples,
3202
+ durationSamples: clip2.durationSamples
3203
+ });
3204
+ this._peaksData = new Map(this._peaksData).set(clip2.id, peakData2);
3205
+ this._minSamplesPerPixel = Math.max(this._minSamplesPerPixel, waveformData.scale);
3206
+ const previewTrack = createTrack2({
3207
+ name: descriptor.name,
3208
+ clips: [clip2],
3209
+ volume: descriptor.volume,
3210
+ pan: descriptor.pan,
3211
+ muted: descriptor.muted,
3212
+ soloed: descriptor.soloed
3213
+ });
3214
+ previewTrack.id = trackId;
3215
+ this._engineTracks = new Map(this._engineTracks).set(trackId, previewTrack);
3216
+ this._recomputeDuration();
3217
+ let audioBuffer2;
3218
+ try {
3219
+ audioBuffer2 = await audioPromise;
3220
+ } catch (audioErr) {
3221
+ const nextPeaks = new Map(this._peaksData);
3222
+ nextPeaks.delete(clip2.id);
3223
+ this._peaksData = nextPeaks;
3224
+ this._clipOffsets.delete(clip2.id);
3225
+ const nextEngine = new Map(this._engineTracks);
3226
+ nextEngine.delete(trackId);
3227
+ this._engineTracks = nextEngine;
3228
+ this._minSamplesPerPixel = this._peakPipeline.getMaxCachedScale(this._clipBuffers);
3229
+ this._recomputeDuration();
3230
+ throw audioErr;
3231
+ }
3232
+ this._resolvedSampleRate = audioBuffer2.sampleRate;
3233
+ const updatedClip = { ...clip2, audioBuffer: audioBuffer2 };
3234
+ this._clipBuffers = new Map(this._clipBuffers).set(clip2.id, audioBuffer2);
3235
+ this._peakPipeline.cacheWaveformData(audioBuffer2, waveformData);
3236
+ clips.push(updatedClip);
3237
+ continue;
3238
+ }
3239
+ const audioBuffer = await audioPromise;
3079
3240
  this._resolvedSampleRate = audioBuffer.sampleRate;
3080
3241
  const clip = createClipFromSeconds2({
3081
3242
  audioBuffer,
@@ -3157,6 +3318,16 @@ var DawEditorElement = class extends LitElement8 {
3157
3318
  throw err;
3158
3319
  }
3159
3320
  }
3321
+ _fetchPeaks(src) {
3322
+ const cached = this._peaksCache.get(src);
3323
+ if (cached) return cached;
3324
+ const promise = loadWaveformDataFromUrl(src).catch((err) => {
3325
+ this._peaksCache.delete(src);
3326
+ throw err;
3327
+ });
3328
+ this._peaksCache.set(src, promise);
3329
+ return promise;
3330
+ }
3160
3331
  _recomputeDuration() {
3161
3332
  let maxSample = 0;
3162
3333
  for (const track of this._engineTracks.values()) {
@@ -3569,8 +3740,8 @@ DawEditorElement.styles = [
3569
3740
  ];
3570
3741
  DawEditorElement._CONTROL_PROPS = /* @__PURE__ */ new Set(["volume", "pan", "muted", "soloed"]);
3571
3742
  __decorateClass([
3572
- property6({ type: Number, attribute: "samples-per-pixel" })
3573
- ], DawEditorElement.prototype, "samplesPerPixel", 2);
3743
+ property6({ type: Number, attribute: "samples-per-pixel", noAccessor: true })
3744
+ ], DawEditorElement.prototype, "samplesPerPixel", 1);
3574
3745
  __decorateClass([
3575
3746
  property6({ type: Number, attribute: "wave-height" })
3576
3747
  ], DawEditorElement.prototype, "waveHeight", 2);