@dawcore/components 0.0.3 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -755,7 +755,12 @@ DawStopButtonElement = __decorateClass([
755
755
  // src/elements/daw-editor.ts
756
756
  import { LitElement as LitElement8, html as html7, css as css7 } from "lit";
757
757
  import { customElement as customElement10, property as property6, state as state3 } from "lit/decorators.js";
758
- import { createClipFromSeconds as createClipFromSeconds2, createTrack as createTrack2, clipPixelWidth } from "@waveform-playlist/core";
758
+ import {
759
+ createClip as createClip2,
760
+ createClipFromSeconds as createClipFromSeconds2,
761
+ createTrack as createTrack2,
762
+ clipPixelWidth
763
+ } from "@waveform-playlist/core";
759
764
 
760
765
  // src/workers/peaksWorker.ts
761
766
  import WaveformData from "waveform-data";
@@ -1083,6 +1088,13 @@ var PeakPipeline = class {
1083
1088
  this._baseScale = baseScale;
1084
1089
  this._bits = bits;
1085
1090
  }
1091
+ /**
1092
+ * Inject externally-loaded WaveformData (e.g., from a .dat file) into the cache.
1093
+ * Prevents worker generation for this AudioBuffer on all subsequent calls.
1094
+ */
1095
+ cacheWaveformData(audioBuffer, waveformData) {
1096
+ this._cache.set(audioBuffer, waveformData);
1097
+ }
1086
1098
  /**
1087
1099
  * Generate PeakData for a clip from its AudioBuffer.
1088
1100
  * Uses cached WaveformData when available; otherwise generates via worker.
@@ -1090,8 +1102,9 @@ var PeakPipeline = class {
1090
1102
  */
1091
1103
  async generatePeaks(audioBuffer, samplesPerPixel, isMono, offsetSamples, durationSamples) {
1092
1104
  const waveformData = await this._getWaveformData(audioBuffer);
1105
+ const effectiveScale = this._clampScale(waveformData, samplesPerPixel);
1093
1106
  try {
1094
- return extractPeaks(waveformData, samplesPerPixel, isMono, offsetSamples, durationSamples);
1107
+ return extractPeaks(waveformData, effectiveScale, isMono, offsetSamples, durationSamples);
1095
1108
  } catch (err) {
1096
1109
  console.warn("[dawcore] extractPeaks failed: " + String(err));
1097
1110
  throw err;
@@ -1099,23 +1112,29 @@ var PeakPipeline = class {
1099
1112
  }
1100
1113
  /**
1101
1114
  * Re-extract peaks for all clips at a new zoom level using cached WaveformData.
1102
- * Only works for zoom levels coarser than (or equal to) the cached base scale.
1103
- * Returns a new Map of clipId → PeakData. Clips without cached data or where
1104
- * the target scale is finer than the cached base are skipped.
1115
+ * Returns a new Map of clipId → PeakData. Clips without cached data are skipped.
1116
+ * When the requested scale is finer than cached data, peaks are clamped to the
1117
+ * cached scale and a single summary warning is logged.
1105
1118
  */
1106
1119
  reextractPeaks(clipBuffers, samplesPerPixel, isMono, clipOffsets) {
1107
1120
  const result = /* @__PURE__ */ new Map();
1121
+ let clampedCount = 0;
1122
+ let clampedScale = 0;
1108
1123
  for (const [clipId, audioBuffer] of clipBuffers) {
1109
1124
  const cached = this._cache.get(audioBuffer);
1110
1125
  if (cached) {
1111
- if (samplesPerPixel < cached.scale) continue;
1126
+ const effectiveScale = this._clampScale(cached, samplesPerPixel, false);
1127
+ if (effectiveScale !== samplesPerPixel) {
1128
+ clampedCount++;
1129
+ clampedScale = effectiveScale;
1130
+ }
1112
1131
  try {
1113
1132
  const offsets = clipOffsets?.get(clipId);
1114
1133
  result.set(
1115
1134
  clipId,
1116
1135
  extractPeaks(
1117
1136
  cached,
1118
- samplesPerPixel,
1137
+ effectiveScale,
1119
1138
  isMono,
1120
1139
  offsets?.offsetSamples,
1121
1140
  offsets?.durationSamples
@@ -1126,8 +1145,42 @@ var PeakPipeline = class {
1126
1145
  }
1127
1146
  }
1128
1147
  }
1148
+ if (clampedCount > 0) {
1149
+ console.warn(
1150
+ "[dawcore] Requested zoom " + samplesPerPixel + " spp is finer than pre-computed peaks (" + clampedScale + " spp) \u2014 " + clampedCount + " clip(s) using available resolution"
1151
+ );
1152
+ }
1129
1153
  return result;
1130
1154
  }
1155
+ /**
1156
+ * Clamp requested scale to cached WaveformData scale.
1157
+ * WaveformData.resample() can only go coarser — if the requested zoom is
1158
+ * finer than the cached data, use the cached scale. Set warn=true to log
1159
+ * (default); reextractPeaks passes false and logs a single summary instead.
1160
+ */
1161
+ _clampScale(waveformData, requestedScale, warn = true) {
1162
+ if (requestedScale < waveformData.scale) {
1163
+ if (warn) {
1164
+ console.warn(
1165
+ "[dawcore] Requested zoom " + requestedScale + " spp is finer than pre-computed peaks (" + waveformData.scale + " spp) \u2014 using available resolution"
1166
+ );
1167
+ }
1168
+ return waveformData.scale;
1169
+ }
1170
+ return requestedScale;
1171
+ }
1172
+ /**
1173
+ * Return the coarsest (largest) scale among cached WaveformData entries
1174
+ * that correspond to the given clip buffers. Returns 0 if none are cached.
1175
+ */
1176
+ getMaxCachedScale(clipBuffers) {
1177
+ let max = 0;
1178
+ for (const audioBuffer of clipBuffers.values()) {
1179
+ const cached = this._cache.get(audioBuffer);
1180
+ if (cached && cached.scale > max) max = cached.scale;
1181
+ }
1182
+ return max;
1183
+ }
1131
1184
  terminate() {
1132
1185
  this._worker?.terminate();
1133
1186
  this._worker = null;
@@ -2436,6 +2489,7 @@ async function loadFiles(host, files) {
2436
2489
  clips: [
2437
2490
  {
2438
2491
  src: "",
2492
+ peaksSrc: "",
2439
2493
  start: 0,
2440
2494
  duration: audioBuffer.duration,
2441
2495
  offset: 0,
@@ -2521,6 +2575,7 @@ function addRecordedClip(host, trackId, buf, startSample, durSamples, offsetSamp
2521
2575
  const sr = host.effectiveSampleRate;
2522
2576
  const clipDesc = {
2523
2577
  src: "",
2578
+ peaksSrc: "",
2524
2579
  start: startSample / sr,
2525
2580
  duration: durSamples / sr,
2526
2581
  offset: 0,
@@ -2722,11 +2777,29 @@ function findAudioBufferForClip(host, clip, track) {
2722
2777
  return null;
2723
2778
  }
2724
2779
 
2780
+ // src/interactions/peaks-loader.ts
2781
+ import WaveformData2 from "waveform-data";
2782
+ async function loadWaveformDataFromUrl(src) {
2783
+ const response = await fetch(src);
2784
+ if (!response.ok) {
2785
+ throw new Error("[dawcore] Failed to fetch peaks data: " + response.statusText);
2786
+ }
2787
+ const { pathname } = new URL(src, globalThis.location?.href ?? "http://localhost");
2788
+ const isBinary = pathname.toLowerCase().endsWith(".dat");
2789
+ if (isBinary) {
2790
+ const arrayBuffer = await response.arrayBuffer();
2791
+ return WaveformData2.create(arrayBuffer);
2792
+ } else {
2793
+ const json = await response.json();
2794
+ return WaveformData2.create(json);
2795
+ }
2796
+ }
2797
+
2725
2798
  // src/elements/daw-editor.ts
2726
2799
  var DawEditorElement = class extends LitElement8 {
2727
2800
  constructor() {
2728
2801
  super(...arguments);
2729
- this.samplesPerPixel = 1024;
2802
+ this._samplesPerPixel = 1024;
2730
2803
  this.waveHeight = 128;
2731
2804
  this.timescale = false;
2732
2805
  this.mono = false;
@@ -2753,9 +2826,12 @@ var DawEditorElement = class extends LitElement8 {
2753
2826
  this._engine = null;
2754
2827
  this._enginePromise = null;
2755
2828
  this._audioCache = /* @__PURE__ */ new Map();
2829
+ this._peaksCache = /* @__PURE__ */ new Map();
2756
2830
  this._clipBuffers = /* @__PURE__ */ new Map();
2757
2831
  this._clipOffsets = /* @__PURE__ */ new Map();
2758
2832
  this._peakPipeline = new PeakPipeline();
2833
+ /** Coarsest scale from pre-computed peaks — zoom cannot go finer than this. 0 = no limit. */
2834
+ this._minSamplesPerPixel = 0;
2759
2835
  this._trackElements = /* @__PURE__ */ new Map();
2760
2836
  this._childObserver = null;
2761
2837
  this._audioResume = new AudioResumeController(this);
@@ -2841,6 +2917,7 @@ var DawEditorElement = class extends LitElement8 {
2841
2917
  this._onTrackRemoved(trackId);
2842
2918
  }
2843
2919
  };
2920
+ this._contextConfigurePromise = null;
2844
2921
  // --- File Drop ---
2845
2922
  this._onDragOver = (e) => {
2846
2923
  if (!this.fileDrop) return;
@@ -2877,6 +2954,21 @@ var DawEditorElement = class extends LitElement8 {
2877
2954
  // --- Recording ---
2878
2955
  this.recordingStream = null;
2879
2956
  }
2957
+ get samplesPerPixel() {
2958
+ return this._samplesPerPixel;
2959
+ }
2960
+ set samplesPerPixel(value) {
2961
+ const old = this._samplesPerPixel;
2962
+ if (!Number.isFinite(value) || value <= 0) return;
2963
+ const clamped = this._minSamplesPerPixel > 0 && value < this._minSamplesPerPixel ? this._minSamplesPerPixel : value;
2964
+ if (clamped !== value) {
2965
+ console.warn(
2966
+ "[dawcore] Zoom " + value + " spp rejected \u2014 pre-computed peaks limit is " + this._minSamplesPerPixel + " spp"
2967
+ );
2968
+ }
2969
+ this._samplesPerPixel = clamped;
2970
+ this.requestUpdate("samplesPerPixel", old);
2971
+ }
2880
2972
  get _clipHandler() {
2881
2973
  return this.interactiveClips ? this._clipPointer : null;
2882
2974
  }
@@ -2972,9 +3064,12 @@ var DawEditorElement = class extends LitElement8 {
2972
3064
  this._childObserver = null;
2973
3065
  this._trackElements.clear();
2974
3066
  this._audioCache.clear();
3067
+ this._peaksCache.clear();
2975
3068
  this._clipBuffers.clear();
2976
3069
  this._clipOffsets.clear();
2977
3070
  this._peakPipeline.terminate();
3071
+ this._minSamplesPerPixel = 0;
3072
+ this._contextConfigurePromise = null;
2978
3073
  try {
2979
3074
  this._disposeEngine();
2980
3075
  } catch (err) {
@@ -3024,6 +3119,7 @@ var DawEditorElement = class extends LitElement8 {
3024
3119
  if (this._engine) {
3025
3120
  this._engine.removeTrack(trackId);
3026
3121
  }
3122
+ this._minSamplesPerPixel = this._peakPipeline.getMaxCachedScale(this._clipBuffers);
3027
3123
  if (nextEngine.size === 0) {
3028
3124
  this._currentTime = 0;
3029
3125
  this._stopPlayhead();
@@ -3035,6 +3131,7 @@ var DawEditorElement = class extends LitElement8 {
3035
3131
  if (clipEls.length === 0 && trackEl.src) {
3036
3132
  clips.push({
3037
3133
  src: trackEl.src,
3134
+ peaksSrc: "",
3038
3135
  start: 0,
3039
3136
  duration: 0,
3040
3137
  offset: 0,
@@ -3048,6 +3145,7 @@ var DawEditorElement = class extends LitElement8 {
3048
3145
  for (const clipEl of clipEls) {
3049
3146
  clips.push({
3050
3147
  src: clipEl.src,
3148
+ peaksSrc: clipEl.peaksSrc,
3051
3149
  start: clipEl.start,
3052
3150
  duration: clipEl.duration,
3053
3151
  offset: clipEl.offset,
@@ -3075,7 +3173,88 @@ var DawEditorElement = class extends LitElement8 {
3075
3173
  const clips = [];
3076
3174
  for (const clipDesc of descriptor.clips) {
3077
3175
  if (!clipDesc.src) continue;
3078
- const audioBuffer = await this._fetchAndDecode(clipDesc.src);
3176
+ const waveformDataPromise = clipDesc.peaksSrc ? this._fetchPeaks(clipDesc.peaksSrc) : null;
3177
+ const audioPromise = this._fetchAndDecode(clipDesc.src);
3178
+ let waveformData = null;
3179
+ if (waveformDataPromise) {
3180
+ try {
3181
+ const wd = await waveformDataPromise;
3182
+ await this._ensureContextConfigured();
3183
+ const { getGlobalAudioContext } = await import("@waveform-playlist/playout");
3184
+ const contextRate = getGlobalAudioContext().sampleRate;
3185
+ if (wd.sample_rate === contextRate) {
3186
+ waveformData = wd;
3187
+ } else {
3188
+ console.warn(
3189
+ "[dawcore] Pre-computed peaks at " + wd.sample_rate + " Hz do not match AudioContext at " + contextRate + " Hz \u2014 ignoring " + clipDesc.peaksSrc + ", generating from audio"
3190
+ );
3191
+ }
3192
+ } catch (err) {
3193
+ console.warn(
3194
+ "[dawcore] Failed to load peaks from " + clipDesc.peaksSrc + ": " + String(err) + " \u2014 falling back to AudioBuffer generation"
3195
+ );
3196
+ }
3197
+ }
3198
+ if (waveformData) {
3199
+ const wdRate = waveformData.sample_rate;
3200
+ const clip2 = createClip2({
3201
+ waveformData,
3202
+ startSample: Math.round(clipDesc.start * wdRate),
3203
+ durationSamples: Math.round((clipDesc.duration || waveformData.duration) * wdRate),
3204
+ offsetSamples: Math.round(clipDesc.offset * wdRate),
3205
+ gain: clipDesc.gain,
3206
+ name: clipDesc.name,
3207
+ sampleRate: wdRate,
3208
+ sourceDurationSamples: Math.ceil(waveformData.duration * wdRate)
3209
+ });
3210
+ const effectiveScale = Math.max(this.samplesPerPixel, waveformData.scale);
3211
+ const peakData2 = extractPeaks(
3212
+ waveformData,
3213
+ effectiveScale,
3214
+ this.mono,
3215
+ clip2.offsetSamples,
3216
+ clip2.durationSamples
3217
+ );
3218
+ this._clipOffsets.set(clip2.id, {
3219
+ offsetSamples: clip2.offsetSamples,
3220
+ durationSamples: clip2.durationSamples
3221
+ });
3222
+ this._peaksData = new Map(this._peaksData).set(clip2.id, peakData2);
3223
+ this._minSamplesPerPixel = Math.max(this._minSamplesPerPixel, waveformData.scale);
3224
+ const previewTrack = createTrack2({
3225
+ name: descriptor.name,
3226
+ clips: [clip2],
3227
+ volume: descriptor.volume,
3228
+ pan: descriptor.pan,
3229
+ muted: descriptor.muted,
3230
+ soloed: descriptor.soloed
3231
+ });
3232
+ previewTrack.id = trackId;
3233
+ this._engineTracks = new Map(this._engineTracks).set(trackId, previewTrack);
3234
+ this._recomputeDuration();
3235
+ let audioBuffer2;
3236
+ try {
3237
+ audioBuffer2 = await audioPromise;
3238
+ } catch (audioErr) {
3239
+ const nextPeaks = new Map(this._peaksData);
3240
+ nextPeaks.delete(clip2.id);
3241
+ this._peaksData = nextPeaks;
3242
+ this._clipOffsets.delete(clip2.id);
3243
+ const nextEngine = new Map(this._engineTracks);
3244
+ nextEngine.delete(trackId);
3245
+ this._engineTracks = nextEngine;
3246
+ this._minSamplesPerPixel = this._peakPipeline.getMaxCachedScale(this._clipBuffers);
3247
+ this._recomputeDuration();
3248
+ throw audioErr;
3249
+ }
3250
+ this._resolvedSampleRate = audioBuffer2.sampleRate;
3251
+ const updatedClip = { ...clip2, audioBuffer: audioBuffer2 };
3252
+ this._clipBuffers = new Map(this._clipBuffers).set(clip2.id, audioBuffer2);
3253
+ this._peakPipeline.cacheWaveformData(audioBuffer2, waveformData);
3254
+ clips.push(updatedClip);
3255
+ continue;
3256
+ }
3257
+ const audioBuffer = await audioPromise;
3079
3258
  this._resolvedSampleRate = audioBuffer.sampleRate;
3080
3259
  const clip = createClipFromSeconds2({
3081
3260
  audioBuffer,
@@ -3134,6 +3313,30 @@ var DawEditorElement = class extends LitElement8 {
3134
3313
  );
3135
3314
  }
3136
3315
  }
3316
+ /**
3317
+ * Ensure the global AudioContext is configured with the editor's sample-rate hint
3318
+ * before the first audio operation. Idempotent — concurrent callers await the
3319
+ * same promise so no one proceeds to getGlobalAudioContext() before configuration.
3320
+ */
3321
+ _ensureContextConfigured() {
3322
+ if (!this._contextConfigurePromise) {
3323
+ this._contextConfigurePromise = (async () => {
3324
+ const { configureGlobalContext } = await import("@waveform-playlist/playout");
3325
+ const actualRate = configureGlobalContext({
3326
+ sampleRate: this.sampleRate
3327
+ });
3328
+ if (actualRate !== this.sampleRate) {
3329
+ console.warn(
3330
+ "[dawcore] Requested sampleRate " + this.sampleRate + " but AudioContext is running at " + actualRate
3331
+ );
3332
+ }
3333
+ })().catch((err) => {
3334
+ this._contextConfigurePromise = null;
3335
+ throw err;
3336
+ });
3337
+ }
3338
+ return this._contextConfigurePromise;
3339
+ }
3137
3340
  async _fetchAndDecode(src) {
3138
3341
  if (this._audioCache.has(src)) {
3139
3342
  return this._audioCache.get(src);
@@ -3146,6 +3349,7 @@ var DawEditorElement = class extends LitElement8 {
3146
3349
  );
3147
3350
  }
3148
3351
  const arrayBuffer = await response.arrayBuffer();
3352
+ await this._ensureContextConfigured();
3149
3353
  const { getGlobalAudioContext } = await import("@waveform-playlist/playout");
3150
3354
  return getGlobalAudioContext().decodeAudioData(arrayBuffer);
3151
3355
  })();
@@ -3157,6 +3361,16 @@ var DawEditorElement = class extends LitElement8 {
3157
3361
  throw err;
3158
3362
  }
3159
3363
  }
3364
+ _fetchPeaks(src) {
3365
+ const cached = this._peaksCache.get(src);
3366
+ if (cached) return cached;
3367
+ const promise = loadWaveformDataFromUrl(src).catch((err) => {
3368
+ this._peaksCache.delete(src);
3369
+ throw err;
3370
+ });
3371
+ this._peaksCache.set(src, promise);
3372
+ return promise;
3373
+ }
3160
3374
  _recomputeDuration() {
3161
3375
  let maxSample = 0;
3162
3376
  for (const track of this._engineTracks.values()) {
@@ -3569,8 +3783,8 @@ DawEditorElement.styles = [
3569
3783
  ];
3570
3784
  DawEditorElement._CONTROL_PROPS = /* @__PURE__ */ new Set(["volume", "pan", "muted", "soloed"]);
3571
3785
  __decorateClass([
3572
- property6({ type: Number, attribute: "samples-per-pixel" })
3573
- ], DawEditorElement.prototype, "samplesPerPixel", 2);
3786
+ property6({ type: Number, attribute: "samples-per-pixel", noAccessor: true })
3787
+ ], DawEditorElement.prototype, "samplesPerPixel", 1);
3574
3788
  __decorateClass([
3575
3789
  property6({ type: Number, attribute: "wave-height" })
3576
3790
  ], DawEditorElement.prototype, "waveHeight", 2);