@dawcore/components 0.0.4 → 0.0.7

package/README.md ADDED
@@ -0,0 +1,170 @@
+ # @dawcore/components
+
+ Framework-agnostic Web Components for multi-track audio editing. Drop `<daw-editor>` into any HTML page — no React, no build step required.
+
+ ## Features
+
+ - **Pure Web Components** — Works with vanilla HTML, React, Vue, Svelte, or any framework
+ - **Declarative tracks** — `<daw-track>` and `<daw-clip>` elements define your timeline in HTML
+ - **Canvas waveforms** — Chunked rendering with virtual scrolling for large timelines
+ - **Drag interactions** — Move clips, trim boundaries, split at playhead
+ - **Keyboard shortcuts** — Play/pause, split, undo/redo via `<daw-keyboard-shortcuts>`
+ - **File drop** — Drag audio files onto the editor to add tracks
+ - **Recording** — Live mic recording with waveform preview (optional)
+ - **Pre-computed peaks** — Instant waveform rendering from `.dat` files before audio decodes
+ - **CSS theming** — Dark mode by default, fully customizable via CSS custom properties
+ - **Native Web Audio** — Uses `@dawcore/transport` for playback scheduling. No Tone.js dependency.
+
+ ## Installation
+
+ ```bash
+ npm install @dawcore/components
+ ```
+
+ Peer dependencies:
+ ```bash
+ npm install @waveform-playlist/core @waveform-playlist/engine @dawcore/transport
+ ```
+
+ Optional (for recording):
+ ```bash
+ npm install @waveform-playlist/recording @waveform-playlist/worklets
+ ```
+
+ ## Quick Start
+
+ ```html
+ <script type="module">
+   import '@dawcore/components';
+ </script>
+
+ <daw-editor id="editor" samples-per-pixel="1024" wave-height="100" timescale>
+   <daw-track src="/audio/drums.opus" name="Drums"></daw-track>
+   <daw-track src="/audio/bass.opus" name="Bass"></daw-track>
+   <daw-track src="/audio/synth.opus" name="Synth"></daw-track>
+ </daw-editor>
+
+ <daw-transport for="editor">
+   <daw-play-button></daw-play-button>
+   <daw-pause-button></daw-pause-button>
+   <daw-stop-button></daw-stop-button>
+ </daw-transport>
+ ```
+
+ That's it. The editor loads audio, generates waveforms, and handles playback.
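+
+ Because these are plain custom elements, the same editor can also be assembled imperatively, which is handy when track sources come from an API. Below is a minimal vanilla-JS sketch using only the elements and attributes shown above; the `/api/tracks` endpoint is a made-up placeholder, and the code assumes it runs inside a `<script type="module">`.
+
+ ```javascript
+ import '@dawcore/components';
+
+ // Placeholder endpoint returning [{ url, name }, ...]
+ const tracks = await (await fetch('/api/tracks')).json();
+
+ const editor = document.createElement('daw-editor');
+ editor.id = 'editor';
+ editor.setAttribute('samples-per-pixel', '1024');
+ editor.setAttribute('wave-height', '100');
+ editor.setAttribute('timescale', ''); // boolean attribute
+
+ for (const { url, name } of tracks) {
+   const track = document.createElement('daw-track');
+   track.setAttribute('src', url);
+   track.setAttribute('name', name);
+   editor.appendChild(track);
+ }
+
+ document.body.appendChild(editor);
+ ```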
+
+ ## Multi-Clip Timeline
+
+ For multiple clips per track with independent positioning, each `<daw-clip>` takes a `start` (timeline position in seconds), a `duration` (clip length in seconds), and an optional `offset` (how far into the source audio the clip begins):
+
+ ```html
+ <daw-editor id="editor" samples-per-pixel="1024" wave-height="80"
+             timescale clip-headers interactive-clips>
+   <daw-keyboard-shortcuts playback splitting undo></daw-keyboard-shortcuts>
+
+   <daw-track name="Drums">
+     <daw-clip src="/audio/drums.opus" start="0" duration="8"></daw-clip>
+     <daw-clip src="/audio/drums.opus" start="12" duration="8" offset="8"></daw-clip>
+   </daw-track>
+
+   <daw-track name="Bass">
+     <daw-clip src="/audio/bass.opus" start="0" duration="20"></daw-clip>
+   </daw-track>
+ </daw-editor>
+ ```
+
+ ## Pre-Computed Peaks
+
+ For instant waveform rendering before audio finishes decoding:
+
+ ```html
+ <daw-track name="Drums">
+   <daw-clip src="/audio/drums.opus"
+             peaks-src="/audio/drums.dat"
+             start="0" duration="8"></daw-clip>
+ </daw-track>
+ ```
+
+ The `.dat` file renders the waveform immediately, and the audio decodes in the background for playback. Peaks are only used when their sample rate matches the editor's `AudioContext`; on a mismatch the editor logs a warning and generates the waveform from the decoded audio instead.
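+
+ Peaks files can be inspected if a waveform unexpectedly falls back to decode-time rendering. The sketch below assumes the `.dat` uses the common `audiowaveform` binary layout (little-endian 32-bit header fields: version, flags, sample rate, samples per pixel, length); if your peaks come from another tool the offsets may differ.
+
+ ```javascript
+ // Log the header of a peaks file (assumes the audiowaveform binary data format).
+ async function inspectPeaks(url) {
+   const buf = await (await fetch(url)).arrayBuffer();
+   const view = new DataView(buf);
+   const header = {
+     version: view.getInt32(0, true),
+     bits: view.getUint32(4, true) === 1 ? 8 : 16,
+     sampleRate: view.getInt32(8, true),       // should match the editor's AudioContext rate
+     samplesPerPixel: view.getInt32(12, true), // compare with the samples-per-pixel attribute
+     length: view.getUint32(16, true),
+   };
+   console.table(header);
+   return header;
+ }
+
+ inspectPeaks('/audio/drums.dat');
+ ```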
+
+ ## CSS Theming
+
+ Style with CSS custom properties on `<daw-editor>` or any ancestor:
+
+ ```css
+ daw-editor {
+   --daw-wave-color: #c49a6c;
+   --daw-playhead-color: #d08070;
+   --daw-background: #1a1a2e;
+   --daw-track-background: #16213e;
+   --daw-ruler-color: #c49a6c;
+   --daw-ruler-background: #0f0f1a;
+   --daw-selection-color: rgba(99, 199, 95, 0.3);
+   --daw-controls-background: #1a1a2e;
+   --daw-controls-text: #e0d4c8;
+   --daw-clip-header-background: rgba(0, 0, 0, 0.4);
+   --daw-clip-header-text: #e0d4c8;
+   --daw-controls-width: 180px;
+   --daw-min-height: 200px;
+ }
+ ```
+
+ ## Recording
+
+ ```html
+ <daw-editor id="editor" samples-per-pixel="1024" wave-height="100">
+   <daw-track name="Recording"></daw-track>
+ </daw-editor>
+
+ <daw-transport for="editor">
+   <daw-play-button></daw-play-button>
+   <daw-pause-button></daw-pause-button>
+   <daw-stop-button></daw-stop-button>
+   <daw-record-button></daw-record-button>
+ </daw-transport>
+
+ <script type="module">
+   const editor = document.getElementById('editor');
+   // Consumer provides the mic stream
+   const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+   editor.recordingStream = stream;
+ </script>
+ ```
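+
+ `getUserMedia` only works in secure contexts (https or localhost) and the user can deny the prompt, so in practice the call is worth guarding before handing the stream to the editor. A sketch; the `enable-mic` button is hypothetical:
+
+ ```javascript
+ const editor = document.getElementById('editor');
+
+ async function enableRecording() {
+   if (!window.isSecureContext || !navigator.mediaDevices?.getUserMedia) {
+     console.warn('Recording requires a secure context with getUserMedia support.');
+     return;
+   }
+   try {
+     // Request the mic in response to a user gesture.
+     editor.recordingStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+   } catch (err) {
+     console.error('Microphone access failed or was denied:', err);
+   }
+ }
+
+ document.getElementById('enable-mic')?.addEventListener('click', enableRecording);
+ ```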
+
+ ## Events
+
+ Listen for editor events on the `<daw-editor>` element:
+
+ ```javascript
+ const editor = document.getElementById('editor');
+
+ editor.addEventListener('daw-play', () => console.log('playing'));
+ editor.addEventListener('daw-pause', () => console.log('paused'));
+ editor.addEventListener('daw-stop', () => console.log('stopped'));
+ editor.addEventListener('daw-seek', (e) => console.log('seek:', e.detail.time));
+ editor.addEventListener('daw-selection', (e) => console.log('selection:', e.detail));
+ editor.addEventListener('daw-track-select', (e) => console.log('track:', e.detail.trackId));
+ editor.addEventListener('daw-clip-move', (e) => console.log('move:', e.detail));
+ editor.addEventListener('daw-clip-trim', (e) => console.log('trim:', e.detail));
+ editor.addEventListener('daw-clip-split', (e) => console.log('split:', e.detail));
+ editor.addEventListener('daw-track-error', (e) => console.error('error:', e.detail));
+ ```
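+
+ These are ordinary DOM `CustomEvent`s, so they plug into any state handling you already have. As one small example (the `#status` element is hypothetical; the event names and `detail` fields are the ones listed above):
+
+ ```javascript
+ const editor = document.getElementById('editor');
+ const status = document.querySelector('#status'); // e.g. <span id="status"></span>
+ const show = (text) => { status.textContent = text; };
+
+ editor.addEventListener('daw-play', () => show('Playing'));
+ editor.addEventListener('daw-pause', () => show('Paused'));
+ editor.addEventListener('daw-stop', () => show('Stopped'));
+ editor.addEventListener('daw-seek', (e) => show(`Seeked to ${e.detail.time.toFixed(2)}s`));
+ ```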
+
+ ## Custom AudioContext
+
+ By default, `<daw-editor>` creates its own `AudioContext` using the `sample-rate` attribute. To provide your own:
+
+ ```javascript
+ const editor = document.getElementById('editor');
+ editor.audioContext = new AudioContext({ sampleRate: 48000, latencyHint: 0 });
+ ```
+
+ Set this before tracks load. The provided context is used for decoding, playback, and recording.
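+
+ The editor also exposes the context it ends up using (owned or provided) as an `audioContext` property, so application code can reuse it, for example to decode and play a one-off sound at the same sample rate. A sketch; the click sample URL and the `click-btn` button are placeholders, and the code assumes a `<script type="module">`:
+
+ ```javascript
+ const editor = document.getElementById('editor');
+ const ctx = editor.audioContext; // whichever context the editor is using
+
+ // Decode an extra asset with the same context so sample rates stay consistent.
+ const resp = await fetch('/audio/click.opus'); // placeholder asset
+ const clickBuffer = await ctx.decodeAudioData(await resp.arrayBuffer());
+
+ function playClick() {
+   const src = ctx.createBufferSource();
+   src.buffer = clickBuffer;
+   src.connect(ctx.destination);
+   src.start();
+ }
+
+ document.getElementById('click-btn')?.addEventListener('click', playClick); // hypothetical button
+ ```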
+
+ ## API
+
+ See [COMPONENTS.md](./COMPONENTS.md) for the full element and attribute reference.
+
+ ## License
+
+ MIT
package/dist/index.d.mts CHANGED
@@ -278,6 +278,8 @@ declare global {
278
278
  interface RecordingOptions {
279
279
  trackId?: string;
280
280
  bits?: 8 | 16;
281
+ /** Fallback channel count when stream doesn't report one via getSettings(). Must be 1 or 2. */
282
+ channelCount?: 1 | 2;
281
283
  startSample?: number;
282
284
  /** Start playback during recording so user hears existing tracks. */
283
285
  overdub?: boolean;
@@ -300,7 +302,7 @@ interface RecordingSession {
300
302
  readonly channelCount: number;
301
303
  readonly bits: Bits;
302
304
  isFirstMessage: boolean;
303
- /** Latency samples to skip in live preview (outputLatency + lookAhead). */
305
+ /** Latency samples to skip in live preview (outputLatency only). */
304
306
  readonly latencySamples: number;
305
307
  readonly wasOverdub: boolean;
306
308
  /** Stored so it can be removed on stop/cleanup — not just when stream ends. */
@@ -315,6 +317,7 @@ type ReadonlyRecordingSession = Readonly<Omit<RecordingSession, 'chunks' | 'peak
315
317
  };
316
318
  /** Narrow interface for the host editor. */
317
319
  interface RecordingHost extends ReactiveControllerHost {
320
+ readonly audioContext: AudioContext;
318
321
  readonly samplesPerPixel: number;
319
322
  readonly effectiveSampleRate: number;
320
323
  readonly _selectedTrackId: string | null;
@@ -329,7 +332,7 @@ interface RecordingHost extends ReactiveControllerHost {
329
332
  declare class RecordingController implements ReactiveController {
330
333
  private _host;
331
334
  private _sessions;
332
- private _workletLoaded;
335
+ private _workletLoadedCtx;
333
336
  constructor(host: RecordingHost & HTMLElement);
334
337
  hostConnected(): void;
335
338
  hostDisconnected(): void;
@@ -547,7 +550,8 @@ declare class DawEditorElement extends LitElement {
547
550
  clipHeaders: boolean;
548
551
  clipHeaderHeight: number;
549
552
  interactiveClips: boolean;
550
- /** Initial sample rate hint. Overridden by decoded audio buffer's actual rate. */
553
+ /** Desired sample rate. Creates a cross-browser AudioContext at this rate.
554
+ * Pre-computed .dat peaks render instantly when they match. */
551
555
  sampleRate: number;
552
556
  /** Resolved sample rate — falls back to sampleRate property until first audio decode. */
553
557
  _resolvedSampleRate: number | null;
@@ -561,6 +565,12 @@ declare class DawEditorElement extends LitElement {
561
565
  _selectionStartTime: number;
562
566
  _selectionEndTime: number;
563
567
  _currentTime: number;
568
+ /** Consumer-provided AudioContext. When set, used for decode, playback, and recording. */
569
+ private _externalAudioContext;
570
+ private _ownedAudioContext;
571
+ /** Set an AudioContext to use for all audio operations. Must be set before tracks load. */
572
+ set audioContext(ctx: AudioContext | null);
573
+ get audioContext(): AudioContext;
564
574
  _engine: PlaylistEngine | null;
565
575
  private _enginePromise;
566
576
  _audioCache: Map<string, Promise<AudioBuffer>>;
@@ -761,6 +771,10 @@ declare global {
761
771
  }
762
772
  }
763
773
 
774
+ interface AudioResumeHost extends ReactiveControllerHost, HTMLElement {
775
+ /** Returns the AudioContext to resume on user gesture */
776
+ readonly audioContext: AudioContext;
777
+ }
764
778
  declare class AudioResumeController implements ReactiveController {
765
779
  private _host;
766
780
  private _target;
@@ -768,7 +782,7 @@ declare class AudioResumeController implements ReactiveController {
768
782
  private _generation;
769
783
  /** CSS selector, or 'document'. When undefined, controller is inert. */
770
784
  target?: string;
771
- constructor(host: ReactiveControllerHost & HTMLElement);
785
+ constructor(host: AudioResumeHost);
772
786
  hostConnected(): void;
773
787
  hostDisconnected(): void;
774
788
  private _onGesture;
package/dist/index.d.ts CHANGED
@@ -278,6 +278,8 @@ declare global {
278
278
  interface RecordingOptions {
279
279
  trackId?: string;
280
280
  bits?: 8 | 16;
281
+ /** Fallback channel count when stream doesn't report one via getSettings(). Must be 1 or 2. */
282
+ channelCount?: 1 | 2;
281
283
  startSample?: number;
282
284
  /** Start playback during recording so user hears existing tracks. */
283
285
  overdub?: boolean;
@@ -300,7 +302,7 @@ interface RecordingSession {
300
302
  readonly channelCount: number;
301
303
  readonly bits: Bits;
302
304
  isFirstMessage: boolean;
303
- /** Latency samples to skip in live preview (outputLatency + lookAhead). */
305
+ /** Latency samples to skip in live preview (outputLatency only). */
304
306
  readonly latencySamples: number;
305
307
  readonly wasOverdub: boolean;
306
308
  /** Stored so it can be removed on stop/cleanup — not just when stream ends. */
@@ -315,6 +317,7 @@ type ReadonlyRecordingSession = Readonly<Omit<RecordingSession, 'chunks' | 'peak
315
317
  };
316
318
  /** Narrow interface for the host editor. */
317
319
  interface RecordingHost extends ReactiveControllerHost {
320
+ readonly audioContext: AudioContext;
318
321
  readonly samplesPerPixel: number;
319
322
  readonly effectiveSampleRate: number;
320
323
  readonly _selectedTrackId: string | null;
@@ -329,7 +332,7 @@ interface RecordingHost extends ReactiveControllerHost {
329
332
  declare class RecordingController implements ReactiveController {
330
333
  private _host;
331
334
  private _sessions;
332
- private _workletLoaded;
335
+ private _workletLoadedCtx;
333
336
  constructor(host: RecordingHost & HTMLElement);
334
337
  hostConnected(): void;
335
338
  hostDisconnected(): void;
@@ -547,7 +550,8 @@ declare class DawEditorElement extends LitElement {
547
550
  clipHeaders: boolean;
548
551
  clipHeaderHeight: number;
549
552
  interactiveClips: boolean;
550
- /** Initial sample rate hint. Overridden by decoded audio buffer's actual rate. */
553
+ /** Desired sample rate. Creates a cross-browser AudioContext at this rate.
554
+ * Pre-computed .dat peaks render instantly when they match. */
551
555
  sampleRate: number;
552
556
  /** Resolved sample rate — falls back to sampleRate property until first audio decode. */
553
557
  _resolvedSampleRate: number | null;
@@ -561,6 +565,12 @@ declare class DawEditorElement extends LitElement {
561
565
  _selectionStartTime: number;
562
566
  _selectionEndTime: number;
563
567
  _currentTime: number;
568
+ /** Consumer-provided AudioContext. When set, used for decode, playback, and recording. */
569
+ private _externalAudioContext;
570
+ private _ownedAudioContext;
571
+ /** Set an AudioContext to use for all audio operations. Must be set before tracks load. */
572
+ set audioContext(ctx: AudioContext | null);
573
+ get audioContext(): AudioContext;
564
574
  _engine: PlaylistEngine | null;
565
575
  private _enginePromise;
566
576
  _audioCache: Map<string, Promise<AudioBuffer>>;
@@ -761,6 +771,10 @@ declare global {
761
771
  }
762
772
  }
763
773
 
774
+ interface AudioResumeHost extends ReactiveControllerHost, HTMLElement {
775
+ /** Returns the AudioContext to resume on user gesture */
776
+ readonly audioContext: AudioContext;
777
+ }
764
778
  declare class AudioResumeController implements ReactiveController {
765
779
  private _host;
766
780
  private _target;
@@ -768,7 +782,7 @@ declare class AudioResumeController implements ReactiveController {
768
782
  private _generation;
769
783
  /** CSS selector, or 'document'. When undefined, controller is inert. */
770
784
  target?: string;
771
- constructor(host: ReactiveControllerHost & HTMLElement);
785
+ constructor(host: AudioResumeHost);
772
786
  hostConnected(): void;
773
787
  hostDisconnected(): void;
774
788
  private _onGesture;
package/dist/index.js CHANGED
@@ -1668,18 +1668,22 @@ var ViewportController = class {
1668
1668
  };
1669
1669
 
1670
1670
  // src/controllers/audio-resume-controller.ts
1671
- var import_playout = require("@waveform-playlist/playout");
1672
1671
  var AudioResumeController = class {
1673
1672
  constructor(host) {
1674
1673
  this._target = null;
1675
1674
  this._attached = false;
1676
1675
  this._generation = 0;
1677
1676
  this._onGesture = (e) => {
1678
- (0, import_playout.resumeGlobalAudioContext)().catch((err) => {
1679
- console.warn(
1680
- "[dawcore] AudioResumeController: eager resume failed, will retry on play: " + String(err)
1681
- );
1682
- });
1677
+ const ctx = this._host.audioContext;
1678
+ if (ctx.state === "closed") {
1679
+ console.warn("[dawcore] AudioResumeController: AudioContext is closed, cannot resume.");
1680
+ } else if (ctx.state === "suspended") {
1681
+ ctx.resume().catch((err) => {
1682
+ console.warn(
1683
+ "[dawcore] AudioResumeController: eager resume failed, will retry on play: " + String(err)
1684
+ );
1685
+ });
1686
+ }
1683
1687
  const otherType = e.type === "pointerdown" ? "keydown" : "pointerdown";
1684
1688
  this._target?.removeEventListener(otherType, this._onGesture, {
1685
1689
  capture: true
@@ -1747,13 +1751,12 @@ var AudioResumeController = class {
1747
1751
  };
1748
1752
 
1749
1753
  // src/controllers/recording-controller.ts
1750
- var import_playout2 = require("@waveform-playlist/playout");
1751
1754
  var import_worklets = require("@waveform-playlist/worklets");
1752
1755
  var import_recording = require("@waveform-playlist/recording");
1753
1756
  var RecordingController = class {
1754
1757
  constructor(host) {
1755
1758
  this._sessions = /* @__PURE__ */ new Map();
1756
- this._workletLoaded = false;
1759
+ this._workletLoadedCtx = null;
1757
1760
  this._host = host;
1758
1761
  host.addController(this);
1759
1762
  }
@@ -1763,6 +1766,7 @@ var RecordingController = class {
1763
1766
  for (const trackId of [...this._sessions.keys()]) {
1764
1767
  this._cleanupSession(trackId);
1765
1768
  }
1769
+ this._workletLoadedCtx = null;
1766
1770
  }
1767
1771
  get isRecording() {
1768
1772
  return this._sessions.size > 0;
@@ -1781,21 +1785,25 @@ var RecordingController = class {
1781
1785
  return;
1782
1786
  }
1783
1787
  const bits = options.bits ?? 16;
1784
- const context = (0, import_playout2.getGlobalContext)();
1785
- const rawCtx = context.rawContext;
1786
- this._host.resolveAudioContextSampleRate(rawCtx.sampleRate);
1787
1788
  try {
1788
- if (!this._workletLoaded) {
1789
+ const rawCtx = this._host.audioContext;
1790
+ this._host.resolveAudioContextSampleRate(rawCtx.sampleRate);
1791
+ if (!this._workletLoadedCtx || this._workletLoadedCtx !== rawCtx) {
1789
1792
  await rawCtx.audioWorklet.addModule(import_worklets.recordingProcessorUrl);
1790
- this._workletLoaded = true;
1793
+ this._workletLoadedCtx = rawCtx;
1791
1794
  }
1792
- const channelCount = stream.getAudioTracks()[0]?.getSettings()?.channelCount ?? 1;
1795
+ const detectedChannelCount = stream.getAudioTracks()[0]?.getSettings()?.channelCount;
1796
+ if (detectedChannelCount === void 0 && options.channelCount !== void 0) {
1797
+ console.warn(
1798
+ "[dawcore] Could not detect stream channel count, using fallback: " + options.channelCount
1799
+ );
1800
+ }
1801
+ const channelCount = detectedChannelCount ?? options.channelCount ?? 1;
1793
1802
  const startSample = options.startSample ?? Math.floor(this._host._currentTime * this._host.effectiveSampleRate);
1794
1803
  const outputLatency = rawCtx.outputLatency ?? 0;
1795
- const lookAhead = context.lookAhead ?? 0;
1796
- const latencySamples = Math.floor((outputLatency + lookAhead) * rawCtx.sampleRate);
1797
- const source = context.createMediaStreamSource(stream);
1798
- const workletNode = context.createAudioWorkletNode("recording-processor", {
1804
+ const latencySamples = Math.floor(outputLatency * rawCtx.sampleRate);
1805
+ const source = rawCtx.createMediaStreamSource(stream);
1806
+ const workletNode = new AudioWorkletNode(rawCtx, "recording-processor", {
1799
1807
  channelCount,
1800
1808
  channelCountMode: "explicit"
1801
1809
  });
@@ -1896,8 +1904,7 @@ var RecordingController = class {
1896
1904
  );
1897
1905
  return;
1898
1906
  }
1899
- const context = (0, import_playout2.getGlobalContext)();
1900
- const stopCtx = context.rawContext;
1907
+ const stopCtx = this._host.audioContext;
1901
1908
  const channelData = session.chunks.map((chunkArr) => (0, import_recording.concatenateAudioData)(chunkArr));
1902
1909
  const audioBuffer = (0, import_recording.createAudioBuffer)(
1903
1910
  stopCtx,
@@ -2869,6 +2876,9 @@ var DawEditorElement = class extends import_lit12.LitElement {
2869
2876
  this._selectionStartTime = 0;
2870
2877
  this._selectionEndTime = 0;
2871
2878
  this._currentTime = 0;
2879
+ /** Consumer-provided AudioContext. When set, used for decode, playback, and recording. */
2880
+ this._externalAudioContext = null;
2881
+ this._ownedAudioContext = null;
2872
2882
  this._engine = null;
2873
2883
  this._enginePromise = null;
2874
2884
  this._audioCache = /* @__PURE__ */ new Map();
@@ -3014,6 +3024,31 @@ var DawEditorElement = class extends import_lit12.LitElement {
3014
3024
  this._samplesPerPixel = clamped;
3015
3025
  this.requestUpdate("samplesPerPixel", old);
3016
3026
  }
3027
+ /** Set an AudioContext to use for all audio operations. Must be set before tracks load. */
3028
+ set audioContext(ctx) {
3029
+ if (ctx && ctx.state === "closed") {
3030
+ console.warn("[dawcore] Provided AudioContext is already closed. Ignoring.");
3031
+ return;
3032
+ }
3033
+ if (this._engine) {
3034
+ console.warn(
3035
+ "[dawcore] audioContext set after engine is built. The engine will continue using the previous context."
3036
+ );
3037
+ }
3038
+ this._externalAudioContext = ctx;
3039
+ }
3040
+ get audioContext() {
3041
+ if (this._externalAudioContext) return this._externalAudioContext;
3042
+ if (!this._ownedAudioContext) {
3043
+ this._ownedAudioContext = new AudioContext({ sampleRate: this.sampleRate });
3044
+ if (this._ownedAudioContext.sampleRate !== this.sampleRate) {
3045
+ console.warn(
3046
+ "[dawcore] Requested sampleRate " + this.sampleRate + " but AudioContext is running at " + this._ownedAudioContext.sampleRate
3047
+ );
3048
+ }
3049
+ }
3050
+ return this._ownedAudioContext;
3051
+ }
3017
3052
  get _clipHandler() {
3018
3053
  return this.interactiveClips ? this._clipPointer : null;
3019
3054
  }
@@ -3119,6 +3154,12 @@ var DawEditorElement = class extends import_lit12.LitElement {
3119
3154
  } catch (err) {
3120
3155
  console.warn("[dawcore] Error disposing engine: " + String(err));
3121
3156
  }
3157
+ if (this._ownedAudioContext) {
3158
+ this._ownedAudioContext.close().catch((err) => {
3159
+ console.warn("[dawcore] Error closing AudioContext: " + String(err));
3160
+ });
3161
+ this._ownedAudioContext = null;
3162
+ }
3122
3163
  }
3123
3164
  willUpdate(changedProperties) {
3124
3165
  if (changedProperties.has("eagerResume")) {
@@ -3222,7 +3263,15 @@ var DawEditorElement = class extends import_lit12.LitElement {
3222
3263
  let waveformData = null;
3223
3264
  if (waveformDataPromise) {
3224
3265
  try {
3225
- waveformData = await waveformDataPromise;
3266
+ const wd = await waveformDataPromise;
3267
+ const contextRate = this.audioContext.sampleRate;
3268
+ if (wd.sample_rate === contextRate) {
3269
+ waveformData = wd;
3270
+ } else {
3271
+ console.warn(
3272
+ "[dawcore] Pre-computed peaks at " + wd.sample_rate + " Hz do not match AudioContext at " + contextRate + " Hz \u2014 ignoring " + clipDesc.peaksSrc + ", generating from audio"
3273
+ );
3274
+ }
3226
3275
  } catch (err) {
3227
3276
  console.warn(
3228
3277
  "[dawcore] Failed to load peaks from " + clipDesc.peaksSrc + ": " + String(err) + " \u2014 falling back to AudioBuffer generation"
@@ -3230,15 +3279,16 @@ var DawEditorElement = class extends import_lit12.LitElement {
3230
3279
  }
3231
3280
  }
3232
3281
  if (waveformData) {
3233
- const clip2 = (0, import_core4.createClipFromSeconds)({
3282
+ const wdRate = waveformData.sample_rate;
3283
+ const clip2 = (0, import_core4.createClip)({
3234
3284
  waveformData,
3235
- startTime: clipDesc.start,
3236
- duration: clipDesc.duration || waveformData.duration,
3237
- offset: clipDesc.offset,
3285
+ startSample: Math.round(clipDesc.start * wdRate),
3286
+ durationSamples: Math.round((clipDesc.duration || waveformData.duration) * wdRate),
3287
+ offsetSamples: Math.round(clipDesc.offset * wdRate),
3238
3288
  gain: clipDesc.gain,
3239
3289
  name: clipDesc.name,
3240
- sampleRate: waveformData.sample_rate,
3241
- sourceDuration: waveformData.duration
3290
+ sampleRate: wdRate,
3291
+ sourceDurationSamples: Math.ceil(waveformData.duration * wdRate)
3242
3292
  });
3243
3293
  const effectiveScale = Math.max(this.samplesPerPixel, waveformData.scale);
3244
3294
  const peakData2 = extractPeaks(
@@ -3358,8 +3408,7 @@ var DawEditorElement = class extends import_lit12.LitElement {
3358
3408
  );
3359
3409
  }
3360
3410
  const arrayBuffer = await response.arrayBuffer();
3361
- const { getGlobalAudioContext } = await import("@waveform-playlist/playout");
3362
- return getGlobalAudioContext().decodeAudioData(arrayBuffer);
3411
+ return this.audioContext.decodeAudioData(arrayBuffer);
3363
3412
  })();
3364
3413
  this._audioCache.set(src, promise);
3365
3414
  try {
@@ -3400,11 +3449,11 @@ var DawEditorElement = class extends import_lit12.LitElement {
3400
3449
  return this._enginePromise;
3401
3450
  }
3402
3451
  async _buildEngine() {
3403
- const [{ PlaylistEngine }, { createToneAdapter }] = await Promise.all([
3452
+ const [{ PlaylistEngine }, { NativePlayoutAdapter }] = await Promise.all([
3404
3453
  import("@waveform-playlist/engine"),
3405
- import("@waveform-playlist/playout")
3454
+ import("@dawcore/transport")
3406
3455
  ]);
3407
- const adapter = createToneAdapter();
3456
+ const adapter = new NativePlayoutAdapter(this.audioContext);
3408
3457
  const engine = new PlaylistEngine({
3409
3458
  adapter,
3410
3459
  sampleRate: this.effectiveSampleRate,