@waveform-playlist/core 5.0.0-alpha.8 → 5.0.0-alpha.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -78,18 +78,37 @@ type TrackEffectsFunction = (graphEnd: unknown, destination: unknown, isOffline:
  * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)
  * to avoid floating-point precision errors. Convert to seconds only when
  * needed for playback using: seconds = samples / sampleRate
+ *
+ * Clips can be created with just waveformData (for instant visual rendering)
+ * and have audioBuffer added later when audio finishes loading.
  */
  interface AudioClip {
  /** Unique identifier for this clip */
  id: string;
- /** The audio buffer containing the audio data */
- audioBuffer: AudioBuffer;
+ /**
+ * The audio buffer containing the audio data.
+ * Optional for peaks-first rendering - can be added later.
+ * Required for playback and editing operations.
+ */
+ audioBuffer?: AudioBuffer;
  /** Position on timeline where this clip starts (in samples at timeline sampleRate) */
  startSample: number;
  /** Duration of this clip (in samples) - how much of the audio buffer to play */
  durationSamples: number;
  /** Offset into the audio buffer where playback starts (in samples) - the "trim start" point */
  offsetSamples: number;
+ /**
+ * Sample rate for this clip's audio.
+ * Required when audioBuffer is not provided (for peaks-first rendering).
+ * When audioBuffer is present, this should match audioBuffer.sampleRate.
+ */
+ sampleRate: number;
+ /**
+ * Total duration of the source audio in samples.
+ * Required when audioBuffer is not provided (for trim bounds calculation).
+ * When audioBuffer is present, this should equal audioBuffer.length.
+ */
+ sourceDurationSamples: number;
  /** Optional fade in effect */
  fadeIn?: Fade;
  /** Optional fade out effect */
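The hunk above is the core of the change: audioBuffer becomes optional and two new required fields, sampleRate and sourceDurationSamples, carry the metadata that previously came from the buffer. A minimal sketch of a clip in its peaks-first state, with illustrative values and an assumed, already-loaded waveformData object:

```typescript
import type { AudioClip, WaveformDataObject } from '@waveform-playlist/core';

declare const waveformData: WaveformDataObject; // assumed to be loaded elsewhere

// A clip that can be drawn immediately but not yet played back:
const pendingClip: AudioClip = {
  id: 'clip-1',                  // illustrative id
  startSample: 0,
  durationSamples: 441000,       // 10 s at 44.1 kHz
  offsetSamples: 0,
  sampleRate: 44100,             // required because audioBuffer is absent
  sourceDurationSamples: 441000, // total source length, used for trim bounds
  gain: 1,
  waveformData,                  // peaks used for rendering
  // audioBuffer is attached later, once decoding finishes
};
```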
@@ -155,9 +174,13 @@ interface Timeline {
  }
  /**
  * Options for creating a new audio clip (using sample counts)
+ *
+ * Either audioBuffer OR (sampleRate + sourceDurationSamples + waveformData) must be provided.
+ * Providing waveformData without audioBuffer enables peaks-first rendering.
  */
  interface CreateClipOptions {
- audioBuffer: AudioBuffer;
+ /** Audio buffer - optional for peaks-first rendering */
+ audioBuffer?: AudioBuffer;
  startSample: number;
  durationSamples?: number;
  offsetSamples?: number;
@@ -168,12 +191,20 @@ interface CreateClipOptions {
  fadeOut?: Fade;
  /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */
  waveformData?: WaveformDataObject;
+ /** Sample rate - required if audioBuffer not provided */
+ sampleRate?: number;
+ /** Total source audio duration in samples - required if audioBuffer not provided */
+ sourceDurationSamples?: number;
  }
  /**
  * Options for creating a new audio clip (using seconds for convenience)
+ *
+ * Either audioBuffer OR (sampleRate + sourceDuration + waveformData) must be provided.
+ * Providing waveformData without audioBuffer enables peaks-first rendering.
  */
  interface CreateClipOptionsSeconds {
- audioBuffer: AudioBuffer;
+ /** Audio buffer - optional for peaks-first rendering */
+ audioBuffer?: AudioBuffer;
  startTime: number;
  duration?: number;
  offset?: number;
@@ -184,6 +215,10 @@ interface CreateClipOptionsSeconds {
  fadeOut?: Fade;
  /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */
  waveformData?: WaveformDataObject;
+ /** Sample rate - required if audioBuffer not provided */
+ sampleRate?: number;
+ /** Total source audio duration in seconds - required if audioBuffer not provided */
+ sourceDuration?: number;
  }
  /**
  * Options for creating a new track
@@ -200,11 +235,19 @@ interface CreateTrackOptions {
  }
  /**
  * Creates a new AudioClip with sensible defaults (using sample counts)
+ *
+ * For peaks-first rendering (no audioBuffer), sampleRate and sourceDurationSamples can be:
+ * - Provided explicitly via options
+ * - Derived from waveformData (sample_rate and duration properties)
  */
  declare function createClip(options: CreateClipOptions): AudioClip;
  /**
  * Creates a new AudioClip from time-based values (convenience function)
- * Converts seconds to samples using the audioBuffer's sampleRate
+ * Converts seconds to samples using the audioBuffer's sampleRate or explicit sampleRate
+ *
+ * For peaks-first rendering (no audioBuffer), sampleRate and sourceDuration can be:
+ * - Provided explicitly via options
+ * - Derived from waveformData (sample_rate and duration properties)
  */
  declare function createClipFromSeconds(options: CreateClipOptionsSeconds): AudioClip;
  /**
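Putting the new declarations together, a hedged sketch of peaks-first clip creation where sampleRate and sourceDurationSamples are derived from waveformData rather than passed explicitly (the waveformData variable and timeline position are placeholders):

```typescript
import { createClip } from '@waveform-playlist/core';
import type { WaveformDataObject } from '@waveform-playlist/core';

declare const waveformData: WaveformDataObject; // e.g. peaks from BBC audiowaveform

const clip = createClip({
  startSample: 88200, // 2 s into the timeline at 44.1 kHz
  waveformData,       // sampleRate and sourceDurationSamples are derived from it
});

// clip.audioBuffer is undefined for now: the clip renders immediately,
// but playback and editing need the buffer to be attached later.
```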
package/dist/index.d.ts CHANGED
@@ -78,18 +78,37 @@ type TrackEffectsFunction = (graphEnd: unknown, destination: unknown, isOffline:
  * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)
  * to avoid floating-point precision errors. Convert to seconds only when
  * needed for playback using: seconds = samples / sampleRate
+ *
+ * Clips can be created with just waveformData (for instant visual rendering)
+ * and have audioBuffer added later when audio finishes loading.
  */
  interface AudioClip {
  /** Unique identifier for this clip */
  id: string;
- /** The audio buffer containing the audio data */
- audioBuffer: AudioBuffer;
+ /**
+ * The audio buffer containing the audio data.
+ * Optional for peaks-first rendering - can be added later.
+ * Required for playback and editing operations.
+ */
+ audioBuffer?: AudioBuffer;
  /** Position on timeline where this clip starts (in samples at timeline sampleRate) */
  startSample: number;
  /** Duration of this clip (in samples) - how much of the audio buffer to play */
  durationSamples: number;
  /** Offset into the audio buffer where playback starts (in samples) - the "trim start" point */
  offsetSamples: number;
+ /**
+ * Sample rate for this clip's audio.
+ * Required when audioBuffer is not provided (for peaks-first rendering).
+ * When audioBuffer is present, this should match audioBuffer.sampleRate.
+ */
+ sampleRate: number;
+ /**
+ * Total duration of the source audio in samples.
+ * Required when audioBuffer is not provided (for trim bounds calculation).
+ * When audioBuffer is present, this should equal audioBuffer.length.
+ */
+ sourceDurationSamples: number;
  /** Optional fade in effect */
  fadeIn?: Fade;
  /** Optional fade out effect */
@@ -155,9 +174,13 @@ interface Timeline {
  }
  /**
  * Options for creating a new audio clip (using sample counts)
+ *
+ * Either audioBuffer OR (sampleRate + sourceDurationSamples + waveformData) must be provided.
+ * Providing waveformData without audioBuffer enables peaks-first rendering.
  */
  interface CreateClipOptions {
- audioBuffer: AudioBuffer;
+ /** Audio buffer - optional for peaks-first rendering */
+ audioBuffer?: AudioBuffer;
  startSample: number;
  durationSamples?: number;
  offsetSamples?: number;
@@ -168,12 +191,20 @@ interface CreateClipOptions {
  fadeOut?: Fade;
  /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */
  waveformData?: WaveformDataObject;
+ /** Sample rate - required if audioBuffer not provided */
+ sampleRate?: number;
+ /** Total source audio duration in samples - required if audioBuffer not provided */
+ sourceDurationSamples?: number;
  }
  /**
  * Options for creating a new audio clip (using seconds for convenience)
+ *
+ * Either audioBuffer OR (sampleRate + sourceDuration + waveformData) must be provided.
+ * Providing waveformData without audioBuffer enables peaks-first rendering.
  */
  interface CreateClipOptionsSeconds {
- audioBuffer: AudioBuffer;
+ /** Audio buffer - optional for peaks-first rendering */
+ audioBuffer?: AudioBuffer;
  startTime: number;
  duration?: number;
  offset?: number;
@@ -184,6 +215,10 @@ interface CreateClipOptionsSeconds {
  fadeOut?: Fade;
  /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */
  waveformData?: WaveformDataObject;
+ /** Sample rate - required if audioBuffer not provided */
+ sampleRate?: number;
+ /** Total source audio duration in seconds - required if audioBuffer not provided */
+ sourceDuration?: number;
  }
  /**
  * Options for creating a new track
@@ -200,11 +235,19 @@ interface CreateTrackOptions {
  }
  /**
  * Creates a new AudioClip with sensible defaults (using sample counts)
+ *
+ * For peaks-first rendering (no audioBuffer), sampleRate and sourceDurationSamples can be:
+ * - Provided explicitly via options
+ * - Derived from waveformData (sample_rate and duration properties)
  */
  declare function createClip(options: CreateClipOptions): AudioClip;
  /**
  * Creates a new AudioClip from time-based values (convenience function)
- * Converts seconds to samples using the audioBuffer's sampleRate
+ * Converts seconds to samples using the audioBuffer's sampleRate or explicit sampleRate
+ *
+ * For peaks-first rendering (no audioBuffer), sampleRate and sourceDuration can be:
+ * - Provided explicitly via options
+ * - Derived from waveformData (sample_rate and duration properties)
  */
  declare function createClipFromSeconds(options: CreateClipOptionsSeconds): AudioClip;
  /**
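The same contract applies to the seconds-based declarations in this file. A small sketch using createClipFromSeconds with waveformData plus explicit trim values in seconds (the numbers and the waveformData variable are placeholders):

```typescript
import { createClipFromSeconds } from '@waveform-playlist/core';
import type { WaveformDataObject } from '@waveform-playlist/core';

declare const waveformData: WaveformDataObject; // assumed pre-computed peaks

const clip = createClipFromSeconds({
  startTime: 1.5, // position on the timeline, in seconds
  duration: 4,    // play 4 s of the source
  offset: 0.5,    // skip the first 0.5 s of the source ("trim start")
  waveformData,   // supplies sample_rate and duration when no audioBuffer is given
});
```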
package/dist/index.js CHANGED
@@ -44,8 +44,6 @@ function createClip(options) {
  const {
  audioBuffer,
  startSample,
- durationSamples = audioBuffer.length,
- // Full buffer by default
  offsetSamples = 0,
  gain = 1,
  name,
@@ -54,12 +52,28 @@ function createClip(options) {
  fadeOut,
  waveformData
  } = options;
+ const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;
+ const sourceDurationSamples = audioBuffer?.length ?? options.sourceDurationSamples ?? (waveformData && sampleRate ? Math.ceil(waveformData.duration * sampleRate) : void 0);
+ if (sampleRate === void 0) {
+ throw new Error("createClip: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)");
+ }
+ if (sourceDurationSamples === void 0) {
+ throw new Error("createClip: sourceDurationSamples is required when audioBuffer is not provided (can use waveformData.duration)");
+ }
+ if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {
+ console.warn(
+ `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). Using audioBuffer sample rate. Waveform visualization may be slightly off.`
+ );
+ }
+ const durationSamples = options.durationSamples ?? sourceDurationSamples;
  return {
  id: generateId(),
  audioBuffer,
  startSample,
  durationSamples,
  offsetSamples,
+ sampleRate,
+ sourceDurationSamples,
  gain,
  name,
  color,
@@ -72,7 +86,6 @@ function createClipFromSeconds(options) {
  const {
  audioBuffer,
  startTime,
- duration = audioBuffer.duration,
  offset = 0,
  gain = 1,
  name,
@@ -81,12 +94,27 @@ function createClipFromSeconds(options) {
  fadeOut,
  waveformData
  } = options;
- const sampleRate = audioBuffer.sampleRate;
+ const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;
+ if (sampleRate === void 0) {
+ throw new Error("createClipFromSeconds: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)");
+ }
+ const sourceDuration = audioBuffer?.duration ?? options.sourceDuration ?? waveformData?.duration;
+ if (sourceDuration === void 0) {
+ throw new Error("createClipFromSeconds: sourceDuration is required when audioBuffer is not provided (can use waveformData.duration)");
+ }
+ if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {
+ console.warn(
+ `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). Using audioBuffer sample rate. Waveform visualization may be slightly off.`
+ );
+ }
+ const duration = options.duration ?? sourceDuration;
  return createClip({
  audioBuffer,
  startSample: Math.round(startTime * sampleRate),
  durationSamples: Math.round(duration * sampleRate),
  offsetSamples: Math.round(offset * sampleRate),
+ sampleRate,
+ sourceDurationSamples: Math.ceil(sourceDuration * sampleRate),
  gain,
  name,
  color,
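The compiled factory above resolves sampleRate and sourceDurationSamples in the order audioBuffer → explicit option → waveformData, throws when none of them is available, and warns (without failing) on a sample-rate mismatch. A short sketch of those two edge cases:

```typescript
import { createClip } from '@waveform-playlist/core';

// 1. No audioBuffer, no waveformData, no explicit metadata: createClip throws
//    "createClip: sampleRate is required when audioBuffer is not provided ...".
try {
  createClip({ startSample: 0 });
} catch (err) {
  console.error(err);
}

// 2. audioBuffer and waveformData both present but at different sample rates:
//    the clip is still created using audioBuffer.sampleRate, and a console.warn
//    notes that the waveform visualization may be slightly off.
```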
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/types/clip.ts","../src/types/index.ts","../src/utils/conversions.ts"],"sourcesContent":["export * from './types';\nexport * from './utils';\n","/**\n * Clip-Based Model Types\n *\n * These types support a professional multi-track editing model where:\n * - Each track can contain multiple audio clips\n * - Clips can be positioned anywhere on the timeline\n * - Clips have independent trim points (offset/duration)\n * - Gaps between clips are silent\n * - Clips can overlap (for crossfades)\n */\n\nimport { Fade } from './index';\n\n/**\n * WaveformData object from waveform-data.js library.\n * Supports resample() and slice() for dynamic zoom levels.\n * See: https://github.com/bbc/waveform-data.js\n */\nexport interface WaveformDataObject {\n /** Sample rate of the original audio */\n readonly sample_rate: number;\n /** Number of audio samples per pixel */\n readonly scale: number;\n /** Length of waveform data in pixels */\n readonly length: number;\n /** Bit depth (8 or 16) */\n readonly bits: number;\n /** Duration in seconds */\n readonly duration: number;\n /** Number of channels */\n readonly channels: number;\n /** Get channel data */\n channel: (index: number) => {\n min_array: () => number[];\n max_array: () => number[];\n };\n /** Resample to different scale */\n resample: (options: { scale: number } | { width: number }) => WaveformDataObject;\n /** Slice a portion of the waveform */\n slice: (options: { startTime: number; endTime: number } | { startIndex: number; endIndex: number }) => WaveformDataObject;\n}\n\n/**\n * Generic effects function type for track-level audio processing.\n *\n * The actual implementation receives Tone.js audio nodes. Using generic types\n * here to avoid circular dependencies with the playout package.\n *\n * @param graphEnd - The end of the track's audio graph (Tone.js Gain node)\n * @param destination - Where to connect the effects output (Tone.js ToneAudioNode)\n * @param isOffline - Whether rendering offline (for export)\n * @returns Optional cleanup function called when track is disposed\n *\n * @example\n * ```typescript\n * const trackEffects: TrackEffectsFunction = (graphEnd, destination, isOffline) => {\n * const reverb = new Tone.Reverb({ decay: 1.5 });\n * graphEnd.connect(reverb);\n * reverb.connect(destination);\n *\n * return () => {\n * reverb.dispose();\n * };\n * };\n * ```\n */\nexport type TrackEffectsFunction = (\n graphEnd: unknown,\n destination: unknown,\n isOffline: boolean\n) => void | (() => void);\n\n/**\n * Represents a single audio clip on the timeline\n *\n * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)\n * to avoid floating-point precision errors. 
Convert to seconds only when\n * needed for playback using: seconds = samples / sampleRate\n */\nexport interface AudioClip {\n /** Unique identifier for this clip */\n id: string;\n\n /** The audio buffer containing the audio data */\n audioBuffer: AudioBuffer;\n\n /** Position on timeline where this clip starts (in samples at timeline sampleRate) */\n startSample: number;\n\n /** Duration of this clip (in samples) - how much of the audio buffer to play */\n durationSamples: number;\n\n /** Offset into the audio buffer where playback starts (in samples) - the \"trim start\" point */\n offsetSamples: number;\n\n /** Optional fade in effect */\n fadeIn?: Fade;\n\n /** Optional fade out effect */\n fadeOut?: Fade;\n\n /** Clip-specific gain/volume multiplier (0.0 to 1.0+) */\n gain: number;\n\n /** Optional label/name for this clip */\n name?: string;\n\n /** Optional color for visual distinction */\n color?: string;\n\n /**\n * Pre-computed waveform data from waveform-data.js library.\n * When provided, the library will use this instead of computing peaks from the audioBuffer.\n * Supports resampling to different zoom levels and slicing for clip trimming.\n * Load with: `const waveformData = await loadWaveformData('/path/to/peaks.dat')`\n */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Represents a track containing multiple audio clips\n */\nexport interface ClipTrack {\n /** Unique identifier for this track */\n id: string;\n\n /** Display name for this track */\n name: string;\n\n /** Array of audio clips on this track */\n clips: AudioClip[];\n\n /** Whether this track is muted */\n muted: boolean;\n\n /** Whether this track is soloed */\n soloed: boolean;\n\n /** Track volume (0.0 to 1.0+) */\n volume: number;\n\n /** Stereo pan (-1.0 = left, 0 = center, 1.0 = right) */\n pan: number;\n\n /** Optional track color for visual distinction */\n color?: string;\n\n /** Track height in pixels (for UI) */\n height?: number;\n\n /** Optional effects function for this track */\n effects?: TrackEffectsFunction;\n}\n\n/**\n * Represents the entire timeline/project\n */\nexport interface Timeline {\n /** All tracks in the timeline */\n tracks: ClipTrack[];\n\n /** Total timeline duration in seconds */\n duration: number;\n\n /** Sample rate for all audio (typically 44100 or 48000) */\n sampleRate: number;\n\n /** Optional project name */\n name?: string;\n\n /** Optional tempo (BPM) for grid snapping */\n tempo?: number;\n\n /** Optional time signature for grid snapping */\n timeSignature?: {\n numerator: number;\n denominator: number;\n };\n}\n\n/**\n * Options for creating a new audio clip (using sample counts)\n */\nexport interface CreateClipOptions {\n audioBuffer: AudioBuffer;\n startSample: number; // Position on timeline (in samples)\n durationSamples?: number; // Defaults to full buffer duration (in samples)\n offsetSamples?: number; // Defaults to 0\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Options for creating a new audio clip (using seconds for convenience)\n */\nexport interface CreateClipOptionsSeconds {\n audioBuffer: AudioBuffer;\n startTime: number; // Position on timeline (in seconds)\n duration?: number; // Defaults to full buffer duration (in seconds)\n offset?: number; // Defaults to 0 (in seconds)\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n 
fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Options for creating a new track\n */\nexport interface CreateTrackOptions {\n name: string;\n clips?: AudioClip[];\n muted?: boolean;\n soloed?: boolean;\n volume?: number;\n pan?: number;\n color?: string;\n height?: number;\n}\n\n/**\n * Creates a new AudioClip with sensible defaults (using sample counts)\n */\nexport function createClip(options: CreateClipOptions): AudioClip {\n const {\n audioBuffer,\n startSample,\n durationSamples = audioBuffer.length, // Full buffer by default\n offsetSamples = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n return {\n id: generateId(),\n audioBuffer,\n startSample,\n durationSamples,\n offsetSamples,\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n };\n}\n\n/**\n * Creates a new AudioClip from time-based values (convenience function)\n * Converts seconds to samples using the audioBuffer's sampleRate\n */\nexport function createClipFromSeconds(options: CreateClipOptionsSeconds): AudioClip {\n const {\n audioBuffer,\n startTime,\n duration = audioBuffer.duration,\n offset = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n const sampleRate = audioBuffer.sampleRate;\n\n return createClip({\n audioBuffer,\n startSample: Math.round(startTime * sampleRate),\n durationSamples: Math.round(duration * sampleRate),\n offsetSamples: Math.round(offset * sampleRate),\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n });\n}\n\n/**\n * Creates a new ClipTrack with sensible defaults\n */\nexport function createTrack(options: CreateTrackOptions): ClipTrack {\n const {\n name,\n clips = [],\n muted = false,\n soloed = false,\n volume = 1.0,\n pan = 0,\n color,\n height,\n } = options;\n\n return {\n id: generateId(),\n name,\n clips,\n muted,\n soloed,\n volume,\n pan,\n color,\n height,\n };\n}\n\n/**\n * Creates a new Timeline with sensible defaults\n */\nexport function createTimeline(\n tracks: ClipTrack[],\n sampleRate: number = 44100,\n options?: {\n name?: string;\n tempo?: number;\n timeSignature?: { numerator: number; denominator: number };\n }\n): Timeline {\n // Calculate total duration from all clips across all tracks (in seconds)\n const durationSamples = tracks.reduce((maxSamples, track) => {\n const trackSamples = track.clips.reduce((max, clip) => {\n return Math.max(max, clip.startSample + clip.durationSamples);\n }, 0);\n return Math.max(maxSamples, trackSamples);\n }, 0);\n\n const duration = durationSamples / sampleRate;\n\n return {\n tracks,\n duration,\n sampleRate,\n name: options?.name,\n tempo: options?.tempo,\n timeSignature: options?.timeSignature,\n };\n}\n\n/**\n * Generates a unique ID for clips and tracks\n */\nfunction generateId(): string {\n return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;\n}\n\n/**\n * Utility: Get all clips within a sample range\n */\nexport function getClipsInRange(\n track: ClipTrack,\n startSample: number,\n endSample: number\n): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n // Clip overlaps with range if:\n // - Clip starts before range ends AND\n // - Clip ends after range starts\n return clip.startSample < endSample && clipEnd > startSample;\n });\n}\n\n/**\n * Utility: Get all clips at a specific sample position\n */\nexport function 
getClipsAtSample(track: ClipTrack, sample: number): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n return sample >= clip.startSample && sample < clipEnd;\n });\n}\n\n/**\n * Utility: Check if two clips overlap\n */\nexport function clipsOverlap(clip1: AudioClip, clip2: AudioClip): boolean {\n const clip1End = clip1.startSample + clip1.durationSamples;\n const clip2End = clip2.startSample + clip2.durationSamples;\n\n return clip1.startSample < clip2End && clip1End > clip2.startSample;\n}\n\n/**\n * Utility: Sort clips by startSample\n */\nexport function sortClipsByTime(clips: AudioClip[]): AudioClip[] {\n return [...clips].sort((a, b) => a.startSample - b.startSample);\n}\n\n/**\n * Utility: Find gaps between clips (silent regions)\n */\nexport interface Gap {\n startSample: number;\n endSample: number;\n durationSamples: number;\n}\n\nexport function findGaps(track: ClipTrack): Gap[] {\n if (track.clips.length === 0) return [];\n\n const sorted = sortClipsByTime(track.clips);\n const gaps: Gap[] = [];\n\n for (let i = 0; i < sorted.length - 1; i++) {\n const currentClipEnd = sorted[i].startSample + sorted[i].durationSamples;\n const nextClipStart = sorted[i + 1].startSample;\n\n if (nextClipStart > currentClipEnd) {\n gaps.push({\n startSample: currentClipEnd,\n endSample: nextClipStart,\n durationSamples: nextClipStart - currentClipEnd,\n });\n }\n }\n\n return gaps;\n}\n","export interface WaveformConfig {\n sampleRate: number;\n samplesPerPixel: number;\n waveHeight?: number;\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n}\n\nexport interface AudioBuffer {\n length: number;\n duration: number;\n numberOfChannels: number;\n sampleRate: number;\n getChannelData(channel: number): Float32Array;\n}\n\nexport interface Track {\n id: string;\n name: string;\n src?: string | AudioBuffer; // Support both URL strings and AudioBuffer objects\n gain: number;\n muted: boolean;\n soloed: boolean;\n stereoPan: number;\n startTime: number;\n endTime?: number;\n fadeIn?: Fade;\n fadeOut?: Fade;\n cueIn?: number;\n cueOut?: number;\n}\n\n/**\n * Simple fade configuration\n */\nexport interface Fade {\n /** Duration of the fade in seconds */\n duration: number;\n /** Type of fade curve (default: 'linear') */\n type?: FadeType;\n}\n\nexport type FadeType = 'logarithmic' | 'linear' | 'sCurve' | 'exponential';\n\nexport interface PlaylistConfig {\n samplesPerPixel?: number;\n waveHeight?: number;\n container?: HTMLElement;\n isAutomaticScroll?: boolean;\n timescale?: boolean;\n colors?: {\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n };\n controls?: {\n show?: boolean;\n width?: number;\n };\n zoomLevels?: number[];\n}\n\nexport interface PlayoutState {\n isPlaying: boolean;\n isPaused: boolean;\n cursor: number;\n duration: number;\n}\n\nexport interface TimeSelection {\n start: number;\n end: number;\n}\n\nexport enum InteractionState {\n Cursor = 'cursor',\n Select = 'select',\n Shift = 'shift',\n FadeIn = 'fadein',\n FadeOut = 'fadeout',\n}\n\n// Export clip-based model types\nexport * from './clip';\n","export function samplesToSeconds(samples: number, sampleRate: number): number {\n return samples / sampleRate;\n}\n\nexport function secondsToSamples(seconds: number, sampleRate: number): number {\n return Math.ceil(seconds * sampleRate);\n}\n\nexport function samplesToPixels(samples: number, samplesPerPixel: number): number {\n return Math.floor(samples / 
samplesPerPixel);\n}\n\nexport function pixelsToSamples(pixels: number, samplesPerPixel: number): number {\n return Math.floor(pixels * samplesPerPixel);\n}\n\nexport function pixelsToSeconds(\n pixels: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return (pixels * samplesPerPixel) / sampleRate;\n}\n\nexport function secondsToPixels(\n seconds: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return Math.ceil((seconds * sampleRate) / samplesPerPixel);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACuOO,SAAS,WAAW,SAAuC;AAChE,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,kBAAkB,YAAY;AAAA;AAAA,IAC9B,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAMO,SAAS,sBAAsB,SAA8C;AAClF,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,WAAW,YAAY;AAAA,IACvB,SAAS;AAAA,IACT,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,QAAM,aAAa,YAAY;AAE/B,SAAO,WAAW;AAAA,IAChB;AAAA,IACA,aAAa,KAAK,MAAM,YAAY,UAAU;AAAA,IAC9C,iBAAiB,KAAK,MAAM,WAAW,UAAU;AAAA,IACjD,eAAe,KAAK,MAAM,SAAS,UAAU;AAAA,IAC7C;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AACH;AAKO,SAAS,YAAY,SAAwC;AAClE,QAAM;AAAA,IACJ;AAAA,IACA,QAAQ,CAAC;AAAA,IACT,QAAQ;AAAA,IACR,SAAS;AAAA,IACT,SAAS;AAAA,IACT,MAAM;AAAA,IACN;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,eACd,QACA,aAAqB,OACrB,SAKU;AAEV,QAAM,kBAAkB,OAAO,OAAO,CAAC,YAAY,UAAU;AAC3D,UAAM,eAAe,MAAM,MAAM,OAAO,CAAC,KAAK,SAAS;AACrD,aAAO,KAAK,IAAI,KAAK,KAAK,cAAc,KAAK,eAAe;AAAA,IAC9D,GAAG,CAAC;AACJ,WAAO,KAAK,IAAI,YAAY,YAAY;AAAA,EAC1C,GAAG,CAAC;AAEJ,QAAM,WAAW,kBAAkB;AAEnC,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,SAAS;AAAA,IACf,OAAO,SAAS;AAAA,IAChB,eAAe,SAAS;AAAA,EAC1B;AACF;AAKA,SAAS,aAAqB;AAC5B,SAAO,GAAG,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,OAAO,GAAG,CAAC,CAAC;AACjE;AAKO,SAAS,gBACd,OACA,aACA,WACa;AACb,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AAIxC,WAAO,KAAK,cAAc,aAAa,UAAU;AAAA,EACnD,CAAC;AACH;AAKO,SAAS,iBAAiB,OAAkB,QAA6B;AAC9E,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AACxC,WAAO,UAAU,KAAK,eAAe,SAAS;AAAA,EAChD,CAAC;AACH;AAKO,SAAS,aAAa,OAAkB,OAA2B;AACxE,QAAM,WAAW,MAAM,cAAc,MAAM;AAC3C,QAAM,WAAW,MAAM,cAAc,MAAM;AAE3C,SAAO,MAAM,cAAc,YAAY,WAAW,MAAM;AAC1D;AAKO,SAAS,gBAAgB,OAAiC;AAC/D,SAAO,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAChE;AAWO,SAAS,SAAS,OAAyB;AAChD,MAAI,MAAM,MAAM,WAAW,EAAG,QAAO,CAAC;AAEtC,QAAM,SAAS,gBAAgB,MAAM,KAAK;AAC1C,QAAM,OAAc,CAAC;AAErB,WAAS,IAAI,GAAG,IAAI,OAAO,SAAS,GAAG,KAAK;AAC1C,UAAM,iBAAiB,OAAO,CAAC,EAAE,cAAc,OAAO,CAAC,EAAE;AACzD,UAAM,gBAAgB,OAAO,IAAI,CAAC,EAAE;AAEpC,QAAI,gBAAgB,gBAAgB;AAClC,WAAK,KAAK;AAAA,QACR,aAAa;AAAA,QACb,WAAW;AAAA,QACX,iBAAiB,gBAAgB;AAAA,MACnC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;;;ACvWO,IAAK,mBAAL,kBAAKA,sBAAL;AACL,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,WAAQ;AACR,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,aAAU;AALA,SAAAA;AAAA,GAAA;;;AC3EL,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,UAAU;AACnB;AAEO,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,KAAK,KAAK,UAAU,UAAU;AACvC;AAEO,SAAS,gBAAgB,SAAiB,iBAAiC;AAChF,SAAO,KAAK,MAAM,UAAU,eAAe;AAC7C;AAEO,SAAS,gBAAgB,QAAgB,iBAAiC;AAC/E,SAAO,KAAK,MAAM,SAAS,eAAe;AAC5C;AAEO,SAAS,gBACd,QACA,iBACA,YACQ;AACR,SAAQ,SAAS,kBAAmB;AACtC;AAE
O,SAAS,gBACd,SACA,iBACA,YACQ;AACR,SAAO,KAAK,KAAM,UAAU,aAAc,eAAe;AAC3D;","names":["InteractionState"]}
+ {"version":3,"sources":["../src/index.ts","../src/types/clip.ts","../src/types/index.ts","../src/utils/conversions.ts"],"sourcesContent":["export * from './types';\nexport * from './utils';\n","/**\n * Clip-Based Model Types\n *\n * These types support a professional multi-track editing model where:\n * - Each track can contain multiple audio clips\n * - Clips can be positioned anywhere on the timeline\n * - Clips have independent trim points (offset/duration)\n * - Gaps between clips are silent\n * - Clips can overlap (for crossfades)\n */\n\nimport { Fade } from './index';\n\n/**\n * WaveformData object from waveform-data.js library.\n * Supports resample() and slice() for dynamic zoom levels.\n * See: https://github.com/bbc/waveform-data.js\n */\nexport interface WaveformDataObject {\n /** Sample rate of the original audio */\n readonly sample_rate: number;\n /** Number of audio samples per pixel */\n readonly scale: number;\n /** Length of waveform data in pixels */\n readonly length: number;\n /** Bit depth (8 or 16) */\n readonly bits: number;\n /** Duration in seconds */\n readonly duration: number;\n /** Number of channels */\n readonly channels: number;\n /** Get channel data */\n channel: (index: number) => {\n min_array: () => number[];\n max_array: () => number[];\n };\n /** Resample to different scale */\n resample: (options: { scale: number } | { width: number }) => WaveformDataObject;\n /** Slice a portion of the waveform */\n slice: (options: { startTime: number; endTime: number } | { startIndex: number; endIndex: number }) => WaveformDataObject;\n}\n\n/**\n * Generic effects function type for track-level audio processing.\n *\n * The actual implementation receives Tone.js audio nodes. Using generic types\n * here to avoid circular dependencies with the playout package.\n *\n * @param graphEnd - The end of the track's audio graph (Tone.js Gain node)\n * @param destination - Where to connect the effects output (Tone.js ToneAudioNode)\n * @param isOffline - Whether rendering offline (for export)\n * @returns Optional cleanup function called when track is disposed\n *\n * @example\n * ```typescript\n * const trackEffects: TrackEffectsFunction = (graphEnd, destination, isOffline) => {\n * const reverb = new Tone.Reverb({ decay: 1.5 });\n * graphEnd.connect(reverb);\n * reverb.connect(destination);\n *\n * return () => {\n * reverb.dispose();\n * };\n * };\n * ```\n */\nexport type TrackEffectsFunction = (\n graphEnd: unknown,\n destination: unknown,\n isOffline: boolean\n) => void | (() => void);\n\n/**\n * Represents a single audio clip on the timeline\n *\n * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)\n * to avoid floating-point precision errors. 
Convert to seconds only when\n * needed for playback using: seconds = samples / sampleRate\n *\n * Clips can be created with just waveformData (for instant visual rendering)\n * and have audioBuffer added later when audio finishes loading.\n */\nexport interface AudioClip {\n /** Unique identifier for this clip */\n id: string;\n\n /**\n * The audio buffer containing the audio data.\n * Optional for peaks-first rendering - can be added later.\n * Required for playback and editing operations.\n */\n audioBuffer?: AudioBuffer;\n\n /** Position on timeline where this clip starts (in samples at timeline sampleRate) */\n startSample: number;\n\n /** Duration of this clip (in samples) - how much of the audio buffer to play */\n durationSamples: number;\n\n /** Offset into the audio buffer where playback starts (in samples) - the \"trim start\" point */\n offsetSamples: number;\n\n /**\n * Sample rate for this clip's audio.\n * Required when audioBuffer is not provided (for peaks-first rendering).\n * When audioBuffer is present, this should match audioBuffer.sampleRate.\n */\n sampleRate: number;\n\n /**\n * Total duration of the source audio in samples.\n * Required when audioBuffer is not provided (for trim bounds calculation).\n * When audioBuffer is present, this should equal audioBuffer.length.\n */\n sourceDurationSamples: number;\n\n /** Optional fade in effect */\n fadeIn?: Fade;\n\n /** Optional fade out effect */\n fadeOut?: Fade;\n\n /** Clip-specific gain/volume multiplier (0.0 to 1.0+) */\n gain: number;\n\n /** Optional label/name for this clip */\n name?: string;\n\n /** Optional color for visual distinction */\n color?: string;\n\n /**\n * Pre-computed waveform data from waveform-data.js library.\n * When provided, the library will use this instead of computing peaks from the audioBuffer.\n * Supports resampling to different zoom levels and slicing for clip trimming.\n * Load with: `const waveformData = await loadWaveformData('/path/to/peaks.dat')`\n */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Represents a track containing multiple audio clips\n */\nexport interface ClipTrack {\n /** Unique identifier for this track */\n id: string;\n\n /** Display name for this track */\n name: string;\n\n /** Array of audio clips on this track */\n clips: AudioClip[];\n\n /** Whether this track is muted */\n muted: boolean;\n\n /** Whether this track is soloed */\n soloed: boolean;\n\n /** Track volume (0.0 to 1.0+) */\n volume: number;\n\n /** Stereo pan (-1.0 = left, 0 = center, 1.0 = right) */\n pan: number;\n\n /** Optional track color for visual distinction */\n color?: string;\n\n /** Track height in pixels (for UI) */\n height?: number;\n\n /** Optional effects function for this track */\n effects?: TrackEffectsFunction;\n}\n\n/**\n * Represents the entire timeline/project\n */\nexport interface Timeline {\n /** All tracks in the timeline */\n tracks: ClipTrack[];\n\n /** Total timeline duration in seconds */\n duration: number;\n\n /** Sample rate for all audio (typically 44100 or 48000) */\n sampleRate: number;\n\n /** Optional project name */\n name?: string;\n\n /** Optional tempo (BPM) for grid snapping */\n tempo?: number;\n\n /** Optional time signature for grid snapping */\n timeSignature?: {\n numerator: number;\n denominator: number;\n };\n}\n\n/**\n * Options for creating a new audio clip (using sample counts)\n *\n * Either audioBuffer OR (sampleRate + sourceDurationSamples + waveformData) must be provided.\n * Providing waveformData without audioBuffer enables 
peaks-first rendering.\n */\nexport interface CreateClipOptions {\n /** Audio buffer - optional for peaks-first rendering */\n audioBuffer?: AudioBuffer;\n startSample: number; // Position on timeline (in samples)\n durationSamples?: number; // Defaults to full buffer/source duration (in samples)\n offsetSamples?: number; // Defaults to 0\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n /** Sample rate - required if audioBuffer not provided */\n sampleRate?: number;\n /** Total source audio duration in samples - required if audioBuffer not provided */\n sourceDurationSamples?: number;\n}\n\n/**\n * Options for creating a new audio clip (using seconds for convenience)\n *\n * Either audioBuffer OR (sampleRate + sourceDuration + waveformData) must be provided.\n * Providing waveformData without audioBuffer enables peaks-first rendering.\n */\nexport interface CreateClipOptionsSeconds {\n /** Audio buffer - optional for peaks-first rendering */\n audioBuffer?: AudioBuffer;\n startTime: number; // Position on timeline (in seconds)\n duration?: number; // Defaults to full buffer/source duration (in seconds)\n offset?: number; // Defaults to 0 (in seconds)\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n /** Sample rate - required if audioBuffer not provided */\n sampleRate?: number;\n /** Total source audio duration in seconds - required if audioBuffer not provided */\n sourceDuration?: number;\n}\n\n/**\n * Options for creating a new track\n */\nexport interface CreateTrackOptions {\n name: string;\n clips?: AudioClip[];\n muted?: boolean;\n soloed?: boolean;\n volume?: number;\n pan?: number;\n color?: string;\n height?: number;\n}\n\n/**\n * Creates a new AudioClip with sensible defaults (using sample counts)\n *\n * For peaks-first rendering (no audioBuffer), sampleRate and sourceDurationSamples can be:\n * - Provided explicitly via options\n * - Derived from waveformData (sample_rate and duration properties)\n */\nexport function createClip(options: CreateClipOptions): AudioClip {\n const {\n audioBuffer,\n startSample,\n offsetSamples = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n // Determine sample rate: audioBuffer > explicit option > waveformData\n const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;\n\n // Determine source duration: audioBuffer > explicit option > waveformData (converted to samples)\n const sourceDurationSamples = audioBuffer?.length\n ?? options.sourceDurationSamples\n ?? (waveformData && sampleRate ? 
Math.ceil(waveformData.duration * sampleRate) : undefined);\n\n if (sampleRate === undefined) {\n throw new Error('createClip: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)');\n }\n if (sourceDurationSamples === undefined) {\n throw new Error('createClip: sourceDurationSamples is required when audioBuffer is not provided (can use waveformData.duration)');\n }\n\n // Warn if sample rates don't match\n if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {\n console.warn(\n `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). ` +\n `Using audioBuffer sample rate. Waveform visualization may be slightly off.`\n );\n }\n\n // Default duration to full source duration\n const durationSamples = options.durationSamples ?? sourceDurationSamples;\n\n return {\n id: generateId(),\n audioBuffer,\n startSample,\n durationSamples,\n offsetSamples,\n sampleRate,\n sourceDurationSamples,\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n };\n}\n\n/**\n * Creates a new AudioClip from time-based values (convenience function)\n * Converts seconds to samples using the audioBuffer's sampleRate or explicit sampleRate\n *\n * For peaks-first rendering (no audioBuffer), sampleRate and sourceDuration can be:\n * - Provided explicitly via options\n * - Derived from waveformData (sample_rate and duration properties)\n */\nexport function createClipFromSeconds(options: CreateClipOptionsSeconds): AudioClip {\n const {\n audioBuffer,\n startTime,\n offset = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n // Determine sample rate: audioBuffer > explicit option > waveformData\n const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;\n if (sampleRate === undefined) {\n throw new Error('createClipFromSeconds: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)');\n }\n\n // Determine source duration: audioBuffer > explicit option > waveformData\n const sourceDuration = audioBuffer?.duration ?? options.sourceDuration ?? waveformData?.duration;\n if (sourceDuration === undefined) {\n throw new Error('createClipFromSeconds: sourceDuration is required when audioBuffer is not provided (can use waveformData.duration)');\n }\n\n // Warn if sample rates don't match (could cause visual/audio sync issues)\n if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {\n console.warn(\n `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). ` +\n `Using audioBuffer sample rate. Waveform visualization may be slightly off.`\n );\n }\n\n // Default clip duration to full source duration\n const duration = options.duration ?? 
sourceDuration;\n\n return createClip({\n audioBuffer,\n startSample: Math.round(startTime * sampleRate),\n durationSamples: Math.round(duration * sampleRate),\n offsetSamples: Math.round(offset * sampleRate),\n sampleRate,\n sourceDurationSamples: Math.ceil(sourceDuration * sampleRate),\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n });\n}\n\n/**\n * Creates a new ClipTrack with sensible defaults\n */\nexport function createTrack(options: CreateTrackOptions): ClipTrack {\n const {\n name,\n clips = [],\n muted = false,\n soloed = false,\n volume = 1.0,\n pan = 0,\n color,\n height,\n } = options;\n\n return {\n id: generateId(),\n name,\n clips,\n muted,\n soloed,\n volume,\n pan,\n color,\n height,\n };\n}\n\n/**\n * Creates a new Timeline with sensible defaults\n */\nexport function createTimeline(\n tracks: ClipTrack[],\n sampleRate: number = 44100,\n options?: {\n name?: string;\n tempo?: number;\n timeSignature?: { numerator: number; denominator: number };\n }\n): Timeline {\n // Calculate total duration from all clips across all tracks (in seconds)\n const durationSamples = tracks.reduce((maxSamples, track) => {\n const trackSamples = track.clips.reduce((max, clip) => {\n return Math.max(max, clip.startSample + clip.durationSamples);\n }, 0);\n return Math.max(maxSamples, trackSamples);\n }, 0);\n\n const duration = durationSamples / sampleRate;\n\n return {\n tracks,\n duration,\n sampleRate,\n name: options?.name,\n tempo: options?.tempo,\n timeSignature: options?.timeSignature,\n };\n}\n\n/**\n * Generates a unique ID for clips and tracks\n */\nfunction generateId(): string {\n return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;\n}\n\n/**\n * Utility: Get all clips within a sample range\n */\nexport function getClipsInRange(\n track: ClipTrack,\n startSample: number,\n endSample: number\n): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n // Clip overlaps with range if:\n // - Clip starts before range ends AND\n // - Clip ends after range starts\n return clip.startSample < endSample && clipEnd > startSample;\n });\n}\n\n/**\n * Utility: Get all clips at a specific sample position\n */\nexport function getClipsAtSample(track: ClipTrack, sample: number): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n return sample >= clip.startSample && sample < clipEnd;\n });\n}\n\n/**\n * Utility: Check if two clips overlap\n */\nexport function clipsOverlap(clip1: AudioClip, clip2: AudioClip): boolean {\n const clip1End = clip1.startSample + clip1.durationSamples;\n const clip2End = clip2.startSample + clip2.durationSamples;\n\n return clip1.startSample < clip2End && clip1End > clip2.startSample;\n}\n\n/**\n * Utility: Sort clips by startSample\n */\nexport function sortClipsByTime(clips: AudioClip[]): AudioClip[] {\n return [...clips].sort((a, b) => a.startSample - b.startSample);\n}\n\n/**\n * Utility: Find gaps between clips (silent regions)\n */\nexport interface Gap {\n startSample: number;\n endSample: number;\n durationSamples: number;\n}\n\nexport function findGaps(track: ClipTrack): Gap[] {\n if (track.clips.length === 0) return [];\n\n const sorted = sortClipsByTime(track.clips);\n const gaps: Gap[] = [];\n\n for (let i = 0; i < sorted.length - 1; i++) {\n const currentClipEnd = sorted[i].startSample + sorted[i].durationSamples;\n const nextClipStart = sorted[i + 1].startSample;\n\n if (nextClipStart > currentClipEnd) {\n 
gaps.push({\n startSample: currentClipEnd,\n endSample: nextClipStart,\n durationSamples: nextClipStart - currentClipEnd,\n });\n }\n }\n\n return gaps;\n}\n","export interface WaveformConfig {\n sampleRate: number;\n samplesPerPixel: number;\n waveHeight?: number;\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n}\n\nexport interface AudioBuffer {\n length: number;\n duration: number;\n numberOfChannels: number;\n sampleRate: number;\n getChannelData(channel: number): Float32Array;\n}\n\nexport interface Track {\n id: string;\n name: string;\n src?: string | AudioBuffer; // Support both URL strings and AudioBuffer objects\n gain: number;\n muted: boolean;\n soloed: boolean;\n stereoPan: number;\n startTime: number;\n endTime?: number;\n fadeIn?: Fade;\n fadeOut?: Fade;\n cueIn?: number;\n cueOut?: number;\n}\n\n/**\n * Simple fade configuration\n */\nexport interface Fade {\n /** Duration of the fade in seconds */\n duration: number;\n /** Type of fade curve (default: 'linear') */\n type?: FadeType;\n}\n\nexport type FadeType = 'logarithmic' | 'linear' | 'sCurve' | 'exponential';\n\nexport interface PlaylistConfig {\n samplesPerPixel?: number;\n waveHeight?: number;\n container?: HTMLElement;\n isAutomaticScroll?: boolean;\n timescale?: boolean;\n colors?: {\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n };\n controls?: {\n show?: boolean;\n width?: number;\n };\n zoomLevels?: number[];\n}\n\nexport interface PlayoutState {\n isPlaying: boolean;\n isPaused: boolean;\n cursor: number;\n duration: number;\n}\n\nexport interface TimeSelection {\n start: number;\n end: number;\n}\n\nexport enum InteractionState {\n Cursor = 'cursor',\n Select = 'select',\n Shift = 'shift',\n FadeIn = 'fadein',\n FadeOut = 'fadeout',\n}\n\n// Export clip-based model types\nexport * from './clip';\n","export function samplesToSeconds(samples: number, sampleRate: number): number {\n return samples / sampleRate;\n}\n\nexport function secondsToSamples(seconds: number, sampleRate: number): number {\n return Math.ceil(seconds * sampleRate);\n}\n\nexport function samplesToPixels(samples: number, samplesPerPixel: number): number {\n return Math.floor(samples / samplesPerPixel);\n}\n\nexport function pixelsToSamples(pixels: number, samplesPerPixel: number): number {\n return Math.floor(pixels * samplesPerPixel);\n}\n\nexport function pixelsToSeconds(\n pixels: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return (pixels * samplesPerPixel) / sampleRate;\n}\n\nexport function secondsToPixels(\n seconds: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return Math.ceil((seconds * sampleRate) / 
samplesPerPixel);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACgRO,SAAS,WAAW,SAAuC;AAChE,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAGJ,QAAM,aAAa,aAAa,cAAc,QAAQ,cAAc,cAAc;AAGlF,QAAM,wBAAwB,aAAa,UACtC,QAAQ,0BACP,gBAAgB,aAAa,KAAK,KAAK,aAAa,WAAW,UAAU,IAAI;AAEnF,MAAI,eAAe,QAAW;AAC5B,UAAM,IAAI,MAAM,wGAAwG;AAAA,EAC1H;AACA,MAAI,0BAA0B,QAAW;AACvC,UAAM,IAAI,MAAM,gHAAgH;AAAA,EAClI;AAGA,MAAI,eAAe,gBAAgB,YAAY,eAAe,aAAa,aAAa;AACtF,YAAQ;AAAA,MACN,sCAAsC,YAAY,UAAU,sBAAsB,aAAa,WAAW;AAAA,IAE5G;AAAA,EACF;AAGA,QAAM,kBAAkB,QAAQ,mBAAmB;AAEnD,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAUO,SAAS,sBAAsB,SAA8C;AAClF,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,SAAS;AAAA,IACT,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAGJ,QAAM,aAAa,aAAa,cAAc,QAAQ,cAAc,cAAc;AAClF,MAAI,eAAe,QAAW;AAC5B,UAAM,IAAI,MAAM,mHAAmH;AAAA,EACrI;AAGA,QAAM,iBAAiB,aAAa,YAAY,QAAQ,kBAAkB,cAAc;AACxF,MAAI,mBAAmB,QAAW;AAChC,UAAM,IAAI,MAAM,oHAAoH;AAAA,EACtI;AAGA,MAAI,eAAe,gBAAgB,YAAY,eAAe,aAAa,aAAa;AACtF,YAAQ;AAAA,MACN,sCAAsC,YAAY,UAAU,sBAAsB,aAAa,WAAW;AAAA,IAE5G;AAAA,EACF;AAGA,QAAM,WAAW,QAAQ,YAAY;AAErC,SAAO,WAAW;AAAA,IAChB;AAAA,IACA,aAAa,KAAK,MAAM,YAAY,UAAU;AAAA,IAC9C,iBAAiB,KAAK,MAAM,WAAW,UAAU;AAAA,IACjD,eAAe,KAAK,MAAM,SAAS,UAAU;AAAA,IAC7C;AAAA,IACA,uBAAuB,KAAK,KAAK,iBAAiB,UAAU;AAAA,IAC5D;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AACH;AAKO,SAAS,YAAY,SAAwC;AAClE,QAAM;AAAA,IACJ;AAAA,IACA,QAAQ,CAAC;AAAA,IACT,QAAQ;AAAA,IACR,SAAS;AAAA,IACT,SAAS;AAAA,IACT,MAAM;AAAA,IACN;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,eACd,QACA,aAAqB,OACrB,SAKU;AAEV,QAAM,kBAAkB,OAAO,OAAO,CAAC,YAAY,UAAU;AAC3D,UAAM,eAAe,MAAM,MAAM,OAAO,CAAC,KAAK,SAAS;AACrD,aAAO,KAAK,IAAI,KAAK,KAAK,cAAc,KAAK,eAAe;AAAA,IAC9D,GAAG,CAAC;AACJ,WAAO,KAAK,IAAI,YAAY,YAAY;AAAA,EAC1C,GAAG,CAAC;AAEJ,QAAM,WAAW,kBAAkB;AAEnC,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,SAAS;AAAA,IACf,OAAO,SAAS;AAAA,IAChB,eAAe,SAAS;AAAA,EAC1B;AACF;AAKA,SAAS,aAAqB;AAC5B,SAAO,GAAG,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,OAAO,GAAG,CAAC,CAAC;AACjE;AAKO,SAAS,gBACd,OACA,aACA,WACa;AACb,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AAIxC,WAAO,KAAK,cAAc,aAAa,UAAU;AAAA,EACnD,CAAC;AACH;AAKO,SAAS,iBAAiB,OAAkB,QAA6B;AAC9E,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AACxC,WAAO,UAAU,KAAK,eAAe,SAAS;AAAA,EAChD,CAAC;AACH;AAKO,SAAS,aAAa,OAAkB,OAA2B;AACxE,QAAM,WAAW,MAAM,cAAc,MAAM;AAC3C,QAAM,WAAW,MAAM,cAAc,MAAM;AAE3C,SAAO,MAAM,cAAc,YAAY,WAAW,MAAM;AAC1D;AAKO,SAAS,gBAAgB,OAAiC;AAC/D,SAAO,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAChE;AAWO,SAAS,SAAS,OAAyB;AAChD,MAAI,MAAM,MAAM,WAAW,EAAG,QAAO,CAAC;AAEtC,QAAM,SAAS,gBAAgB,MAAM,KAAK;AAC1C,QAAM,OAAc,CAAC;AAErB,WAAS,IAAI,GAAG,IAAI,OAAO,SAAS,GAAG,KAAK;AAC1C,UAAM,iBAAiB,OAAO,CAAC,EAAE,cAAc,OAAO,CAAC,EAAE;AACzD,UAAM,gBAAgB,OAAO,IAAI,CAAC,EAAE;AAEpC,QAAI,gBAAgB,gBAAgB;AAClC,WAAK,KAAK;AAAA,QACR,aAAa;AAAA,QACb,WAAW;AAAA,QACX,iBAAiB,gBAAgB;AAAA,MACnC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;;;ACrcO,IAAK,mBAAL,kBAAKA,sBAAL;AACL,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,WAAQ;AACR,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,aAAU;AALA,SAAAA;AAAA,GAAA;;;AC3EL,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,UAAU;AACnB;AAEO,SA
AS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,KAAK,KAAK,UAAU,UAAU;AACvC;AAEO,SAAS,gBAAgB,SAAiB,iBAAiC;AAChF,SAAO,KAAK,MAAM,UAAU,eAAe;AAC7C;AAEO,SAAS,gBAAgB,QAAgB,iBAAiC;AAC/E,SAAO,KAAK,MAAM,SAAS,eAAe;AAC5C;AAEO,SAAS,gBACd,QACA,iBACA,YACQ;AACR,SAAQ,SAAS,kBAAmB;AACtC;AAEO,SAAS,gBACd,SACA,iBACA,YACQ;AACR,SAAO,KAAK,KAAM,UAAU,aAAc,eAAe;AAC3D;","names":["InteractionState"]}
package/dist/index.mjs CHANGED
@@ -3,8 +3,6 @@ function createClip(options) {
  const {
  audioBuffer,
  startSample,
- durationSamples = audioBuffer.length,
- // Full buffer by default
  offsetSamples = 0,
  gain = 1,
  name,
@@ -13,12 +11,28 @@ function createClip(options) {
  fadeOut,
  waveformData
  } = options;
+ const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;
+ const sourceDurationSamples = audioBuffer?.length ?? options.sourceDurationSamples ?? (waveformData && sampleRate ? Math.ceil(waveformData.duration * sampleRate) : void 0);
+ if (sampleRate === void 0) {
+ throw new Error("createClip: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)");
+ }
+ if (sourceDurationSamples === void 0) {
+ throw new Error("createClip: sourceDurationSamples is required when audioBuffer is not provided (can use waveformData.duration)");
+ }
+ if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {
+ console.warn(
+ `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). Using audioBuffer sample rate. Waveform visualization may be slightly off.`
+ );
+ }
+ const durationSamples = options.durationSamples ?? sourceDurationSamples;
  return {
  id: generateId(),
  audioBuffer,
  startSample,
  durationSamples,
  offsetSamples,
+ sampleRate,
+ sourceDurationSamples,
  gain,
  name,
  color,
@@ -31,7 +45,6 @@ function createClipFromSeconds(options) {
  const {
  audioBuffer,
  startTime,
- duration = audioBuffer.duration,
  offset = 0,
  gain = 1,
  name,
@@ -40,12 +53,27 @@ function createClipFromSeconds(options) {
  fadeOut,
  waveformData
  } = options;
- const sampleRate = audioBuffer.sampleRate;
+ const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;
+ if (sampleRate === void 0) {
+ throw new Error("createClipFromSeconds: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)");
+ }
+ const sourceDuration = audioBuffer?.duration ?? options.sourceDuration ?? waveformData?.duration;
+ if (sourceDuration === void 0) {
+ throw new Error("createClipFromSeconds: sourceDuration is required when audioBuffer is not provided (can use waveformData.duration)");
+ }
+ if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {
+ console.warn(
+ `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). Using audioBuffer sample rate. Waveform visualization may be slightly off.`
+ );
+ }
+ const duration = options.duration ?? sourceDuration;
  return createClip({
  audioBuffer,
  startSample: Math.round(startTime * sampleRate),
  durationSamples: Math.round(duration * sampleRate),
  offsetSamples: Math.round(offset * sampleRate),
+ sampleRate,
+ sourceDurationSamples: Math.ceil(sourceDuration * sampleRate),
  gain,
  name,
  color,
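Because createClip returns a plain object, one way a consumer could attach the decoded audio once it arrives is a shallow copy that fills in audioBuffer. This upgrade pattern is an assumption about intended usage, not an API shown in this diff, and it assumes the package re-exports its AudioClip and AudioBuffer types:

```typescript
import type { AudioClip, AudioBuffer } from '@waveform-playlist/core';

// Hypothetical upgrade step once decoding finishes: copy the clip, attach the
// buffer, and keep the sample-count metadata consistent with it.
function attachAudio(clip: AudioClip, audioBuffer: AudioBuffer): AudioClip {
  if (audioBuffer.sampleRate !== clip.sampleRate) {
    console.warn('Decoded sample rate differs from clip.sampleRate');
  }
  return { ...clip, audioBuffer, sourceDurationSamples: audioBuffer.length };
}
```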
@@ -1 +1 @@
- {"version":3,"sources":["../src/types/clip.ts","../src/types/index.ts","../src/utils/conversions.ts"],"sourcesContent":["/**\n * Clip-Based Model Types\n *\n * These types support a professional multi-track editing model where:\n * - Each track can contain multiple audio clips\n * - Clips can be positioned anywhere on the timeline\n * - Clips have independent trim points (offset/duration)\n * - Gaps between clips are silent\n * - Clips can overlap (for crossfades)\n */\n\nimport { Fade } from './index';\n\n/**\n * WaveformData object from waveform-data.js library.\n * Supports resample() and slice() for dynamic zoom levels.\n * See: https://github.com/bbc/waveform-data.js\n */\nexport interface WaveformDataObject {\n /** Sample rate of the original audio */\n readonly sample_rate: number;\n /** Number of audio samples per pixel */\n readonly scale: number;\n /** Length of waveform data in pixels */\n readonly length: number;\n /** Bit depth (8 or 16) */\n readonly bits: number;\n /** Duration in seconds */\n readonly duration: number;\n /** Number of channels */\n readonly channels: number;\n /** Get channel data */\n channel: (index: number) => {\n min_array: () => number[];\n max_array: () => number[];\n };\n /** Resample to different scale */\n resample: (options: { scale: number } | { width: number }) => WaveformDataObject;\n /** Slice a portion of the waveform */\n slice: (options: { startTime: number; endTime: number } | { startIndex: number; endIndex: number }) => WaveformDataObject;\n}\n\n/**\n * Generic effects function type for track-level audio processing.\n *\n * The actual implementation receives Tone.js audio nodes. Using generic types\n * here to avoid circular dependencies with the playout package.\n *\n * @param graphEnd - The end of the track's audio graph (Tone.js Gain node)\n * @param destination - Where to connect the effects output (Tone.js ToneAudioNode)\n * @param isOffline - Whether rendering offline (for export)\n * @returns Optional cleanup function called when track is disposed\n *\n * @example\n * ```typescript\n * const trackEffects: TrackEffectsFunction = (graphEnd, destination, isOffline) => {\n * const reverb = new Tone.Reverb({ decay: 1.5 });\n * graphEnd.connect(reverb);\n * reverb.connect(destination);\n *\n * return () => {\n * reverb.dispose();\n * };\n * };\n * ```\n */\nexport type TrackEffectsFunction = (\n graphEnd: unknown,\n destination: unknown,\n isOffline: boolean\n) => void | (() => void);\n\n/**\n * Represents a single audio clip on the timeline\n *\n * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)\n * to avoid floating-point precision errors. 
Convert to seconds only when\n * needed for playback using: seconds = samples / sampleRate\n */\nexport interface AudioClip {\n /** Unique identifier for this clip */\n id: string;\n\n /** The audio buffer containing the audio data */\n audioBuffer: AudioBuffer;\n\n /** Position on timeline where this clip starts (in samples at timeline sampleRate) */\n startSample: number;\n\n /** Duration of this clip (in samples) - how much of the audio buffer to play */\n durationSamples: number;\n\n /** Offset into the audio buffer where playback starts (in samples) - the \"trim start\" point */\n offsetSamples: number;\n\n /** Optional fade in effect */\n fadeIn?: Fade;\n\n /** Optional fade out effect */\n fadeOut?: Fade;\n\n /** Clip-specific gain/volume multiplier (0.0 to 1.0+) */\n gain: number;\n\n /** Optional label/name for this clip */\n name?: string;\n\n /** Optional color for visual distinction */\n color?: string;\n\n /**\n * Pre-computed waveform data from waveform-data.js library.\n * When provided, the library will use this instead of computing peaks from the audioBuffer.\n * Supports resampling to different zoom levels and slicing for clip trimming.\n * Load with: `const waveformData = await loadWaveformData('/path/to/peaks.dat')`\n */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Represents a track containing multiple audio clips\n */\nexport interface ClipTrack {\n /** Unique identifier for this track */\n id: string;\n\n /** Display name for this track */\n name: string;\n\n /** Array of audio clips on this track */\n clips: AudioClip[];\n\n /** Whether this track is muted */\n muted: boolean;\n\n /** Whether this track is soloed */\n soloed: boolean;\n\n /** Track volume (0.0 to 1.0+) */\n volume: number;\n\n /** Stereo pan (-1.0 = left, 0 = center, 1.0 = right) */\n pan: number;\n\n /** Optional track color for visual distinction */\n color?: string;\n\n /** Track height in pixels (for UI) */\n height?: number;\n\n /** Optional effects function for this track */\n effects?: TrackEffectsFunction;\n}\n\n/**\n * Represents the entire timeline/project\n */\nexport interface Timeline {\n /** All tracks in the timeline */\n tracks: ClipTrack[];\n\n /** Total timeline duration in seconds */\n duration: number;\n\n /** Sample rate for all audio (typically 44100 or 48000) */\n sampleRate: number;\n\n /** Optional project name */\n name?: string;\n\n /** Optional tempo (BPM) for grid snapping */\n tempo?: number;\n\n /** Optional time signature for grid snapping */\n timeSignature?: {\n numerator: number;\n denominator: number;\n };\n}\n\n/**\n * Options for creating a new audio clip (using sample counts)\n */\nexport interface CreateClipOptions {\n audioBuffer: AudioBuffer;\n startSample: number; // Position on timeline (in samples)\n durationSamples?: number; // Defaults to full buffer duration (in samples)\n offsetSamples?: number; // Defaults to 0\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Options for creating a new audio clip (using seconds for convenience)\n */\nexport interface CreateClipOptionsSeconds {\n audioBuffer: AudioBuffer;\n startTime: number; // Position on timeline (in seconds)\n duration?: number; // Defaults to full buffer duration (in seconds)\n offset?: number; // Defaults to 0 (in seconds)\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n 
fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Options for creating a new track\n */\nexport interface CreateTrackOptions {\n name: string;\n clips?: AudioClip[];\n muted?: boolean;\n soloed?: boolean;\n volume?: number;\n pan?: number;\n color?: string;\n height?: number;\n}\n\n/**\n * Creates a new AudioClip with sensible defaults (using sample counts)\n */\nexport function createClip(options: CreateClipOptions): AudioClip {\n const {\n audioBuffer,\n startSample,\n durationSamples = audioBuffer.length, // Full buffer by default\n offsetSamples = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n return {\n id: generateId(),\n audioBuffer,\n startSample,\n durationSamples,\n offsetSamples,\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n };\n}\n\n/**\n * Creates a new AudioClip from time-based values (convenience function)\n * Converts seconds to samples using the audioBuffer's sampleRate\n */\nexport function createClipFromSeconds(options: CreateClipOptionsSeconds): AudioClip {\n const {\n audioBuffer,\n startTime,\n duration = audioBuffer.duration,\n offset = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n const sampleRate = audioBuffer.sampleRate;\n\n return createClip({\n audioBuffer,\n startSample: Math.round(startTime * sampleRate),\n durationSamples: Math.round(duration * sampleRate),\n offsetSamples: Math.round(offset * sampleRate),\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n });\n}\n\n/**\n * Creates a new ClipTrack with sensible defaults\n */\nexport function createTrack(options: CreateTrackOptions): ClipTrack {\n const {\n name,\n clips = [],\n muted = false,\n soloed = false,\n volume = 1.0,\n pan = 0,\n color,\n height,\n } = options;\n\n return {\n id: generateId(),\n name,\n clips,\n muted,\n soloed,\n volume,\n pan,\n color,\n height,\n };\n}\n\n/**\n * Creates a new Timeline with sensible defaults\n */\nexport function createTimeline(\n tracks: ClipTrack[],\n sampleRate: number = 44100,\n options?: {\n name?: string;\n tempo?: number;\n timeSignature?: { numerator: number; denominator: number };\n }\n): Timeline {\n // Calculate total duration from all clips across all tracks (in seconds)\n const durationSamples = tracks.reduce((maxSamples, track) => {\n const trackSamples = track.clips.reduce((max, clip) => {\n return Math.max(max, clip.startSample + clip.durationSamples);\n }, 0);\n return Math.max(maxSamples, trackSamples);\n }, 0);\n\n const duration = durationSamples / sampleRate;\n\n return {\n tracks,\n duration,\n sampleRate,\n name: options?.name,\n tempo: options?.tempo,\n timeSignature: options?.timeSignature,\n };\n}\n\n/**\n * Generates a unique ID for clips and tracks\n */\nfunction generateId(): string {\n return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;\n}\n\n/**\n * Utility: Get all clips within a sample range\n */\nexport function getClipsInRange(\n track: ClipTrack,\n startSample: number,\n endSample: number\n): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n // Clip overlaps with range if:\n // - Clip starts before range ends AND\n // - Clip ends after range starts\n return clip.startSample < endSample && clipEnd > startSample;\n });\n}\n\n/**\n * Utility: Get all clips at a specific sample position\n */\nexport function 
getClipsAtSample(track: ClipTrack, sample: number): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n return sample >= clip.startSample && sample < clipEnd;\n });\n}\n\n/**\n * Utility: Check if two clips overlap\n */\nexport function clipsOverlap(clip1: AudioClip, clip2: AudioClip): boolean {\n const clip1End = clip1.startSample + clip1.durationSamples;\n const clip2End = clip2.startSample + clip2.durationSamples;\n\n return clip1.startSample < clip2End && clip1End > clip2.startSample;\n}\n\n/**\n * Utility: Sort clips by startSample\n */\nexport function sortClipsByTime(clips: AudioClip[]): AudioClip[] {\n return [...clips].sort((a, b) => a.startSample - b.startSample);\n}\n\n/**\n * Utility: Find gaps between clips (silent regions)\n */\nexport interface Gap {\n startSample: number;\n endSample: number;\n durationSamples: number;\n}\n\nexport function findGaps(track: ClipTrack): Gap[] {\n if (track.clips.length === 0) return [];\n\n const sorted = sortClipsByTime(track.clips);\n const gaps: Gap[] = [];\n\n for (let i = 0; i < sorted.length - 1; i++) {\n const currentClipEnd = sorted[i].startSample + sorted[i].durationSamples;\n const nextClipStart = sorted[i + 1].startSample;\n\n if (nextClipStart > currentClipEnd) {\n gaps.push({\n startSample: currentClipEnd,\n endSample: nextClipStart,\n durationSamples: nextClipStart - currentClipEnd,\n });\n }\n }\n\n return gaps;\n}\n","export interface WaveformConfig {\n sampleRate: number;\n samplesPerPixel: number;\n waveHeight?: number;\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n}\n\nexport interface AudioBuffer {\n length: number;\n duration: number;\n numberOfChannels: number;\n sampleRate: number;\n getChannelData(channel: number): Float32Array;\n}\n\nexport interface Track {\n id: string;\n name: string;\n src?: string | AudioBuffer; // Support both URL strings and AudioBuffer objects\n gain: number;\n muted: boolean;\n soloed: boolean;\n stereoPan: number;\n startTime: number;\n endTime?: number;\n fadeIn?: Fade;\n fadeOut?: Fade;\n cueIn?: number;\n cueOut?: number;\n}\n\n/**\n * Simple fade configuration\n */\nexport interface Fade {\n /** Duration of the fade in seconds */\n duration: number;\n /** Type of fade curve (default: 'linear') */\n type?: FadeType;\n}\n\nexport type FadeType = 'logarithmic' | 'linear' | 'sCurve' | 'exponential';\n\nexport interface PlaylistConfig {\n samplesPerPixel?: number;\n waveHeight?: number;\n container?: HTMLElement;\n isAutomaticScroll?: boolean;\n timescale?: boolean;\n colors?: {\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n };\n controls?: {\n show?: boolean;\n width?: number;\n };\n zoomLevels?: number[];\n}\n\nexport interface PlayoutState {\n isPlaying: boolean;\n isPaused: boolean;\n cursor: number;\n duration: number;\n}\n\nexport interface TimeSelection {\n start: number;\n end: number;\n}\n\nexport enum InteractionState {\n Cursor = 'cursor',\n Select = 'select',\n Shift = 'shift',\n FadeIn = 'fadein',\n FadeOut = 'fadeout',\n}\n\n// Export clip-based model types\nexport * from './clip';\n","export function samplesToSeconds(samples: number, sampleRate: number): number {\n return samples / sampleRate;\n}\n\nexport function secondsToSamples(seconds: number, sampleRate: number): number {\n return Math.ceil(seconds * sampleRate);\n}\n\nexport function samplesToPixels(samples: number, samplesPerPixel: number): number {\n return Math.floor(samples / 
samplesPerPixel);\n}\n\nexport function pixelsToSamples(pixels: number, samplesPerPixel: number): number {\n return Math.floor(pixels * samplesPerPixel);\n}\n\nexport function pixelsToSeconds(\n pixels: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return (pixels * samplesPerPixel) / sampleRate;\n}\n\nexport function secondsToPixels(\n seconds: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return Math.ceil((seconds * sampleRate) / samplesPerPixel);\n}\n"],"mappings":";AAuOO,SAAS,WAAW,SAAuC;AAChE,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,kBAAkB,YAAY;AAAA;AAAA,IAC9B,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAMO,SAAS,sBAAsB,SAA8C;AAClF,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,WAAW,YAAY;AAAA,IACvB,SAAS;AAAA,IACT,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,QAAM,aAAa,YAAY;AAE/B,SAAO,WAAW;AAAA,IAChB;AAAA,IACA,aAAa,KAAK,MAAM,YAAY,UAAU;AAAA,IAC9C,iBAAiB,KAAK,MAAM,WAAW,UAAU;AAAA,IACjD,eAAe,KAAK,MAAM,SAAS,UAAU;AAAA,IAC7C;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AACH;AAKO,SAAS,YAAY,SAAwC;AAClE,QAAM;AAAA,IACJ;AAAA,IACA,QAAQ,CAAC;AAAA,IACT,QAAQ;AAAA,IACR,SAAS;AAAA,IACT,SAAS;AAAA,IACT,MAAM;AAAA,IACN;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,eACd,QACA,aAAqB,OACrB,SAKU;AAEV,QAAM,kBAAkB,OAAO,OAAO,CAAC,YAAY,UAAU;AAC3D,UAAM,eAAe,MAAM,MAAM,OAAO,CAAC,KAAK,SAAS;AACrD,aAAO,KAAK,IAAI,KAAK,KAAK,cAAc,KAAK,eAAe;AAAA,IAC9D,GAAG,CAAC;AACJ,WAAO,KAAK,IAAI,YAAY,YAAY;AAAA,EAC1C,GAAG,CAAC;AAEJ,QAAM,WAAW,kBAAkB;AAEnC,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,SAAS;AAAA,IACf,OAAO,SAAS;AAAA,IAChB,eAAe,SAAS;AAAA,EAC1B;AACF;AAKA,SAAS,aAAqB;AAC5B,SAAO,GAAG,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,OAAO,GAAG,CAAC,CAAC;AACjE;AAKO,SAAS,gBACd,OACA,aACA,WACa;AACb,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AAIxC,WAAO,KAAK,cAAc,aAAa,UAAU;AAAA,EACnD,CAAC;AACH;AAKO,SAAS,iBAAiB,OAAkB,QAA6B;AAC9E,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AACxC,WAAO,UAAU,KAAK,eAAe,SAAS;AAAA,EAChD,CAAC;AACH;AAKO,SAAS,aAAa,OAAkB,OAA2B;AACxE,QAAM,WAAW,MAAM,cAAc,MAAM;AAC3C,QAAM,WAAW,MAAM,cAAc,MAAM;AAE3C,SAAO,MAAM,cAAc,YAAY,WAAW,MAAM;AAC1D;AAKO,SAAS,gBAAgB,OAAiC;AAC/D,SAAO,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAChE;AAWO,SAAS,SAAS,OAAyB;AAChD,MAAI,MAAM,MAAM,WAAW,EAAG,QAAO,CAAC;AAEtC,QAAM,SAAS,gBAAgB,MAAM,KAAK;AAC1C,QAAM,OAAc,CAAC;AAErB,WAAS,IAAI,GAAG,IAAI,OAAO,SAAS,GAAG,KAAK;AAC1C,UAAM,iBAAiB,OAAO,CAAC,EAAE,cAAc,OAAO,CAAC,EAAE;AACzD,UAAM,gBAAgB,OAAO,IAAI,CAAC,EAAE;AAEpC,QAAI,gBAAgB,gBAAgB;AAClC,WAAK,KAAK;AAAA,QACR,aAAa;AAAA,QACb,WAAW;AAAA,QACX,iBAAiB,gBAAgB;AAAA,MACnC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;;;ACvWO,IAAK,mBAAL,kBAAKA,sBAAL;AACL,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,WAAQ;AACR,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,aAAU;AALA,SAAAA;AAAA,GAAA;;;AC3EL,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,UAAU;AACnB;AAEO,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,KAAK,KAAK,UAAU,UAAU;AACvC;AAEO,SAAS,gBAAgB,SAAiB,iBAAiC;AAChF,SAAO,KAAK,MAAM,UAAU,eAAe;AAC7C;AAEO,SAAS,gBAAgB,QAAgB,iBAAiC;AAC/E,SAAO,KAAK,MAAM,SAAS,eAAe;AAC5C;AAEO,SAAS,gBACd,QACA,iBACA,YACQ;AACR,SAAQ,SAAS,kBAAmB;AACtC;AAEO,SAAS,gBACd,SACA,iBACA,YACQ;AACR,SAAO,KAAK,KAAM,UAAU,aAAc,eAAe;AAC3D;","names":["InteractionState"]}
1
+ {"version":3,"sources":["../src/types/clip.ts","../src/types/index.ts","../src/utils/conversions.ts"],"sourcesContent":["/**\n * Clip-Based Model Types\n *\n * These types support a professional multi-track editing model where:\n * - Each track can contain multiple audio clips\n * - Clips can be positioned anywhere on the timeline\n * - Clips have independent trim points (offset/duration)\n * - Gaps between clips are silent\n * - Clips can overlap (for crossfades)\n */\n\nimport { Fade } from './index';\n\n/**\n * WaveformData object from waveform-data.js library.\n * Supports resample() and slice() for dynamic zoom levels.\n * See: https://github.com/bbc/waveform-data.js\n */\nexport interface WaveformDataObject {\n /** Sample rate of the original audio */\n readonly sample_rate: number;\n /** Number of audio samples per pixel */\n readonly scale: number;\n /** Length of waveform data in pixels */\n readonly length: number;\n /** Bit depth (8 or 16) */\n readonly bits: number;\n /** Duration in seconds */\n readonly duration: number;\n /** Number of channels */\n readonly channels: number;\n /** Get channel data */\n channel: (index: number) => {\n min_array: () => number[];\n max_array: () => number[];\n };\n /** Resample to different scale */\n resample: (options: { scale: number } | { width: number }) => WaveformDataObject;\n /** Slice a portion of the waveform */\n slice: (options: { startTime: number; endTime: number } | { startIndex: number; endIndex: number }) => WaveformDataObject;\n}\n\n/**\n * Generic effects function type for track-level audio processing.\n *\n * The actual implementation receives Tone.js audio nodes. Using generic types\n * here to avoid circular dependencies with the playout package.\n *\n * @param graphEnd - The end of the track's audio graph (Tone.js Gain node)\n * @param destination - Where to connect the effects output (Tone.js ToneAudioNode)\n * @param isOffline - Whether rendering offline (for export)\n * @returns Optional cleanup function called when track is disposed\n *\n * @example\n * ```typescript\n * const trackEffects: TrackEffectsFunction = (graphEnd, destination, isOffline) => {\n * const reverb = new Tone.Reverb({ decay: 1.5 });\n * graphEnd.connect(reverb);\n * reverb.connect(destination);\n *\n * return () => {\n * reverb.dispose();\n * };\n * };\n * ```\n */\nexport type TrackEffectsFunction = (\n graphEnd: unknown,\n destination: unknown,\n isOffline: boolean\n) => void | (() => void);\n\n/**\n * Represents a single audio clip on the timeline\n *\n * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)\n * to avoid floating-point precision errors. 
Convert to seconds only when\n * needed for playback using: seconds = samples / sampleRate\n *\n * Clips can be created with just waveformData (for instant visual rendering)\n * and have audioBuffer added later when audio finishes loading.\n */\nexport interface AudioClip {\n /** Unique identifier for this clip */\n id: string;\n\n /**\n * The audio buffer containing the audio data.\n * Optional for peaks-first rendering - can be added later.\n * Required for playback and editing operations.\n */\n audioBuffer?: AudioBuffer;\n\n /** Position on timeline where this clip starts (in samples at timeline sampleRate) */\n startSample: number;\n\n /** Duration of this clip (in samples) - how much of the audio buffer to play */\n durationSamples: number;\n\n /** Offset into the audio buffer where playback starts (in samples) - the \"trim start\" point */\n offsetSamples: number;\n\n /**\n * Sample rate for this clip's audio.\n * Required when audioBuffer is not provided (for peaks-first rendering).\n * When audioBuffer is present, this should match audioBuffer.sampleRate.\n */\n sampleRate: number;\n\n /**\n * Total duration of the source audio in samples.\n * Required when audioBuffer is not provided (for trim bounds calculation).\n * When audioBuffer is present, this should equal audioBuffer.length.\n */\n sourceDurationSamples: number;\n\n /** Optional fade in effect */\n fadeIn?: Fade;\n\n /** Optional fade out effect */\n fadeOut?: Fade;\n\n /** Clip-specific gain/volume multiplier (0.0 to 1.0+) */\n gain: number;\n\n /** Optional label/name for this clip */\n name?: string;\n\n /** Optional color for visual distinction */\n color?: string;\n\n /**\n * Pre-computed waveform data from waveform-data.js library.\n * When provided, the library will use this instead of computing peaks from the audioBuffer.\n * Supports resampling to different zoom levels and slicing for clip trimming.\n * Load with: `const waveformData = await loadWaveformData('/path/to/peaks.dat')`\n */\n waveformData?: WaveformDataObject;\n}\n\n/**\n * Represents a track containing multiple audio clips\n */\nexport interface ClipTrack {\n /** Unique identifier for this track */\n id: string;\n\n /** Display name for this track */\n name: string;\n\n /** Array of audio clips on this track */\n clips: AudioClip[];\n\n /** Whether this track is muted */\n muted: boolean;\n\n /** Whether this track is soloed */\n soloed: boolean;\n\n /** Track volume (0.0 to 1.0+) */\n volume: number;\n\n /** Stereo pan (-1.0 = left, 0 = center, 1.0 = right) */\n pan: number;\n\n /** Optional track color for visual distinction */\n color?: string;\n\n /** Track height in pixels (for UI) */\n height?: number;\n\n /** Optional effects function for this track */\n effects?: TrackEffectsFunction;\n}\n\n/**\n * Represents the entire timeline/project\n */\nexport interface Timeline {\n /** All tracks in the timeline */\n tracks: ClipTrack[];\n\n /** Total timeline duration in seconds */\n duration: number;\n\n /** Sample rate for all audio (typically 44100 or 48000) */\n sampleRate: number;\n\n /** Optional project name */\n name?: string;\n\n /** Optional tempo (BPM) for grid snapping */\n tempo?: number;\n\n /** Optional time signature for grid snapping */\n timeSignature?: {\n numerator: number;\n denominator: number;\n };\n}\n\n/**\n * Options for creating a new audio clip (using sample counts)\n *\n * Either audioBuffer OR (sampleRate + sourceDurationSamples + waveformData) must be provided.\n * Providing waveformData without audioBuffer enables 
peaks-first rendering.\n */\nexport interface CreateClipOptions {\n /** Audio buffer - optional for peaks-first rendering */\n audioBuffer?: AudioBuffer;\n startSample: number; // Position on timeline (in samples)\n durationSamples?: number; // Defaults to full buffer/source duration (in samples)\n offsetSamples?: number; // Defaults to 0\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n /** Sample rate - required if audioBuffer not provided */\n sampleRate?: number;\n /** Total source audio duration in samples - required if audioBuffer not provided */\n sourceDurationSamples?: number;\n}\n\n/**\n * Options for creating a new audio clip (using seconds for convenience)\n *\n * Either audioBuffer OR (sampleRate + sourceDuration + waveformData) must be provided.\n * Providing waveformData without audioBuffer enables peaks-first rendering.\n */\nexport interface CreateClipOptionsSeconds {\n /** Audio buffer - optional for peaks-first rendering */\n audioBuffer?: AudioBuffer;\n startTime: number; // Position on timeline (in seconds)\n duration?: number; // Defaults to full buffer/source duration (in seconds)\n offset?: number; // Defaults to 0 (in seconds)\n gain?: number; // Defaults to 1.0\n name?: string;\n color?: string;\n fadeIn?: Fade;\n fadeOut?: Fade;\n /** Pre-computed waveform data from waveform-data.js (e.g., from BBC audiowaveform) */\n waveformData?: WaveformDataObject;\n /** Sample rate - required if audioBuffer not provided */\n sampleRate?: number;\n /** Total source audio duration in seconds - required if audioBuffer not provided */\n sourceDuration?: number;\n}\n\n/**\n * Options for creating a new track\n */\nexport interface CreateTrackOptions {\n name: string;\n clips?: AudioClip[];\n muted?: boolean;\n soloed?: boolean;\n volume?: number;\n pan?: number;\n color?: string;\n height?: number;\n}\n\n/**\n * Creates a new AudioClip with sensible defaults (using sample counts)\n *\n * For peaks-first rendering (no audioBuffer), sampleRate and sourceDurationSamples can be:\n * - Provided explicitly via options\n * - Derived from waveformData (sample_rate and duration properties)\n */\nexport function createClip(options: CreateClipOptions): AudioClip {\n const {\n audioBuffer,\n startSample,\n offsetSamples = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n // Determine sample rate: audioBuffer > explicit option > waveformData\n const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;\n\n // Determine source duration: audioBuffer > explicit option > waveformData (converted to samples)\n const sourceDurationSamples = audioBuffer?.length\n ?? options.sourceDurationSamples\n ?? (waveformData && sampleRate ? 
Math.ceil(waveformData.duration * sampleRate) : undefined);\n\n if (sampleRate === undefined) {\n throw new Error('createClip: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)');\n }\n if (sourceDurationSamples === undefined) {\n throw new Error('createClip: sourceDurationSamples is required when audioBuffer is not provided (can use waveformData.duration)');\n }\n\n // Warn if sample rates don't match\n if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {\n console.warn(\n `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). ` +\n `Using audioBuffer sample rate. Waveform visualization may be slightly off.`\n );\n }\n\n // Default duration to full source duration\n const durationSamples = options.durationSamples ?? sourceDurationSamples;\n\n return {\n id: generateId(),\n audioBuffer,\n startSample,\n durationSamples,\n offsetSamples,\n sampleRate,\n sourceDurationSamples,\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n };\n}\n\n/**\n * Creates a new AudioClip from time-based values (convenience function)\n * Converts seconds to samples using the audioBuffer's sampleRate or explicit sampleRate\n *\n * For peaks-first rendering (no audioBuffer), sampleRate and sourceDuration can be:\n * - Provided explicitly via options\n * - Derived from waveformData (sample_rate and duration properties)\n */\nexport function createClipFromSeconds(options: CreateClipOptionsSeconds): AudioClip {\n const {\n audioBuffer,\n startTime,\n offset = 0,\n gain = 1.0,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n } = options;\n\n // Determine sample rate: audioBuffer > explicit option > waveformData\n const sampleRate = audioBuffer?.sampleRate ?? options.sampleRate ?? waveformData?.sample_rate;\n if (sampleRate === undefined) {\n throw new Error('createClipFromSeconds: sampleRate is required when audioBuffer is not provided (can use waveformData.sample_rate)');\n }\n\n // Determine source duration: audioBuffer > explicit option > waveformData\n const sourceDuration = audioBuffer?.duration ?? options.sourceDuration ?? waveformData?.duration;\n if (sourceDuration === undefined) {\n throw new Error('createClipFromSeconds: sourceDuration is required when audioBuffer is not provided (can use waveformData.duration)');\n }\n\n // Warn if sample rates don't match (could cause visual/audio sync issues)\n if (audioBuffer && waveformData && audioBuffer.sampleRate !== waveformData.sample_rate) {\n console.warn(\n `Sample rate mismatch: audioBuffer (${audioBuffer.sampleRate}) vs waveformData (${waveformData.sample_rate}). ` +\n `Using audioBuffer sample rate. Waveform visualization may be slightly off.`\n );\n }\n\n // Default clip duration to full source duration\n const duration = options.duration ?? 
sourceDuration;\n\n return createClip({\n audioBuffer,\n startSample: Math.round(startTime * sampleRate),\n durationSamples: Math.round(duration * sampleRate),\n offsetSamples: Math.round(offset * sampleRate),\n sampleRate,\n sourceDurationSamples: Math.ceil(sourceDuration * sampleRate),\n gain,\n name,\n color,\n fadeIn,\n fadeOut,\n waveformData,\n });\n}\n\n/**\n * Creates a new ClipTrack with sensible defaults\n */\nexport function createTrack(options: CreateTrackOptions): ClipTrack {\n const {\n name,\n clips = [],\n muted = false,\n soloed = false,\n volume = 1.0,\n pan = 0,\n color,\n height,\n } = options;\n\n return {\n id: generateId(),\n name,\n clips,\n muted,\n soloed,\n volume,\n pan,\n color,\n height,\n };\n}\n\n/**\n * Creates a new Timeline with sensible defaults\n */\nexport function createTimeline(\n tracks: ClipTrack[],\n sampleRate: number = 44100,\n options?: {\n name?: string;\n tempo?: number;\n timeSignature?: { numerator: number; denominator: number };\n }\n): Timeline {\n // Calculate total duration from all clips across all tracks (in seconds)\n const durationSamples = tracks.reduce((maxSamples, track) => {\n const trackSamples = track.clips.reduce((max, clip) => {\n return Math.max(max, clip.startSample + clip.durationSamples);\n }, 0);\n return Math.max(maxSamples, trackSamples);\n }, 0);\n\n const duration = durationSamples / sampleRate;\n\n return {\n tracks,\n duration,\n sampleRate,\n name: options?.name,\n tempo: options?.tempo,\n timeSignature: options?.timeSignature,\n };\n}\n\n/**\n * Generates a unique ID for clips and tracks\n */\nfunction generateId(): string {\n return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;\n}\n\n/**\n * Utility: Get all clips within a sample range\n */\nexport function getClipsInRange(\n track: ClipTrack,\n startSample: number,\n endSample: number\n): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n // Clip overlaps with range if:\n // - Clip starts before range ends AND\n // - Clip ends after range starts\n return clip.startSample < endSample && clipEnd > startSample;\n });\n}\n\n/**\n * Utility: Get all clips at a specific sample position\n */\nexport function getClipsAtSample(track: ClipTrack, sample: number): AudioClip[] {\n return track.clips.filter((clip) => {\n const clipEnd = clip.startSample + clip.durationSamples;\n return sample >= clip.startSample && sample < clipEnd;\n });\n}\n\n/**\n * Utility: Check if two clips overlap\n */\nexport function clipsOverlap(clip1: AudioClip, clip2: AudioClip): boolean {\n const clip1End = clip1.startSample + clip1.durationSamples;\n const clip2End = clip2.startSample + clip2.durationSamples;\n\n return clip1.startSample < clip2End && clip1End > clip2.startSample;\n}\n\n/**\n * Utility: Sort clips by startSample\n */\nexport function sortClipsByTime(clips: AudioClip[]): AudioClip[] {\n return [...clips].sort((a, b) => a.startSample - b.startSample);\n}\n\n/**\n * Utility: Find gaps between clips (silent regions)\n */\nexport interface Gap {\n startSample: number;\n endSample: number;\n durationSamples: number;\n}\n\nexport function findGaps(track: ClipTrack): Gap[] {\n if (track.clips.length === 0) return [];\n\n const sorted = sortClipsByTime(track.clips);\n const gaps: Gap[] = [];\n\n for (let i = 0; i < sorted.length - 1; i++) {\n const currentClipEnd = sorted[i].startSample + sorted[i].durationSamples;\n const nextClipStart = sorted[i + 1].startSample;\n\n if (nextClipStart > currentClipEnd) {\n 
gaps.push({\n startSample: currentClipEnd,\n endSample: nextClipStart,\n durationSamples: nextClipStart - currentClipEnd,\n });\n }\n }\n\n return gaps;\n}\n","export interface WaveformConfig {\n sampleRate: number;\n samplesPerPixel: number;\n waveHeight?: number;\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n}\n\nexport interface AudioBuffer {\n length: number;\n duration: number;\n numberOfChannels: number;\n sampleRate: number;\n getChannelData(channel: number): Float32Array;\n}\n\nexport interface Track {\n id: string;\n name: string;\n src?: string | AudioBuffer; // Support both URL strings and AudioBuffer objects\n gain: number;\n muted: boolean;\n soloed: boolean;\n stereoPan: number;\n startTime: number;\n endTime?: number;\n fadeIn?: Fade;\n fadeOut?: Fade;\n cueIn?: number;\n cueOut?: number;\n}\n\n/**\n * Simple fade configuration\n */\nexport interface Fade {\n /** Duration of the fade in seconds */\n duration: number;\n /** Type of fade curve (default: 'linear') */\n type?: FadeType;\n}\n\nexport type FadeType = 'logarithmic' | 'linear' | 'sCurve' | 'exponential';\n\nexport interface PlaylistConfig {\n samplesPerPixel?: number;\n waveHeight?: number;\n container?: HTMLElement;\n isAutomaticScroll?: boolean;\n timescale?: boolean;\n colors?: {\n waveOutlineColor?: string;\n waveFillColor?: string;\n waveProgressColor?: string;\n };\n controls?: {\n show?: boolean;\n width?: number;\n };\n zoomLevels?: number[];\n}\n\nexport interface PlayoutState {\n isPlaying: boolean;\n isPaused: boolean;\n cursor: number;\n duration: number;\n}\n\nexport interface TimeSelection {\n start: number;\n end: number;\n}\n\nexport enum InteractionState {\n Cursor = 'cursor',\n Select = 'select',\n Shift = 'shift',\n FadeIn = 'fadein',\n FadeOut = 'fadeout',\n}\n\n// Export clip-based model types\nexport * from './clip';\n","export function samplesToSeconds(samples: number, sampleRate: number): number {\n return samples / sampleRate;\n}\n\nexport function secondsToSamples(seconds: number, sampleRate: number): number {\n return Math.ceil(seconds * sampleRate);\n}\n\nexport function samplesToPixels(samples: number, samplesPerPixel: number): number {\n return Math.floor(samples / samplesPerPixel);\n}\n\nexport function pixelsToSamples(pixels: number, samplesPerPixel: number): number {\n return Math.floor(pixels * samplesPerPixel);\n}\n\nexport function pixelsToSeconds(\n pixels: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return (pixels * samplesPerPixel) / sampleRate;\n}\n\nexport function secondsToPixels(\n seconds: number,\n samplesPerPixel: number,\n sampleRate: number\n): number {\n return Math.ceil((seconds * sampleRate) / 
samplesPerPixel);\n}\n"],"mappings":";AAgRO,SAAS,WAAW,SAAuC;AAChE,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAGJ,QAAM,aAAa,aAAa,cAAc,QAAQ,cAAc,cAAc;AAGlF,QAAM,wBAAwB,aAAa,UACtC,QAAQ,0BACP,gBAAgB,aAAa,KAAK,KAAK,aAAa,WAAW,UAAU,IAAI;AAEnF,MAAI,eAAe,QAAW;AAC5B,UAAM,IAAI,MAAM,wGAAwG;AAAA,EAC1H;AACA,MAAI,0BAA0B,QAAW;AACvC,UAAM,IAAI,MAAM,gHAAgH;AAAA,EAClI;AAGA,MAAI,eAAe,gBAAgB,YAAY,eAAe,aAAa,aAAa;AACtF,YAAQ;AAAA,MACN,sCAAsC,YAAY,UAAU,sBAAsB,aAAa,WAAW;AAAA,IAE5G;AAAA,EACF;AAGA,QAAM,kBAAkB,QAAQ,mBAAmB;AAEnD,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAUO,SAAS,sBAAsB,SAA8C;AAClF,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,SAAS;AAAA,IACT,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,IAAI;AAGJ,QAAM,aAAa,aAAa,cAAc,QAAQ,cAAc,cAAc;AAClF,MAAI,eAAe,QAAW;AAC5B,UAAM,IAAI,MAAM,mHAAmH;AAAA,EACrI;AAGA,QAAM,iBAAiB,aAAa,YAAY,QAAQ,kBAAkB,cAAc;AACxF,MAAI,mBAAmB,QAAW;AAChC,UAAM,IAAI,MAAM,oHAAoH;AAAA,EACtI;AAGA,MAAI,eAAe,gBAAgB,YAAY,eAAe,aAAa,aAAa;AACtF,YAAQ;AAAA,MACN,sCAAsC,YAAY,UAAU,sBAAsB,aAAa,WAAW;AAAA,IAE5G;AAAA,EACF;AAGA,QAAM,WAAW,QAAQ,YAAY;AAErC,SAAO,WAAW;AAAA,IAChB;AAAA,IACA,aAAa,KAAK,MAAM,YAAY,UAAU;AAAA,IAC9C,iBAAiB,KAAK,MAAM,WAAW,UAAU;AAAA,IACjD,eAAe,KAAK,MAAM,SAAS,UAAU;AAAA,IAC7C;AAAA,IACA,uBAAuB,KAAK,KAAK,iBAAiB,UAAU;AAAA,IAC5D;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AACH;AAKO,SAAS,YAAY,SAAwC;AAClE,QAAM;AAAA,IACJ;AAAA,IACA,QAAQ,CAAC;AAAA,IACT,QAAQ;AAAA,IACR,SAAS;AAAA,IACT,SAAS;AAAA,IACT,MAAM;AAAA,IACN;AAAA,IACA;AAAA,EACF,IAAI;AAEJ,SAAO;AAAA,IACL,IAAI,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,eACd,QACA,aAAqB,OACrB,SAKU;AAEV,QAAM,kBAAkB,OAAO,OAAO,CAAC,YAAY,UAAU;AAC3D,UAAM,eAAe,MAAM,MAAM,OAAO,CAAC,KAAK,SAAS;AACrD,aAAO,KAAK,IAAI,KAAK,KAAK,cAAc,KAAK,eAAe;AAAA,IAC9D,GAAG,CAAC;AACJ,WAAO,KAAK,IAAI,YAAY,YAAY;AAAA,EAC1C,GAAG,CAAC;AAEJ,QAAM,WAAW,kBAAkB;AAEnC,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,SAAS;AAAA,IACf,OAAO,SAAS;AAAA,IAChB,eAAe,SAAS;AAAA,EAC1B;AACF;AAKA,SAAS,aAAqB;AAC5B,SAAO,GAAG,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,OAAO,GAAG,CAAC,CAAC;AACjE;AAKO,SAAS,gBACd,OACA,aACA,WACa;AACb,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AAIxC,WAAO,KAAK,cAAc,aAAa,UAAU;AAAA,EACnD,CAAC;AACH;AAKO,SAAS,iBAAiB,OAAkB,QAA6B;AAC9E,SAAO,MAAM,MAAM,OAAO,CAAC,SAAS;AAClC,UAAM,UAAU,KAAK,cAAc,KAAK;AACxC,WAAO,UAAU,KAAK,eAAe,SAAS;AAAA,EAChD,CAAC;AACH;AAKO,SAAS,aAAa,OAAkB,OAA2B;AACxE,QAAM,WAAW,MAAM,cAAc,MAAM;AAC3C,QAAM,WAAW,MAAM,cAAc,MAAM;AAE3C,SAAO,MAAM,cAAc,YAAY,WAAW,MAAM;AAC1D;AAKO,SAAS,gBAAgB,OAAiC;AAC/D,SAAO,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAChE;AAWO,SAAS,SAAS,OAAyB;AAChD,MAAI,MAAM,MAAM,WAAW,EAAG,QAAO,CAAC;AAEtC,QAAM,SAAS,gBAAgB,MAAM,KAAK;AAC1C,QAAM,OAAc,CAAC;AAErB,WAAS,IAAI,GAAG,IAAI,OAAO,SAAS,GAAG,KAAK;AAC1C,UAAM,iBAAiB,OAAO,CAAC,EAAE,cAAc,OAAO,CAAC,EAAE;AACzD,UAAM,gBAAgB,OAAO,IAAI,CAAC,EAAE;AAEpC,QAAI,gBAAgB,gBAAgB;AAClC,WAAK,KAAK;AAAA,QACR,aAAa;AAAA,QACb,WAAW;AAAA,QACX,iBAAiB,gBAAgB;AAAA,MACnC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;;;ACrcO,IAAK,mBAAL,kBAAKA,sBAAL;AACL,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,WAAQ;AACR,EAAAA,kBAAA,YAAS;AACT,EAAAA,kBAAA,aAAU;AALA,SAAAA;AAAA,GAAA;;;AC3EL,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,UAAU;AACnB;AAEO,SAAS,iBAAiB,SAAiB,YAA4B;AAC5E,SAAO,KAAK,KAAK,UAAU,UAAU;AACvC;AAEO,SAAS,gBAAgB,SAAiB,iBAAiC;AAChF,SAAO,KAAK,MAAM,UAAU,eAAe;A
AC7C;AAEO,SAAS,gBAAgB,QAAgB,iBAAiC;AAC/E,SAAO,KAAK,MAAM,SAAS,eAAe;AAC5C;AAEO,SAAS,gBACd,QACA,iBACA,YACQ;AACR,SAAQ,SAAS,kBAAmB;AACtC;AAEO,SAAS,gBACd,SACA,iBACA,YACQ;AACR,SAAO,KAAK,KAAM,UAAU,aAAc,eAAe;AAC3D;","names":["InteractionState"]}
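
The updated sourcemap above embeds the new `clip.ts` source, which also shows the rounding convention used when converting seconds to sample counts: timeline positions and clip lengths use `Math.round`, while the total source length uses `Math.ceil`, so a fractional trailing sample never falls outside the trim bounds. A small standalone check (illustrative only, mirroring that convention):

```typescript
// Mirrors the conversions in createClipFromSeconds; values are examples.
const sampleRate = 44100;
const startSample = Math.round(2.5 * sampleRate);              // 110250
const durationSamples = Math.round(1.5 * sampleRate);           // 66150
const sourceDurationSamples = Math.ceil(3.00001 * sampleRate);  // 132301 (ceil keeps the partial sample)
console.log(startSample, durationSamples, sourceDurationSamples);
```
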
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@waveform-playlist/core",
3
- "version": "5.0.0-alpha.8",
3
+ "version": "5.0.0-alpha.9",
4
4
  "description": "Core types, interfaces and utilities for waveform-playlist",
5
5
  "main": "./dist/index.js",
6
6
  "module": "./dist/index.mjs",