@waveform-playlist/browser 5.0.0-alpha.0 → 5.0.0-alpha.10
- package/dist/index.d.ts +204 -32
- package/dist/index.js +391 -508
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +5807 -21599
- package/dist/index.mjs.map +1 -1
- package/package.json +14 -12
package/dist/index.d.ts
CHANGED
```diff
@@ -64,18 +64,37 @@ declare interface AnnotationData {
  * IMPORTANT: All positions/durations are stored as SAMPLE COUNTS (integers)
  * to avoid floating-point precision errors. Convert to seconds only when
  * needed for playback using: seconds = samples / sampleRate
+ *
+ * Clips can be created with just waveformData (for instant visual rendering)
+ * and have audioBuffer added later when audio finishes loading.
  */
-declare interface AudioClip {
+export declare interface AudioClip {
     /** Unique identifier for this clip */
     id: string;
-    /**
-
+    /**
+     * The audio buffer containing the audio data.
+     * Optional for peaks-first rendering - can be added later.
+     * Required for playback and editing operations.
+     */
+    audioBuffer?: AudioBuffer;
     /** Position on timeline where this clip starts (in samples at timeline sampleRate) */
     startSample: number;
     /** Duration of this clip (in samples) - how much of the audio buffer to play */
     durationSamples: number;
     /** Offset into the audio buffer where playback starts (in samples) - the "trim start" point */
     offsetSamples: number;
+    /**
+     * Sample rate for this clip's audio.
+     * Required when audioBuffer is not provided (for peaks-first rendering).
+     * When audioBuffer is present, this should match audioBuffer.sampleRate.
+     */
+    sampleRate: number;
+    /**
+     * Total duration of the source audio in samples.
+     * Required when audioBuffer is not provided (for trim bounds calculation).
+     * When audioBuffer is present, this should equal audioBuffer.length.
+     */
+    sourceDurationSamples: number;
     /** Optional fade in effect */
     fadeIn?: Fade;
     /** Optional fade out effect */
```
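The sample-count model above is the core invariant of the clip API. Below is a minimal sketch of converting between samples and seconds using only the fields declared in `AudioClip`; the literal values are hypothetical.

```ts
// Hypothetical clip literal using the AudioClip fields above; all
// positions/durations are integer sample counts.
const clip = {
  id: 'clip-1',
  startSample: 441_000,        // 10 s into the timeline at 44.1 kHz
  durationSamples: 220_500,    // plays 5 s of audio
  offsetSamples: 22_050,       // skips the first 0.5 s of the source
  sampleRate: 44_100,
  sourceDurationSamples: 1_323_000,
};

// Convert to seconds only at the edges, as the docs advise:
// seconds = samples / sampleRate
const startSeconds = clip.startSample / clip.sampleRate;        // 10
const durationSeconds = clip.durationSamples / clip.sampleRate; // 5
const endSeconds = startSeconds + durationSeconds;              // 15
```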
```diff
@@ -86,6 +105,13 @@ declare interface AudioClip {
     name?: string;
     /** Optional color for visual distinction */
     color?: string;
+    /**
+     * Pre-computed waveform data from waveform-data.js library.
+     * When provided, the library will use this instead of computing peaks from the audioBuffer.
+     * Supports resampling to different zoom levels and slicing for clip trimming.
+     * Load with: `const waveformData = await loadWaveformData('/path/to/peaks.dat')`
+     */
+    waveformData?: WaveformDataObject;
 }
 
 /**
```
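With the new optional `audioBuffer` and the `sampleRate`/`sourceDurationSamples` fields, a clip can be described from waveform peaks alone. A sketch follows, assuming the value returned by `loadWaveformData` (declared later in this file) satisfies the `WaveformDataObject` shape; the peaks URL is a placeholder.

```ts
import { loadWaveformData, type AudioClip } from '@waveform-playlist/browser';

const waveformData = await loadWaveformData('/peaks/vocals.dat');
const totalSamples = Math.round(waveformData.duration * waveformData.sample_rate);

// Peaks-first clip: renders immediately; audioBuffer can be attached
// later when decoding finishes (per the interface docs above).
const clip: AudioClip = {
  id: 'vocals-1',
  startSample: 0,
  durationSamples: totalSamples,
  offsetSamples: 0,
  sampleRate: waveformData.sample_rate,
  sourceDurationSamples: totalSamples,
  waveformData,
};
```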
```diff
@@ -99,9 +125,20 @@ export declare const AudioPosition: default_2.FC<{
 
 /**
  * Configuration for a single audio track to load
+ *
+ * Audio can be provided in three ways:
+ * 1. `src` - URL to fetch and decode (standard loading)
+ * 2. `audioBuffer` - Pre-loaded AudioBuffer (skip fetch/decode)
+ * 3. `waveformData` only - Peaks-first rendering (audio loads later)
+ *
+ * For peaks-first rendering, just provide `waveformData` - the sample rate
+ * and duration are derived from the waveform data automatically.
  */
 export declare interface AudioTrackConfig {
-
+    /** URL to audio file - used if audioBuffer not provided */
+    src?: string;
+    /** Pre-loaded AudioBuffer - skips fetch/decode if provided */
+    audioBuffer?: AudioBuffer;
     name?: string;
     muted?: boolean;
     soloed?: boolean;
```
```diff
@@ -114,6 +151,7 @@ export declare interface AudioTrackConfig {
     offset?: number;
     fadeIn?: Fade;
     fadeOut?: Fade;
+    waveformData?: WaveformDataObject;
 }
 
 /**
```
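`AudioTrackConfig` now accepts any of the three sources described in the comment above. A sketch of the three shapes; the pre-loaded buffer is a placeholder assumed to have been decoded elsewhere, and the URLs are illustrative.

```ts
import { loadWaveformData, type AudioTrackConfig } from '@waveform-playlist/browser';

// Placeholder: an AudioBuffer decoded elsewhere (e.g. via decodeAudioData).
declare const preloadedBuffer: AudioBuffer;

const preloadedPeaks = await loadWaveformData('/peaks/bass.dat');

const configs: AudioTrackConfig[] = [
  // 1. URL - fetched and decoded by the library
  { src: 'audio/vocals.mp3', name: 'Vocals' },
  // 2. Pre-loaded AudioBuffer - skips fetch/decode
  { audioBuffer: preloadedBuffer, name: 'Drums' },
  // 3. Peaks-first - renders from waveformData, audio loads later
  { waveformData: preloadedPeaks, name: 'Bass' },
];
```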
```diff
@@ -152,7 +190,7 @@ declare interface ClipPeaks {
 /**
  * Represents a track containing multiple audio clips
  */
-declare interface ClipTrack {
+export declare interface ClipTrack {
     /** Unique identifier for this track */
     id: string;
     /** Display name for this track */
```
```diff
@@ -206,6 +244,37 @@ export declare const DownloadAnnotationsButton: default_2.FC<{
     className?: string;
 }>;
 
+/**
+ * Hook for configuring @dnd-kit sensors for clip dragging
+ *
+ * Provides consistent drag activation behavior across all examples.
+ * Supports both desktop (immediate feedback) and mobile (delay-based) interactions.
+ */
+declare interface DragSensorOptions {
+    /**
+     * Enable mobile-optimized touch handling with delay-based activation.
+     * When true, uses TouchSensor with 250ms delay to distinguish drag from scroll.
+     * When false (default), uses PointerSensor with 1px activation for immediate feedback.
+     */
+    touchOptimized?: boolean;
+    /**
+     * Delay in milliseconds before touch drag activates (only when touchOptimized is true).
+     * Default: 250ms - long enough to distinguish from scroll intent
+     */
+    touchDelay?: number;
+    /**
+     * Distance tolerance during touch delay (only when touchOptimized is true).
+     * If finger moves more than this during delay, drag is cancelled.
+     * Default: 5px - allows slight finger movement
+     */
+    touchTolerance?: number;
+    /**
+     * Distance in pixels before mouse drag activates.
+     * Default: 1px for immediate feedback on desktop
+     */
+    mouseDistance?: number;
+}
+
 /**
  * Editable annotations checkbox that uses the playlist context
  * Uses split contexts to avoid re-rendering during animation
```
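`DragSensorOptions` feeds the reworked `useDragSensors` hook (updated later in this diff to accept these options). A sketch of wiring the returned sensors into @dnd-kit's `DndContext`; the wrapper component itself is hypothetical.

```tsx
import type { ReactNode } from 'react';
import { DndContext } from '@dnd-kit/core';
import { useDragSensors } from '@waveform-playlist/browser';

function ClipDragProvider({ children }: { children: ReactNode }) {
  // Press-and-hold (250 ms, 5 px tolerance) before a touch drag starts;
  // desktop pointers still activate after the default 1 px of movement.
  const sensors = useDragSensors({ touchOptimized: true, touchDelay: 250, touchTolerance: 5 });
  return <DndContext sensors={sensors}>{children}</DndContext>;
}
```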
```diff
@@ -328,7 +397,7 @@ export declare interface ExportWavButtonProps {
 /**
  * Simple fade configuration
  */
-declare interface Fade {
+export declare interface Fade {
     /** Duration of the fade in seconds */
     duration: number;
     /** Type of fade curve (default: 'linear') */
```
```diff
@@ -445,6 +514,10 @@ export declare function loadPeaksFromWaveformData(src: string, channelIndex?: nu
  */
 export declare function loadWaveformData(src: string): Promise<default_3>;
 
+export declare const LoopButton: default_2.FC<{
+    className?: string;
+}>;
+
 /**
  * Master volume control that uses the playlist context
  */
```
```diff
@@ -553,6 +626,10 @@ declare interface PlaylistControlsContextValue {
     setAnnotationsEditable: (enabled: boolean) => void;
     setAnnotations: (annotations: AnnotationData[]) => void;
     setActiveAnnotationId: (id: string | null) => void;
+    setLoopEnabled: (enabled: boolean) => void;
+    setLoopRegion: (start: number, end: number) => void;
+    setLoopRegionFromSelection: () => void;
+    clearLoopRegion: () => void;
 }
 
 declare interface PlaylistDataContextValue {
```
```diff
@@ -586,11 +663,14 @@ declare interface PlaylistStateContextValue {
     linkEndpoints: boolean;
     annotationsEditable: boolean;
     isAutomaticScroll: boolean;
+    isLoopEnabled: boolean;
     annotations: AnnotationData[];
     activeAnnotationId: string | null;
     selectionStart: number;
     selectionEnd: number;
     selectedTrackId: string | null;
+    loopStart: number;
+    loopEnd: number;
 }
 
 /**
```
```diff
@@ -611,6 +691,10 @@ export declare const SelectionTimeInputs: default_2.FC<{
     className?: string;
 }>;
 
+export declare const SetLoopRegionButton: default_2.FC<{
+    className?: string;
+}>;
+
 export declare const SkipBackwardButton: default_2.FC<{
     skipAmount?: number;
     className?: string;
```
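The loop feature spans several hunks: the `LoopButton` and `SetLoopRegionButton` components, `isLoopEnabled`/`loopStart`/`loopEnd` state, and the setters on the controls context. A sketch of a transport bar using the two new components; it assumes they are rendered inside `WaveformPlaylistProvider`, as the other context-backed controls are.

```tsx
import { LoopButton, SetLoopRegionButton } from '@waveform-playlist/browser';

function TransportControls() {
  return (
    <div className="transport">
      {/* Toggles isLoopEnabled in the playlist state */}
      <LoopButton className="loop-toggle" />
      {/* Copies the current selection into loopStart/loopEnd
          (setLoopRegionFromSelection on the controls context) */}
      <SetLoopRegionButton className="loop-set" />
    </div>
  );
}
```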
```diff
@@ -660,6 +744,11 @@ declare class TonePlayout {
     private gainToDb;
     init(): Promise<void>;
     addTrack(trackOptions: ToneTrackOptions): ToneTrack;
+    /**
+     * Apply solo muting after all tracks have been added.
+     * Call this after adding all tracks to ensure solo logic is applied correctly.
+     */
+    applyInitialSoloState(): void;
     removeTrack(trackId: string): void;
     getTrack(trackId: string): ToneTrack | undefined;
     play(when?: number, offset?: number, duration?: number): void;
```
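`applyInitialSoloState` formalizes the call order for solo handling. A sketch of that order follows; `TonePlayout` is not exported from the package, so this reflects internal usage, and `playout`/`trackOptionsList` are placeholders.

```ts
// Per the new JSDoc: add every track first, then apply solo muting once.
await playout.init();
for (const trackOptions of trackOptionsList) {
  playout.addTrack(trackOptions);
}
playout.applyInitialSoloState(); // applies solo muting now that all tracks exist
playout.play();                  // play(when?, offset?, duration?)
```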
```diff
@@ -735,17 +824,6 @@ declare type TrackClipPeaks = ClipPeaks[];
 
 export declare type TrackEffectsFunction = (graphEnd: Gain, masterGainNode: ToneAudioNode, isOffline: boolean) => void | (() => void);
 
-/**
- * Clip-Based Model Types
- *
- * These types support a professional multi-track editing model where:
- * - Each track can contain multiple audio clips
- * - Clips can be positioned anywhere on the timeline
- * - Clips have independent trim points (offset/duration)
- * - Gaps between clips are silent
- * - Clips can overlap (for crossfades)
- */
-
 /**
  * Generic effects function type for track-level audio processing.
  *
```
```diff
@@ -920,7 +998,8 @@ declare interface UseAnnotationKeyboardControlsOptions {
  * with a single clip per track. Supports custom positioning for multi-clip arrangements.
  *
  * @param configs - Array of audio track configurations
- * @
+ * @param options - Optional configuration for loading behavior
+ * @returns Object with tracks array, loading state, and progress info
  *
  * @example
  * ```typescript
```
```diff
@@ -930,25 +1009,48 @@ declare interface UseAnnotationKeyboardControlsOptions {
  *   { src: 'audio/drums.mp3', name: 'Drums' },
  * ]);
  *
- * //
- * const { tracks, loading,
- * { src: 'audio/
- * {
- *
+ * // Progressive loading (tracks appear as they load)
+ * const { tracks, loading, loadedCount, totalCount } = useAudioTracks(
+ *   [{ src: 'audio/vocals.mp3' }, { src: 'audio/drums.mp3' }],
+ *   { progressive: true }
+ * );
+ *
+ * // Pre-loaded AudioBuffer (skip fetch/decode)
+ * const { tracks } = useAudioTracks([
+ *   { audioBuffer: myPreloadedBuffer, name: 'Pre-loaded' },
  * ]);
  *
- *
+ * // Peaks-first rendering (instant visual, audio loads later)
+ * const { tracks } = useAudioTracks([
+ *   { waveformData: preloadedPeaks, name: 'Peaks Only' }, // Renders immediately
+ * ]);
+ *
+ * if (loading) return <div>Loading {loadedCount}/{totalCount}...</div>;
 * if (error) return <div>Error: {error}</div>;
  *
  * return <WaveformPlaylistProvider tracks={tracks}>...</WaveformPlaylistProvider>;
  * ```
  */
-export declare function useAudioTracks(configs: AudioTrackConfig[]): {
+export declare function useAudioTracks(configs: AudioTrackConfig[], options?: UseAudioTracksOptions): {
     tracks: ClipTrack[];
     loading: boolean;
     error: string | null;
+    loadedCount: number;
+    totalCount: number;
 };
 
+/**
+ * Options for useAudioTracks hook
+ */
+declare interface UseAudioTracksOptions {
+    /**
+     * When true, tracks are added to the playlist progressively as they load,
+     * rather than waiting for all tracks to finish loading.
+     * Default: false (wait for all tracks)
+     */
+    progressive?: boolean;
+}
+
 /**
  * Custom hook for handling clip drag operations (movement and trimming)
  *
```
```diff
@@ -1045,17 +1147,29 @@ declare interface UseClipSplittingResult {
     splitClipAt: (trackIndex: number, clipIndex: number, splitTime: number) => boolean;
 }
 
-/**
- * Hook for configuring @dnd-kit sensors for clip dragging
- *
- * Provides consistent drag activation behavior across all examples
- */
 /**
  * Returns configured sensors for @dnd-kit drag operations
  *
- * @
+ * @param options - Configuration options for drag sensors
+ * @returns Configured sensors appropriate for the interaction mode
+ *
+ * @example
+ * // Desktop-optimized (default)
+ * const sensors = useDragSensors();
+ *
+ * @example
+ * // Mobile-optimized with touch delay
+ * const sensors = useDragSensors({ touchOptimized: true });
+ *
+ * @example
+ * // Custom touch settings
+ * const sensors = useDragSensors({
+ *   touchOptimized: true,
+ *   touchDelay: 300,
+ *   touchTolerance: 8
+ * });
  */
-export declare function useDragSensors(): SensorDescriptor<SensorOptions>[];
+export declare function useDragSensors(options?: DragSensorOptions): SensorDescriptor<SensorOptions>[];
 
 /**
  * Hook for managing a dynamic chain of audio effects with real-time parameter updates
```
```diff
@@ -1310,6 +1424,56 @@ export declare const Waveform: default_2.FC<WaveformProps>;
  */
 declare type WaveformColor = string | WaveformGradient;
 
+/**
+ * Clip-Based Model Types
+ *
+ * These types support a professional multi-track editing model where:
+ * - Each track can contain multiple audio clips
+ * - Clips can be positioned anywhere on the timeline
+ * - Clips have independent trim points (offset/duration)
+ * - Gaps between clips are silent
+ * - Clips can overlap (for crossfades)
+ */
+
+/**
+ * WaveformData object from waveform-data.js library.
+ * Supports resample() and slice() for dynamic zoom levels.
+ * See: https://github.com/bbc/waveform-data.js
+ */
+declare interface WaveformDataObject {
+    /** Sample rate of the original audio */
+    readonly sample_rate: number;
+    /** Number of audio samples per pixel */
+    readonly scale: number;
+    /** Length of waveform data in pixels */
+    readonly length: number;
+    /** Bit depth (8 or 16) */
+    readonly bits: number;
+    /** Duration in seconds */
+    readonly duration: number;
+    /** Number of channels */
+    readonly channels: number;
+    /** Get channel data */
+    channel: (index: number) => {
+        min_array: () => number[];
+        max_array: () => number[];
+    };
+    /** Resample to different scale */
+    resample: (options: {
+        scale: number;
+    } | {
+        width: number;
+    }) => WaveformDataObject;
+    /** Slice a portion of the waveform */
+    slice: (options: {
+        startTime: number;
+        endTime: number;
+    } | {
+        startIndex: number;
+        endIndex: number;
+    }) => WaveformDataObject;
+}
+
 /**
  * Convert WaveformData to our internal Peaks format
  *
```
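The `WaveformDataObject` surface mirrors waveform-data.js. A sketch of resampling to a pixel width, slicing a trimmed region, and reading per-pixel min/max values; the URL and times are placeholders, and it assumes `loadWaveformData` resolves to this shape.

```ts
import { loadWaveformData } from '@waveform-playlist/browser';

const waveform = await loadWaveformData('/peaks/vocals.dat');

// Resample to fit an 800 px wide view (or pass { scale } for a zoom level).
const resampled = waveform.resample({ width: 800 });

// Slice out a clip's trimmed region, in seconds.
const clipView = resampled.slice({ startTime: 0.5, endTime: 5.5 });

// Per-pixel min/max values for channel 0, ready to draw.
const mins = clipView.channel(0).min_array();
const maxs = clipView.channel(0).max_array();
console.log(clipView.length, mins.length, maxs.length);
```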
```diff
@@ -1442,6 +1606,8 @@ declare interface WaveformPlaylistTheme {
     timescaleBackgroundColor: string;
     playheadColor: string;
     selectionColor: string;
+    loopRegionColor: string;
+    loopMarkerColor: string;
     clipHeaderBackgroundColor: string;
     clipHeaderBorderColor: string;
     clipHeaderTextColor: string;
```
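Two theme tokens cover the loop overlay and its boundary markers. A small sketch of the new fields alongside the existing `selectionColor`; only the field names come from `WaveformPlaylistTheme`, and how the theme object is supplied to the playlist is not shown in this hunk.

```ts
// Color values are placeholders; field names come from WaveformPlaylistTheme.
const themeOverrides = {
  selectionColor: 'rgba(0, 120, 255, 0.25)',
  loopRegionColor: 'rgba(255, 200, 0, 0.20)', // shaded loop region
  loopMarkerColor: '#ffc800',                 // loop boundary markers
};
```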
```diff
@@ -1491,6 +1657,12 @@ export declare interface WaveformProps {
     showClipHeaders?: boolean;
     interactiveClips?: boolean;
     showFades?: boolean;
+    /**
+     * Enable mobile-optimized touch interactions.
+     * When true, increases touch target sizes for clip boundaries.
+     * Use with useDragSensors({ touchOptimized: true }) for best results.
+     */
+    touchOptimized?: boolean;
     recordingState?: {
         isRecording: boolean;
         trackId: string;
```