@djangocfg/ui-nextjs 2.1.83 → 2.1.84

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36):
  1. package/package.json +4 -4
  2. package/src/tools/AudioPlayer/README.md +60 -166
  3. package/src/tools/AudioPlayer/components/HybridAudioPlayer.tsx +0 -35
  4. package/src/tools/AudioPlayer/components/HybridSimplePlayer.tsx +0 -11
  5. package/src/tools/AudioPlayer/components/ReactiveCover/AudioReactiveCover.tsx +5 -5
  6. package/src/tools/AudioPlayer/components/index.ts +4 -8
  7. package/src/tools/AudioPlayer/context/index.ts +1 -8
  8. package/src/tools/AudioPlayer/hooks/index.ts +6 -13
  9. package/src/tools/AudioPlayer/index.ts +25 -89
  10. package/src/tools/AudioPlayer/types/index.ts +10 -18
  11. package/src/tools/index.ts +51 -56
  12. package/src/tools/AudioPlayer/@refactoring3/00-IMPLEMENTATION-ROADMAP.md +0 -1146
  13. package/src/tools/AudioPlayer/@refactoring3/01-WAVESURFER-STREAMING-ANALYSIS.md +0 -611
  14. package/src/tools/AudioPlayer/@refactoring3/02-MEDIA-VIEWER-ANALYSIS.md +0 -560
  15. package/src/tools/AudioPlayer/@refactoring3/03-HYBRID-ARCHITECTURE-PROPOSAL.md +0 -769
  16. package/src/tools/AudioPlayer/@refactoring3/04-CRACKLING-ISSUE-DIAGNOSIS.md +0 -373
  17. package/src/tools/AudioPlayer/components/AudioEqualizer.tsx +0 -200
  18. package/src/tools/AudioPlayer/components/AudioPlayer.tsx +0 -236
  19. package/src/tools/AudioPlayer/components/AudioShortcutsPopover.tsx +0 -99
  20. package/src/tools/AudioPlayer/components/SimpleAudioPlayer.tsx +0 -278
  21. package/src/tools/AudioPlayer/components/VisualizationToggle.tsx +0 -64
  22. package/src/tools/AudioPlayer/context/AudioProvider.tsx +0 -376
  23. package/src/tools/AudioPlayer/context/selectors.ts +0 -96
  24. package/src/tools/AudioPlayer/hooks/useAudioAnalysis.ts +0 -110
  25. package/src/tools/AudioPlayer/hooks/useAudioHotkeys.ts +0 -150
  26. package/src/tools/AudioPlayer/hooks/useAudioSource.ts +0 -155
  27. package/src/tools/AudioPlayer/hooks/useSharedWebAudio.ts +0 -109
  28. package/src/tools/AudioPlayer/progressive/ProgressiveAudioPlayer.tsx +0 -303
  29. package/src/tools/AudioPlayer/progressive/WaveformCanvas.tsx +0 -381
  30. package/src/tools/AudioPlayer/progressive/index.ts +0 -40
  31. package/src/tools/AudioPlayer/progressive/peaks.ts +0 -234
  32. package/src/tools/AudioPlayer/progressive/types.ts +0 -179
  33. package/src/tools/AudioPlayer/progressive/useAudioElement.ts +0 -340
  34. package/src/tools/AudioPlayer/progressive/useProgressiveWaveform.ts +0 -267
  35. package/src/tools/AudioPlayer/types/audio.ts +0 -121
  36. package/src/tools/AudioPlayer/types/components.ts +0 -98
@@ -1,150 +0,0 @@
1
- 'use client';
2
-
3
- /**
4
- * useAudioHotkeys - Keyboard shortcuts for audio playback control.
5
- *
6
- * Uses useHotkey from @djangocfg/ui-nextjs.
7
- */
8
-
9
- import { useHotkey, useDeviceDetect } from '@djangocfg/ui-nextjs';
10
- import { useAudioControls, useAudioState } from '../context';
11
-
12
- // =============================================================================
13
- // TYPES
14
- // =============================================================================
15
-
16
- export interface AudioHotkeyOptions {
17
- /** Enable hotkeys (default: true) */
18
- enabled?: boolean;
19
- /** Skip duration in seconds (default: 10) */
20
- skipDuration?: number;
21
- /** Volume step (default: 0.1) */
22
- volumeStep?: number;
23
- }
24
-
25
- // =============================================================================
26
- // HOOK
27
- // =============================================================================
28
-
29
- export function useAudioHotkeys(options: AudioHotkeyOptions = {}) {
30
- const { enabled = true, skipDuration = 10, volumeStep = 0.1 } = options;
31
-
32
- const { togglePlay, skip, setVolume, toggleMute, toggleLoop, isReady } = useAudioControls();
33
- const { volume, duration, currentTime } = useAudioState();
34
- const device = useDeviceDetect();
35
-
36
- // Play/Pause - Space
37
- useHotkey(
38
- 'space',
39
- (e) => {
40
- e.preventDefault();
41
- togglePlay();
42
- },
43
- { enabled: enabled && isReady, description: 'Play/Pause' }
44
- );
45
-
46
- // Skip backward - ArrowLeft or J
47
- useHotkey(
48
- ['ArrowLeft', 'j'],
49
- () => skip(-skipDuration),
50
- { enabled: enabled && isReady, description: `Skip ${skipDuration}s backward` }
51
- );
52
-
53
- // Skip forward - ArrowRight or L
54
- useHotkey(
55
- ['ArrowRight', 'l'],
56
- () => skip(skipDuration),
57
- { enabled: enabled && isReady, description: `Skip ${skipDuration}s forward` }
58
- );
59
-
60
- // Volume up - ArrowUp
61
- useHotkey(
62
- 'ArrowUp',
63
- (e) => {
64
- e.preventDefault();
65
- setVolume(Math.min(1, volume + volumeStep));
66
- },
67
- { enabled: enabled && isReady, description: 'Volume up' }
68
- );
69
-
70
- // Volume down - ArrowDown
71
- useHotkey(
72
- 'ArrowDown',
73
- (e) => {
74
- e.preventDefault();
75
- setVolume(Math.max(0, volume - volumeStep));
76
- },
77
- { enabled: enabled && isReady, description: 'Volume down' }
78
- );
79
-
80
- // Mute/Unmute - M
81
- useHotkey(
82
- 'm',
83
- () => toggleMute(),
84
- { enabled: enabled && isReady, description: 'Mute/Unmute' }
85
- );
86
-
87
- // Loop/Repeat - L key (conflicts with skip forward, using Shift+L)
88
- useHotkey(
89
- 'shift+l',
90
- () => toggleLoop(),
91
- { enabled: enabled && isReady, description: 'Toggle loop' }
92
- );
93
-
94
- // Number keys 0-9 to seek to percentage
95
- useHotkey(
96
- ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],
97
- (e) => {
98
- if (!duration) return;
99
- const percent = parseInt(e.key, 10) / 10;
100
- const targetTime = duration * percent;
101
- skip(targetTime - currentTime);
102
- },
103
- { enabled: enabled && isReady, description: 'Seek to percentage' }
104
- );
105
-
106
- return {
107
- isMac: device.isMacOs,
108
- isEnabled: enabled && isReady,
109
- };
110
- }
111
-
112
- // =============================================================================
113
- // SHORTCUTS CONFIG (for display)
114
- // =============================================================================
115
-
116
- export interface ShortcutItem {
117
- keys: string[];
118
- label: string;
119
- }
120
-
121
- export interface ShortcutGroup {
122
- title: string;
123
- shortcuts: ShortcutItem[];
124
- }
125
-
126
- export const AUDIO_SHORTCUTS: ShortcutGroup[] = [
127
- {
128
- title: 'Playback',
129
- shortcuts: [
130
- { keys: ['Space'], label: 'Play/Pause' },
131
- { keys: ['←'], label: 'Skip 10s back' },
132
- { keys: ['→'], label: 'Skip 10s forward' },
133
- { keys: ['⇧', 'L'], label: 'Toggle loop' },
134
- ],
135
- },
136
- {
137
- title: 'Volume',
138
- shortcuts: [
139
- { keys: ['↑'], label: 'Volume up' },
140
- { keys: ['↓'], label: 'Volume down' },
141
- { keys: ['M'], label: 'Mute/Unmute' },
142
- ],
143
- },
144
- {
145
- title: 'Seek',
146
- shortcuts: [
147
- { keys: ['0-9'], label: 'Jump to 0-90%' },
148
- ],
149
- },
150
- ];
@@ -1,155 +0,0 @@
1
- 'use client';
2
-
3
- /**
4
- * useAudioSource - Handles audio source loading with optional prefetch
5
- *
6
- * For streaming URLs, WaveSurfer needs the complete file to enable seeking.
7
- * This hook fetches the URL as blob when prefetch is enabled.
8
- */
9
-
10
- import { useState, useEffect, useRef } from 'react';
11
- import type { AudioSource } from '../types';
12
- import { audioDebug } from '../utils/debug';
13
-
14
- export interface UseAudioSourceResult {
15
- /** The resolved URL (blob URL if prefetched, original URL otherwise) */
16
- url: string | null;
17
- /** Whether the source is currently being fetched */
18
- isLoading: boolean;
19
- /** Error message if fetch failed */
20
- error: string | null;
21
- /** Progress percentage (0-100) during fetch */
22
- progress: number;
23
- }
24
-
25
- export function useAudioSource(source: AudioSource): UseAudioSourceResult {
26
- const [url, setUrl] = useState<string | null>(null);
27
- const [isLoading, setIsLoading] = useState(false);
28
- const [error, setError] = useState<string | null>(null);
29
- const [progress, setProgress] = useState(0);
30
- const blobUrlRef = useRef<string | null>(null);
31
-
32
- useEffect(() => {
33
- // Cleanup previous blob URL
34
- if (blobUrlRef.current) {
35
- URL.revokeObjectURL(blobUrlRef.current);
36
- blobUrlRef.current = null;
37
- }
38
-
39
- // Reset state
40
- setError(null);
41
- setProgress(0);
42
-
43
- // No prefetch - use URL directly
44
- if (!source.prefetch) {
45
- audioDebug.debug('Using direct URL (prefetch disabled)', { uri: source.uri });
46
- setUrl(source.uri);
47
- setIsLoading(false);
48
- return;
49
- }
50
-
51
- // Prefetch enabled - fetch as blob
52
- const abortController = new AbortController();
53
- setIsLoading(true);
54
-
55
- const fetchAsBlob = async () => {
56
- try {
57
- audioDebug.info('Prefetching audio as blob', { uri: source.uri });
58
-
59
- const response = await fetch(source.uri, {
60
- signal: abortController.signal,
61
- headers: {
62
- // Request full file - some servers require Range header
63
- 'Range': 'bytes=0-',
64
- },
65
- });
66
-
67
- // Accept 200 OK or 206 Partial Content (response.ok covers both)
68
- if (!response.ok) {
69
- throw new Error(`HTTP ${response.status}: ${response.statusText}`);
70
- }
71
-
72
- // Get content length for progress tracking
73
- // For Range requests, use Content-Range header (format: "bytes 0-1234/5678")
74
- let totalBytes = 0;
75
- const contentRange = response.headers.get('Content-Range');
76
- if (contentRange) {
77
- const match = contentRange.match(/\/(\d+)$/);
78
- if (match) {
79
- totalBytes = parseInt(match[1], 10);
80
- }
81
- } else {
82
- const contentLength = response.headers.get('Content-Length');
83
- totalBytes = contentLength ? parseInt(contentLength, 10) : 0;
84
- }
85
-
86
- if (!response.body) {
87
- // Fallback for browsers without ReadableStream
88
- const blob = await response.blob();
89
- const blobUrl = URL.createObjectURL(blob);
90
- blobUrlRef.current = blobUrl;
91
- setUrl(blobUrl);
92
- setProgress(100);
93
- audioDebug.success('Audio prefetched (no stream)', { size: blob.size });
94
- return;
95
- }
96
-
97
- // Stream the response for progress tracking
98
- const reader = response.body.getReader();
99
- const chunks: ArrayBuffer[] = [];
100
- let receivedBytes = 0;
101
-
102
- while (true) {
103
- const { done, value } = await reader.read();
104
-
105
- if (done) break;
106
-
107
- // Convert Uint8Array to ArrayBuffer for Blob compatibility
108
- chunks.push(value.buffer.slice(value.byteOffset, value.byteOffset + value.byteLength));
109
- receivedBytes += value.length;
110
-
111
- if (totalBytes > 0) {
112
- setProgress(Math.round((receivedBytes / totalBytes) * 100));
113
- }
114
- }
115
-
116
- // Combine chunks into blob
117
- const blob = new Blob(chunks, { type: 'audio/mpeg' });
118
- const blobUrl = URL.createObjectURL(blob);
119
- blobUrlRef.current = blobUrl;
120
- setUrl(blobUrl);
121
- setProgress(100);
122
-
123
- audioDebug.success('Audio prefetched', {
124
- size: blob.size,
125
- sizeFormatted: `${(blob.size / 1024 / 1024).toFixed(2)} MB`,
126
- });
127
- } catch (err) {
128
- if (err instanceof Error && err.name === 'AbortError') {
129
- return; // Ignore abort errors
130
- }
131
-
132
- const errorMessage = err instanceof Error ? err.message : 'Failed to prefetch audio';
133
- audioDebug.error('Failed to prefetch audio', { error: errorMessage, uri: source.uri });
134
- setError(errorMessage);
135
-
136
- // Fallback to direct URL (may have seek issues)
137
- setUrl(source.uri);
138
- } finally {
139
- setIsLoading(false);
140
- }
141
- };
142
-
143
- fetchAsBlob();
144
-
145
- return () => {
146
- abortController.abort();
147
- if (blobUrlRef.current) {
148
- URL.revokeObjectURL(blobUrlRef.current);
149
- blobUrlRef.current = null;
150
- }
151
- };
152
- }, [source.uri, source.prefetch]);
153
-
154
- return { url, isLoading, error, progress };
155
- }
@@ -1,109 +0,0 @@
1
- 'use client';
2
-
3
- /**
4
- * useSharedWebAudio - Manages a shared Web Audio context and source node.
5
- *
6
- * This prevents the "InvalidStateError" from creating multiple MediaElementSourceNodes
7
- * for the same audio element. All analyzers share the same source.
8
- */
9
-
10
- import { useRef, useEffect, useCallback } from 'react';
11
- import type { SharedWebAudioContext } from '../types';
12
-
13
- export function useSharedWebAudio(audioElement: HTMLMediaElement | null): SharedWebAudioContext {
14
- const audioContextRef = useRef<AudioContext | null>(null);
15
- const sourceRef = useRef<MediaElementAudioSourceNode | null>(null);
16
- const connectedElementRef = useRef<HTMLMediaElement | null>(null);
17
- const analyserNodesRef = useRef<Set<AnalyserNode>>(new Set());
18
-
19
- // Initialize Web Audio on first play
20
- useEffect(() => {
21
- if (!audioElement) return;
22
-
23
- // Already connected to this element
24
- if (connectedElementRef.current === audioElement && audioContextRef.current) {
25
- return;
26
- }
27
-
28
- const initAudio = () => {
29
- try {
30
- if (!audioContextRef.current) {
31
- const AudioContextClass = window.AudioContext ||
32
- (window as unknown as { webkitAudioContext: typeof AudioContext }).webkitAudioContext;
33
- audioContextRef.current = new AudioContextClass();
34
- }
35
-
36
- const audioContext = audioContextRef.current;
37
-
38
- // Only create source node once per audio element
39
- if (connectedElementRef.current !== audioElement) {
40
- if (sourceRef.current) {
41
- try { sourceRef.current.disconnect(); } catch { /* ignore */ }
42
- }
43
-
44
- sourceRef.current = audioContext.createMediaElementSource(audioElement);
45
- // Single audio output path: source -> destination
46
- // Analysers connect to source in parallel for frequency reading only (no output)
47
- sourceRef.current.connect(audioContext.destination);
48
- connectedElementRef.current = audioElement;
49
- }
50
- } catch (error) {
51
- console.warn('[SharedWebAudio] Could not initialize:', error);
52
- }
53
- };
54
-
55
- const handlePlay = () => {
56
- initAudio();
57
- if (audioContextRef.current?.state === 'suspended') {
58
- audioContextRef.current.resume();
59
- }
60
- };
61
-
62
- audioElement.addEventListener('play', handlePlay);
63
- if (!audioElement.paused) {
64
- handlePlay();
65
- }
66
-
67
- return () => {
68
- audioElement.removeEventListener('play', handlePlay);
69
- };
70
- }, [audioElement]);
71
-
72
- // Create an analyser connected to the shared source
73
- const createAnalyser = useCallback((options?: { fftSize?: number; smoothing?: number }): AnalyserNode | null => {
74
- if (!audioContextRef.current || !sourceRef.current) return null;
75
-
76
- try {
77
- const analyser = audioContextRef.current.createAnalyser();
78
- analyser.fftSize = options?.fftSize ?? 256;
79
- analyser.smoothingTimeConstant = options?.smoothing ?? 0.85;
80
-
81
- // Connect analyser as passive listener (for frequency analysis only)
82
- // Audio path: source -> destination (already connected in initAudio)
83
- // Analysis path: source -> analyser (no output connection needed)
84
- // NOTE: Do NOT connect analyser to destination - it causes double audio routing and crackling!
85
- sourceRef.current.connect(analyser);
86
-
87
- analyserNodesRef.current.add(analyser);
88
- return analyser;
89
- } catch (error) {
90
- console.warn('[SharedWebAudio] Could not create analyser:', error);
91
- return null;
92
- }
93
- }, []);
94
-
95
- // Disconnect an analyser
96
- const disconnectAnalyser = useCallback((analyser: AnalyserNode) => {
97
- try {
98
- analyser.disconnect();
99
- analyserNodesRef.current.delete(analyser);
100
- } catch { /* ignore */ }
101
- }, []);
102
-
103
- return {
104
- audioContext: audioContextRef.current,
105
- sourceNode: sourceRef.current,
106
- createAnalyser,
107
- disconnectAnalyser,
108
- };
109
- }