audio-channel-queue 1.12.0 → 1.12.1-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/core.js +3 -0
- package/dist/errors.js +0 -40
- package/dist/index.d.ts +2 -1
- package/dist/index.js +16 -2
- package/dist/types.d.ts +37 -0
- package/dist/volume.d.ts +15 -0
- package/dist/volume.js +130 -15
- package/dist/web-audio.d.ts +156 -0
- package/dist/web-audio.js +327 -0
- package/package.json +1 -1
- package/src/core.ts +11 -1
- package/src/errors.ts +1 -49
- package/src/index.ts +20 -1
- package/src/types.ts +40 -0
- package/src/volume.ts +157 -13
- package/src/web-audio.ts +331 -0
package/dist/core.js
CHANGED
@@ -244,6 +244,8 @@ const queueAudio = (audioUrl_1, ...args_1) => __awaiter(void 0, [audioUrl_1, ...
     (0, errors_1.setupAudioErrorHandling)(audio, channelNumber, validatedUrl, (error) => __awaiter(void 0, void 0, void 0, function* () {
         yield (0, errors_1.handleAudioError)(audio, channelNumber, validatedUrl, error);
     }));
+    // Initialize Web Audio API support if needed
+    yield (0, volume_1.initializeWebAudioForAudio)(audio, channelNumber);
     // Apply options if provided
     if (options) {
         if (typeof options.loop === 'boolean') {
@@ -378,6 +380,7 @@ const playAudioQueue = (channelNumber) => __awaiter(void 0, void 0, void 0, func
         // For non-looping audio, remove from queue and play next
         currentAudio.pause();
         (0, events_1.cleanupProgressTracking)(currentAudio, channelNumber, info_1.audioChannels);
+        (0, volume_1.cleanupWebAudioForAudio)(currentAudio, channelNumber);
         channel.queue.shift();
         channel.isPaused = false; // Reset pause state
         // Restore volume levels AFTER removing audio from queue
package/dist/errors.js
CHANGED
@@ -56,7 +56,6 @@ let globalErrorRecovery = {
     showUserFeedback: false
 };
 const retryAttempts = new WeakMap();
-const loadTimeouts = new WeakMap();
 /**
  * Subscribes to audio error events for a specific channel
  * @param channelNumber - The channel number to listen to (defaults to 0)
@@ -306,37 +305,9 @@ const setupAudioErrorHandling = (audio, channelNumber, originalUrl, onError) =>
     const channel = info_1.audioChannels[channelNumber];
     if (!channel)
         return;
-    // Set up loading timeout with test environment compatibility
-    let timeoutId;
-    if (typeof setTimeout !== 'undefined') {
-        timeoutId = setTimeout(() => {
-            if (audio.networkState === HTMLMediaElement.NETWORK_LOADING) {
-                const timeoutError = new Error(`Audio loading timeout after ${globalRetryConfig.timeoutMs}ms`);
-                (0, exports.handleAudioError)(audio, channelNumber, originalUrl, timeoutError);
-            }
-        }, globalRetryConfig.timeoutMs);
-        loadTimeouts.set(audio, timeoutId);
-    }
-    // Clear timeout when metadata loads successfully
-    const handleLoadSuccess = () => {
-        if (typeof setTimeout !== 'undefined') {
-            const timeoutId = loadTimeouts.get(audio);
-            if (timeoutId) {
-                clearTimeout(timeoutId);
-                loadTimeouts.delete(audio);
-            }
-        }
-    };
     // Handle various error events
     const handleError = (_event) => {
         var _a;
-        if (typeof setTimeout !== 'undefined') {
-            const timeoutId = loadTimeouts.get(audio);
-            if (timeoutId) {
-                clearTimeout(timeoutId);
-                loadTimeouts.delete(audio);
-            }
-        }
         const error = new Error(`Audio loading failed: ${((_a = audio.error) === null || _a === void 0 ? void 0 : _a.message) || 'Unknown error'}`);
         (0, exports.handleAudioError)(audio, channelNumber, originalUrl, error);
     };
@@ -352,8 +323,6 @@ const setupAudioErrorHandling = (audio, channelNumber, originalUrl, onError) =>
     audio.addEventListener('error', handleError);
     audio.addEventListener('abort', handleAbort);
     audio.addEventListener('stalled', handleStall);
-    audio.addEventListener('loadedmetadata', handleLoadSuccess);
-    audio.addEventListener('canplay', handleLoadSuccess);
     // Custom play error handling
     if (onError) {
         const originalPlay = audio.play.bind(audio);
@@ -451,19 +420,10 @@ exports.handleAudioError = handleAudioError;
 const createProtectedAudioElement = (url, channelNumber) => __awaiter(void 0, void 0, void 0, function* () {
     const audio = new Audio();
     return new Promise((resolve, reject) => {
-        const cleanup = () => {
-            const timeoutId = loadTimeouts.get(audio);
-            if (timeoutId) {
-                clearTimeout(timeoutId);
-                loadTimeouts.delete(audio);
-            }
-        };
         const handleSuccess = () => {
-            cleanup();
             resolve(audio);
         };
         const handleError = (error) => {
-            cleanup();
             reject(error);
         };
         // Set up error handling
package/dist/index.d.ts
CHANGED
@@ -8,8 +8,9 @@ export { clearQueueAfterCurrent, getQueueItemInfo, getQueueLength, removeQueuedI
 export { getErrorRecovery, getRetryConfig, offAudioError, onAudioError, retryFailedAudio, setErrorRecovery, setRetryConfig } from './errors';
 export { getAllChannelsPauseState, isChannelPaused, pauseAllChannels, pauseAllWithFade, pauseChannel, pauseWithFade, resumeAllChannels, resumeAllWithFade, resumeChannel, resumeWithFade, togglePauseAllChannels, togglePauseAllWithFade, togglePauseChannel, togglePauseWithFade } from './pause';
 export { cancelAllVolumeTransitions, cancelVolumeTransition, clearVolumeDucking, getAllChannelsVolume, getChannelVolume, getFadeConfig, setAllChannelsVolume, setChannelVolume, setVolumeDucking, transitionVolume } from './volume';
+export { cleanupWebAudioNodes, createWebAudioNodes, getAudioContext, getWebAudioConfig, getWebAudioSupport, getWebAudioVolume, isIOSDevice, isWebAudioSupported, resumeAudioContext, setWebAudioConfig, setWebAudioVolume, shouldUseWebAudio } from './web-audio';
 export { getAllChannelsInfo, getCurrentAudioInfo, getQueueSnapshot, offAudioComplete, offAudioPause, offAudioProgress, offAudioResume, offAudioStart, offQueueChange, onAudioComplete, onAudioPause, onAudioProgress, onAudioResume, onAudioStart, onQueueChange } from './info';
 export { audioChannels } from './info';
 export { cleanWebpackFilename, createQueueSnapshot, extractFileName, getAudioInfoFromElement, sanitizeForDisplay, validateAudioUrl } from './utils';
-export type { AudioCompleteCallback, AudioCompleteInfo, AudioErrorCallback, AudioErrorInfo, AudioInfo, AudioPauseCallback, AudioQueueOptions, AudioResumeCallback, AudioStartCallback, AudioStartInfo, ChannelFadeState, ErrorRecoveryOptions, ExtendedAudioQueueChannel, FadeConfig, ProgressCallback, QueueChangeCallback, QueueItem, QueueManipulationResult, QueueSnapshot, RetryConfig, VolumeConfig,
+export type { AudioCompleteCallback, AudioCompleteInfo, AudioErrorCallback, AudioErrorInfo, AudioInfo, AudioPauseCallback, AudioQueueOptions, AudioResumeCallback, AudioStartCallback, AudioStartInfo, ChannelFadeState, ErrorRecoveryOptions, ExtendedAudioQueueChannel, FadeConfig, ProgressCallback, QueueChangeCallback, QueueConfig, QueueItem, QueueManipulationResult, QueueSnapshot, RetryConfig, VolumeConfig, WebAudioConfig, WebAudioNodeSet, WebAudioSupport } from './types';
 export { AudioErrorType, EasingType, FadeType, MAX_CHANNELS, TimerType, GLOBAL_PROGRESS_KEY } from './types';
package/dist/index.js
CHANGED
@@ -5,8 +5,8 @@
  * volume management with ducking, progress tracking, and comprehensive event system
  */
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
-exports.GLOBAL_PROGRESS_KEY = exports.TimerType = exports.MAX_CHANNELS = exports.FadeType = exports.EasingType = exports.AudioErrorType = exports.validateAudioUrl = exports.sanitizeForDisplay = exports.getAudioInfoFromElement = exports.extractFileName = exports.createQueueSnapshot = exports.cleanWebpackFilename = exports.audioChannels = exports.onQueueChange = exports.onAudioStart = exports.onAudioResume = exports.onAudioProgress = exports.onAudioPause = exports.onAudioComplete = exports.offQueueChange = exports.offAudioStart = exports.offAudioResume = exports.offAudioProgress = exports.offAudioPause = exports.offAudioComplete = exports.getQueueSnapshot = void 0;
+exports.createWebAudioNodes = exports.cleanupWebAudioNodes = exports.transitionVolume = exports.setVolumeDucking = exports.setChannelVolume = exports.setAllChannelsVolume = exports.getFadeConfig = exports.getChannelVolume = exports.getAllChannelsVolume = exports.clearVolumeDucking = exports.cancelVolumeTransition = exports.cancelAllVolumeTransitions = exports.togglePauseWithFade = exports.togglePauseChannel = exports.togglePauseAllWithFade = exports.togglePauseAllChannels = exports.resumeWithFade = exports.resumeChannel = exports.resumeAllWithFade = exports.resumeAllChannels = exports.pauseWithFade = exports.pauseChannel = exports.pauseAllWithFade = exports.pauseAllChannels = exports.isChannelPaused = exports.getAllChannelsPauseState = exports.setRetryConfig = exports.setErrorRecovery = exports.retryFailedAudio = exports.onAudioError = exports.offAudioError = exports.getRetryConfig = exports.getErrorRecovery = exports.swapQueueItems = exports.reorderQueue = exports.removeQueuedItem = exports.getQueueLength = exports.getQueueItemInfo = exports.clearQueueAfterCurrent = exports.setChannelQueueLimit = exports.getQueueConfig = exports.setQueueConfig = exports.destroyAllChannels = exports.destroyChannel = exports.playAudioQueue = exports.stopAllAudio = exports.stopAllAudioInChannel = exports.stopCurrentAudioInChannel = exports.queueAudioPriority = exports.queueAudio = void 0;
+exports.GLOBAL_PROGRESS_KEY = exports.TimerType = exports.MAX_CHANNELS = exports.FadeType = exports.EasingType = exports.AudioErrorType = exports.validateAudioUrl = exports.sanitizeForDisplay = exports.getAudioInfoFromElement = exports.extractFileName = exports.createQueueSnapshot = exports.cleanWebpackFilename = exports.audioChannels = exports.onQueueChange = exports.onAudioStart = exports.onAudioResume = exports.onAudioProgress = exports.onAudioPause = exports.onAudioComplete = exports.offQueueChange = exports.offAudioStart = exports.offAudioResume = exports.offAudioProgress = exports.offAudioPause = exports.offAudioComplete = exports.getQueueSnapshot = exports.getCurrentAudioInfo = exports.getAllChannelsInfo = exports.shouldUseWebAudio = exports.setWebAudioVolume = exports.setWebAudioConfig = exports.resumeAudioContext = exports.isWebAudioSupported = exports.isIOSDevice = exports.getWebAudioVolume = exports.getWebAudioSupport = exports.getWebAudioConfig = exports.getAudioContext = void 0;
 // Core queue management functions
 var core_1 = require("./core");
 Object.defineProperty(exports, "queueAudio", { enumerable: true, get: function () { return core_1.queueAudio; } });
@@ -65,6 +65,20 @@ Object.defineProperty(exports, "setAllChannelsVolume", { enumerable: true, get:
 Object.defineProperty(exports, "setChannelVolume", { enumerable: true, get: function () { return volume_1.setChannelVolume; } });
 Object.defineProperty(exports, "setVolumeDucking", { enumerable: true, get: function () { return volume_1.setVolumeDucking; } });
 Object.defineProperty(exports, "transitionVolume", { enumerable: true, get: function () { return volume_1.transitionVolume; } });
+// Web Audio API support functions
+var web_audio_1 = require("./web-audio");
+Object.defineProperty(exports, "cleanupWebAudioNodes", { enumerable: true, get: function () { return web_audio_1.cleanupWebAudioNodes; } });
+Object.defineProperty(exports, "createWebAudioNodes", { enumerable: true, get: function () { return web_audio_1.createWebAudioNodes; } });
+Object.defineProperty(exports, "getAudioContext", { enumerable: true, get: function () { return web_audio_1.getAudioContext; } });
+Object.defineProperty(exports, "getWebAudioConfig", { enumerable: true, get: function () { return web_audio_1.getWebAudioConfig; } });
+Object.defineProperty(exports, "getWebAudioSupport", { enumerable: true, get: function () { return web_audio_1.getWebAudioSupport; } });
+Object.defineProperty(exports, "getWebAudioVolume", { enumerable: true, get: function () { return web_audio_1.getWebAudioVolume; } });
+Object.defineProperty(exports, "isIOSDevice", { enumerable: true, get: function () { return web_audio_1.isIOSDevice; } });
+Object.defineProperty(exports, "isWebAudioSupported", { enumerable: true, get: function () { return web_audio_1.isWebAudioSupported; } });
+Object.defineProperty(exports, "resumeAudioContext", { enumerable: true, get: function () { return web_audio_1.resumeAudioContext; } });
+Object.defineProperty(exports, "setWebAudioConfig", { enumerable: true, get: function () { return web_audio_1.setWebAudioConfig; } });
+Object.defineProperty(exports, "setWebAudioVolume", { enumerable: true, get: function () { return web_audio_1.setWebAudioVolume; } });
+Object.defineProperty(exports, "shouldUseWebAudio", { enumerable: true, get: function () { return web_audio_1.shouldUseWebAudio; } });
 // Audio information and progress tracking functions
 var info_1 = require("./info");
 Object.defineProperty(exports, "getAllChannelsInfo", { enumerable: true, get: function () { return info_1.getAllChannelsInfo; } });
package/dist/types.d.ts
CHANGED
@@ -260,6 +260,39 @@ export interface ErrorRecoveryOptions {
  * Callback function type for audio error events
  */
 export type AudioErrorCallback = (errorInfo: AudioErrorInfo) => void;
+/**
+ * Web Audio API configuration options
+ */
+export interface WebAudioConfig {
+    /** Whether to automatically use Web Audio API on iOS devices */
+    autoDetectIOS: boolean;
+    /** Whether Web Audio API support is enabled */
+    enabled: boolean;
+    /** Whether to force Web Audio API usage on all devices */
+    forceWebAudio: boolean;
+}
+/**
+ * Web Audio API support information
+ */
+export interface WebAudioSupport {
+    /** Whether Web Audio API is available in the current environment */
+    available: boolean;
+    /** Whether the current device is iOS */
+    isIOS: boolean;
+    /** Whether Web Audio API is currently being used */
+    usingWebAudio: boolean;
+    /** Reason for current Web Audio API usage state */
+    reason: string;
+}
+/**
+ * Web Audio API node set for audio element control
+ */
+export interface WebAudioNodeSet {
+    /** Gain node for volume control */
+    gainNode: GainNode;
+    /** Media element source node */
+    sourceNode: MediaElementAudioSourceNode;
+}
 /**
  * Extended audio channel with comprehensive queue management, callback support, and state tracking
  */
@@ -292,6 +325,10 @@ export interface ExtendedAudioQueueChannel {
     retryConfig?: RetryConfig;
     /** Current volume level for the channel (0-1) */
     volume: number;
+    /** Web Audio API context for this channel */
+    webAudioContext?: AudioContext;
+    /** Map of Web Audio API nodes for each audio element */
+    webAudioNodes?: Map<HTMLAudioElement, WebAudioNodeSet>;
 }
 /**
  * Easing function types for smooth volume transitions and animations
package/dist/volume.d.ts
CHANGED
@@ -28,6 +28,7 @@ export declare const getFadeConfig: (fadeType: FadeType) => FadeConfig;
 export declare const transitionVolume: (channelNumber: number, targetVolume: number, duration?: number, easing?: EasingType) => Promise<void>;
 /**
  * Sets the volume for a specific channel with optional smooth transition
+ * Automatically uses Web Audio API on iOS devices for enhanced volume control
  * @param channelNumber - The channel number to set volume for
  * @param volume - Volume level (0-1)
  * @param transitionDuration - Optional transition duration in milliseconds
@@ -120,3 +121,17 @@ export declare const cancelVolumeTransition: (channelNumber: number) => void;
  * @internal
  */
 export declare const cancelAllVolumeTransitions: () => void;
+/**
+ * Initializes Web Audio API nodes for a new audio element
+ * @param audio - The audio element to initialize nodes for
+ * @param channelNumber - The channel number this audio belongs to
+ * @internal
+ */
+export declare const initializeWebAudioForAudio: (audio: HTMLAudioElement, channelNumber: number) => Promise<void>;
+/**
+ * Cleans up Web Audio API nodes for an audio element
+ * @param audio - The audio element to clean up nodes for
+ * @param channelNumber - The channel number this audio belongs to
+ * @internal
+ */
+export declare const cleanupWebAudioForAudio: (audio: HTMLAudioElement, channelNumber: number) => void;
package/dist/volume.js
CHANGED
@@ -12,9 +12,10 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
 });
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.cancelAllVolumeTransitions = exports.cancelVolumeTransition = exports.restoreVolumeLevels = exports.applyVolumeDucking = exports.clearVolumeDucking = exports.setVolumeDucking = exports.setAllChannelsVolume = exports.getAllChannelsVolume = exports.getChannelVolume = exports.setChannelVolume = exports.transitionVolume = exports.getFadeConfig = void 0;
+exports.cleanupWebAudioForAudio = exports.initializeWebAudioForAudio = exports.cancelAllVolumeTransitions = exports.cancelVolumeTransition = exports.restoreVolumeLevels = exports.applyVolumeDucking = exports.clearVolumeDucking = exports.setVolumeDucking = exports.setAllChannelsVolume = exports.getAllChannelsVolume = exports.getChannelVolume = exports.setChannelVolume = exports.transitionVolume = exports.getFadeConfig = void 0;
 const types_1 = require("./types");
 const info_1 = require("./info");
+const web_audio_1 = require("./web-audio");
 // Store active volume transitions to handle interruptions
 const activeTransitions = new Map();
 // Track which timer type was used for each channel
@@ -119,7 +120,7 @@ const transitionVolume = (channelNumber_1, targetVolume_1, ...args_1) => __await
     const startTime = performance.now();
     const easingFn = easingFunctions[easing];
     return new Promise((resolve) => {
-        const updateVolume = () => {
+        const updateVolume = () => __awaiter(void 0, void 0, void 0, function* () {
             const elapsed = performance.now() - startTime;
             const progress = Math.min(elapsed / duration, 1);
             const easedProgress = easingFn(progress);
@@ -128,7 +129,7 @@ const transitionVolume = (channelNumber_1, targetVolume_1, ...args_1) => __await
             // Apply volume to both channel config and current audio
             channel.volume = clampedVolume;
             if (channel.queue.length > 0) {
-                channel.queue[0]
+                yield setVolumeForAudio(channel.queue[0], clampedVolume, channelNumber);
             }
             if (progress >= 1) {
                 // Transition complete
@@ -139,24 +140,25 @@ const transitionVolume = (channelNumber_1, targetVolume_1, ...args_1) => __await
             else {
                 // Use requestAnimationFrame in browser, setTimeout in tests
                 if (typeof requestAnimationFrame !== 'undefined') {
-                    const rafId = requestAnimationFrame(updateVolume);
+                    const rafId = requestAnimationFrame(() => updateVolume());
                     activeTransitions.set(channelNumber, rafId);
                     timerTypes.set(channelNumber, types_1.TimerType.RequestAnimationFrame);
                 }
                 else {
                     // In test environment, use shorter intervals
-                    const timeoutId = setTimeout(updateVolume, 1);
+                    const timeoutId = setTimeout(() => updateVolume(), 1);
                     activeTransitions.set(channelNumber, timeoutId);
                     timerTypes.set(channelNumber, types_1.TimerType.Timeout);
                 }
             }
-        };
+        });
         updateVolume();
     });
 });
 exports.transitionVolume = transitionVolume;
 /**
  * Sets the volume for a specific channel with optional smooth transition
+ * Automatically uses Web Audio API on iOS devices for enhanced volume control
  * @param channelNumber - The channel number to set volume for
  * @param volume - Volume level (0-1)
  * @param transitionDuration - Optional transition duration in milliseconds
@@ -192,17 +194,21 @@ const setChannelVolume = (channelNumber, volume, transitionDuration, easing) =>
         };
         return;
     }
+    const channel = info_1.audioChannels[channelNumber];
+    // Initialize Web Audio API if needed and supported
+    if ((0, web_audio_1.shouldUseWebAudio)() && !channel.webAudioContext) {
+        yield initializeWebAudioForChannel(channelNumber);
+    }
     if (transitionDuration && transitionDuration > 0) {
         // Smooth transition
         yield (0, exports.transitionVolume)(channelNumber, clampedVolume, transitionDuration, easing);
     }
     else {
         // Instant change (backward compatibility)
-
-        const channel = info_1.audioChannels[channelNumber];
+        channel.volume = clampedVolume;
         if (channel.queue.length > 0) {
             const currentAudio = channel.queue[0];
-            currentAudio
+            yield setVolumeForAudio(currentAudio, clampedVolume, channelNumber);
         }
     }
 });
@@ -333,13 +339,13 @@ const applyVolumeDucking = (activeChannelNumber) => __awaiter(void 0, void 0, vo
             // This is the priority channel - set to priority volume
             // Only change audio volume, preserve channel.volume as desired volume
             const currentAudio = channel.queue[0];
-            transitionPromises.push(transitionAudioVolume(currentAudio, config.priorityVolume, duration, easing));
+            transitionPromises.push(transitionAudioVolume(currentAudio, config.priorityVolume, duration, easing, channelNumber));
         }
         else {
             // This is a background channel - duck it
             // Only change audio volume, preserve channel.volume as desired volume
             const currentAudio = channel.queue[0];
-            transitionPromises.push(transitionAudioVolume(currentAudio, config.duckingVolume, duration, easing));
+            transitionPromises.push(transitionAudioVolume(currentAudio, config.duckingVolume, duration, easing, channelNumber));
         }
     });
     // Wait for all transitions to complete
@@ -376,7 +382,7 @@ const restoreVolumeLevels = (stoppedChannelNumber) => __awaiter(void 0, void 0,
         const targetVolume = (_c = channel.volume) !== null && _c !== void 0 ? _c : 1.0;
         // Only transition the audio element volume, keep channel.volume as the desired volume
         const currentAudio = channel.queue[0];
-        transitionPromises.push(transitionAudioVolume(currentAudio, targetVolume, duration, easing));
+        transitionPromises.push(transitionAudioVolume(currentAudio, targetVolume, duration, easing, channelNumber));
     });
     // Wait for all transitions to complete
     yield Promise.all(transitionPromises);
@@ -385,14 +391,31 @@ exports.restoreVolumeLevels = restoreVolumeLevels;
 /**
  * Transitions only the audio element volume without affecting channel.volume
  * This is used for ducking/restoration where channel.volume represents desired volume
+ * Uses Web Audio API when available for enhanced volume control
  * @param audio - The audio element to transition
  * @param targetVolume - Target volume level (0-1)
  * @param duration - Transition duration in milliseconds
  * @param easing - Easing function type
+ * @param channelNumber - The channel number this audio belongs to (for Web Audio API)
  * @returns Promise that resolves when transition completes
  * @internal
  */
-const transitionAudioVolume = (audio_1, targetVolume_1, ...args_1) => __awaiter(void 0, [audio_1, targetVolume_1, ...args_1], void 0, function* (audio, targetVolume, duration = 250, easing = types_1.EasingType.EaseOut) {
+const transitionAudioVolume = (audio_1, targetVolume_1, ...args_1) => __awaiter(void 0, [audio_1, targetVolume_1, ...args_1], void 0, function* (audio, targetVolume, duration = 250, easing = types_1.EasingType.EaseOut, channelNumber) {
+    // Try to use Web Audio API if available and channel number is provided
+    if (channelNumber !== undefined) {
+        const channel = info_1.audioChannels[channelNumber];
+        if ((channel === null || channel === void 0 ? void 0 : channel.webAudioContext) && channel.webAudioNodes) {
+            const nodes = channel.webAudioNodes.get(audio);
+            if (nodes) {
+                // Use Web Audio API for smooth transitions
+                (0, web_audio_1.setWebAudioVolume)(nodes.gainNode, targetVolume, duration);
+                // Also update the audio element's volume property for consistency
+                audio.volume = targetVolume;
+                return;
+            }
+        }
+    }
+    // Fallback to standard HTMLAudioElement volume control with manual transition
     const startVolume = audio.volume;
     const volumeDelta = targetVolume - startVolume;
     // If no change needed, resolve immediately
@@ -401,7 +424,7 @@ const transitionAudioVolume = (audio_1, targetVolume_1, ...args_1) => __awaiter(
     }
     // Handle zero or negative duration - instant change
     if (duration <= 0) {
-        audio.volume =
+        audio.volume = targetVolume;
         return Promise.resolve();
     }
     const startTime = performance.now();
@@ -424,7 +447,8 @@ const transitionAudioVolume = (audio_1, targetVolume_1, ...args_1) => __awaiter(
             requestAnimationFrame(updateVolume);
         }
         else {
-
+            // In test environment, use longer intervals to prevent stack overflow
+            setTimeout(updateVolume, 16);
         }
     };
@@ -467,3 +491,94 @@ const cancelAllVolumeTransitions = () => {
     });
 };
 exports.cancelAllVolumeTransitions = cancelAllVolumeTransitions;
+/**
+ * Initializes Web Audio API for a specific channel
+ * @param channelNumber - The channel number to initialize Web Audio for
+ * @internal
+ */
+const initializeWebAudioForChannel = (channelNumber) => __awaiter(void 0, void 0, void 0, function* () {
+    const channel = info_1.audioChannels[channelNumber];
+    if (!channel || channel.webAudioContext)
+        return;
+    const audioContext = (0, web_audio_1.getAudioContext)();
+    if (!audioContext) {
+        throw new Error('AudioContext creation failed');
+    }
+    // Resume audio context if needed (for autoplay policy)
+    yield (0, web_audio_1.resumeAudioContext)(audioContext);
+    channel.webAudioContext = audioContext;
+    channel.webAudioNodes = new Map();
+    // Initialize Web Audio nodes for existing audio elements
+    for (const audio of channel.queue) {
+        const nodes = (0, web_audio_1.createWebAudioNodes)(audio, audioContext);
+        if (!nodes) {
+            throw new Error('Node creation failed');
+        }
+        channel.webAudioNodes.set(audio, nodes);
+        // Set initial volume to match channel volume
+        nodes.gainNode.gain.value = channel.volume;
+    }
+});
+/**
+ * Sets volume for an audio element using the appropriate method (Web Audio API or standard)
+ * @param audio - The audio element to set volume for
+ * @param volume - Volume level (0-1)
+ * @param channelNumber - The channel number this audio belongs to
+ * @param transitionDuration - Optional transition duration in milliseconds
+ * @internal
+ */
+const setVolumeForAudio = (audio, volume, channelNumber, transitionDuration) => __awaiter(void 0, void 0, void 0, function* () {
+    const channel = info_1.audioChannels[channelNumber];
+    // Use Web Audio API if available and initialized
+    if ((channel === null || channel === void 0 ? void 0 : channel.webAudioContext) && channel.webAudioNodes) {
+        const nodes = channel.webAudioNodes.get(audio);
+        if (nodes) {
+            (0, web_audio_1.setWebAudioVolume)(nodes.gainNode, volume, transitionDuration);
+            return;
+        }
+    }
+    // Fallback to standard HTMLAudioElement volume control
+    audio.volume = volume;
+});
+/**
+ * Initializes Web Audio API nodes for a new audio element
+ * @param audio - The audio element to initialize nodes for
+ * @param channelNumber - The channel number this audio belongs to
+ * @internal
+ */
+const initializeWebAudioForAudio = (audio, channelNumber) => __awaiter(void 0, void 0, void 0, function* () {
+    const channel = info_1.audioChannels[channelNumber];
+    if (!channel)
+        return;
+    // Initialize Web Audio API for the channel if needed
+    if ((0, web_audio_1.shouldUseWebAudio)() && !channel.webAudioContext) {
+        yield initializeWebAudioForChannel(channelNumber);
+    }
+    // Create nodes for this specific audio element
+    if (channel.webAudioContext && channel.webAudioNodes && !channel.webAudioNodes.has(audio)) {
+        const nodes = (0, web_audio_1.createWebAudioNodes)(audio, channel.webAudioContext);
+        if (nodes) {
+            channel.webAudioNodes.set(audio, nodes);
+            // Set initial volume to match channel volume
+            nodes.gainNode.gain.value = channel.volume;
+        }
+    }
+});
+exports.initializeWebAudioForAudio = initializeWebAudioForAudio;
+/**
+ * Cleans up Web Audio API nodes for an audio element
+ * @param audio - The audio element to clean up nodes for
+ * @param channelNumber - The channel number this audio belongs to
+ * @internal
+ */
+const cleanupWebAudioForAudio = (audio, channelNumber) => {
+    const channel = info_1.audioChannels[channelNumber];
+    if (!(channel === null || channel === void 0 ? void 0 : channel.webAudioNodes))
        return;
+    const nodes = channel.webAudioNodes.get(audio);
+    if (nodes) {
+        (0, web_audio_1.cleanupWebAudioNodes)(nodes);
+        channel.webAudioNodes.delete(audio);
+    }
+};
+exports.cleanupWebAudioForAudio = cleanupWebAudioForAudio;
package/dist/web-audio.d.ts
ADDED

@@ -0,0 +1,156 @@
+/**
+ * @fileoverview Web Audio API support for enhanced volume control on iOS and other platforms
+ */
+import { WebAudioConfig, WebAudioSupport, WebAudioNodeSet } from './types';
+/**
+ * Detects if the current device is iOS
+ * @returns True if the device is iOS, false otherwise
+ * @example
+ * ```typescript
+ * if (isIOSDevice()) {
+ *   console.log('Running on iOS device');
+ * }
+ * ```
+ */
+export declare const isIOSDevice: () => boolean;
+/**
+ * Checks if Web Audio API is available in the current environment
+ * @returns True if Web Audio API is supported, false otherwise
+ * @example
+ * ```typescript
+ * if (isWebAudioSupported()) {
+ *   console.log('Web Audio API is available');
+ * }
+ * ```
+ */
+export declare const isWebAudioSupported: () => boolean;
+/**
+ * Determines if Web Audio API should be used based on configuration and device detection
+ * @returns True if Web Audio API should be used, false otherwise
+ * @example
+ * ```typescript
+ * if (shouldUseWebAudio()) {
+ *   // Use Web Audio API for volume control
+ * }
+ * ```
+ */
+export declare const shouldUseWebAudio: () => boolean;
+/**
+ * Gets information about Web Audio API support and usage
+ * @returns Object containing Web Audio API support information
+ * @example
+ * ```typescript
+ * const support = getWebAudioSupport();
+ * console.log(`Using Web Audio: ${support.usingWebAudio}`);
+ * console.log(`Reason: ${support.reason}`);
+ * ```
+ */
+export declare const getWebAudioSupport: () => WebAudioSupport;
+/**
+ * Configures Web Audio API usage
+ * @param config - Configuration options for Web Audio API
+ * @example
+ * ```typescript
+ * // Force Web Audio API usage on all devices
+ * setWebAudioConfig({ forceWebAudio: true });
+ *
+ * // Disable Web Audio API entirely
+ * setWebAudioConfig({ enabled: false });
+ * ```
+ */
+export declare const setWebAudioConfig: (config: Partial<WebAudioConfig>) => void;
+/**
+ * Gets the current Web Audio API configuration
+ * @returns Current Web Audio API configuration
+ * @example
+ * ```typescript
+ * const config = getWebAudioConfig();
+ * console.log(`Web Audio enabled: ${config.enabled}`);
+ * ```
+ */
+export declare const getWebAudioConfig: () => WebAudioConfig;
+/**
+ * Creates or gets an AudioContext for Web Audio API operations
+ * @returns AudioContext instance or null if not supported
+ * @example
+ * ```typescript
+ * const context = getAudioContext();
+ * if (context) {
+ *   console.log('Audio context created successfully');
+ * }
+ * ```
+ */
+export declare const getAudioContext: () => AudioContext | null;
+/**
+ * Creates Web Audio API nodes for an audio element
+ * @param audioElement - The HTML audio element to create nodes for
+ * @param audioContext - The AudioContext to use
+ * @returns Web Audio API node set or null if creation fails
+ * @example
+ * ```typescript
+ * const audio = new Audio('song.mp3');
+ * const context = getAudioContext();
+ * if (context) {
+ *   const nodes = createWebAudioNodes(audio, context);
+ *   if (nodes) {
+ *     nodes.gainNode.gain.value = 0.5; // Set volume to 50%
+ *   }
+ * }
+ * ```
+ */
+export declare const createWebAudioNodes: (audioElement: HTMLAudioElement, audioContext: AudioContext) => WebAudioNodeSet | null;
+/**
+ * Sets volume using Web Audio API gain node
+ * @param gainNode - The gain node to set volume on
+ * @param volume - Volume level (0-1)
+ * @param transitionDuration - Optional transition duration in milliseconds
+ * @example
+ * ```typescript
+ * const nodes = createWebAudioNodes(audio, context);
+ * if (nodes) {
+ *   setWebAudioVolume(nodes.gainNode, 0.5); // Set to 50% volume
+ *   setWebAudioVolume(nodes.gainNode, 0.2, 300); // Fade to 20% over 300ms
+ * }
+ * ```
+ */
+export declare const setWebAudioVolume: (gainNode: GainNode, volume: number, transitionDuration?: number) => void;
+/**
+ * Gets the current volume from a Web Audio API gain node
+ * @param gainNode - The gain node to get volume from
+ * @returns Current volume level (0-1)
+ * @example
+ * ```typescript
+ * const nodes = createWebAudioNodes(audio, context);
+ * if (nodes) {
+ *   const volume = getWebAudioVolume(nodes.gainNode);
+ *   console.log(`Current volume: ${volume * 100}%`);
+ * }
+ * ```
+ */
+export declare const getWebAudioVolume: (gainNode: GainNode) => number;
+/**
+ * Resumes an AudioContext if it's in suspended state (required for autoplay policy)
+ * @param audioContext - The AudioContext to resume
+ * @returns Promise that resolves when context is resumed
+ * @example
+ * ```typescript
+ * const context = getAudioContext();
+ * if (context) {
+ *   await resumeAudioContext(context);
+ * }
+ * ```
+ */
+export declare const resumeAudioContext: (audioContext: AudioContext) => Promise<void>;
+/**
+ * Cleans up Web Audio API nodes and connections
+ * @param nodes - The Web Audio API node set to clean up
+ * @example
+ * ```typescript
+ * const nodes = createWebAudioNodes(audio, context);
+ * if (nodes) {
+ *   // Use nodes...
+ *   cleanupWebAudioNodes(nodes); // Clean up when done
+ * }
+ * ```
+ */
+export declare const cleanupWebAudioNodes: (nodes: WebAudioNodeSet) => void;