audio-channel-queue 1.12.0 → 1.13.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +68 -15
- package/dist/core.d.ts +1 -1
- package/dist/core.js +8 -4
- package/dist/errors.d.ts +1 -1
- package/dist/errors.js +2 -42
- package/dist/events.d.ts +1 -1
- package/dist/events.js +1 -1
- package/dist/index.d.ts +4 -3
- package/dist/index.js +19 -3
- package/dist/info.d.ts +1 -1
- package/dist/info.js +4 -2
- package/dist/pause.d.ts +1 -1
- package/dist/pause.js +35 -9
- package/dist/queue-manipulation.d.ts +1 -1
- package/dist/queue-manipulation.js +1 -1
- package/dist/types.d.ts +38 -1
- package/dist/types.js +1 -1
- package/dist/utils.d.ts +1 -1
- package/dist/utils.js +1 -1
- package/dist/volume.d.ts +41 -1
- package/dist/volume.js +194 -19
- package/dist/web-audio.d.ts +156 -0
- package/dist/web-audio.js +327 -0
- package/package.json +9 -5
- package/src/core.ts +17 -5
- package/src/errors.ts +3 -51
- package/src/events.ts +1 -1
- package/src/index.ts +23 -2
- package/src/info.ts +4 -2
- package/src/pause.ts +41 -11
- package/src/queue-manipulation.ts +1 -1
- package/src/types.ts +41 -1
- package/src/utils.ts +1 -1
- package/src/volume.ts +230 -18
- package/src/web-audio.ts +331 -0
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
"use strict";
/**
 * @fileoverview Web Audio API support for enhanced volume control on iOS and other platforms
 */
// TypeScript-emitted helper that downlevels async/await for CommonJS output:
// wraps a generator function so each yielded value is adopted as a Promise
// and the overall call resolves/rejects like a native async function.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
// Pre-declare every public export as void 0 (standard TypeScript CommonJS
// emit); each name is reassigned to its implementation later in this module.
exports.cleanupWebAudioNodes = exports.resumeAudioContext = exports.getWebAudioVolume = exports.setWebAudioVolume = exports.createWebAudioNodes = exports.getAudioContext = exports.getWebAudioConfig = exports.setWebAudioConfig = exports.getWebAudioSupport = exports.shouldUseWebAudio = exports.isWebAudioSupported = exports.isIOSDevice = void 0;
/**
 * Global Web Audio API configuration
 * - autoDetectIOS: automatically use Web Audio when an iOS device is detected
 * - enabled: master switch; when false Web Audio is never used
 * - forceWebAudio: use Web Audio on every platform, regardless of detection
 */
let webAudioConfig = {
    autoDetectIOS: true,
    enabled: true,
    forceWebAudio: false
};
|
|
24
|
+
/**
 * Detects whether the current environment is an iOS device.
 *
 * Prefers the structured User-Agent Client Hints API when available and
 * otherwise falls back to userAgent sniffing, including the iPadOS 13+
 * case where Safari reports "Macintosh" but exposes multiple touch points.
 *
 * @returns True when the device is iOS (or an iPad masquerading as a Mac), false otherwise
 * @example
 * ```typescript
 * if (isIOSDevice()) {
 *   console.log('Running on iOS device');
 * }
 * ```
 */
const isIOSDevice = () => {
    if (typeof navigator === 'undefined')
        return false;
    const nav = navigator;
    // Modern path: User-Agent Client Hints report the platform directly.
    if ('userAgentData' in navigator && nav.userAgentData) {
        return nav.userAgentData.platform === 'iOS';
    }
    // Legacy path: parse the userAgent string.
    const ua = navigator.userAgent || '';
    if (/iPad|iPhone|iPod/.test(ua)) {
        return true;
    }
    // Modern iPads identify as "Macintosh"; a multi-touch "Mac" is really an iPad.
    return /Macintosh/.test(ua) && 'maxTouchPoints' in navigator && navigator.maxTouchPoints > 1;
};
exports.isIOSDevice = isIOSDevice;
|
|
50
|
+
/**
 * Checks whether a Web Audio API implementation (standard or webkit-prefixed)
 * exists in the current environment.
 *
 * @returns True if Web Audio API is supported, false otherwise
 * @example
 * ```typescript
 * if (isWebAudioSupported()) {
 *   console.log('Web Audio API is available');
 * }
 * ```
 */
const isWebAudioSupported = () => {
    if (typeof window === 'undefined') {
        // Node.js (test) environment: look for Web Audio globals on `global`.
        const g = global;
        return typeof g.AudioContext !== 'undefined' || typeof g.webkitAudioContext !== 'undefined';
    }
    // Browser environment: accept either the standard or the webkit constructor.
    const win = window;
    return typeof AudioContext !== 'undefined' || typeof win.webkitAudioContext !== 'undefined';
};
exports.isWebAudioSupported = isWebAudioSupported;
|
|
72
|
+
/**
 * Decides whether Web Audio API should be used, combining the module
 * configuration with environment/device detection.
 *
 * @returns True if Web Audio API should be used, false otherwise
 * @example
 * ```typescript
 * if (shouldUseWebAudio()) {
 *   // Use Web Audio API for volume control
 * }
 * ```
 */
const shouldUseWebAudio = () => {
    // Config switch and platform support are hard prerequisites.
    if (!webAudioConfig.enabled || !(0, exports.isWebAudioSupported)()) {
        return false;
    }
    if (webAudioConfig.forceWebAudio) {
        return true;
    }
    // Otherwise only opt in automatically for detected iOS devices.
    return webAudioConfig.autoDetectIOS ? (0, exports.isIOSDevice)() : false;
};
exports.shouldUseWebAudio = shouldUseWebAudio;
|
|
94
|
+
/**
 * Summarizes Web Audio API support and the reason for the current decision.
 *
 * @returns Object with `available`, `isIOS`, `usingWebAudio` flags and a
 *          human-readable `reason` string
 * @example
 * ```typescript
 * const support = getWebAudioSupport();
 * console.log(`Using Web Audio: ${support.usingWebAudio}`);
 * console.log(`Reason: ${support.reason}`);
 * ```
 */
const getWebAudioSupport = () => {
    const available = (0, exports.isWebAudioSupported)();
    const isIOS = (0, exports.isIOSDevice)();
    const usingWebAudio = (0, exports.shouldUseWebAudio)();
    // Explain the decision in priority order: disabled > unsupported > forced > iOS > default.
    const reason = !webAudioConfig.enabled
        ? 'Web Audio API disabled in configuration'
        : !available
            ? 'Web Audio API not supported in this environment'
            : webAudioConfig.forceWebAudio
                ? 'Web Audio API forced via configuration'
                : isIOS && webAudioConfig.autoDetectIOS
                    ? 'iOS device detected - using Web Audio API for volume control'
                    : 'Using standard HTMLAudioElement volume control';
    return {
        available,
        isIOS,
        reason,
        usingWebAudio
    };
};
exports.getWebAudioSupport = getWebAudioSupport;
|
|
132
|
+
/**
 * Updates the Web Audio API configuration; unspecified keys keep their
 * current values.
 *
 * @param config - Partial configuration options to merge in
 * @example
 * ```typescript
 * // Force Web Audio API usage on all devices
 * setWebAudioConfig({ forceWebAudio: true });
 *
 * // Disable Web Audio API entirely
 * setWebAudioConfig({ enabled: false });
 * ```
 */
const setWebAudioConfig = (config) => {
    // Shallow-merge onto a fresh object so callers never hold a live reference.
    webAudioConfig = { ...webAudioConfig, ...config };
};
exports.setWebAudioConfig = setWebAudioConfig;
|
|
148
|
+
/**
 * Returns a snapshot of the current Web Audio API configuration.
 *
 * @returns A shallow copy of the configuration (mutating it has no effect)
 * @example
 * ```typescript
 * const config = getWebAudioConfig();
 * console.log(`Web Audio enabled: ${config.enabled}`);
 * ```
 */
const getWebAudioConfig = () => ({ ...webAudioConfig });
exports.getWebAudioConfig = getWebAudioConfig;
|
|
161
|
+
/**
 * Creates an AudioContext for Web Audio API operations.
 *
 * @returns A new AudioContext instance, or null when unsupported, when
 *          running outside a browser (tests mock the context), or when
 *          construction throws
 * @example
 * ```typescript
 * const context = getAudioContext();
 * if (context) {
 *   console.log('Audio context created successfully');
 * }
 * ```
 */
const getAudioContext = () => {
    if (!(0, exports.isWebAudioSupported)())
        return null;
    // Node.js (test) environment: return null so tests can inject a mock.
    if (typeof window === 'undefined') {
        return null;
    }
    try {
        const win = window;
        // Fall back to the webkit-prefixed constructor for older Safari.
        const Ctor = window.AudioContext || win.webkitAudioContext;
        return new Ctor();
    }
    catch (error) {
        // eslint-disable-next-line no-console
        console.error('Failed to create AudioContext:', error);
        return null;
    }
};
exports.getAudioContext = getAudioContext;
|
|
192
|
+
/**
 * Builds the Web Audio node graph for an audio element:
 * media element source -> gain (volume) -> destination (speakers).
 *
 * @param audioElement - The HTML audio element to create nodes for
 * @param audioContext - The AudioContext to use
 * @returns `{ gainNode, sourceNode }`, or null if node creation/connection fails
 * @example
 * ```typescript
 * const audio = new Audio('song.mp3');
 * const context = getAudioContext();
 * if (context) {
 *   const nodes = createWebAudioNodes(audio, context);
 *   if (nodes) {
 *     nodes.gainNode.gain.value = 0.5; // Set volume to 50%
 *   }
 * }
 * ```
 */
const createWebAudioNodes = (audioElement, audioContext) => {
    try {
        const source = audioContext.createMediaElementSource(audioElement);
        const gain = audioContext.createGain();
        // Wire the graph: element audio flows through the gain node to output.
        source.connect(gain);
        gain.connect(audioContext.destination);
        return {
            gainNode: gain,
            sourceNode: source
        };
    }
    catch (error) {
        // eslint-disable-next-line no-console
        console.error('Failed to create Web Audio nodes:', error);
        return null;
    }
};
exports.createWebAudioNodes = createWebAudioNodes;
|
|
231
|
+
/**
 * Sets the volume on a Web Audio gain node, optionally ramping to the
 * target over a duration.
 *
 * @param gainNode - The gain node to set volume on
 * @param volume - Volume level (clamped to 0-1)
 * @param transitionDuration - Optional ramp duration in milliseconds; omitted
 *        or non-positive means an instant change
 * @example
 * ```typescript
 * const nodes = createWebAudioNodes(audio, context);
 * if (nodes) {
 *   setWebAudioVolume(nodes.gainNode, 0.5); // Set to 50% volume
 *   setWebAudioVolume(nodes.gainNode, 0.2, 300); // Fade to 20% over 300ms
 * }
 * ```
 */
const setWebAudioVolume = (gainNode, volume, transitionDuration) => {
    const target = Math.min(1, Math.max(0, volume));
    const now = gainNode.context.currentTime;
    // Drop any ramps still in flight before applying the new value.
    gainNode.gain.cancelScheduledValues(now);
    if (transitionDuration && transitionDuration > 0) {
        // Anchor at the current value, then ramp linearly (ms -> seconds).
        gainNode.gain.setValueAtTime(gainNode.gain.value, now);
        gainNode.gain.linearRampToValueAtTime(target, now + transitionDuration / 1000);
    }
    else {
        gainNode.gain.value = target;
    }
};
exports.setWebAudioVolume = setWebAudioVolume;
|
|
261
|
+
/**
 * Reads the current volume from a Web Audio gain node.
 *
 * @param gainNode - The gain node to get volume from
 * @returns Current volume level (0-1)
 * @example
 * ```typescript
 * const nodes = createWebAudioNodes(audio, context);
 * if (nodes) {
 *   const volume = getWebAudioVolume(nodes.gainNode);
 *   console.log(`Current volume: ${volume * 100}%`);
 * }
 * ```
 */
const getWebAudioVolume = (gainNode) => gainNode.gain.value;
exports.getWebAudioVolume = getWebAudioVolume;
|
|
278
|
+
/**
 * Resumes a suspended AudioContext (required under browser autoplay policy).
 * Resume failures are logged and swallowed so playback setup can continue.
 *
 * @param audioContext - The AudioContext to resume
 * @returns Promise that resolves once the resume attempt has completed
 * @example
 * ```typescript
 * const context = getAudioContext();
 * if (context) {
 *   await resumeAudioContext(context);
 * }
 * ```
 */
const resumeAudioContext = async (audioContext) => {
    if (audioContext.state !== 'suspended') {
        return;
    }
    try {
        await audioContext.resume();
    }
    catch (error) {
        // eslint-disable-next-line no-console
        console.error('Failed to resume AudioContext:', error);
        // Don't throw - handle gracefully and continue
    }
};
exports.resumeAudioContext = resumeAudioContext;
|
|
303
|
+
/**
 * Disconnects a Web Audio node set, releasing its graph connections.
 * Failures during teardown are logged but never thrown.
 *
 * @param nodes - The Web Audio API node set to clean up
 * @example
 * ```typescript
 * const nodes = createWebAudioNodes(audio, context);
 * if (nodes) {
 *   // Use nodes...
 *   cleanupWebAudioNodes(nodes); // Clean up when done
 * }
 * ```
 */
const cleanupWebAudioNodes = (nodes) => {
    try {
        nodes.sourceNode.disconnect();
        nodes.gainNode.disconnect();
    }
    catch (error) {
        // Cleanup errors are non-fatal; log for diagnostics and move on.
        // eslint-disable-next-line no-console
        console.error('Error during Web Audio cleanup:', error);
    }
};
exports.cleanupWebAudioNodes = cleanupWebAudioNodes;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "audio-channel-queue",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.13.2",
|
|
4
4
|
"description": "Allows you to queue audio files to different playback channels.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"types": "dist/index.d.ts",
|
|
@@ -10,7 +10,7 @@
|
|
|
10
10
|
],
|
|
11
11
|
"scripts": {
|
|
12
12
|
"build": "tsc",
|
|
13
|
-
"prepare": "
|
|
13
|
+
"prepare": "husky",
|
|
14
14
|
"test": "jest",
|
|
15
15
|
"test:watch": "jest --watch",
|
|
16
16
|
"test:coverage": "jest --coverage",
|
|
@@ -22,15 +22,17 @@
|
|
|
22
22
|
"prebuild": "npm run clean",
|
|
23
23
|
"prepack": "npm run validate && npm run build",
|
|
24
24
|
"validate": "npm run format:check && npm run lint && npm run test",
|
|
25
|
+
"login:token": "npm login --auth-type=token",
|
|
25
26
|
"publish:patch": "npm version patch && npm run publish:release",
|
|
26
27
|
"publish:minor": "npm version minor && npm run publish:release",
|
|
27
28
|
"publish:major": "npm version major && npm run publish:release",
|
|
28
29
|
"publish:release": "npm run validate && npm run build && npm publish",
|
|
30
|
+
"publish:beta": "npm version prerelease --preid=beta && npm publish --tag beta",
|
|
29
31
|
"publish:dry": "npm run validate && npm run build && npm publish --dry-run"
|
|
30
32
|
},
|
|
31
33
|
"repository": {
|
|
32
34
|
"type": "git",
|
|
33
|
-
"url": "git+https://github.com/tonycarpenter21/
|
|
35
|
+
"url": "git+https://github.com/tonycarpenter21/audioq.git"
|
|
34
36
|
},
|
|
35
37
|
"keywords": [
|
|
36
38
|
"audio",
|
|
@@ -41,16 +43,18 @@
|
|
|
41
43
|
"author": "Tony Carpenter",
|
|
42
44
|
"license": "MIT",
|
|
43
45
|
"bugs": {
|
|
44
|
-
"url": "https://github.com/tonycarpenter21/
|
|
46
|
+
"url": "https://github.com/tonycarpenter21/audioq/issues"
|
|
45
47
|
},
|
|
46
|
-
"homepage": "https://github.com/tonycarpenter21/
|
|
48
|
+
"homepage": "https://github.com/tonycarpenter21/audioq#readme",
|
|
47
49
|
"engines": {
|
|
48
50
|
"node": ">=14.0.0"
|
|
49
51
|
},
|
|
50
52
|
"devDependencies": {
|
|
51
53
|
"@types/jest": "^29.5.13",
|
|
54
|
+
"husky": "^9.1.7",
|
|
52
55
|
"jest": "^29.7.0",
|
|
53
56
|
"jest-environment-jsdom": "^29.7.0",
|
|
57
|
+
"lint-staged": "^16.2.7",
|
|
54
58
|
"rimraf": "^6.0.1",
|
|
55
59
|
"ts-jest": "^29.2.5",
|
|
56
60
|
"typescript": "^5.6.2",
|
package/src/core.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* @fileoverview Core queue management functions for the
|
|
2
|
+
* @fileoverview Core queue management functions for the audioq package
|
|
3
3
|
*/
|
|
4
4
|
|
|
5
5
|
import { ExtendedAudioQueueChannel, AudioQueueOptions, MAX_CHANNELS, QueueConfig } from './types';
|
|
@@ -12,7 +12,14 @@ import {
|
|
|
12
12
|
setupProgressTracking,
|
|
13
13
|
cleanupProgressTracking
|
|
14
14
|
} from './events';
|
|
15
|
-
import {
|
|
15
|
+
import {
|
|
16
|
+
applyVolumeDucking,
|
|
17
|
+
cancelVolumeTransition,
|
|
18
|
+
cleanupWebAudioForAudio,
|
|
19
|
+
getGlobalVolume,
|
|
20
|
+
initializeWebAudioForAudio,
|
|
21
|
+
restoreVolumeLevels
|
|
22
|
+
} from './volume';
|
|
16
23
|
import { setupAudioErrorHandling, handleAudioError } from './errors';
|
|
17
24
|
|
|
18
25
|
/**
|
|
@@ -207,7 +214,7 @@ const checkQueueLimit = (
|
|
|
207
214
|
|
|
208
215
|
if (globalQueueConfig.showQueueWarnings) {
|
|
209
216
|
// eslint-disable-next-line no-console
|
|
210
|
-
console.
|
|
217
|
+
console.warn(`Dropped oldest queued item to make room for new audio`);
|
|
211
218
|
}
|
|
212
219
|
return true;
|
|
213
220
|
}
|
|
@@ -283,6 +290,9 @@ export const queueAudio = async (
|
|
|
283
290
|
await handleAudioError(audio, channelNumber, validatedUrl, error);
|
|
284
291
|
});
|
|
285
292
|
|
|
293
|
+
// Initialize Web Audio API support if needed
|
|
294
|
+
await initializeWebAudioForAudio(audio, channelNumber);
|
|
295
|
+
|
|
286
296
|
// Apply options if provided
|
|
287
297
|
if (options) {
|
|
288
298
|
if (typeof options.loop === 'boolean') {
|
|
@@ -363,9 +373,10 @@ export const playAudioQueue = async (channelNumber: number): Promise<void> => {
|
|
|
363
373
|
|
|
364
374
|
const currentAudio: HTMLAudioElement = channel.queue[0];
|
|
365
375
|
|
|
366
|
-
// Apply channel volume if not already set
|
|
376
|
+
// Apply channel volume with global volume multiplier if not already set
|
|
367
377
|
if (currentAudio.volume === 1.0 && channel.volume !== undefined) {
|
|
368
|
-
|
|
378
|
+
const globalVolume: number = getGlobalVolume();
|
|
379
|
+
currentAudio.volume = channel.volume * globalVolume;
|
|
369
380
|
}
|
|
370
381
|
|
|
371
382
|
setupProgressTracking(currentAudio, channelNumber, audioChannels);
|
|
@@ -440,6 +451,7 @@ export const playAudioQueue = async (channelNumber: number): Promise<void> => {
|
|
|
440
451
|
// For non-looping audio, remove from queue and play next
|
|
441
452
|
currentAudio.pause();
|
|
442
453
|
cleanupProgressTracking(currentAudio, channelNumber, audioChannels);
|
|
454
|
+
cleanupWebAudioForAudio(currentAudio, channelNumber);
|
|
443
455
|
channel.queue.shift();
|
|
444
456
|
channel.isPaused = false; // Reset pause state
|
|
445
457
|
|
package/src/errors.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* @fileoverview Error handling, retry logic, and recovery mechanisms for the
|
|
2
|
+
* @fileoverview Error handling, retry logic, and recovery mechanisms for the audioq package
|
|
3
3
|
*/
|
|
4
4
|
|
|
5
5
|
import {
|
|
@@ -34,8 +34,6 @@ let globalErrorRecovery: ErrorRecoveryOptions = {
|
|
|
34
34
|
|
|
35
35
|
const retryAttempts: WeakMap<HTMLAudioElement, number> = new WeakMap();
|
|
36
36
|
|
|
37
|
-
const loadTimeouts: WeakMap<HTMLAudioElement, number> = new WeakMap();
|
|
38
|
-
|
|
39
37
|
/**
|
|
40
38
|
* Subscribes to audio error events for a specific channel
|
|
41
39
|
* @param channelNumber - The channel number to listen to (defaults to 0)
|
|
@@ -229,7 +227,7 @@ export const emitAudioError = (
|
|
|
229
227
|
// Log to analytics if enabled
|
|
230
228
|
if (globalErrorRecovery.logErrorsToAnalytics) {
|
|
231
229
|
// eslint-disable-next-line no-console
|
|
232
|
-
console.
|
|
230
|
+
console.error('Audio Error Analytics:', errorInfo);
|
|
233
231
|
}
|
|
234
232
|
|
|
235
233
|
channel.audioErrorCallbacks.forEach((callback) => {
|
|
@@ -310,42 +308,8 @@ export const setupAudioErrorHandling = (
|
|
|
310
308
|
const channel: ExtendedAudioQueueChannel = audioChannels[channelNumber];
|
|
311
309
|
if (!channel) return;
|
|
312
310
|
|
|
313
|
-
// Set up loading timeout with test environment compatibility
|
|
314
|
-
let timeoutId: number;
|
|
315
|
-
if (typeof setTimeout !== 'undefined') {
|
|
316
|
-
timeoutId = setTimeout(() => {
|
|
317
|
-
if (audio.networkState === HTMLMediaElement.NETWORK_LOADING) {
|
|
318
|
-
const timeoutError = new Error(
|
|
319
|
-
`Audio loading timeout after ${globalRetryConfig.timeoutMs}ms`
|
|
320
|
-
);
|
|
321
|
-
handleAudioError(audio, channelNumber, originalUrl, timeoutError);
|
|
322
|
-
}
|
|
323
|
-
}, globalRetryConfig.timeoutMs) as unknown as number;
|
|
324
|
-
|
|
325
|
-
loadTimeouts.set(audio, timeoutId);
|
|
326
|
-
}
|
|
327
|
-
|
|
328
|
-
// Clear timeout when metadata loads successfully
|
|
329
|
-
const handleLoadSuccess = (): void => {
|
|
330
|
-
if (typeof setTimeout !== 'undefined') {
|
|
331
|
-
const timeoutId = loadTimeouts.get(audio);
|
|
332
|
-
if (timeoutId) {
|
|
333
|
-
clearTimeout(timeoutId);
|
|
334
|
-
loadTimeouts.delete(audio);
|
|
335
|
-
}
|
|
336
|
-
}
|
|
337
|
-
};
|
|
338
|
-
|
|
339
311
|
// Handle various error events
|
|
340
312
|
const handleError = (_event: Event): void => {
|
|
341
|
-
if (typeof setTimeout !== 'undefined') {
|
|
342
|
-
const timeoutId = loadTimeouts.get(audio);
|
|
343
|
-
if (timeoutId) {
|
|
344
|
-
clearTimeout(timeoutId);
|
|
345
|
-
loadTimeouts.delete(audio);
|
|
346
|
-
}
|
|
347
|
-
}
|
|
348
|
-
|
|
349
313
|
const error = new Error(`Audio loading failed: ${audio.error?.message || 'Unknown error'}`);
|
|
350
314
|
handleAudioError(audio, channelNumber, originalUrl, error);
|
|
351
315
|
};
|
|
@@ -364,8 +328,6 @@ export const setupAudioErrorHandling = (
|
|
|
364
328
|
audio.addEventListener('error', handleError);
|
|
365
329
|
audio.addEventListener('abort', handleAbort);
|
|
366
330
|
audio.addEventListener('stalled', handleStall);
|
|
367
|
-
audio.addEventListener('loadedmetadata', handleLoadSuccess);
|
|
368
|
-
audio.addEventListener('canplay', handleLoadSuccess);
|
|
369
331
|
|
|
370
332
|
// Custom play error handling
|
|
371
333
|
if (onError) {
|
|
@@ -423,7 +385,7 @@ export const handleAudioError = async (
|
|
|
423
385
|
currentAttempts < retryConfig.maxRetries &&
|
|
424
386
|
globalErrorRecovery.autoRetry
|
|
425
387
|
) {
|
|
426
|
-
const delay = retryConfig.exponentialBackoff
|
|
388
|
+
const delay: number = retryConfig.exponentialBackoff
|
|
427
389
|
? retryConfig.baseDelay * Math.pow(2, currentAttempts)
|
|
428
390
|
: retryConfig.baseDelay;
|
|
429
391
|
|
|
@@ -481,21 +443,11 @@ export const createProtectedAudioElement = async (
|
|
|
481
443
|
const audio = new Audio();
|
|
482
444
|
|
|
483
445
|
return new Promise((resolve, reject) => {
|
|
484
|
-
const cleanup = (): void => {
|
|
485
|
-
const timeoutId = loadTimeouts.get(audio);
|
|
486
|
-
if (timeoutId) {
|
|
487
|
-
clearTimeout(timeoutId);
|
|
488
|
-
loadTimeouts.delete(audio);
|
|
489
|
-
}
|
|
490
|
-
};
|
|
491
|
-
|
|
492
446
|
const handleSuccess = (): void => {
|
|
493
|
-
cleanup();
|
|
494
447
|
resolve(audio);
|
|
495
448
|
};
|
|
496
449
|
|
|
497
450
|
const handleError = (error: Error): void => {
|
|
498
|
-
cleanup();
|
|
499
451
|
reject(error);
|
|
500
452
|
};
|
|
501
453
|
|
package/src/events.ts
CHANGED
package/src/index.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* @fileoverview Main entry point for the
|
|
2
|
+
* @fileoverview Main entry point for the audioq package
|
|
3
3
|
* Exports all public functions and types for audio queue management, pause/resume controls,
|
|
4
4
|
* volume management with ducking, progress tracking, and comprehensive event system
|
|
5
5
|
*/
|
|
@@ -66,12 +66,30 @@ export {
|
|
|
66
66
|
getAllChannelsVolume,
|
|
67
67
|
getChannelVolume,
|
|
68
68
|
getFadeConfig,
|
|
69
|
+
getGlobalVolume,
|
|
69
70
|
setAllChannelsVolume,
|
|
70
71
|
setChannelVolume,
|
|
72
|
+
setGlobalVolume,
|
|
71
73
|
setVolumeDucking,
|
|
72
74
|
transitionVolume
|
|
73
75
|
} from './volume';
|
|
74
76
|
|
|
77
|
+
// Web Audio API support functions
|
|
78
|
+
export {
|
|
79
|
+
cleanupWebAudioNodes,
|
|
80
|
+
createWebAudioNodes,
|
|
81
|
+
getAudioContext,
|
|
82
|
+
getWebAudioConfig,
|
|
83
|
+
getWebAudioSupport,
|
|
84
|
+
getWebAudioVolume,
|
|
85
|
+
isIOSDevice,
|
|
86
|
+
isWebAudioSupported,
|
|
87
|
+
resumeAudioContext,
|
|
88
|
+
setWebAudioConfig,
|
|
89
|
+
setWebAudioVolume,
|
|
90
|
+
shouldUseWebAudio
|
|
91
|
+
} from './web-audio';
|
|
92
|
+
|
|
75
93
|
// Audio information and progress tracking functions
|
|
76
94
|
export {
|
|
77
95
|
getAllChannelsInfo,
|
|
@@ -122,12 +140,15 @@ export type {
|
|
|
122
140
|
FadeConfig,
|
|
123
141
|
ProgressCallback,
|
|
124
142
|
QueueChangeCallback,
|
|
143
|
+
QueueConfig,
|
|
125
144
|
QueueItem,
|
|
126
145
|
QueueManipulationResult,
|
|
127
146
|
QueueSnapshot,
|
|
128
147
|
RetryConfig,
|
|
129
148
|
VolumeConfig,
|
|
130
|
-
|
|
149
|
+
WebAudioConfig,
|
|
150
|
+
WebAudioNodeSet,
|
|
151
|
+
WebAudioSupport
|
|
131
152
|
} from './types';
|
|
132
153
|
|
|
133
154
|
// Enums and constants
|
package/src/info.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* @fileoverview Audio information and progress tracking functions for the
|
|
2
|
+
* @fileoverview Audio information and progress tracking functions for the audioq package
|
|
3
3
|
*/
|
|
4
4
|
|
|
5
5
|
import {
|
|
@@ -47,7 +47,9 @@ export const getWhitelistedChannelProperties = (): string[] => {
|
|
|
47
47
|
'isLocked',
|
|
48
48
|
'maxQueueSize',
|
|
49
49
|
'retryConfig',
|
|
50
|
-
'volumeConfig' // Legacy property that might still be used
|
|
50
|
+
'volumeConfig', // Legacy property that might still be used
|
|
51
|
+
'webAudioContext', // Web Audio API context
|
|
52
|
+
'webAudioNodes' // Web Audio API nodes map
|
|
51
53
|
];
|
|
52
54
|
|
|
53
55
|
return [...new Set(propertyNames)]; // Remove duplicates
|