hume 0.10.4-beta.5 → 0.11.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/empathic-voice/__package__.yml +15 -36
- package/.mock/definition/empathic-voice/chat.yml +18 -3
- package/.mock/definition/empathic-voice/chatGroups.yml +14 -10
- package/.mock/definition/empathic-voice/chatWebhooks.yml +2 -0
- package/.mock/definition/empathic-voice/chats.yml +2 -0
- package/.mock/definition/empathic-voice/configs.yml +2 -0
- package/.mock/definition/empathic-voice/customVoices.yml +1 -0
- package/.mock/definition/empathic-voice/prompts.yml +2 -0
- package/.mock/definition/empathic-voice/tools.yml +2 -0
- package/.mock/definition/expression-measurement/batch/__package__.yml +10 -10
- package/.mock/definition/expression-measurement/stream/stream.yml +1 -1
- package/.mock/definition/tts/__package__.yml +70 -55
- package/.mock/definition/tts/voices.yml +20 -9
- package/.mock/fern.config.json +1 -1
- package/api/resources/empathicVoice/resources/chat/client/Client.js +4 -1
- package/api/resources/empathicVoice/resources/chatGroups/client/Client.js +6 -6
- package/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetAudioRequest.d.ts +4 -4
- package/api/resources/empathicVoice/resources/chats/client/Client.js +5 -5
- package/api/resources/empathicVoice/resources/configs/client/Client.js +11 -11
- package/api/resources/empathicVoice/resources/customVoices/client/Client.js +7 -7
- package/api/resources/empathicVoice/resources/prompts/client/Client.js +10 -10
- package/api/resources/empathicVoice/resources/tools/client/Client.js +11 -11
- package/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +3 -3
- package/api/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/api/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/api/resources/empathicVoice/types/index.d.ts +0 -4
- package/api/resources/empathicVoice/types/index.js +0 -4
- package/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
- package/api/resources/tts/client/Client.d.ts +6 -6
- package/api/resources/tts/client/Client.js +10 -10
- package/api/resources/tts/resources/voices/client/Client.d.ts +5 -3
- package/api/resources/tts/resources/voices/client/Client.js +9 -7
- package/api/resources/tts/resources/voices/client/requests/VoicesListRequest.d.ts +4 -1
- package/api/resources/tts/types/PostedTts.d.ts +6 -6
- package/api/resources/tts/types/PostedUtterance.d.ts +5 -5
- package/api/resources/tts/types/PostedUtteranceVoiceWithId.d.ts +6 -7
- package/api/resources/tts/types/PostedUtteranceVoiceWithName.d.ts +6 -7
- package/api/resources/tts/types/ReturnGeneration.d.ts +1 -1
- package/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/api/resources/tts/types/ReturnVoice.d.ts +2 -8
- package/core/websocket/ws.js +2 -4
- package/dist/api/resources/empathicVoice/resources/chat/client/Client.js +4 -1
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.js +6 -6
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/requests/ChatGroupsGetAudioRequest.d.ts +4 -4
- package/dist/api/resources/empathicVoice/resources/chats/client/Client.js +5 -5
- package/dist/api/resources/empathicVoice/resources/configs/client/Client.js +11 -11
- package/dist/api/resources/empathicVoice/resources/customVoices/client/Client.js +7 -7
- package/dist/api/resources/empathicVoice/resources/prompts/client/Client.js +10 -10
- package/dist/api/resources/empathicVoice/resources/tools/client/Client.js +11 -11
- package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/index.d.ts +0 -4
- package/dist/api/resources/empathicVoice/types/index.js +0 -4
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
- package/dist/api/resources/tts/client/Client.d.ts +6 -6
- package/dist/api/resources/tts/client/Client.js +10 -10
- package/dist/api/resources/tts/resources/voices/client/Client.d.ts +5 -3
- package/dist/api/resources/tts/resources/voices/client/Client.js +9 -7
- package/dist/api/resources/tts/resources/voices/client/requests/VoicesListRequest.d.ts +4 -1
- package/dist/api/resources/tts/types/PostedTts.d.ts +6 -6
- package/dist/api/resources/tts/types/PostedUtterance.d.ts +5 -5
- package/dist/api/resources/tts/types/PostedUtteranceVoiceWithId.d.ts +6 -7
- package/dist/api/resources/tts/types/PostedUtteranceVoiceWithName.d.ts +6 -7
- package/dist/api/resources/tts/types/ReturnGeneration.d.ts +1 -1
- package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/api/resources/tts/types/ReturnVoice.d.ts +2 -8
- package/dist/core/websocket/ws.js +2 -4
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +0 -4
- package/dist/serialization/resources/empathicVoice/types/index.js +0 -4
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/dist/wrapper/EVIWebAudioPlayer.d.ts +174 -0
- package/dist/wrapper/EVIWebAudioPlayer.js +314 -0
- package/dist/wrapper/fetchAccessToken.d.ts +1 -3
- package/dist/wrapper/fetchAccessToken.js +8 -28
- package/dist/wrapper/index.d.ts +1 -0
- package/dist/wrapper/index.js +3 -1
- package/package.json +1 -1
- package/reference.md +11 -9
- package/serialization/resources/empathicVoice/types/index.d.ts +0 -4
- package/serialization/resources/empathicVoice/types/index.js +0 -4
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/wrapper/EVIWebAudioPlayer.d.ts +174 -0
- package/wrapper/EVIWebAudioPlayer.js +314 -0
- package/wrapper/fetchAccessToken.d.ts +1 -3
- package/wrapper/fetchAccessToken.js +8 -28
- package/wrapper/index.d.ts +1 -0
- package/wrapper/index.js +3 -1
- package/api/resources/empathicVoice/types/FunctionCallResponseInput.d.ts +0 -6
- package/api/resources/empathicVoice/types/FunctionCallResponseInput.js +0 -5
- package/api/resources/empathicVoice/types/PostedPromptSpec.d.ts +0 -9
- package/api/resources/empathicVoice/types/PostedPromptSpec.js +0 -5
- package/api/resources/empathicVoice/types/TextInput.d.ts +0 -6
- package/api/resources/empathicVoice/types/TextInput.js +0 -5
- package/api/resources/empathicVoice/types/TtsInput.d.ts +0 -6
- package/api/resources/empathicVoice/types/TtsInput.js +0 -5
- package/dist/api/resources/empathicVoice/types/FunctionCallResponseInput.d.ts +0 -6
- package/dist/api/resources/empathicVoice/types/FunctionCallResponseInput.js +0 -5
- package/dist/api/resources/empathicVoice/types/PostedPromptSpec.d.ts +0 -9
- package/dist/api/resources/empathicVoice/types/PostedPromptSpec.js +0 -5
- package/dist/api/resources/empathicVoice/types/TextInput.d.ts +0 -6
- package/dist/api/resources/empathicVoice/types/TextInput.js +0 -5
- package/dist/api/resources/empathicVoice/types/TtsInput.d.ts +0 -6
- package/dist/api/resources/empathicVoice/types/TtsInput.js +0 -5
- package/dist/serialization/resources/empathicVoice/types/FunctionCallResponseInput.d.ts +0 -12
- package/dist/serialization/resources/empathicVoice/types/FunctionCallResponseInput.js +0 -43
- package/dist/serialization/resources/empathicVoice/types/PostedPromptSpec.d.ts +0 -12
- package/dist/serialization/resources/empathicVoice/types/PostedPromptSpec.js +0 -43
- package/dist/serialization/resources/empathicVoice/types/TextInput.d.ts +0 -12
- package/dist/serialization/resources/empathicVoice/types/TextInput.js +0 -43
- package/dist/serialization/resources/empathicVoice/types/TtsInput.d.ts +0 -12
- package/dist/serialization/resources/empathicVoice/types/TtsInput.js +0 -43
- package/serialization/resources/empathicVoice/types/FunctionCallResponseInput.d.ts +0 -12
- package/serialization/resources/empathicVoice/types/FunctionCallResponseInput.js +0 -43
- package/serialization/resources/empathicVoice/types/PostedPromptSpec.d.ts +0 -12
- package/serialization/resources/empathicVoice/types/PostedPromptSpec.js +0 -43
- package/serialization/resources/empathicVoice/types/TextInput.d.ts +0 -12
- package/serialization/resources/empathicVoice/types/TextInput.js +0 -43
- package/serialization/resources/empathicVoice/types/TtsInput.d.ts +0 -12
- package/serialization/resources/empathicVoice/types/TtsInput.js +0 -43
package/reference.md
CHANGED
@@ -37,7 +37,7 @@ await client.tts.synthesizeJson({
         {
             text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
             description:
-                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm,
+                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
         },
     ],
     context: {
@@ -45,7 +45,7 @@ await client.tts.synthesizeJson({
             {
                 text: "How can people see beauty so differently?",
                 description:
-                    "A curious student with a clear and respectful tone, seeking clarification on Hume's
+                    "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question.",
             },
         ],
     },
@@ -123,7 +123,7 @@ await client.tts.synthesizeFile({
         {
             text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
             description:
-                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm,
+                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
         },
     ],
     context: {
@@ -201,7 +201,7 @@ await client.tts.synthesizeFileStreaming({
         {
             text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
             description:
-                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm,
+                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
         },
     ],
     context: {
@@ -281,7 +281,7 @@ const response = await client.tts.synthesizeJsonStreaming({
         {
            text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
            description:
-                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm,
+                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
         },
     ],
     context: {
@@ -289,7 +289,7 @@ const response = await client.tts.synthesizeJsonStreaming({
             {
                 text: "How can people see beauty so differently?",
                 description:
-                    "A curious student with a clear and respectful tone, seeking clarification on Hume's
+                    "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question.",
             },
         ],
     },
@@ -348,7 +348,7 @@ for await (const item of response) {
 <dl>
 <dd>
 
-Lists voices
+Lists voices you have saved in your account, or voices from the [Voice Library](https://platform.hume.ai/tts/voice-library).
 
 </dd>
 </dl>
@@ -424,7 +424,9 @@ while (page.hasNextPage()) {
 <dl>
 <dd>
 
-
+Saves a new custom voice to your account using the specified TTS generation ID.
+
+Once saved, this voice can be reused in subsequent TTS requests, ensuring consistent speech style and prosody. For more details on voice creation, see the [Voices Guide](/docs/text-to-speech-tts/voices).
 
 </dd>
 </dl>
@@ -490,7 +492,7 @@ await client.tts.voices.create({
 <dl>
 <dd>
 
-
+Deletes a previously generated custom voice.
 
 </dd>
 </dl>
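The new reference text above documents saving a TTS generation as a reusable custom voice. As a rough usage sketch (the exact request fields, such as `generationId` and `name`, are inferred from the surrounding reference snippets rather than confirmed by this diff):

```typescript
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: "<YOUR_API_KEY>" });

// Synthesize speech, then save the resulting generation as a reusable custom voice.
const result = await client.tts.synthesizeJson({
    utterances: [{ text: "Hello!", description: "A warm, articulate voice." }],
});

// Assumed field names, mirroring the `client.tts.voices.create({ ... })` call shown above.
await client.tts.voices.create({
    generationId: result.generations[0].generationId,
    name: "my-custom-voice",
});
```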
package/serialization/resources/empathicVoice/types/index.d.ts
CHANGED
@@ -32,9 +32,6 @@ export * from "./UserInput";
 export * from "./UserInterruption";
 export * from "./UserMessage";
 export * from "./JsonMessage";
-export * from "./TtsInput";
-export * from "./TextInput";
-export * from "./FunctionCallResponseInput";
 export * from "./HttpValidationError";
 export * from "./LanguageModelType";
 export * from "./ModelProviderEnum";
@@ -109,7 +106,6 @@ export * from "./PostedEventMessageSpec";
 export * from "./PostedTimeoutSpec";
 export * from "./ReturnEventMessageSpec";
 export * from "./ReturnTimeoutSpec";
-export * from "./PostedPromptSpec";
 export * from "./PostedVoiceProvider";
 export * from "./PostedVoice";
 export * from "./ReturnVoiceProvider";
package/serialization/resources/empathicVoice/types/index.js
CHANGED
@@ -48,9 +48,6 @@ __exportStar(require("./UserInput"), exports);
 __exportStar(require("./UserInterruption"), exports);
 __exportStar(require("./UserMessage"), exports);
 __exportStar(require("./JsonMessage"), exports);
-__exportStar(require("./TtsInput"), exports);
-__exportStar(require("./TextInput"), exports);
-__exportStar(require("./FunctionCallResponseInput"), exports);
 __exportStar(require("./HttpValidationError"), exports);
 __exportStar(require("./LanguageModelType"), exports);
 __exportStar(require("./ModelProviderEnum"), exports);
@@ -125,7 +122,6 @@ __exportStar(require("./PostedEventMessageSpec"), exports);
 __exportStar(require("./PostedTimeoutSpec"), exports);
 __exportStar(require("./ReturnEventMessageSpec"), exports);
 __exportStar(require("./ReturnTimeoutSpec"), exports);
-__exportStar(require("./PostedPromptSpec"), exports);
 __exportStar(require("./PostedVoiceProvider"), exports);
 __exportStar(require("./PostedVoice"), exports);
 __exportStar(require("./ReturnVoiceProvider"), exports);
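With `TtsInput`, `TextInput`, `FunctionCallResponseInput`, and `PostedPromptSpec` dropped from the type barrels (and their modules deleted outright, per the file list above), consumer code that imported them will no longer compile against 0.11.1. A hypothetical example of an import that breaks:

```typescript
// Compiled fine against 0.10.4-beta.5; fails on 0.11.1 because these modules were removed.
// (Hypothetical consumer code; the deep-import path follows the package layout above.)
import type { TtsInput, TextInput } from "hume/api/resources/empathicVoice";
```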
package/version.d.ts
CHANGED
@@ -1 +1 @@
-export declare const SDK_VERSION = "0.
+export declare const SDK_VERSION = "0.11.1";
package/version.js
CHANGED
package/wrapper/EVIWebAudioPlayer.d.ts
ADDED
@@ -0,0 +1,174 @@
+import type { AudioOutput } from "api/resources/empathicVoice";
+/**
+ * Options for configuring an {@link EVIWebAudioPlayer}.
+ *
+ * @default `{}` for sensible defaults.
+ */
+export interface EVIWebAudioPlayerOptions {
+    /**
+     * Initial master gain, via a `GainNode`, from `0` (_silent_) to `1` (_full volume_).
+     * Values outside this range are clamped.
+     *
+     * @default 1
+     */
+    volume?: number;
+    /**
+     * Real-time FFT (frequency-domain) settings **only** for visualization.
+     *
+     * - **Disable**: omit or `{ enabled: false }` – no `AnalyserNode` is created.
+     * - **Defaults**: `{ enabled: true }` → 2048-point FFT at 16 ms (~60 Hz), mapped to 24 Bark bands.
+     * - **Custom**: supply {@link EVIWebAudioPlayerFFTOptions} to override `size`, `interval`, or `transform`.
+     */
+    fft?: EVIWebAudioPlayerFFTOptions;
+}
+/**
+ * FFT (frequency-domain) options for visualization.
+ *
+ * Pass `{ enabled: true }` for defaults, or omit/disable entirely for zero overhead.
+ */
+export type EVIWebAudioPlayerFFTOptions = FftEnabled | FftDisabled;
+type FftDisabled = {
+    /**
+     * Turn visualization data OFF—skip analyser creation entirely (zero extra CPU).
+     */
+    enabled: false;
+};
+type FftEnabled = {
+    /**
+     * Turn visualization data ON—create an `AnalyserNode`, poll it, and emit `'fft'` events.
+     */
+    enabled: true;
+    /**
+     * FFT size (power-of-two, 32 – 32768).
+     * Defaults to 2048 → 1024 bins (~ 23 Hz at 48 kHz).
+     * @default 2048
+     */
+    size?: number;
+    /**
+     * Polling interval, in **milliseconds**.
+     * Default 16 ms (~ 60 Hz) to sync with `requestAnimationFrame()`.
+     * @default 16
+     */
+    interval?: number;
+    /**
+     * Custom post-processing for raw magnitude data. Omit for built-in 24-band Bark mapping.
+     *
+     * @param bins PCM byte magnitudes (0 – 255) from `AnalyserNode`.
+     * @param sampleRate `AudioContext` sample rate in Hz.
+     * @returns Payload emitted with each `'fft'` event.
+     */
+    transform?: (bins: Uint8Array, sampleRate: number) => number[];
+};
+type PlayerEventMap = {
+    play: CustomEvent<{
+        id: string;
+    }>;
+    stop: CustomEvent<{
+        id: string;
+    }>;
+    fft: CustomEvent<{
+        fft: number[];
+    }>;
+    error: CustomEvent<{
+        message: string;
+    }>;
+};
+/**
+ * A sequential, glitch-free Web-Audio player for **EVI** audio output.
+ *
+ * - **Decoding & playback**: base-64 PCM chunks feed an `AudioWorkletNode` and play in order, without gaps.
+ * - **One-time init**: await {@link init} in a user-gesture to build audio graph and unlock the browser’s
+ *   `AudioContext`; later calls are no-ops.
+ * - **Optional FFT**: `{ fft: { enabled: true } }` adds an `AnalyserNode` and emits `'fft'` events; omit to skip.
+ * - **Controls**: {@link setVolume}, {@link mute}, {@link unmute}, {@link stop}, {@link dispose}.
+ * - **Events**: listen for `'play'`, `'stop'`, `'fft'`, `'error'`.
+ */
+export declare class EVIWebAudioPlayer extends EventTarget {
+    #private;
+    private readonly opts;
+    /** `true` while any clip is currently audible. */
+    get playing(): boolean;
+    /** `true` if gain is set to 0 via {@link mute}. */
+    get muted(): boolean;
+    /** Current output gain (0‑1). */
+    get volume(): number;
+    /** Most recent FFT frame (empty when analyser disabled). */
+    get fft(): number[];
+    constructor(opts?: EVIWebAudioPlayerOptions);
+    /**
+     * Generate an empty FFT frame array.
+     * Useful as an initial or placeholder FFT dataset before any real analysis.
+     *
+     * @returns A number[] filled with zeros, length equal to the Bark band count (24).
+     */
+    static emptyFft(): number[];
+    /**
+     * Subscribes to a player event and returns `this` for chaining.
+     *
+     * @param type One of `'play'`, `'stop'`, `'fft'`, or `'error'`.
+     * @param fn Handler invoked with the event’s typed `detail` payload.
+     * @param opts Optional `AddEventListenerOptions` (e.g. `{ once: true }`).
+     *
+     * @example
+     * ```ts
+     * const player = new EVIWebAudioPlayer();
+     * player
+     *   .on('play', e => console.log('play', e.detail.id))
+     *   .on('stop', e => console.log('stop', e.detail.id))
+     *   .on('fft', e => console.log('fft', e.detail.fft))
+     *   .on('error', e => console.error('error', e.detail.message));
+     * ```
+     */
+    on<K extends keyof PlayerEventMap>(type: K, fn: (e: PlayerEventMap[K]) => void, opts?: AddEventListenerOptions): this;
+    /**
+     * Set up and start the player’s Web-Audio pipeline.
+     *
+     * - Creates a **suspended** `AudioContext`, loads the worklet processor, wires `AudioWorkletNode → (AnalyserNode?) → GainNode → destination`, then calls `resume()`.
+     * - Must be awaited inside a user-gesture (click/tap/key); later calls are no-ops.
+     * - If `fft.enabled` is `false` (or `fft` is omitted), no `AnalyserNode` or polling timer is created.
+     *
+     * **Safari quirk:** Safari locks an `AudioContext` to the device’s current sample rate at creation.
+     * If you open a Bluetooth headset mic afterward, the OS may switch to the 16 kHz HFP profile and down-sample playback, which sounds “telephone-y.”
+     * To avoid this, call `getUserMedia()` (or otherwise open audio input) **before** `init()`.
+     *
+     * @throws {Error} If the browser lacks `AudioWorklet` support, or if `AudioContext.resume()` is rejected (autoplay policy, device error).
+     */
+    init(): Promise<void>;
+    /**
+     * Queue one {@link AudioOutput} message for playback.
+     *
+     * Decodes the base-64 PCM data, sends it to the `AudioWorkletNode` for glitch-free, in-order playback, and emits `'play'` for the first chunk of a new stream.
+     *
+     * @param message The `AudioOutput` message received from EVI’s WebSocket.
+     *
+     * @see {@link https://dev.hume.ai/reference/empathic-voice-interface-evi/chat/chat#receive.Audio-Output.type API Reference}
+     */
+    enqueue(message: AudioOutput): Promise<void>;
+    /**
+     * Flush the worklet queue and output silence.
+     */
+    stop(): void;
+    /**
+     * Set the master gain ({@link volume}) to a value between `0` (_silent_) and `1` (_full volume_).
+     *
+     * - Clamps out-of-range values.
+     * - If called before {@link init}, stores volume for when `AudioContext` is created.
+     * - If currently {@link muted}, updates stored volume but keeps output silent until {@link unmute}.
+     *
+     * @param volume Desired gain; clamped to [0, 1].
+     */
+    setVolume(volume: number): void;
+    /**
+     * Mute output instantly by setting the gain to 0. Retains the last volume internally for later restore.
+     */
+    mute(): void;
+    /**
+     * Restore output gain to the last set volume (via setVolume).
+     */
+    unmute(): void;
+    /**
+     * Tear down all Web-Audio resources (worklet, analyser, gain, context) and reset state so {@link init} can be called again.
+     */
+    dispose(): void;
+}
+export {};
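A minimal usage sketch of the new player, based on the API declared above (and assuming the root package re-exports the wrapper, as the `wrapper/index.d.ts` change suggests):

```typescript
import { EVIWebAudioPlayer } from "hume";

const player = new EVIWebAudioPlayer({ volume: 0.8, fft: { enabled: true } });

// init() must be awaited inside a user gesture so the browser unlocks the AudioContext.
document.getElementById("start")?.addEventListener("click", async () => {
    await player.init();
});

player
    .on("play", (e) => console.log("playing clip", e.detail.id))
    .on("fft", (e) => console.log("bark bands", e.detail.fft)) // 24 values from the default Bark mapping
    .on("error", (e) => console.error(e.detail.message));

// Inside your EVI message handler, queue each audio_output chunk as it arrives:
// if (message.type === "audio_output") await player.enqueue(message);
```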
package/wrapper/EVIWebAudioPlayer.js
ADDED
@@ -0,0 +1,314 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
+    if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
+    if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
+    return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
+};
+var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) {
+    if (kind === "m") throw new TypeError("Private method is not writable");
+    if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter");
+    if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it");
+    return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;
+};
+var _EVIWebAudioPlayer_instances, _a, _EVIWebAudioPlayer_DEFAULT_WORKLET_URL, _EVIWebAudioPlayer_DEFAULT_FFT_SIZE, _EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL, _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES, _EVIWebAudioPlayer_BYTE_MAX, _EVIWebAudioPlayer_ctx, _EVIWebAudioPlayer_workletNode, _EVIWebAudioPlayer_analyserNode, _EVIWebAudioPlayer_gainNode, _EVIWebAudioPlayer_initialized, _EVIWebAudioPlayer_playing, _EVIWebAudioPlayer_muted, _EVIWebAudioPlayer_volume, _EVIWebAudioPlayer_fft, _EVIWebAudioPlayer_fftTimer, _EVIWebAudioPlayer_fftOptions, _EVIWebAudioPlayer_linearHzToBark, _EVIWebAudioPlayer_startAnalyserPollingIfEnabled, _EVIWebAudioPlayer_emitError;
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.EVIWebAudioPlayer = void 0;
+const convertBase64ToBlob_1 = require("./convertBase64ToBlob");
+/**
+ * A sequential, glitch-free Web-Audio player for **EVI** audio output.
+ *
+ * - **Decoding & playback**: base-64 PCM chunks feed an `AudioWorkletNode` and play in order, without gaps.
+ * - **One-time init**: await {@link init} in a user-gesture to build audio graph and unlock the browser’s
+ *   `AudioContext`; later calls are no-ops.
+ * - **Optional FFT**: `{ fft: { enabled: true } }` adds an `AnalyserNode` and emits `'fft'` events; omit to skip.
+ * - **Controls**: {@link setVolume}, {@link mute}, {@link unmute}, {@link stop}, {@link dispose}.
+ * - **Events**: listen for `'play'`, `'stop'`, `'fft'`, `'error'`.
+ */
+class EVIWebAudioPlayer extends EventTarget {
+    /** `true` while any clip is currently audible. */
+    get playing() {
+        return __classPrivateFieldGet(this, _EVIWebAudioPlayer_playing, "f");
+    }
+    /** `true` if gain is set to 0 via {@link mute}. */
+    get muted() {
+        return __classPrivateFieldGet(this, _EVIWebAudioPlayer_muted, "f");
+    }
+    /** Current output gain (0‑1). */
+    get volume() {
+        return __classPrivateFieldGet(this, _EVIWebAudioPlayer_volume, "f");
+    }
+    /** Most recent FFT frame (empty when analyser disabled). */
+    get fft() {
+        return __classPrivateFieldGet(this, _EVIWebAudioPlayer_fft, "f");
+    }
+    constructor(opts = {}) {
+        var _b, _c;
+        super();
+        _EVIWebAudioPlayer_instances.add(this);
+        this.opts = opts;
+        _EVIWebAudioPlayer_ctx.set(this, null);
+        _EVIWebAudioPlayer_workletNode.set(this, null);
+        _EVIWebAudioPlayer_analyserNode.set(this, null);
+        _EVIWebAudioPlayer_gainNode.set(this, null);
+        _EVIWebAudioPlayer_initialized.set(this, false);
+        _EVIWebAudioPlayer_playing.set(this, false);
+        _EVIWebAudioPlayer_muted.set(this, false);
+        _EVIWebAudioPlayer_volume.set(this, void 0);
+        _EVIWebAudioPlayer_fft.set(this, _a.emptyFft());
+        _EVIWebAudioPlayer_fftTimer.set(this, null);
+        _EVIWebAudioPlayer_fftOptions.set(this, null);
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_volume, (_b = opts.volume) !== null && _b !== void 0 ? _b : 1.0, "f");
+        // Resolve FFT options if enabled
+        if ((_c = opts.fft) === null || _c === void 0 ? void 0 : _c.enabled) {
+            const { size, interval, transform } = opts.fft;
+            __classPrivateFieldSet(this, _EVIWebAudioPlayer_fftOptions, {
+                size: size !== null && size !== void 0 ? size : __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_FFT_SIZE),
+                interval: interval !== null && interval !== void 0 ? interval : __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL),
+                transform: transform !== null && transform !== void 0 ? transform : __classPrivateFieldGet(_a, _a, "m", _EVIWebAudioPlayer_linearHzToBark),
+            }, "f");
+        }
+    }
+    /**
+     * Generate an empty FFT frame array.
+     * Useful as an initial or placeholder FFT dataset before any real analysis.
+     *
+     * @returns A number[] filled with zeros, length equal to the Bark band count (24).
+     */
+    static emptyFft() {
+        return Array(__classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES).length).fill(0);
+    }
+    /**
+     * Subscribes to a player event and returns `this` for chaining.
+     *
+     * @param type One of `'play'`, `'stop'`, `'fft'`, or `'error'`.
+     * @param fn Handler invoked with the event’s typed `detail` payload.
+     * @param opts Optional `AddEventListenerOptions` (e.g. `{ once: true }`).
+     *
+     * @example
+     * ```ts
+     * const player = new EVIWebAudioPlayer();
+     * player
+     *   .on('play', e => console.log('play', e.detail.id))
+     *   .on('stop', e => console.log('stop', e.detail.id))
+     *   .on('fft', e => console.log('fft', e.detail.fft))
+     *   .on('error', e => console.error('error', e.detail.message));
+     * ```
+     */
+    on(type, fn, opts) {
+        super.addEventListener(type, fn, opts);
+        return this;
+    }
+    /**
+     * Set up and start the player’s Web-Audio pipeline.
+     *
+     * - Creates a **suspended** `AudioContext`, loads the worklet processor, wires `AudioWorkletNode → (AnalyserNode?) → GainNode → destination`, then calls `resume()`.
+     * - Must be awaited inside a user-gesture (click/tap/key); later calls are no-ops.
+     * - If `fft.enabled` is `false` (or `fft` is omitted), no `AnalyserNode` or polling timer is created.
+     *
+     * **Safari quirk:** Safari locks an `AudioContext` to the device’s current sample rate at creation.
+     * If you open a Bluetooth headset mic afterward, the OS may switch to the 16 kHz HFP profile and down-sample playback, which sounds “telephone-y.”
+     * To avoid this, call `getUserMedia()` (or otherwise open audio input) **before** `init()`.
+     *
+     * @throws {Error} If the browser lacks `AudioWorklet` support, or if `AudioContext.resume()` is rejected (autoplay policy, device error).
+     */
+    init() {
+        return __awaiter(this, void 0, void 0, function* () {
+            if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_initialized, "f"))
+                return;
+            // Create the AudioContext
+            __classPrivateFieldSet(this, _EVIWebAudioPlayer_ctx, new AudioContext(), "f");
+            // Fail fast if AudioWorklet isn’t supported
+            if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").audioWorklet) {
+                const msg = "AudioWorklet is not supported in this browser";
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, msg);
+                throw new Error(msg);
+            }
+            try {
+                // Build GainNode
+                __classPrivateFieldSet(this, _EVIWebAudioPlayer_gainNode, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").createGain(), "f");
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f").gain.value = __classPrivateFieldGet(this, _EVIWebAudioPlayer_volume, "f");
+                // Build AnalyserNode (optional)
+                if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftOptions, "f")) {
+                    __classPrivateFieldSet(this, _EVIWebAudioPlayer_analyserNode, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").createAnalyser(), "f");
+                    __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f").fftSize = __classPrivateFieldGet(this, _EVIWebAudioPlayer_fftOptions, "f").size;
+                }
+                // Loads the AudioWorklet processor module.
+                yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").audioWorklet.addModule(__classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_WORKLET_URL));
+                // Build AudioWorkletNode
+                __classPrivateFieldSet(this, _EVIWebAudioPlayer_workletNode, new AudioWorkletNode(__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f"), "audio-processor"), "f");
+                // When the worklet posts { type: "ended" }, mark playback stopped and emit a `'stop'` event.
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f").port.onmessage = (e) => {
+                    if (e.data.type === "ended") {
+                        __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
+                        this.dispatchEvent(new CustomEvent("stop", { detail: { id: "stream" } }));
+                    }
+                };
+                // Audio graph nodes
+                const workletNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f"); // AudioWorkletNode (PCM processor)
+                const analyserNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f"); // Optional AnalyserNode (FFT)
+                const gainNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f"); // GainNode (volume control)
+                const destination = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").destination; // AudioDestinationNode (speakers)
+                // Analyser node is filtered out of audio graph if null (FFT disabled)
+                const audioGraph = [workletNode, analyserNode, gainNode, destination].filter(Boolean);
+                // Wire nodes: AudioWorkletNode → (AnalyserNode?) → GainNode → AudioDestinationNode
+                audioGraph.reduce((prev, next) => (prev.connect(next), next));
+                // If an analyser is configured, begin polling it at the resolved interval and dispatching `'fft'` events for each frame.
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_startAnalyserPollingIfEnabled).call(this);
+                // Resume the AudioContext now that the audio graph is fully wired.
+                // Browsers allow `resume()` only inside a user-gesture callback.
+                // Any rejection (autoplay policy, hardware issue, etc.) is caught by the outer catch-block below, which emits an 'error' event and re-throws.
+                yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").resume();
+                __classPrivateFieldSet(this, _EVIWebAudioPlayer_initialized, true, "f");
+            }
+            catch (err) {
+                const suffix = err instanceof Error ? `: ${err.message}` : String(err);
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Failed to initialize audio player${suffix}`);
+                throw err;
+            }
+        });
+    }
+    /**
+     * Queue one {@link AudioOutput} message for playback.
+     *
+     * Decodes the base-64 PCM data, sends it to the `AudioWorkletNode` for glitch-free, in-order playback, and emits `'play'` for the first chunk of a new stream.
+     *
+     * @param message The `AudioOutput` message received from EVI’s WebSocket.
+     *
+     * @see {@link https://dev.hume.ai/reference/empathic-voice-interface-evi/chat/chat#receive.Audio-Output.type API Reference}
+     */
+    enqueue(message) {
+        return __awaiter(this, void 0, void 0, function* () {
+            if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_initialized, "f") || !__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f")) {
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, "Audio player is not initialized");
+                return;
+            }
+            try {
+                const { data, id } = message;
+                const blob = (0, convertBase64ToBlob_1.convertBase64ToBlob)(data);
+                const buffer = yield blob.arrayBuffer();
+                const audio = yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").decodeAudioData(buffer);
+                const pcmData = audio.getChannelData(0);
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f").port.postMessage({ type: "audio", data: pcmData });
+                __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, true, "f");
+                this.dispatchEvent(new CustomEvent("play", { detail: { id } }));
+            }
+            catch (err) {
+                const msg = err instanceof Error ? err.message : "Unknown error";
+                __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Failed to queue clip: ${msg}`);
+            }
+        });
+    }
+    /**
+     * Flush the worklet queue and output silence.
+     */
+    stop() {
+        var _b;
+        // Clear buffered audio from the worklet queue
+        (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _b === void 0 ? void 0 : _b.port.postMessage({ type: "fadeAndClear" });
+        // Restart analyser polling so fft events continue after stopping or clearing the queue
+        __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_startAnalyserPollingIfEnabled).call(this);
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
+        this.dispatchEvent(new CustomEvent("stop", { detail: { id: "manual" } }));
+    }
+    /**
+     * Set the master gain ({@link volume}) to a value between `0` (_silent_) and `1` (_full volume_).
+     *
+     * - Clamps out-of-range values.
+     * - If called before {@link init}, stores volume for when `AudioContext` is created.
+     * - If currently {@link muted}, updates stored volume but keeps output silent until {@link unmute}.
+     *
+     * @param volume Desired gain; clamped to [0, 1].
+     */
+    setVolume(volume) {
+        const clampedVolume = Math.max(0, Math.min(volume, 1));
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_volume, clampedVolume, "f");
+        if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f") && __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f") && !__classPrivateFieldGet(this, _EVIWebAudioPlayer_muted, "f")) {
+            __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f").gain.setValueAtTime(clampedVolume, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").currentTime);
+        }
+    }
+    /**
+     * Mute output instantly by setting the gain to 0. Retains the last volume internally for later restore.
+     */
+    mute() {
+        if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f") || !__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f"))
+            return;
+        __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f").gain.setValueAtTime(0, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").currentTime);
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_muted, true, "f");
+    }
+    /**
+     * Restore output gain to the last set volume (via setVolume).
+     */
+    unmute() {
+        if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f") || !__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f"))
+            return;
+        __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f").gain.setValueAtTime(__classPrivateFieldGet(this, _EVIWebAudioPlayer_volume, "f"), __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").currentTime);
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_muted, false, "f");
+    }
+    /**
+     * Tear down all Web-Audio resources (worklet, analyser, gain, context) and reset state so {@link init} can be called again.
+     */
+    dispose() {
+        var _b, _c, _d, _e, _f, _g, _h;
+        if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f") != null) {
+            clearInterval(__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f"));
+            __classPrivateFieldSet(this, _EVIWebAudioPlayer_fftTimer, null, "f");
+        }
+        (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _b === void 0 ? void 0 : _b.port.postMessage({ type: "fadeAndClear" });
+        (_c = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _c === void 0 ? void 0 : _c.port.postMessage({ type: "end" });
+        (_d = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _d === void 0 ? void 0 : _d.port.close();
+        (_e = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _e === void 0 ? void 0 : _e.disconnect();
+        (_f = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f")) === null || _f === void 0 ? void 0 : _f.disconnect();
+        (_g = __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f")) === null || _g === void 0 ? void 0 : _g.disconnect();
+        (_h = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f")) === null || _h === void 0 ? void 0 : _h.close().catch(() => void 0);
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_initialized, false, "f");
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_fft, _a.emptyFft(), "f");
+    }
+}
+exports.EVIWebAudioPlayer = EVIWebAudioPlayer;
+_a = EVIWebAudioPlayer, _EVIWebAudioPlayer_ctx = new WeakMap(), _EVIWebAudioPlayer_workletNode = new WeakMap(), _EVIWebAudioPlayer_analyserNode = new WeakMap(), _EVIWebAudioPlayer_gainNode = new WeakMap(), _EVIWebAudioPlayer_initialized = new WeakMap(), _EVIWebAudioPlayer_playing = new WeakMap(), _EVIWebAudioPlayer_muted = new WeakMap(), _EVIWebAudioPlayer_volume = new WeakMap(), _EVIWebAudioPlayer_fft = new WeakMap(), _EVIWebAudioPlayer_fftTimer = new WeakMap(), _EVIWebAudioPlayer_fftOptions = new WeakMap(), _EVIWebAudioPlayer_instances = new WeakSet(), _EVIWebAudioPlayer_linearHzToBark = function _EVIWebAudioPlayer_linearHzToBark(linearData, sampleRate) {
+    const maxFrequency = sampleRate / 2;
+    const frequencyResolution = maxFrequency / linearData.length;
+    return __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES).map((barkFreq) => {
+        var _b;
+        const linearDataIndex = Math.round(barkFreq / frequencyResolution);
+        const magnitude = (_b = linearData[linearDataIndex]) !== null && _b !== void 0 ? _b : 0;
+        return (magnitude / __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_BYTE_MAX)) * 2;
+    });
+}, _EVIWebAudioPlayer_startAnalyserPollingIfEnabled = function _EVIWebAudioPlayer_startAnalyserPollingIfEnabled() {
+    if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftOptions, "f") || !__classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f"))
+        return;
+    if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f"))
+        clearInterval(__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f"));
+    const { interval, transform } = __classPrivateFieldGet(this, _EVIWebAudioPlayer_fftOptions, "f");
+    __classPrivateFieldSet(this, _EVIWebAudioPlayer_fftTimer, window.setInterval(() => {
+        const bins = new Uint8Array(__classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f").frequencyBinCount);
+        __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f").getByteFrequencyData(bins);
+        __classPrivateFieldSet(this, _EVIWebAudioPlayer_fft, transform(bins, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").sampleRate), "f");
+        this.dispatchEvent(new CustomEvent("fft", { detail: { fft: __classPrivateFieldGet(this, _EVIWebAudioPlayer_fft, "f") } }));
+    }, interval), "f");
+}, _EVIWebAudioPlayer_emitError = function _EVIWebAudioPlayer_emitError(message) {
+    this.dispatchEvent(new CustomEvent("error", { detail: { message } }));
+};
+/** Default URL of the `audio-worklet.js` processor module, fetched from Hume AI’s CDN. */
+_EVIWebAudioPlayer_DEFAULT_WORKLET_URL = { value: "https://storage.googleapis.com/evi-react-sdk-assets/audio-worklet-20250506.js" };
+/** Default FFT size (power-of-two). */
+_EVIWebAudioPlayer_DEFAULT_FFT_SIZE = { value: 2048 };
+/** Default analyser poll interval (16 ms). */
+_EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL = { value: 16 };
+/** Bark‑scale center frequencies (hz) used by the default transform. https://en.wikipedia.org/wiki/Bark_scale */
+_EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES = { value: [
+        50, 150, 250, 350, 450, 570, 700, 840, 1000, 1170, 1370, 1600, 1850, 2150, 2500, 2900, 3400, 4000, 4800, 5800,
+        7000, 8500, 10500, 13500,
+    ] };
+/** Max byte magnitude (255) returned by `AnalyserNode.getByteFrequencyData`. */
+_EVIWebAudioPlayer_BYTE_MAX = { value: 255 };
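The `transform` hook declared above lets a consumer replace the built-in Bark mapping. A sketch of a custom transform, assuming only the option shapes from `EVIWebAudioPlayerFFTOptions`:

```typescript
import { EVIWebAudioPlayer } from "hume";

// Averages the raw byte magnitudes into 12 equal-width buckets normalized to [0, 1],
// replacing the default 24-band Bark mapping.
const player = new EVIWebAudioPlayer({
    fft: {
        enabled: true,
        size: 1024,   // 512 frequency bins
        interval: 33, // poll at ~30 Hz
        transform: (bins: Uint8Array, _sampleRate: number): number[] => {
            const buckets = 12;
            const width = Math.floor(bins.length / buckets);
            return Array.from({ length: buckets }, (_, i) => {
                let sum = 0;
                for (let j = i * width; j < (i + 1) * width; j++) sum += bins[j];
                return sum / width / 255;
            });
        },
    },
});
```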
package/wrapper/fetchAccessToken.d.ts
CHANGED
@@ -2,8 +2,6 @@
  * Fetches a new access token from the Hume API using the provided API key and Secret key.
  *
  * @param args - The arguments for the request.
- * @returns Promise that resolves to the new access token or null.
- * @throws If the base64 encoding fails.
  * @example
  * ```typescript
  * async function getToken() {
@@ -20,4 +18,4 @@ export declare const fetchAccessToken: ({ apiKey, secretKey, host, }: {
     apiKey: string;
     secretKey: string;
     host?: string;
-}) => Promise<string
+}) => Promise<string>;
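Per the updated declaration, `fetchAccessToken` now resolves to a plain string rather than advertising a possible `null` return. A minimal usage sketch (server-side, with credentials taken from the environment):

```typescript
import { fetchAccessToken } from "hume";

// Resolves to the access token string; the "or null" return was removed from the declaration.
const accessToken = await fetchAccessToken({
    apiKey: process.env.HUME_API_KEY!,
    secretKey: process.env.HUME_SECRET_KEY!,
});
```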