hume 0.13.3 → 0.13.5
This diff reflects the changes between publicly available package versions as published to their public registry, and is provided for informational purposes only.
- package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +1 -5
- package/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/SessionSettings.d.ts +9 -7
- package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
- package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +3 -3
- package/api/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/api/resources/tts/types/PublishTts.d.ts +23 -0
- package/api/resources/tts/types/PublishTts.js +5 -0
- package/api/resources/tts/types/SnippetAudioChunk.d.ts +6 -1
- package/api/resources/tts/types/index.d.ts +7 -6
- package/api/resources/tts/types/index.js +7 -6
- package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +1 -5
- package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +9 -7
- package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/dist/api/resources/tts/types/PublishTts.d.ts +23 -0
- package/dist/api/resources/tts/types/PublishTts.js +5 -0
- package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +6 -1
- package/dist/api/resources/tts/types/index.d.ts +7 -6
- package/dist/api/resources/tts/types/index.js +7 -6
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +1 -0
- package/dist/serialization/resources/tts/types/PublishTts.d.ts +19 -0
- package/dist/serialization/resources/tts/types/PublishTts.js +50 -0
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +2 -1
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +2 -1
- package/dist/serialization/resources/tts/types/index.d.ts +7 -6
- package/dist/serialization/resources/tts/types/index.js +7 -6
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/dist/wrapper/EVIWebAudioPlayer.d.ts +6 -7
- package/dist/wrapper/EVIWebAudioPlayer.js +237 -73
- package/dist/wrapper/SilenceFiller.d.ts +85 -0
- package/dist/wrapper/SilenceFiller.js +203 -0
- package/dist/wrapper/collate.d.ts +36 -0
- package/dist/wrapper/collate.js +126 -0
- package/dist/wrapper/convertFrequencyScale.d.ts +1 -0
- package/dist/wrapper/convertFrequencyScale.js +28 -0
- package/dist/wrapper/generateEmptyFft.d.ts +1 -0
- package/dist/wrapper/generateEmptyFft.js +6 -0
- package/dist/wrapper/index.d.ts +2 -0
- package/dist/wrapper/index.js +5 -1
- package/package.json +2 -1
- package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +1 -0
- package/serialization/resources/empathicVoice/types/SessionSettings.js +1 -0
- package/serialization/resources/tts/types/PublishTts.d.ts +19 -0
- package/serialization/resources/tts/types/PublishTts.js +50 -0
- package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +2 -1
- package/serialization/resources/tts/types/SnippetAudioChunk.js +2 -1
- package/serialization/resources/tts/types/index.d.ts +7 -6
- package/serialization/resources/tts/types/index.js +7 -6
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/wrapper/EVIWebAudioPlayer.d.ts +6 -7
- package/wrapper/EVIWebAudioPlayer.js +237 -73
- package/wrapper/SilenceFiller.d.ts +85 -0
- package/wrapper/SilenceFiller.js +203 -0
- package/wrapper/collate.d.ts +36 -0
- package/wrapper/collate.js +126 -0
- package/wrapper/convertFrequencyScale.d.ts +1 -0
- package/wrapper/convertFrequencyScale.js +28 -0
- package/wrapper/generateEmptyFft.d.ts +1 -0
- package/wrapper/generateEmptyFft.js +6 -0
- package/wrapper/index.d.ts +2 -0
- package/wrapper/index.js +5 -1
- package/.mock/definition/api.yml +0 -12
- package/.mock/definition/empathic-voice/__package__.yml +0 -2973
- package/.mock/definition/empathic-voice/chat.yml +0 -175
- package/.mock/definition/empathic-voice/chatGroups.yml +0 -627
- package/.mock/definition/empathic-voice/chatWebhooks.yml +0 -30
- package/.mock/definition/empathic-voice/chats.yml +0 -506
- package/.mock/definition/empathic-voice/configs.yml +0 -852
- package/.mock/definition/empathic-voice/prompts.yml +0 -558
- package/.mock/definition/empathic-voice/tools.yml +0 -626
- package/.mock/definition/expression-measurement/__package__.yml +0 -1
- package/.mock/definition/expression-measurement/batch/__package__.yml +0 -1803
- package/.mock/definition/expression-measurement/stream/__package__.yml +0 -113
- package/.mock/definition/expression-measurement/stream/stream.yml +0 -438
- package/.mock/definition/tts/__package__.yml +0 -660
- package/.mock/definition/tts/voices.yml +0 -143
- package/.mock/fern.config.json +0 -4
@@ -1,7 +1,13 @@
+export * from "./PublishTts";
+export * from "./PostedUtteranceVoiceWithId";
+export * from "./PostedUtteranceVoiceWithName";
+export * from "./VoiceProvider";
+export * from "./PostedUtteranceVoice";
+export * from "./AudioFormatType";
+export * from "./SnippetAudioChunk";
 export * from "./PostedContextWithGenerationId";
 export * from "./PostedContextWithUtterances";
 export * from "./AudioEncoding";
-export * from "./AudioFormatType";
 export * from "./ReturnGeneration";
 export * from "./HttpValidationError";
 export * from "./FormatMp3";
@@ -12,14 +18,9 @@ export * from "./ReturnTts";
 export * from "./ReturnVoice";
 export * from "./FormatPcm";
 export * from "./Snippet";
-export * from "./SnippetAudioChunk";
 export * from "./PostedUtterance";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
-export * from "./PostedUtteranceVoiceWithId";
-export * from "./PostedUtteranceVoiceWithName";
-export * from "./VoiceProvider";
-export * from "./PostedUtteranceVoice";
 export * from "./FormatWav";
 export * from "./ErrorResponse";
 export * from "./ReturnPagedVoices";
@@ -14,10 +14,16 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./PublishTts"), exports);
+__exportStar(require("./PostedUtteranceVoiceWithId"), exports);
+__exportStar(require("./PostedUtteranceVoiceWithName"), exports);
+__exportStar(require("./VoiceProvider"), exports);
+__exportStar(require("./PostedUtteranceVoice"), exports);
+__exportStar(require("./AudioFormatType"), exports);
+__exportStar(require("./SnippetAudioChunk"), exports);
 __exportStar(require("./PostedContextWithGenerationId"), exports);
 __exportStar(require("./PostedContextWithUtterances"), exports);
 __exportStar(require("./AudioEncoding"), exports);
-__exportStar(require("./AudioFormatType"), exports);
 __exportStar(require("./ReturnGeneration"), exports);
 __exportStar(require("./HttpValidationError"), exports);
 __exportStar(require("./FormatMp3"), exports);
@@ -28,14 +34,9 @@ __exportStar(require("./ReturnTts"), exports);
 __exportStar(require("./ReturnVoice"), exports);
 __exportStar(require("./FormatPcm"), exports);
 __exportStar(require("./Snippet"), exports);
-__exportStar(require("./SnippetAudioChunk"), exports);
 __exportStar(require("./PostedUtterance"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
-__exportStar(require("./PostedUtteranceVoiceWithId"), exports);
-__exportStar(require("./PostedUtteranceVoiceWithName"), exports);
-__exportStar(require("./VoiceProvider"), exports);
-__exportStar(require("./PostedUtteranceVoice"), exports);
 __exportStar(require("./FormatWav"), exports);
 __exportStar(require("./ErrorResponse"), exports);
 __exportStar(require("./ReturnPagedVoices"), exports);
@@ -54,4 +54,5 @@ exports.SessionSettings = core.serialization.object({
     builtinTools: core.serialization.property("builtin_tools", core.serialization.list(BuiltinToolConfig_1.BuiltinToolConfig).optional()),
     metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
     variables: core.serialization.record(core.serialization.string(), SessionSettingsVariablesValue_1.SessionSettingsVariablesValue).optional(),
+    voiceId: core.serialization.property("voice_id", core.serialization.string().optional()),
 });
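The serializer above maps a new camelCase `voiceId` property to the `voice_id` wire field on `SessionSettings`. A minimal sketch of both sides of that mapping follows; the `"session_settings"` discriminator and the idea of sending this as an EVI session-settings message are assumptions for illustration, and only the `voice_id`/`voiceId` mapping comes from this diff.

```ts
// Sketch only. The "session_settings" discriminator and any surrounding socket send
// are assumptions; the voice_id <-> voiceId mapping is what this release adds.
const wirePayload = {
  type: "session_settings",   // assumed message discriminator
  voice_id: "YOUR_VOICE_ID",  // new in 0.13.5, serialized from the camelCase `voiceId`
};

// With the SDK's camelCase types, the same setting is written as:
const sdkShape = { voiceId: "YOUR_VOICE_ID" };
console.log(wirePayload, sdkShape);
```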
@@ -0,0 +1,19 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+import * as serializers from "../../../index";
+import * as Hume from "../../../../api/index";
+import * as core from "../../../../core";
+import { PostedUtteranceVoice } from "./PostedUtteranceVoice";
+export declare const PublishTts: core.serialization.ObjectSchema<serializers.tts.PublishTts.Raw, Hume.tts.PublishTts>;
+export declare namespace PublishTts {
+    interface Raw {
+        text?: string | null;
+        description?: string | null;
+        voice?: PostedUtteranceVoice.Raw | null;
+        speed?: number | null;
+        trailing_silence?: number | null;
+        flush?: boolean | null;
+        close?: boolean | null;
+    }
+}
@@ -0,0 +1,50 @@
+"use strict";
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+    var ownKeys = function(o) {
+        ownKeys = Object.getOwnPropertyNames || function (o) {
+            var ar = [];
+            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+            return ar;
+        };
+        return ownKeys(o);
+    };
+    return function (mod) {
+        if (mod && mod.__esModule) return mod;
+        var result = {};
+        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+        __setModuleDefault(result, mod);
+        return result;
+    };
+})();
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.PublishTts = void 0;
+const core = __importStar(require("../../../../core"));
+const PostedUtteranceVoice_1 = require("./PostedUtteranceVoice");
+exports.PublishTts = core.serialization.object({
+    text: core.serialization.string().optional(),
+    description: core.serialization.string().optional(),
+    voice: PostedUtteranceVoice_1.PostedUtteranceVoice.optional(),
+    speed: core.serialization.number().optional(),
+    trailingSilence: core.serialization.property("trailing_silence", core.serialization.number().optional()),
+    flush: core.serialization.boolean().optional(),
+    close: core.serialization.boolean().optional(),
+});
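`PublishTts` is the new outbound message schema for streaming TTS input. Below is a minimal sketch of constructing one, using only the fields declared in the schema above; the root `Hume` type import is the conventional Fern export and is assumed here, and how the message is actually sent is not shown in this diff.

```ts
import { Hume } from "hume";

// Every field below is optional per the Raw interface above; trailingSilence is
// serialized to "trailing_silence" on the wire.
const publish: Hume.tts.PublishTts = {
  text: "Hello from the streaming TTS input.",
  description: "calm, measured narrator", // natural-language delivery description
  speed: 1.0,
  trailingSilence: 0.35,
  flush: true,  // control flags declared in the schema above
  close: false,
};

console.log(publish);
```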
@@ -9,6 +9,7 @@ import { Snippet } from "./Snippet";
 export declare const SnippetAudioChunk: core.serialization.ObjectSchema<serializers.tts.SnippetAudioChunk.Raw, Hume.tts.SnippetAudioChunk>;
 export declare namespace SnippetAudioChunk {
     interface Raw {
+        request_id: string;
         generation_id: string;
         snippet_id: string;
         text: string;
@@ -18,6 +19,6 @@ export declare namespace SnippetAudioChunk {
         audio_format: AudioFormatType.Raw;
         is_last_chunk: boolean;
         utterance_index?: number | null;
-        snippet: Snippet.Raw;
+        snippet?: Snippet.Raw | null;
     }
 }
@@ -41,6 +41,7 @@ const core = __importStar(require("../../../../core"));
 const AudioFormatType_1 = require("./AudioFormatType");
 const Snippet_1 = require("./Snippet");
 exports.SnippetAudioChunk = core.serialization.object({
+    requestId: core.serialization.property("request_id", core.serialization.string()),
     generationId: core.serialization.property("generation_id", core.serialization.string()),
     snippetId: core.serialization.property("snippet_id", core.serialization.string()),
     text: core.serialization.string(),
@@ -50,5 +51,5 @@ exports.SnippetAudioChunk = core.serialization.object({
     audioFormat: core.serialization.property("audio_format", AudioFormatType_1.AudioFormatType),
     isLastChunk: core.serialization.property("is_last_chunk", core.serialization.boolean()),
     utteranceIndex: core.serialization.property("utterance_index", core.serialization.number().optional()),
-    snippet: Snippet_1.Snippet,
+    snippet: Snippet_1.Snippet.optional(),
 });
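`SnippetAudioChunk` now carries a required `request_id`, and its `snippet` field became optional, so consumers should guard against a missing snippet. A minimal handler sketch using only fields visible in the schema above; the callback framing is illustrative, not an API from this diff.

```ts
import { Hume } from "hume";

// Illustrative handler; only the field names come from the SnippetAudioChunk schema above.
function handleChunk(chunk: Hume.tts.SnippetAudioChunk): void {
  // New in 0.13.5: each chunk is tagged with the request that produced it.
  console.log(`request ${chunk.requestId}, generation ${chunk.generationId}`);
  console.log(`text: ${chunk.text} (format: ${chunk.audioFormat})`);

  // `snippet` is optional as of this release, so check before dereferencing it.
  if (chunk.snippet !== undefined) {
    console.log("snippet metadata attached to this chunk");
  }

  if (chunk.isLastChunk) {
    console.log("last chunk of this generation");
  }
}
```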
@@ -1,7 +1,13 @@
+export * from "./PublishTts";
+export * from "./PostedUtteranceVoiceWithId";
+export * from "./PostedUtteranceVoiceWithName";
+export * from "./VoiceProvider";
+export * from "./PostedUtteranceVoice";
+export * from "./AudioFormatType";
+export * from "./SnippetAudioChunk";
 export * from "./PostedContextWithGenerationId";
 export * from "./PostedContextWithUtterances";
 export * from "./AudioEncoding";
-export * from "./AudioFormatType";
 export * from "./ReturnGeneration";
 export * from "./HttpValidationError";
 export * from "./FormatMp3";
@@ -12,14 +18,9 @@ export * from "./ReturnTts";
 export * from "./ReturnVoice";
 export * from "./FormatPcm";
 export * from "./Snippet";
-export * from "./SnippetAudioChunk";
 export * from "./PostedUtterance";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
-export * from "./PostedUtteranceVoiceWithId";
-export * from "./PostedUtteranceVoiceWithName";
-export * from "./VoiceProvider";
-export * from "./PostedUtteranceVoice";
 export * from "./FormatWav";
 export * from "./ErrorResponse";
 export * from "./ReturnPagedVoices";
@@ -14,10 +14,16 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./PublishTts"), exports);
+__exportStar(require("./PostedUtteranceVoiceWithId"), exports);
+__exportStar(require("./PostedUtteranceVoiceWithName"), exports);
+__exportStar(require("./VoiceProvider"), exports);
+__exportStar(require("./PostedUtteranceVoice"), exports);
+__exportStar(require("./AudioFormatType"), exports);
+__exportStar(require("./SnippetAudioChunk"), exports);
 __exportStar(require("./PostedContextWithGenerationId"), exports);
 __exportStar(require("./PostedContextWithUtterances"), exports);
 __exportStar(require("./AudioEncoding"), exports);
-__exportStar(require("./AudioFormatType"), exports);
 __exportStar(require("./ReturnGeneration"), exports);
 __exportStar(require("./HttpValidationError"), exports);
 __exportStar(require("./FormatMp3"), exports);
@@ -28,14 +34,9 @@ __exportStar(require("./ReturnTts"), exports);
 __exportStar(require("./ReturnVoice"), exports);
 __exportStar(require("./FormatPcm"), exports);
 __exportStar(require("./Snippet"), exports);
-__exportStar(require("./SnippetAudioChunk"), exports);
 __exportStar(require("./PostedUtterance"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
-__exportStar(require("./PostedUtteranceVoiceWithId"), exports);
-__exportStar(require("./PostedUtteranceVoiceWithName"), exports);
-__exportStar(require("./VoiceProvider"), exports);
-__exportStar(require("./PostedUtteranceVoice"), exports);
 __exportStar(require("./FormatWav"), exports);
 __exportStar(require("./ErrorResponse"), exports);
 __exportStar(require("./ReturnPagedVoices"), exports);
package/dist/version.d.ts
CHANGED
@@ -1 +1 @@
-export declare const SDK_VERSION = "0.13.3";
+export declare const SDK_VERSION = "0.13.5";
package/dist/version.js
CHANGED
@@ -12,6 +12,12 @@ export interface EVIWebAudioPlayerOptions {
      * @default 1
      */
     volume?: number;
+    /**
+     * Disable AudioWorklet Mode and use Regular Buffer Mode instead.
+     * Regular Buffer Mode falls back to AudioBufferSourceNode in the main thread if worklets aren't available.
+     * @default false (AudioWorklet Mode enabled)
+     */
+    disableAudioWorklet?: boolean;
     /**
      * Real-time FFT (frequency-domain) settings **only** for visualization.
      *
@@ -95,13 +101,6 @@ export declare class EVIWebAudioPlayer extends EventTarget {
     /** Most recent FFT frame (empty when analyser disabled). */
     get fft(): number[];
     constructor(opts?: EVIWebAudioPlayerOptions);
-    /**
-     * Generate an empty FFT frame array.
-     * Useful as an initial or placeholder FFT dataset before any real analysis.
-     *
-     * @returns A number[] filled with zeros, length equal to the Bark band count (24).
-     */
-    static emptyFft(): number[];
     /**
      * * Subscribes to a player event and returns `this` for chaining.
      *
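`EVIWebAudioPlayerOptions` gains a `disableAudioWorklet` flag, and the `static emptyFft()` helper is removed (the file list shows a new `wrapper/generateEmptyFft` module that appears to take its place). A minimal construction sketch using only the options shown above; the root import path is assumed from the wrapper re-exports listed in this diff.

```ts
import { EVIWebAudioPlayer } from "hume";

// Options taken from EVIWebAudioPlayerOptions above.
const player = new EVIWebAudioPlayer({
  volume: 0.8,
  // New in 0.13.5: opt out of AudioWorklet Mode and fall back to Regular Buffer Mode
  // (AudioBufferSourceNode on the main thread).
  disableAudioWorklet: true,
});

// EVIWebAudioPlayer.emptyFft() no longer exists in this release; the player's `fft`
// getter still exposes the most recent FFT frame.
console.log(player.fft);
```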