hume 0.13.6 → 0.13.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/empathic-voice/__package__.yml +669 -657
- package/.mock/definition/empathic-voice/chat.yml +27 -27
- package/.mock/definition/empathic-voice/chatWebhooks.yml +2 -2
- package/.mock/definition/tts/__package__.yml +93 -88
- package/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
- package/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/api/resources/empathicVoice/types/Context.d.ts +6 -6
- package/api/resources/empathicVoice/types/LanguageModelType.d.ts +7 -1
- package/api/resources/empathicVoice/types/LanguageModelType.js +6 -0
- package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -2
- package/api/resources/empathicVoice/types/SessionSettings.d.ts +27 -27
- package/api/resources/empathicVoice/types/Tool.d.ts +6 -6
- package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
- package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
- package/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
- package/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
- package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/api/resources/empathicVoice/types/index.d.ts +16 -16
- package/api/resources/empathicVoice/types/index.js +16 -16
- package/api/resources/index.d.ts +1 -1
- package/api/resources/index.js +2 -2
- package/api/resources/tts/client/Client.d.ts +6 -6
- package/api/resources/tts/client/Client.js +35 -35
- package/api/resources/tts/types/OctaveVersion.d.ts +4 -0
- package/api/resources/tts/types/OctaveVersion.js +5 -0
- package/api/resources/tts/types/PostedTts.d.ts +9 -8
- package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/api/resources/tts/types/Snippet.d.ts +6 -6
- package/api/resources/tts/types/SnippetAudioChunk.d.ts +12 -11
- package/api/resources/tts/types/index.d.ts +1 -0
- package/api/resources/tts/types/index.js +1 -0
- package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/Context.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +7 -1
- package/dist/api/resources/empathicVoice/types/LanguageModelType.js +6 -0
- package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +27 -27
- package/dist/api/resources/empathicVoice/types/Tool.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
- package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/index.d.ts +16 -16
- package/dist/api/resources/empathicVoice/types/index.js +16 -16
- package/dist/api/resources/index.d.ts +1 -1
- package/dist/api/resources/index.js +2 -2
- package/dist/api/resources/tts/client/Client.d.ts +6 -6
- package/dist/api/resources/tts/client/Client.js +35 -35
- package/dist/api/resources/tts/types/OctaveVersion.d.ts +4 -0
- package/dist/api/resources/tts/types/OctaveVersion.js +5 -0
- package/dist/api/resources/tts/types/PostedTts.d.ts +9 -8
- package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
- package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +12 -11
- package/dist/api/resources/tts/types/index.d.ts +1 -0
- package/dist/api/resources/tts/types/index.js +1 -0
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +6 -0
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -16
- package/dist/serialization/resources/empathicVoice/types/index.js +16 -16
- package/dist/serialization/resources/index.d.ts +1 -1
- package/dist/serialization/resources/index.js +2 -2
- package/dist/serialization/resources/tts/types/OctaveVersion.d.ts +10 -0
- package/dist/serialization/resources/tts/types/OctaveVersion.js +41 -0
- package/dist/serialization/resources/tts/types/PostedTts.d.ts +5 -3
- package/dist/serialization/resources/tts/types/PostedTts.js +5 -3
- package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/dist/serialization/resources/tts/types/Snippet.js +3 -3
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +7 -6
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +7 -6
- package/dist/serialization/resources/tts/types/index.d.ts +1 -0
- package/dist/serialization/resources/tts/types/index.js +1 -0
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/dist/wrapper/index.d.ts +1 -1
- package/dist/wrapper/index.js +53 -3
- package/package.json +1 -1
- package/reference.md +706 -706
- package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.js +6 -0
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnConfig.js +1 -1
- package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/serialization/resources/empathicVoice/types/index.d.ts +16 -16
- package/serialization/resources/empathicVoice/types/index.js +16 -16
- package/serialization/resources/index.d.ts +1 -1
- package/serialization/resources/index.js +2 -2
- package/serialization/resources/tts/types/OctaveVersion.d.ts +10 -0
- package/serialization/resources/tts/types/OctaveVersion.js +41 -0
- package/serialization/resources/tts/types/PostedTts.d.ts +5 -3
- package/serialization/resources/tts/types/PostedTts.js +5 -3
- package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/serialization/resources/tts/types/Snippet.js +3 -3
- package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +7 -6
- package/serialization/resources/tts/types/SnippetAudioChunk.js +7 -6
- package/serialization/resources/tts/types/index.d.ts +1 -0
- package/serialization/resources/tts/types/index.js +1 -0
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/wrapper/index.d.ts +1 -1
- package/wrapper/index.js +53 -3
package/api/resources/tts/client/Client.d.ts (same hunks in the package/dist copy)

@@ -70,6 +70,12 @@ export declare class Tts {
      */
    synthesizeFile(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
    private __synthesizeFile;
+    /**
+     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
+     * @throws {@link Hume.tts.UnprocessableEntityError}
+     */
+    synthesizeFileStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
+    private __synthesizeFileStreaming;
    /**
     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
     *

@@ -77,12 +83,6 @@ export declare class Tts {
     */
    synthesizeJsonStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<core.Stream<Hume.tts.SnippetAudioChunk>>;
    private __synthesizeJsonStreaming;
-    /**
-     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     * @throws {@link Hume.tts.UnprocessableEntityError}
-     */
-    synthesizeFileStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
-    private __synthesizeFileStreaming;
    protected _getCustomAuthorizationHeaders(): Promise<{
        "X-Hume-Api-Key": string | undefined;
    }>;
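The two declaration hunks above are a reorder, not new surface area: synthesizeFileStreaming moves ahead of synthesizeJsonStreaming, and both methods exist in 0.13.6 and 0.13.8. A minimal sketch of the file-streaming call (the client setup, env var, and output filename are illustrative assumptions, not part of this diff); the method resolves to a Node stream.Readable of raw audio bytes:

import * as fs from "node:fs";
import { pipeline } from "node:stream/promises";
import { HumeClient } from "hume";

const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function saveSpeech(): Promise<void> {
    // Resolves to a Node Readable that emits raw audio bytes.
    const audio = await hume.tts.synthesizeFileStreaming({
        utterances: [{ text: "Hello from the streaming endpoint." }],
    });
    // Pipe the bytes straight to disk; ".mp3" assumes the default output format.
    await pipeline(audio, fs.createWriteStream("speech.mp3"));
}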
package/api/resources/tts/client/Client.js (same hunks in the package/dist copy)

@@ -218,47 +218,28 @@ class Tts {
    }
    /**
     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     *
-     * The response is a stream of JSON objects including audio encoded in base64.
+     * @throws {@link Hume.tts.UnprocessableEntityError}
     */
-    synthesizeJsonStreaming(request, requestOptions) {
-        return core.HttpResponsePromise.fromPromise(this.__synthesizeJsonStreaming(request, requestOptions));
+    synthesizeFileStreaming(request, requestOptions) {
+        return core.HttpResponsePromise.fromPromise(this.__synthesizeFileStreaming(request, requestOptions));
    }
-    __synthesizeJsonStreaming(request, requestOptions) {
+    __synthesizeFileStreaming(request, requestOptions) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b, _c, _d;
            const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
-                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/json"),
+                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/file"),
                method: "POST",
                headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                contentType: "application/json",
                requestType: "json",
                body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
-                responseType: "sse",
+                responseType: "streaming",
                timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
            });
            if (_response.ok) {
-                return {
-                    data: new core.Stream({
-                        stream: _response.body,
-                        parse: (data) => __awaiter(this, void 0, void 0, function* () {
-                            return serializers.tts.SnippetAudioChunk.parseOrThrow(data, {
-                                unrecognizedObjectKeys: "passthrough",
-                                allowUnrecognizedUnionMembers: true,
-                                allowUnrecognizedEnumValues: true,
-                                breadcrumbsPrefix: ["response"],
-                            });
-                        }),
-                        signal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
-                        eventShape: {
-                            type: "json",
-                            messageTerminator: "\n",
-                        },
-                    }),
-                    rawResponse: _response.rawResponse,
-                };
+                return { data: _response.body, rawResponse: _response.rawResponse };
            }
            if (_response.error.reason === "status-code") {
                switch (_response.error.statusCode) {

@@ -285,7 +266,7 @@ class Tts {
                        rawResponse: _response.rawResponse,
                    });
                case "timeout":
-                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/json.");
+                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/file.");
                case "unknown":
                    throw new errors.HumeError({
                        message: _response.error.errorMessage,

@@ -296,28 +277,47 @@ class Tts {
    }
    /**
     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     * @throws {@link Hume.tts.UnprocessableEntityError}
+     *
+     * The response is a stream of JSON objects including audio encoded in base64.
     */
-    synthesizeFileStreaming(request, requestOptions) {
-        return core.HttpResponsePromise.fromPromise(this.__synthesizeFileStreaming(request, requestOptions));
+    synthesizeJsonStreaming(request, requestOptions) {
+        return core.HttpResponsePromise.fromPromise(this.__synthesizeJsonStreaming(request, requestOptions));
    }
-    __synthesizeFileStreaming(request, requestOptions) {
+    __synthesizeJsonStreaming(request, requestOptions) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b, _c, _d;
            const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
-                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/file"),
+                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/json"),
                method: "POST",
                headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                contentType: "application/json",
                requestType: "json",
                body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
-                responseType: "streaming",
+                responseType: "sse",
                timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
            });
            if (_response.ok) {
-                return { data: _response.body, rawResponse: _response.rawResponse };
+                return {
+                    data: new core.Stream({
+                        stream: _response.body,
+                        parse: (data) => __awaiter(this, void 0, void 0, function* () {
+                            return serializers.tts.SnippetAudioChunk.parseOrThrow(data, {
+                                unrecognizedObjectKeys: "passthrough",
+                                allowUnrecognizedUnionMembers: true,
+                                allowUnrecognizedEnumValues: true,
+                                breadcrumbsPrefix: ["response"],
+                            });
+                        }),
+                        signal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
+                        eventShape: {
+                            type: "json",
+                            messageTerminator: "\n",
+                        },
+                    }),
+                    rawResponse: _response.rawResponse,
+                };
            }
            if (_response.error.reason === "status-code") {
                switch (_response.error.statusCode) {

@@ -344,7 +344,7 @@ class Tts {
                        rawResponse: _response.rawResponse,
                    });
                case "timeout":
-                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/file.");
+                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/json.");
                case "unknown":
                    throw new errors.HumeError({
                        message: _response.error.errorMessage,
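Net effect of these four hunks: the two streaming methods swap positions, and each keeps its own transport settings. The file endpoint posts to v0/tts/stream/file with responseType "streaming" and returns the body unparsed, while the JSON endpoint posts to v0/tts/stream/json with responseType "sse" and wraps the body in a core.Stream that parses each newline-terminated JSON message into a SnippetAudioChunk. A consumption sketch (client setup assumed as before; core.Stream is async-iterable in Fern-generated SDKs):

import { HumeClient } from "hume";

const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function logChunks(): Promise<void> {
    const chunks = await hume.tts.synthesizeJsonStreaming({
        utterances: [{ text: "Streaming JSON chunks." }],
    });
    for await (const chunk of chunks) {
        // chunk.audio is base64-encoded per the SnippetAudioChunk doc comments below.
        const bytes = Buffer.from(chunk.audio, "base64");
        console.log(chunk.chunkIndex, chunk.isLastChunk, bytes.length);
    }
}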
package/api/resources/tts/types/PostedTts.d.ts (same hunks in the package/dist copy)

@@ -5,16 +5,10 @@ import * as Hume from "../../../index";
 export interface PostedTts {
    /** Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. */
    context?: Hume.tts.PostedContext;
-    /**
-     * A list of **Utterances** to be converted to speech output.
-     *
-     * An **Utterance** is a unit of input for [Octave](/docs/text-to-speech-tts/overview), and includes input `text`, an optional `description` to serve as the prompt for how the speech should be delivered, an optional `voice` specification, and additional controls to guide delivery for `speed` and `trailing_silence`.
-     */
-    utterances: Hume.tts.PostedUtterance[];
-    /** Number of generations of the audio to produce. */
-    numGenerations?: number;
    /** Specifies the output audio file format. */
    format?: Hume.tts.Format;
+    /** Number of generations of the audio to produce. */
+    numGenerations?: number;
    /**
     * Controls how audio output is segmented in the response.
     *

@@ -27,6 +21,13 @@ export interface PostedTts {
    splitUtterances?: boolean;
    /** If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). */
    stripHeaders?: boolean;
+    /**
+     * A list of **Utterances** to be converted to speech output.
+     *
+     * An **Utterance** is a unit of input for [Octave](/docs/text-to-speech-tts/overview), and includes input `text`, an optional `description` to serve as the prompt for how the speech should be delivered, an optional `voice` specification, and additional controls to guide delivery for `speed` and `trailing_silence`.
+     */
+    utterances: Hume.tts.PostedUtterance[];
+    version?: Hume.tts.OctaveVersion;
    /**
     * Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode).
     * - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode.
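The substantive change to PostedTts is the new optional version field, typed as the added OctaveVersion; the utterances and numGenerations moves are alphabetical reordering. A request-shape sketch (the "2" literal is an assumption; OctaveVersion's members are not shown in this diff):

import { Hume } from "hume";

const request: Hume.tts.PostedTts = {
    utterances: [{ text: "One utterance." }],
    numGenerations: 1,
    // New in 0.13.8; "2" is an assumed OctaveVersion literal.
    version: "2" as Hume.tts.OctaveVersion,
};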
package/api/resources/tts/types/PostedUtterance.d.ts (same hunks in the package/dist copy)

@@ -3,8 +3,6 @@
 */
 import * as Hume from "../../../index";
 export interface PostedUtterance {
-    /** The input text to be synthesized into speech. */
-    text: string;
    /**
     * Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent.
     *

@@ -13,14 +11,16 @@ export interface PostedUtterance {
     * - **Voice not specified**: the description will serve as a voice prompt for generating a voice. See our [prompting guide](/docs/text-to-speech-tts/prompting) for design tips.
     */
    description?: string;
+    /** Speed multiplier for the synthesized speech. Extreme values below 0.75 and above 1.5 may sometimes cause instability to the generated output. */
+    speed?: number;
+    /** The input text to be synthesized into speech. */
+    text: string;
+    /** Duration of trailing silence (in seconds) to add to this utterance */
+    trailingSilence?: number;
    /**
     * The `name` or `id` associated with a **Voice** from the **Voice Library** to be used as the speaker for this and all subsequent `utterances`, until the `voice` field is updated again.
     *
     * See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
     */
    voice?: Hume.tts.PostedUtteranceVoice;
-    /** Speed multiplier for the synthesized speech. Extreme values below 0.75 and above 1.5 may sometimes cause instability to the generated output. */
-    speed?: number;
-    /** Duration of trailing silence (in seconds) to add to this utterance */
-    trailingSilence?: number;
 }
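PostedUtterance changes are ordering only, but the relocated doc comments summarize the delivery controls; a minimal utterance using them (values illustrative):

import { Hume } from "hume";

const utterance: Hume.tts.PostedUtterance = {
    text: "Take it slowly.",
    description: "soft, unhurried narration",
    speed: 0.9, // doc comment above warns values below 0.75 or above 1.5 may be unstable
    trailingSilence: 1.5, // seconds of silence appended to this utterance
};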
package/api/resources/tts/types/ReturnGeneration.d.ts (same hunk in the package/dist copy)

@@ -3,15 +3,15 @@
 */
 import * as Hume from "../../../index";
 export interface ReturnGeneration {
-    /** A unique ID associated with this TTS generation that can be used as context for generating consistent speech style and prosody across multiple requests. */
-    generationId: string;
+    /** The generated audio output in the requested format, encoded as a base64 string. */
+    audio: string;
    /** Duration of the generated audio in seconds. */
    duration: number;
+    encoding: Hume.tts.AudioEncoding;
    /** Size of the generated audio in bytes. */
    fileSize: number;
-    encoding: Hume.tts.AudioEncoding;
-    /** The generated audio output in the requested format, encoded as a base64 string. */
-    audio: string;
+    /** A unique ID associated with this TTS generation that can be used as context for generating consistent speech style and prosody across multiple requests. */
+    generationId: string;
    /** A list of snippet groups where each group corresponds to an utterance in the request. Each group contains segmented snippets that represent the original utterance divided into more natural-sounding units optimized for speech delivery. */
    snippets: Hume.tts.Snippet[][];
 }

package/api/resources/tts/types/ReturnTts.d.ts (same hunk in the package/dist copy)

@@ -3,7 +3,7 @@
 */
 import * as Hume from "../../../index";
 export interface ReturnTts {
+    generations: Hume.tts.ReturnGeneration[];
    /** A unique ID associated with this request for tracking and troubleshooting. Use this ID when contacting [support](/support) for troubleshooting assistance. */
    requestId?: string;
-    generations: Hume.tts.ReturnGeneration[];
 }

package/api/resources/tts/types/Snippet.d.ts (same hunk in the package/dist copy)

@@ -2,16 +2,16 @@
 * This file was auto-generated by Fern from our API Definition.
 */
 export interface Snippet {
+    /** The segmented audio output in the requested format, encoded as a base64 string. */
+    audio: string;
+    /** The generation ID this snippet corresponds to. */
+    generationId: string;
    /** A unique ID associated with this **Snippet**. */
    id: string;
    /** The text for this **Snippet**. */
    text: string;
-    /** The generation ID this snippet corresponds to. */
-    generationId: string;
-    /** The index of the utterance in the request this snippet corresponds to. */
-    utteranceIndex?: number;
    /** The transcribed text of the generated audio. It is only present if `instant_mode` is set to `false`. */
    transcribedText?: string;
-    /** The segmented audio output in the requested format, encoded as a base64 string. */
-    audio: string;
+    /** The index of the utterance in the request this snippet corresponds to. */
+    utteranceIndex?: number;
 }

package/api/resources/tts/types/SnippetAudioChunk.d.ts (same hunk in the package/dist copy)

@@ -6,25 +6,26 @@ import * as Hume from "../../../index";
 * Metadata for a chunk of generated audio.
 */
 export interface SnippetAudioChunk {
-    /** ID of the initiating request. */
-    requestId: string;
+    /** The generated audio output chunk in the requested format. */
+    audio: string;
+    /** The generated audio output format. */
+    audioFormat: Hume.tts.AudioFormatType;
+    /** The index of the audio chunk in the snippet. */
+    chunkIndex: number;
    /** The generation ID of the parent snippet that this chunk corresponds to. */
    generationId: string;
+    /** Whether or not this is the last chunk streamed back from the decoder for one input snippet. */
+    isLastChunk: boolean;
+    /** ID of the initiating request. */
+    requestId: string;
+    snippet?: Hume.tts.Snippet;
    /** The ID of the parent snippet that this chunk corresponds to. */
    snippetId: string;
    /** The text of the parent snippet that this chunk corresponds to. */
    text: string;
    /** The transcribed text of the generated audio of the parent snippet that this chunk corresponds to. It is only present if `instant_mode` is set to `false`. */
    transcribedText?: string;
-    /** The index of the audio chunk in the snippet. */
-    chunkIndex: number;
-    /** The generated audio output chunk in the requested format. */
-    audio: string;
-    /** The generated audio output format. */
-    audioFormat: Hume.tts.AudioFormatType;
-    /** Whether or not this is the last chunk streamed back from the decoder for one input snippet. */
-    isLastChunk: boolean;
+    type?: "audio";
    /** The index of the utterance in the request that the parent snippet of this chunk corresponds to. */
    utteranceIndex?: number;
-    snippet?: Hume.tts.Snippet;
 }
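As the relocated doc comments state, ReturnGeneration.audio (like Snippet.audio) is the audio encoded as a base64 string, so a non-streaming result decodes directly. Sketch, assuming the same client setup and the default output format:

import * as fs from "node:fs";
import { HumeClient } from "hume";

const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function synthesizeToFile(): Promise<void> {
    const result = await hume.tts.synthesizeJson({
        utterances: [{ text: "Decode me." }],
    });
    const generation = result.generations[0]; // one entry per requested generation
    fs.writeFileSync("out.mp3", Buffer.from(generation.audio, "base64"));
    console.log(`wrote ${generation.fileSize} bytes, ${generation.duration}s of audio`);
}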
package/api/resources/tts/types/index.js (the same OctaveVersion export is added in the dist and serialization copies)

@@ -31,6 +31,7 @@ __exportStar(require("./PostedContext"), exports);
 __exportStar(require("./Format"), exports);
 __exportStar(require("./PostedTts"), exports);
 __exportStar(require("./ReturnTts"), exports);
+__exportStar(require("./OctaveVersion"), exports);
 __exportStar(require("./ReturnVoice"), exports);
 __exportStar(require("./FormatPcm"), exports);
 __exportStar(require("./Snippet"), exports);
package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts (same hunk in the package/dist copy)

@@ -7,7 +7,7 @@ import * as core from "../../../../core";
 export declare const AssistantEnd: core.serialization.ObjectSchema<serializers.empathicVoice.AssistantEnd.Raw, Hume.empathicVoice.AssistantEnd>;
 export declare namespace AssistantEnd {
    interface Raw {
-        type: "assistant_end";
        custom_session_id?: string | null;
+        type: "assistant_end";
    }
 }

package/serialization/resources/empathicVoice/types/AssistantEnd.js (same hunk in the package/dist copy)

@@ -39,6 +39,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.AssistantEnd = void 0;
 const core = __importStar(require("../../../../core"));
 exports.AssistantEnd = core.serialization.object({
-    type: core.serialization.stringLiteral("assistant_end"),
    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
+    type: core.serialization.stringLiteral("assistant_end"),
 });

package/serialization/resources/empathicVoice/types/AssistantInput.d.ts (same hunk in the package/dist copy)

@@ -7,8 +7,8 @@ import * as core from "../../../../core";
 export declare const AssistantInput: core.serialization.ObjectSchema<serializers.empathicVoice.AssistantInput.Raw, Hume.empathicVoice.AssistantInput>;
 export declare namespace AssistantInput {
    interface Raw {
-        type: "assistant_input";
        custom_session_id?: string | null;
        text: string;
+        type: "assistant_input";
    }
 }

package/serialization/resources/empathicVoice/types/AssistantInput.js (same hunk in the package/dist copy)

@@ -39,7 +39,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.AssistantInput = void 0;
 const core = __importStar(require("../../../../core"));
 exports.AssistantInput = core.serialization.object({
-    type: core.serialization.stringLiteral("assistant_input"),
    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
    text: core.serialization.string(),
+    type: core.serialization.stringLiteral("assistant_input"),
 });

package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts (same hunk in the package/dist copy)

@@ -9,11 +9,11 @@ import { Inference } from "./Inference";
 export declare const AssistantMessage: core.serialization.ObjectSchema<serializers.empathicVoice.AssistantMessage.Raw, Hume.empathicVoice.AssistantMessage>;
 export declare namespace AssistantMessage {
    interface Raw {
-        type: "assistant_message";
        custom_session_id?: string | null;
+        from_text: boolean;
        id?: string | null;
        message: ChatMessage.Raw;
        models: Inference.Raw;
-        from_text: boolean;
+        type: "assistant_message";
    }
 }

package/serialization/resources/empathicVoice/types/AssistantMessage.js (same hunk in the package/dist copy)

@@ -41,10 +41,10 @@ const core = __importStar(require("../../../../core"));
 const ChatMessage_1 = require("./ChatMessage");
 const Inference_1 = require("./Inference");
 exports.AssistantMessage = core.serialization.object({
-    type: core.serialization.stringLiteral("assistant_message"),
    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
+    fromText: core.serialization.property("from_text", core.serialization.boolean()),
    id: core.serialization.string().optional(),
    message: ChatMessage_1.ChatMessage,
    models: Inference_1.Inference,
-    fromText: core.serialization.property("from_text", core.serialization.boolean()),
+    type: core.serialization.stringLiteral("assistant_message"),
 });

package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts (same hunk in the package/dist copy)

@@ -8,9 +8,9 @@ import { Inference } from "./Inference";
 export declare const AssistantProsody: core.serialization.ObjectSchema<serializers.empathicVoice.AssistantProsody.Raw, Hume.empathicVoice.AssistantProsody>;
 export declare namespace AssistantProsody {
    interface Raw {
-        type: "assistant_prosody";
        custom_session_id?: string | null;
-        models: Inference.Raw;
        id?: string | null;
+        models: Inference.Raw;
+        type: "assistant_prosody";
    }
 }

package/serialization/resources/empathicVoice/types/AssistantProsody.js (same hunk in the package/dist copy)

@@ -40,8 +40,8 @@ exports.AssistantProsody = void 0;
 const core = __importStar(require("../../../../core"));
 const Inference_1 = require("./Inference");
 exports.AssistantProsody = core.serialization.object({
-    type: core.serialization.stringLiteral("assistant_prosody"),
    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
-    models: Inference_1.Inference,
    id: core.serialization.string().optional(),
+    models: Inference_1.Inference,
+    type: core.serialization.stringLiteral("assistant_prosody"),
 });

package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts (same hunk in the package/dist copy)

@@ -8,8 +8,8 @@ import { Encoding } from "./Encoding";
 export declare const AudioConfiguration: core.serialization.ObjectSchema<serializers.empathicVoice.AudioConfiguration.Raw, Hume.empathicVoice.AudioConfiguration>;
 export declare namespace AudioConfiguration {
    interface Raw {
-        encoding: Encoding.Raw;
        channels: number;
+        encoding: Encoding.Raw;
        sample_rate: number;
    }
 }

package/serialization/resources/empathicVoice/types/AudioConfiguration.js (same hunk in the package/dist copy)

@@ -40,7 +40,7 @@ exports.AudioConfiguration = void 0;
 const core = __importStar(require("../../../../core"));
 const Encoding_1 = require("./Encoding");
 exports.AudioConfiguration = core.serialization.object({
-    encoding: Encoding_1.Encoding,
    channels: core.serialization.number(),
+    encoding: Encoding_1.Encoding,
    sampleRate: core.serialization.property("sample_rate", core.serialization.number()),
 });

package/serialization/resources/empathicVoice/types/AudioInput.d.ts (same hunk in the package/dist copy)

@@ -7,8 +7,8 @@ import * as core from "../../../../core";
 export declare const AudioInput: core.serialization.ObjectSchema<serializers.empathicVoice.AudioInput.Raw, Hume.empathicVoice.AudioInput>;
 export declare namespace AudioInput {
    interface Raw {
-        type: "audio_input";
        custom_session_id?: string | null;
        data: string;
+        type: "audio_input";
    }
 }

package/serialization/resources/empathicVoice/types/AudioInput.js (same hunk in the package/dist copy)

@@ -39,7 +39,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.AudioInput = void 0;
 const core = __importStar(require("../../../../core"));
 exports.AudioInput = core.serialization.object({
-    type: core.serialization.stringLiteral("audio_input"),
    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
    data: core.serialization.string(),
+    type: core.serialization.stringLiteral("audio_input"),
 });

package/serialization/resources/empathicVoice/types/AudioOutput.d.ts (same hunk in the package/dist copy)

@@ -7,10 +7,10 @@ import * as core from "../../../../core";
 export declare const AudioOutput: core.serialization.ObjectSchema<serializers.empathicVoice.AudioOutput.Raw, Hume.empathicVoice.AudioOutput>;
 export declare namespace AudioOutput {
    interface Raw {
-        type: "audio_output";
        custom_session_id?: string | null;
+        data: string;
        id: string;
        index: number;
-        data: string;
+        type: "audio_output";
    }
 }

package/serialization/resources/empathicVoice/types/AudioOutput.js (same hunk in the package/dist copy)

@@ -39,9 +39,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.AudioOutput = void 0;
 const core = __importStar(require("../../../../core"));
 exports.AudioOutput = core.serialization.object({
-    type: core.serialization.stringLiteral("audio_output"),
    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
+    data: core.serialization.string(),
    id: core.serialization.string(),
    index: core.serialization.number(),
-    data: core.serialization.string(),
+    type: core.serialization.stringLiteral("audio_output"),
 });

package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts (same hunk in the package/dist copy)

@@ -8,7 +8,7 @@ import { BuiltInTool } from "./BuiltInTool";
 export declare const BuiltinToolConfig: core.serialization.ObjectSchema<serializers.empathicVoice.BuiltinToolConfig.Raw, Hume.empathicVoice.BuiltinToolConfig>;
 export declare namespace BuiltinToolConfig {
    interface Raw {
-        name: BuiltInTool.Raw;
        fallback_content?: string | null;
+        name: BuiltInTool.Raw;
    }
 }

package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js (same hunk in the package/dist copy)

@@ -40,6 +40,6 @@ exports.BuiltinToolConfig = void 0;
 const core = __importStar(require("../../../../core"));
 const BuiltInTool_1 = require("./BuiltInTool");
 exports.BuiltinToolConfig = core.serialization.object({
-    name: BuiltInTool_1.BuiltInTool,
    fallbackContent: core.serialization.property("fallback_content", core.serialization.string().optional()),
+    name: BuiltInTool_1.BuiltInTool,
 });

package/serialization/resources/empathicVoice/types/ChatMessage.d.ts (same hunk in the package/dist copy)

@@ -10,8 +10,8 @@ import { ChatMessageToolResult } from "./ChatMessageToolResult";
 export declare const ChatMessage: core.serialization.ObjectSchema<serializers.empathicVoice.ChatMessage.Raw, Hume.empathicVoice.ChatMessage>;
 export declare namespace ChatMessage {
    interface Raw {
-        role: Role.Raw;
        content?: string | null;
+        role: Role.Raw;
        tool_call?: ToolCallMessage.Raw | null;
        tool_result?: ChatMessageToolResult.Raw | null;
    }

package/serialization/resources/empathicVoice/types/ChatMessage.js (same hunk in the package/dist copy)

@@ -42,8 +42,8 @@ const Role_1 = require("./Role");
 const ToolCallMessage_1 = require("./ToolCallMessage");
 const ChatMessageToolResult_1 = require("./ChatMessageToolResult");
 exports.ChatMessage = core.serialization.object({
-    role: Role_1.Role,
    content: core.serialization.string().optional(),
+    role: Role_1.Role,
    toolCall: core.serialization.property("tool_call", ToolCallMessage_1.ToolCallMessage.optional()),
    toolResult: core.serialization.property("tool_result", ChatMessageToolResult_1.ChatMessageToolResult.optional()),
 });

package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts (same hunk in the package/dist copy)

@@ -7,10 +7,10 @@ import * as core from "../../../../core";
 export declare const ChatMetadata: core.serialization.ObjectSchema<serializers.empathicVoice.ChatMetadata.Raw, Hume.empathicVoice.ChatMetadata>;
 export declare namespace ChatMetadata {
    interface Raw {
-        type: "chat_metadata";
-        custom_session_id?: string | null;
        chat_group_id: string;
        chat_id: string;
+        custom_session_id?: string | null;
        request_id?: string | null;
+        type: "chat_metadata";
    }
 }

package/serialization/resources/empathicVoice/types/ChatMetadata.js (same hunk in the package/dist copy)

@@ -39,9 +39,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.ChatMetadata = void 0;
 const core = __importStar(require("../../../../core"));
 exports.ChatMetadata = core.serialization.object({
-    type: core.serialization.stringLiteral("chat_metadata"),
-    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
    chatGroupId: core.serialization.property("chat_group_id", core.serialization.string()),
    chatId: core.serialization.property("chat_id", core.serialization.string()),
+    customSessionId: core.serialization.property("custom_session_id", core.serialization.string().optional()),
    requestId: core.serialization.property("request_id", core.serialization.string().optional()),
+    type: core.serialization.stringLiteral("chat_metadata"),
 });

package/serialization/resources/empathicVoice/types/Context.d.ts (same hunk in the package/dist copy)

@@ -8,7 +8,7 @@ import { ContextType } from "./ContextType";
 export declare const Context: core.serialization.ObjectSchema<serializers.empathicVoice.Context.Raw, Hume.empathicVoice.Context>;
 export declare namespace Context {
    interface Raw {
-        type?: ContextType.Raw | null;
        text: string;
+        type?: ContextType.Raw | null;
    }
 }

package/serialization/resources/empathicVoice/types/Context.js (same hunk in the package/dist copy)

@@ -40,6 +40,6 @@ exports.Context = void 0;
 const core = __importStar(require("../../../../core"));
 const ContextType_1 = require("./ContextType");
 exports.Context = core.serialization.object({
-    type: ContextType_1.ContextType.optional(),
    text: core.serialization.string(),
+    type: ContextType_1.ContextType.optional(),
 });
package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts (same hunk in the package/dist copy)

@@ -6,5 +6,5 @@ import * as Hume from "../../../../api/index";
 import * as core from "../../../../core";
 export declare const LanguageModelType: core.serialization.Schema<serializers.empathicVoice.LanguageModelType.Raw, Hume.empathicVoice.LanguageModelType>;
 export declare namespace LanguageModelType {
-    type Raw = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-sonnet-4-20250514" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gpt-oss-120b" | "qwen-3-235b-a22b" | "qwen-3-235b-a22b-instruct-2507" | "qwen-3-235b-a22b-thinking-2507" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gemini-2.5-flash" | "gemini-2.5-flash-preview-04-17" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gpt-4.1" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "moonshotai/kimi-k2-instruct" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "sonar" | "sonar-pro" | "sambanova" | "DeepSeek-R1-Distill-Llama-70B" | "Llama-4-Maverick-17B-128E-Instruct" | "Qwen3-32B" | "ellm" | "custom-language-model" | "hume-evi-3-web-search";
+    type Raw = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-sonnet-4-20250514" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gpt-oss-120b" | "qwen-3-235b-a22b" | "qwen-3-235b-a22b-instruct-2507" | "qwen-3-235b-a22b-thinking-2507" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gemini-2.5-flash" | "gemini-2.5-flash-preview-04-17" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gpt-4.1" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gpt-4o-priority" | "gpt-4o-mini-priority" | "gpt-4.1-priority" | "gpt-5-priority" | "gpt-5-mini-priority" | "gpt-5-nano-priority" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "moonshotai/kimi-k2-instruct" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "sonar" | "sonar-pro" | "sambanova" | "DeepSeek-R1-Distill-Llama-70B" | "Llama-4-Maverick-17B-128E-Instruct" | "Qwen3-32B" | "ellm" | "custom-language-model" | "hume-evi-3-web-search";
 }
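The only change here is six new OpenAI "-priority" members of the union (the api-types LanguageModelType.d.ts files listed above carry the matching change); they type-check anywhere a model name is accepted:

import { Hume } from "hume";

// New in 0.13.8: priority-processing variants of the OpenAI models.
const model: Hume.empathicVoice.LanguageModelType = "gpt-5-priority";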