hume 0.12.1 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/empathic-voice/__package__.yml +760 -711
- package/.mock/definition/empathic-voice/chat.yml +29 -23
- package/.mock/definition/empathic-voice/chatWebhooks.yml +3 -3
- package/.mock/definition/empathic-voice/configs.yml +10 -4
- package/.mock/definition/tts/__package__.yml +77 -125
- package/.mock/fern.config.json +1 -1
- package/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
- package/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
- package/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/api/resources/empathicVoice/types/Context.d.ts +8 -14
- package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
- package/api/resources/empathicVoice/types/ContextType.js +1 -2
- package/api/resources/empathicVoice/types/LanguageModelType.d.ts +20 -1
- package/api/resources/empathicVoice/types/LanguageModelType.js +19 -0
- package/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
- package/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
- package/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
- package/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
- package/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/api/resources/empathicVoice/types/ReturnConfig.d.ts +18 -14
- package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
- package/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
- package/api/resources/empathicVoice/types/Tool.d.ts +6 -6
- package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
- package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
- package/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
- package/api/resources/empathicVoice/types/UserMessage.d.ts +14 -7
- package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
- package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
- package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/api/resources/empathicVoice/types/index.d.ts +16 -16
- package/api/resources/empathicVoice/types/index.js +16 -16
- package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
- package/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
- package/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
- package/api/resources/tts/client/Client.d.ts +21 -23
- package/api/resources/tts/client/Client.js +50 -58
- package/api/resources/tts/client/index.d.ts +1 -1
- package/api/resources/tts/client/index.js +0 -15
- package/api/resources/tts/resources/voices/client/index.d.ts +1 -0
- package/api/resources/tts/types/PostedTts.d.ts +8 -8
- package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/api/resources/tts/types/Snippet.d.ts +6 -6
- package/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
- package/core/fetcher/BinaryResponse.d.ts +17 -0
- package/core/fetcher/BinaryResponse.js +14 -0
- package/core/fetcher/Fetcher.d.ts +1 -1
- package/core/fetcher/ResponseWithBody.d.ts +4 -0
- package/core/fetcher/ResponseWithBody.js +6 -0
- package/core/fetcher/getFetchFn.js +3 -3
- package/core/fetcher/getResponseBody.js +33 -32
- package/core/fetcher/index.d.ts +1 -0
- package/core/file.d.ts +1 -0
- package/core/form-data-utils/FormDataWrapper.d.ts +5 -52
- package/core/form-data-utils/FormDataWrapper.js +104 -124
- package/core/index.d.ts +1 -0
- package/core/index.js +1 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
- package/dist/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
- package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
- package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
- package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +20 -1
- package/dist/api/resources/empathicVoice/types/LanguageModelType.js +19 -0
- package/dist/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
- package/dist/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
- package/dist/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
- package/dist/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
- package/dist/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +18 -14
- package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
- package/dist/api/resources/empathicVoice/types/Tool.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +14 -7
- package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
- package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
- package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/index.d.ts +16 -16
- package/dist/api/resources/empathicVoice/types/index.js +16 -16
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
- package/dist/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
- package/dist/api/resources/tts/client/Client.d.ts +21 -23
- package/dist/api/resources/tts/client/Client.js +50 -58
- package/dist/api/resources/tts/client/index.d.ts +1 -1
- package/dist/api/resources/tts/client/index.js +0 -15
- package/dist/api/resources/tts/resources/voices/client/index.d.ts +1 -0
- package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
- package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
- package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
- package/dist/core/fetcher/BinaryResponse.d.ts +17 -0
- package/dist/core/fetcher/BinaryResponse.js +14 -0
- package/dist/core/fetcher/Fetcher.d.ts +1 -1
- package/dist/core/fetcher/ResponseWithBody.d.ts +4 -0
- package/dist/core/fetcher/ResponseWithBody.js +6 -0
- package/dist/core/fetcher/getFetchFn.js +3 -3
- package/dist/core/fetcher/getResponseBody.js +33 -32
- package/dist/core/fetcher/index.d.ts +1 -0
- package/dist/core/file.d.ts +1 -0
- package/dist/core/form-data-utils/FormDataWrapper.d.ts +5 -52
- package/dist/core/form-data-utils/FormDataWrapper.js +104 -124
- package/dist/core/index.d.ts +1 -0
- package/dist/core/index.js +1 -0
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +19 -0
- package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +9 -9
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +9 -9
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -16
- package/dist/serialization/resources/empathicVoice/types/index.js +16 -16
- package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
- package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
- package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/dist/serialization/resources/tts/types/Snippet.js +3 -3
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/jest.browser.config.mjs +10 -0
- package/jest.config.mjs +1 -0
- package/package.json +6 -7
- package/reference.md +25 -27
- package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.js +19 -0
- package/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +9 -9
- package/serialization/resources/empathicVoice/types/ReturnConfig.js +9 -9
- package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
- package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/serialization/resources/empathicVoice/types/index.d.ts +16 -16
- package/serialization/resources/empathicVoice/types/index.js +16 -16
- package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
- package/serialization/resources/tts/types/PostedTts.js +3 -3
- package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/serialization/resources/tts/types/Snippet.js +3 -3
- package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
- package/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
- package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
- package/api/resources/tts/client/requests/index.d.ts +0 -1
- package/core/form-data-utils/toReadableStream.d.ts +0 -1
- package/core/form-data-utils/toReadableStream.js +0 -50
- package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
- package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
- package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
- package/dist/core/form-data-utils/toReadableStream.d.ts +0 -1
- package/dist/core/form-data-utils/toReadableStream.js +0 -50
- /package/{api/resources/tts/client/requests/index.js → core/file.js} +0 -0
- /package/dist/{api/resources/tts/client/requests/index.js → core/file.js} +0 -0

package/api/resources/empathicVoice/types/UserInput.d.ts:

@@ -7,6 +7,8 @@
  * Expression measurement results are not available for User Input messages, as the prosody model relies on audio input and cannot process text alone.
  */
 export interface UserInput {
+    /** The type of message sent through the socket; must be `user_input` for our server to correctly identify and process it as a User Input message. */
+    type: "user_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -15,6 +17,4 @@ export interface UserInput {
      * Expression measurement results are not available for User Input messages, as the prosody model relies on audio input and cannot process text alone.
      */
     text: string;
-    /** The type of message sent through the socket; must be `user_input` for our server to correctly identify and process it as a User Input message. */
-    type: "user_input";
 }
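
The two hunks above only move the `type` discriminant to the top of the interface. Property order in a TypeScript interface carries no type-level or runtime meaning, so 0.12.x call sites compile unchanged. A minimal sketch, assuming the package's root `Hume` type-namespace export:

```typescript
import { Hume } from "hume";

// Property order in an interface declaration is cosmetic: an object literal
// written against the 0.12.1 declaration still satisfies the 0.13.0 one.
const input: Hume.empathicVoice.UserInput = {
    type: "user_input",
    text: "What's the weather like today?",
    customSessionId: "my-session-1", // hypothetical session ID
};
```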

package/api/resources/empathicVoice/types/UserInterruption.d.ts:

@@ -5,14 +5,14 @@
  * When provided, the output is an interruption.
  */
 export interface UserInterruption {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unix timestamp of the detected user interruption. */
-    time: number;
     /**
      * The type of message sent through the socket; for a User Interruption message, this must be `user_interruption`.
      *
      * This message indicates the user has interrupted the assistant’s response. EVI detects the interruption in real-time and sends this message to signal the interruption event. This message allows the system to stop the current audio playback, clear the audio queue, and prepare to handle new user input.
      */
     type: "user_interruption";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Unix timestamp of the detected user interruption. */
+    time: number;
 }

package/api/resources/empathicVoice/types/UserMessage.d.ts:

@@ -6,22 +6,29 @@ import * as Hume from "../../../index";
  * When provided, the output is a user message.
  */
 export interface UserMessage {
+    /**
+     * The type of message sent through the socket; for a User Message, this must be `user_message`.
+     *
+     * This message contains both a transcript of the user’s input and the expression measurement predictions if the input was sent as an [Audio Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AudioInput.type). Expression measurement predictions are not provided for a [User Input message](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.type), as the prosody model relies on audio input and cannot process text alone.
+     */
+    type: "user_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from a [User Input](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.text) message. */
-    fromText: boolean;
-    /** Indicates if this message contains an immediate and unfinalized transcript of the user’s audio input. If it does, words may be repeated across successive `UserMessage` messages as our transcription model becomes more confident about what was said with additional context. Interim messages are useful to detect if the user is interrupting during audio playback on the client. Even without a finalized transcription, along with [UserInterrupt](/reference/empathic-voice-interface-evi/chat/chat#receive.UserInterruption.type) messages, interim `UserMessages` are useful for detecting if the user is interrupting during audio playback on the client, signaling to stop playback in your application. Interim `UserMessages` will only be received if the [verbose_transcription](/reference/empathic-voice-interface-evi/chat/chat#request.query.verbose_transcription) query parameter is set to `true` in the handshake request. */
-    interim: boolean;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
     /** Start and End time of user message. */
     time: Hume.empathicVoice.MillisecondInterval;
+    /** Indicates if this message was inserted into the conversation as text from a [User Input](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.text) message. */
+    fromText: boolean;
     /**
-     *
+     * Indicates whether this `UserMessage` contains an interim (unfinalized) transcript.
      *
-     *
+     * - `true`: the transcript is provisional; words may be repeated or refined in subsequent `UserMessage` responses as additional audio is processed.
+     * - `false`: the transcript is final and complete.
+     *
+     * Interim transcripts are only sent when the [`verbose_transcription`](/reference/empathic-voice-interface-evi/chat/chat#request.query.verbose_transcription) query parameter is set to `true` in the initial handshake.
      */
-
+    interim: boolean;
 }
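
Beyond the field reordering, this hunk rewrites the `interim` documentation into explicit `true`/`false` semantics. A sketch of acting on the flag; `stopAudioPlayback` is a hypothetical application helper, and the example assumes incoming messages are already parsed into the `UserMessage` shape:

```typescript
import { Hume } from "hume";

// Interim transcripts arrive only when the chat socket was opened with
// verbose_transcription=true; treat them as a barge-in signal and wait for
// interim === false before acting on the transcript text.
function handleUserMessage(msg: Hume.empathicVoice.UserMessage): void {
    if (msg.interim) {
        stopAudioPlayback(); // the user is talking over the assistant
        return;
    }
    console.log(`Final transcript: ${msg.message.content ?? ""}`);
}

function stopAudioPlayback(): void {
    /* application-specific: halt queued assistant audio */
}
```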

package/api/resources/empathicVoice/types/VoiceProvider.d.ts:

@@ -1,8 +1,9 @@
 /**
  * This file was auto-generated by Fern from our API Definition.
  */
-export type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE";
+export type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE" | "OCTAVE_COMBINED";
 export declare const VoiceProvider: {
     readonly HumeAi: "HUME_AI";
     readonly CustomVoice: "CUSTOM_VOICE";
+    readonly OctaveCombined: "OCTAVE_COMBINED";
 };
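
The new `OCTAVE_COMBINED` member widens both the union and its companion const object, so exhaustive switches over `VoiceProvider` need a third case. A sketch with a hypothetical `label` helper:

```typescript
import { Hume } from "hume";

// The const object mirrors the union, so the new member can be referenced
// without a raw string literal.
const provider: Hume.empathicVoice.VoiceProvider =
    Hume.empathicVoice.VoiceProvider.OctaveCombined; // "OCTAVE_COMBINED"

// A switch that was exhaustive over the 0.12.x union must now handle the
// third member to keep the implicit-return check satisfied.
function label(p: Hume.empathicVoice.VoiceProvider): string {
    switch (p) {
        case "HUME_AI":
            return "Hume voice";
        case "CUSTOM_VOICE":
            return "custom voice";
        case "OCTAVE_COMBINED":
            return "combined Octave voice";
    }
}
```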

package/api/resources/empathicVoice/types/WebSocketError.d.ts:

@@ -5,20 +5,20 @@
  * When provided, the output is an error message.
  */
 export interface WebSocketError {
-    /** Error code. Identifies the type of error encountered. */
-    code: string;
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Detailed description of the error. */
-    message: string;
-    /** ID of the initiating request. */
-    requestId?: string;
-    /** Short, human-readable identifier and description for the error. See a complete list of error slugs on the [Errors page](/docs/resources/errors). */
-    slug: string;
     /**
      * The type of message sent through the socket; for a Web Socket Error message, this must be `error`.
      *
      * This message indicates a disruption in the WebSocket connection, such as an unexpected disconnection, protocol error, or data transmission issue.
      */
     type: "error";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Error code. Identifies the type of error encountered. */
+    code: string;
+    /** Short, human-readable identifier and description for the error. See a complete list of error slugs on the [Errors page](/docs/resources/errors). */
+    slug: string;
+    /** Detailed description of the error. */
+    message: string;
+    /** ID of the initiating request. */
+    requestId?: string;
 }

package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts:

@@ -3,16 +3,16 @@
  */
 import * as Hume from "../../../index";
 export interface WebhookEventChatEnded extends Hume.empathicVoice.WebhookEventBase {
-    /**
-
-    /**
-
+    /** Always `chat_ended`. */
+    eventName?: "chat_ended";
+    /** Unix timestamp (in milliseconds) indicating when the session ended. */
+    endTime: number;
     /** Total duration of the session in seconds. */
     durationSeconds: number;
     /** Reason for the session's termination. */
     endReason: Hume.empathicVoice.WebhookEventChatStatus;
-    /**
-
-    /**
-
+    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
+    callerNumber?: string;
+    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
+    customSessionId?: string;
 }

package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts:

@@ -3,14 +3,14 @@
  */
 import * as Hume from "../../../index";
 export interface WebhookEventChatStarted extends Hume.empathicVoice.WebhookEventBase {
-    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
-    callerNumber?: string;
-    /** Indicates whether the chat is the first in a new Chat Group (`new_chat_group`) or the continuation of an existing chat group (`resumed_chat_group`). */
-    chatStartType: Hume.empathicVoice.WebhookEventChatStartType;
-    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
-    customSessionId?: string;
     /** Always `chat_started`. */
     eventName?: "chat_started";
     /** Unix timestamp (in milliseconds) indicating when the session started. */
     startTime: number;
+    /** Indicates whether the chat is the first in a new Chat Group (`new_chat_group`) or the continuation of an existing chat group (`resumed_chat_group`). */
+    chatStartType: Hume.empathicVoice.WebhookEventChatStartType;
+    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
+    callerNumber?: string;
+    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
+    customSessionId?: string;
 }
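
Both webhook interfaces now lead with their `eventName` literal, which doubles as a discriminant when the two payloads are handled through one union. A sketch of narrowing a parsed payload (the union alias is local to the example; the SDK also exports a `WebhookEvent` type):

```typescript
import { Hume } from "hume";

type ChatWebhookEvent =
    | Hume.empathicVoice.WebhookEventChatStarted
    | Hume.empathicVoice.WebhookEventChatEnded;

// `eventName` is an optional literal on each member, which is enough for
// TypeScript to narrow the union.
function onChatEvent(event: ChatWebhookEvent): void {
    if (event.eventName === "chat_started") {
        console.log(`chat started at ${event.startTime} (${event.chatStartType})`);
    } else if (event.eventName === "chat_ended") {
        console.log(`chat ended after ${event.durationSeconds}s: ${event.endReason}`);
    }
}
```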

package/api/resources/empathicVoice/types/index.d.ts:

@@ -1,35 +1,35 @@
-export * from "./AssistantEnd";
 export * from "./AssistantInput";
-export * from "./AssistantMessage";
-export * from "./AssistantProsody";
 export * from "./AudioConfiguration";
 export * from "./AudioInput";
-export * from "./AudioOutput";
 export * from "./BuiltInTool";
 export * from "./BuiltinToolConfig";
-export * from "./ChatMessageToolResult";
-export * from "./ChatMessage";
-export * from "./ChatMetadata";
 export * from "./Context";
 export * from "./ContextType";
-export * from "./EmotionScores";
 export * from "./Encoding";
-export * from "./WebSocketError";
 export * from "./ErrorLevel";
-export * from "./Inference";
-export * from "./MillisecondInterval";
 export * from "./PauseAssistantMessage";
-export * from "./ProsodyInference";
 export * from "./ResumeAssistantMessage";
-export * from "./Role";
 export * from "./SessionSettingsVariablesValue";
 export * from "./SessionSettings";
 export * from "./Tool";
-export * from "./ToolCallMessage";
 export * from "./ToolErrorMessage";
 export * from "./ToolResponseMessage";
 export * from "./ToolType";
 export * from "./UserInput";
+export * from "./AssistantEnd";
+export * from "./AssistantMessage";
+export * from "./AssistantProsody";
+export * from "./AudioOutput";
+export * from "./ChatMessageToolResult";
+export * from "./ChatMessage";
+export * from "./ChatMetadata";
+export * from "./EmotionScores";
+export * from "./WebSocketError";
+export * from "./Inference";
+export * from "./MillisecondInterval";
+export * from "./ProsodyInference";
+export * from "./Role";
+export * from "./ToolCallMessage";
 export * from "./UserInterruption";
 export * from "./UserMessage";
 export * from "./JsonMessage";
@@ -38,12 +38,12 @@ export * from "./LanguageModelType";
 export * from "./ModelProviderEnum";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
-export * from "./WebhookEventBase";
-export * from "./WebhookEvent";
 export * from "./WebhookEventChatEnded";
 export * from "./WebhookEventChatStartType";
 export * from "./WebhookEventChatStarted";
 export * from "./WebhookEventChatStatus";
+export * from "./WebhookEvent";
+export * from "./WebhookEventBase";
 export * from "./ErrorResponse";
 export * from "./ReturnPagedUserDefinedTools";
 export * from "./ReturnUserDefinedToolToolType";

package/api/resources/empathicVoice/types/index.js:

@@ -14,38 +14,38 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./AssistantEnd"), exports);
 __exportStar(require("./AssistantInput"), exports);
-__exportStar(require("./AssistantMessage"), exports);
-__exportStar(require("./AssistantProsody"), exports);
 __exportStar(require("./AudioConfiguration"), exports);
 __exportStar(require("./AudioInput"), exports);
-__exportStar(require("./AudioOutput"), exports);
 __exportStar(require("./BuiltInTool"), exports);
 __exportStar(require("./BuiltinToolConfig"), exports);
-__exportStar(require("./ChatMessageToolResult"), exports);
-__exportStar(require("./ChatMessage"), exports);
-__exportStar(require("./ChatMetadata"), exports);
 __exportStar(require("./Context"), exports);
 __exportStar(require("./ContextType"), exports);
-__exportStar(require("./EmotionScores"), exports);
 __exportStar(require("./Encoding"), exports);
-__exportStar(require("./WebSocketError"), exports);
 __exportStar(require("./ErrorLevel"), exports);
-__exportStar(require("./Inference"), exports);
-__exportStar(require("./MillisecondInterval"), exports);
 __exportStar(require("./PauseAssistantMessage"), exports);
-__exportStar(require("./ProsodyInference"), exports);
 __exportStar(require("./ResumeAssistantMessage"), exports);
-__exportStar(require("./Role"), exports);
 __exportStar(require("./SessionSettingsVariablesValue"), exports);
 __exportStar(require("./SessionSettings"), exports);
 __exportStar(require("./Tool"), exports);
-__exportStar(require("./ToolCallMessage"), exports);
 __exportStar(require("./ToolErrorMessage"), exports);
 __exportStar(require("./ToolResponseMessage"), exports);
 __exportStar(require("./ToolType"), exports);
 __exportStar(require("./UserInput"), exports);
+__exportStar(require("./AssistantEnd"), exports);
+__exportStar(require("./AssistantMessage"), exports);
+__exportStar(require("./AssistantProsody"), exports);
+__exportStar(require("./AudioOutput"), exports);
+__exportStar(require("./ChatMessageToolResult"), exports);
+__exportStar(require("./ChatMessage"), exports);
+__exportStar(require("./ChatMetadata"), exports);
+__exportStar(require("./EmotionScores"), exports);
+__exportStar(require("./WebSocketError"), exports);
+__exportStar(require("./Inference"), exports);
+__exportStar(require("./MillisecondInterval"), exports);
+__exportStar(require("./ProsodyInference"), exports);
+__exportStar(require("./Role"), exports);
+__exportStar(require("./ToolCallMessage"), exports);
 __exportStar(require("./UserInterruption"), exports);
 __exportStar(require("./UserMessage"), exports);
 __exportStar(require("./JsonMessage"), exports);
@@ -54,12 +54,12 @@ __exportStar(require("./LanguageModelType"), exports);
 __exportStar(require("./ModelProviderEnum"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
-__exportStar(require("./WebhookEventBase"), exports);
-__exportStar(require("./WebhookEvent"), exports);
 __exportStar(require("./WebhookEventChatEnded"), exports);
 __exportStar(require("./WebhookEventChatStartType"), exports);
 __exportStar(require("./WebhookEventChatStarted"), exports);
 __exportStar(require("./WebhookEventChatStatus"), exports);
+__exportStar(require("./WebhookEvent"), exports);
+__exportStar(require("./WebhookEventBase"), exports);
 __exportStar(require("./ErrorResponse"), exports);
 __exportStar(require("./ReturnPagedUserDefinedTools"), exports);
 __exportStar(require("./ReturnUserDefinedToolToolType"), exports);

package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts:

@@ -5,8 +5,6 @@ import * as environments from "../../../../../../environments";
 import * as core from "../../../../../../core";
 import * as Hume from "../../../../../index";
 import * as stream from "stream";
-import * as fs from "fs";
-import { Blob } from "buffer";
 export declare namespace Batch {
     interface Options {
         environment?: core.Supplier<environments.HumeEnvironment | string>;
@@ -86,14 +84,14 @@ export declare class Batch {
     /**
      * Start a new batch inference job.
      *
-     * @param {
+     * @param {core.FileLike[]} file
      * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request
      * @param {Batch.RequestOptions} requestOptions - Request-specific configuration.
      *
      * @example
      * await client.expressionMeasurement.batch.startInferenceJobFromLocalFile([fs.createReadStream("/path/to/your/file")], {})
      */
-    startInferenceJobFromLocalFile(file:
+    startInferenceJobFromLocalFile(file: core.FileLike[], request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, requestOptions?: Batch.RequestOptions): core.HttpResponsePromise<Hume.expressionMeasurement.batch.JobId>;
     private __startInferenceJobFromLocalFile;
     protected _getCustomAuthorizationHeaders(): Promise<{
         "X-Hume-Api-Key": string | undefined;

package/api/resources/expressionMeasurement/resources/batch/client/Client.js:

@@ -384,7 +384,7 @@ class Batch {
     /**
      * Start a new batch inference job.
      *
-     * @param {
+     * @param {core.FileLike[]} file
      * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request
      * @param {Batch.RequestOptions} requestOptions - Request-specific configuration.
      *
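
The signature change above types the file parameter as `core.FileLike[]`, which is why the `fs` and `Blob` imports drop out of the declaration file. Usage per the retained JSDoc example, assuming an API key in `HUME_API_KEY` and that the job-start response exposes a `jobId` field as in prior releases:

```typescript
import * as fs from "fs";
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

// A Node Readable stream satisfies core.FileLike; the second argument is the
// request body, whose fields are all optional here.
const job = await client.expressionMeasurement.batch.startInferenceJobFromLocalFile(
    [fs.createReadStream("/path/to/your/file")],
    {},
);
console.log(job.jobId);
```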

package/api/resources/tts/client/Client.d.ts:

@@ -37,32 +37,30 @@ export declare class Tts {
      *
      * The response includes the base64-encoded audio and metadata in JSON format.
      *
-     * @param {Hume.tts.
+     * @param {Hume.tts.PostedTts} request
      * @param {Tts.RequestOptions} requestOptions - Request-specific configuration.
      *
      * @throws {@link Hume.tts.UnprocessableEntityError}
      *
      * @example
      * await client.tts.synthesizeJson({
-     *
+     *     utterances: [{
+     *             text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
+     *             description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
+     *         }],
+     *     context: {
      *         utterances: [{
-     *     text: "
-     *     description: "
-     *     }]
-     *
-     *
-     *
-     *
-     *
-     *     },
-     *     format: {
-     *         type: "mp3"
-     *     },
-     *     numGenerations: 1
-     * }
+     *                 text: "How can people see beauty so differently?",
+     *                 description: "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question."
+     *             }]
+     *     },
+     *     format: {
+     *         type: "mp3"
+     *     },
+     *     numGenerations: 1
      * })
      */
-    synthesizeJson(request: Hume.tts.
+    synthesizeJson(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<Hume.tts.ReturnTts>;
     private __synthesizeJson;
     /**
      * Synthesizes one or more input texts into speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
@@ -72,12 +70,6 @@ export declare class Tts {
      */
     synthesizeFile(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
     private __synthesizeFile;
-    /**
-     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     * @throws {@link Hume.tts.UnprocessableEntityError}
-     */
-    synthesizeFileStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
-    private __synthesizeFileStreaming;
     /**
      * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
      *
@@ -85,6 +77,12 @@ export declare class Tts {
      */
     synthesizeJsonStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<core.Stream<Hume.tts.SnippetAudioChunk>>;
     private __synthesizeJsonStreaming;
+    /**
+     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
+     * @throws {@link Hume.tts.UnprocessableEntityError}
+     */
+    synthesizeFileStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
+    private __synthesizeFileStreaming;
     protected _getCustomAuthorizationHeaders(): Promise<{
         "X-Hume-Api-Key": string | undefined;
     }>;
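
The regenerated `@example` block above is now a complete request. As a standalone, runnable version (same assumptions: `HUME_API_KEY` in the environment, and a `generations` array on `ReturnTts` as in prior releases):

```typescript
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

const result = await client.tts.synthesizeJson({
    utterances: [
        {
            text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
            description:
                "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
        },
    ],
    context: {
        utterances: [
            {
                text: "How can people see beauty so differently?",
                description:
                    "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question.",
            },
        ],
    },
    format: { type: "mp3" },
    numGenerations: 1,
});

// Each generation carries base64-encoded audio plus metadata.
console.log(result.generations[0]?.generationId);
```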

package/api/resources/tts/client/Client.js:

@@ -70,29 +70,27 @@ class Tts {
      *
      * The response includes the base64-encoded audio and metadata in JSON format.
      *
-     * @param {Hume.tts.
+     * @param {Hume.tts.PostedTts} request
      * @param {Tts.RequestOptions} requestOptions - Request-specific configuration.
      *
      * @throws {@link Hume.tts.UnprocessableEntityError}
      *
      * @example
      * await client.tts.synthesizeJson({
-     *
+     *     utterances: [{
+     *             text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
+     *             description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
+     *         }],
+     *     context: {
      *         utterances: [{
-     *     text: "
-     *     description: "
-     *     }]
-     *
-     *
-     *
-     *
-     *
-     *     },
-     *     format: {
-     *         type: "mp3"
-     *     },
-     *     numGenerations: 1
-     * }
+     *                 text: "How can people see beauty so differently?",
+     *                 description: "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question."
+     *             }]
+     *     },
+     *     format: {
+     *         type: "mp3"
+     *     },
+     *     numGenerations: 1
      * })
      */
     synthesizeJson(request, requestOptions) {
@@ -101,19 +99,13 @@ class Tts {
     __synthesizeJson(request, requestOptions) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
-            const { accessToken, body: _body } = request;
-            const _queryParams = {};
-            if (accessToken != null) {
-                _queryParams["access_token"] = accessToken;
-            }
             const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
                 url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts"),
                 method: "POST",
                 headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                 contentType: "application/json",
-                queryParameters: _queryParams,
                 requestType: "json",
-                body: serializers.tts.PostedTts.jsonOrThrow(
+                body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
                 timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                 maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                 abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
@@ -226,28 +218,47 @@ class Tts {
     }
     /**
      * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     *
+     *
+     * The response is a stream of JSON objects including audio encoded in base64.
      */
-
-        return core.HttpResponsePromise.fromPromise(this.
+    synthesizeJsonStreaming(request, requestOptions) {
+        return core.HttpResponsePromise.fromPromise(this.__synthesizeJsonStreaming(request, requestOptions));
     }
-
+    __synthesizeJsonStreaming(request, requestOptions) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
             const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
-                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/
+                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/json"),
                 method: "POST",
                 headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                 contentType: "application/json",
                 requestType: "json",
                 body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
-                responseType: "
+                responseType: "sse",
                 timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                 maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                 abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
             });
             if (_response.ok) {
-                return {
+                return {
+                    data: new core.Stream({
+                        stream: _response.body,
+                        parse: (data) => __awaiter(this, void 0, void 0, function* () {
+                            return serializers.tts.SnippetAudioChunk.parseOrThrow(data, {
+                                unrecognizedObjectKeys: "passthrough",
+                                allowUnrecognizedUnionMembers: true,
+                                allowUnrecognizedEnumValues: true,
+                                breadcrumbsPrefix: ["response"],
+                            });
+                        }),
+                        signal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
+                        eventShape: {
+                            type: "json",
+                            messageTerminator: "\n",
+                        },
+                    }),
+                    rawResponse: _response.rawResponse,
+                };
             }
             if (_response.error.reason === "status-code") {
                 switch (_response.error.statusCode) {
@@ -274,7 +285,7 @@ class Tts {
                             rawResponse: _response.rawResponse,
                         });
                     case "timeout":
-                        throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/
+                        throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/json.");
                     case "unknown":
                         throw new errors.HumeError({
                             message: _response.error.errorMessage,
@@ -285,47 +296,28 @@ class Tts {
     }
     /**
      * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     *
-     * The response is a stream of JSON objects including audio encoded in base64.
+     * @throws {@link Hume.tts.UnprocessableEntityError}
      */
-
-        return core.HttpResponsePromise.fromPromise(this.
+    synthesizeFileStreaming(request, requestOptions) {
+        return core.HttpResponsePromise.fromPromise(this.__synthesizeFileStreaming(request, requestOptions));
     }
-
+    __synthesizeFileStreaming(request, requestOptions) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
             const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
-                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/
+                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/file"),
                 method: "POST",
                 headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                 contentType: "application/json",
                 requestType: "json",
                 body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
-                responseType: "
+                responseType: "streaming",
                 timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                 maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                 abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
             });
             if (_response.ok) {
-                return {
-                    data: new core.Stream({
-                        stream: _response.body,
-                        parse: (data) => __awaiter(this, void 0, void 0, function* () {
-                            return serializers.tts.SnippetAudioChunk.parseOrThrow(data, {
-                                unrecognizedObjectKeys: "passthrough",
-                                allowUnrecognizedUnionMembers: true,
-                                allowUnrecognizedEnumValues: true,
-                                breadcrumbsPrefix: ["response"],
-                            });
-                        }),
-                        signal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
-                        eventShape: {
-                            type: "json",
-                            messageTerminator: "\n",
-                        },
-                    }),
-                    rawResponse: _response.rawResponse,
-                };
+                return { data: _response.body, rawResponse: _response.rawResponse };
             }
             if (_response.error.reason === "status-code") {
                 switch (_response.error.statusCode) {
@@ -352,7 +344,7 @@ class Tts {
                             rawResponse: _response.rawResponse,
                         });
                     case "timeout":
-                        throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/
+                        throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/file.");
                     case "unknown":
                         throw new errors.HumeError({
                             message: _response.error.errorMessage,
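
Net effect of the hunks above: the two streaming methods swapped positions, `synthesizeJsonStreaming` now posts to `/v0/tts/stream/json` and parses newline-delimited JSON into a typed `core.Stream<SnippetAudioChunk>`, while `synthesizeFileStreaming` posts to `/v0/tts/stream/file` and returns the raw byte stream. A consumption sketch, assuming `core.Stream` is async-iterable (as in other Fern-generated SDKs) and that chunks expose a base64 `audio` field like the `Snippet` type:

```typescript
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

// JSON streaming: each element is a parsed SnippetAudioChunk.
const stream = await client.tts.synthesizeJsonStreaming({
    utterances: [{ text: "Hello from 0.13.0!" }],
});
for await (const chunk of stream) {
    // `audio` is assumed here to be a base64-encoded audio fragment.
    console.log(`received ${chunk.audio.length} base64 chars`);
}
```

`synthesizeFileStreaming`, by contrast, now resolves to a Node `stream.Readable`, so its output can be piped straight to a file or a playback process.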

package/api/resources/tts/client/index.d.ts:

@@ -1 +1 @@
-export
+export {};