hume 0.12.2 → 0.13.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/empathic-voice/__package__.yml +688 -735
- package/.mock/definition/empathic-voice/chat.yml +29 -23
- package/.mock/definition/empathic-voice/chatWebhooks.yml +8 -12
- package/.mock/definition/empathic-voice/prompts.yml +2 -2
- package/.mock/definition/empathic-voice/tools.yml +2 -2
- package/.mock/definition/tts/__package__.yml +70 -87
- package/.mock/fern.config.json +1 -1
- package/api/resources/empathicVoice/client/index.d.ts +0 -1
- package/api/resources/empathicVoice/client/index.js +0 -15
- package/api/resources/empathicVoice/errors/index.d.ts +0 -1
- package/api/resources/empathicVoice/errors/index.js +0 -1
- package/api/resources/empathicVoice/resources/chat/client/Client.d.ts +2 -0
- package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts +1 -1
- package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts +1 -1
- package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts +1 -1
- package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts +1 -1
- package/api/resources/empathicVoice/types/AssistantEnd.d.ts +3 -3
- package/api/resources/empathicVoice/types/AssistantInput.d.ts +3 -3
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/api/resources/empathicVoice/types/AudioInput.d.ts +7 -7
- package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/api/resources/empathicVoice/types/Context.d.ts +8 -14
- package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
- package/api/resources/empathicVoice/types/ContextType.js +1 -2
- package/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
- package/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +3 -3
- package/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +1 -1
- package/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +2 -2
- package/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +2 -2
- package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
- package/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +1 -1
- package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +4 -6
- package/api/resources/empathicVoice/types/ReturnUserDefinedTool.d.ts +1 -1
- package/api/resources/empathicVoice/types/SessionSettings.d.ts +30 -30
- package/api/resources/empathicVoice/types/Tool.d.ts +7 -7
- package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +7 -7
- package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/UserInput.d.ts +3 -3
- package/api/resources/empathicVoice/types/UserInterruption.d.ts +5 -5
- package/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
- package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
- package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
- package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/api/resources/empathicVoice/types/index.d.ts +16 -17
- package/api/resources/empathicVoice/types/index.js +16 -17
- package/api/resources/tts/client/Client.d.ts +21 -23
- package/api/resources/tts/client/Client.js +50 -58
- package/api/resources/tts/client/index.d.ts +0 -1
- package/api/resources/tts/client/index.js +0 -15
- package/api/resources/tts/types/PostedTts.d.ts +8 -8
- package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/api/resources/tts/types/Snippet.d.ts +6 -6
- package/dist/api/resources/empathicVoice/client/index.d.ts +0 -1
- package/dist/api/resources/empathicVoice/client/index.js +0 -15
- package/dist/api/resources/empathicVoice/errors/index.d.ts +0 -1
- package/dist/api/resources/empathicVoice/errors/index.js +0 -1
- package/dist/api/resources/empathicVoice/resources/chat/client/Client.d.ts +2 -0
- package/dist/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +7 -7
- package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
- package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
- package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
- package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
- package/dist/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
- package/dist/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +4 -6
- package/dist/api/resources/empathicVoice/types/ReturnUserDefinedTool.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +30 -30
- package/dist/api/resources/empathicVoice/types/Tool.d.ts +7 -7
- package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +7 -7
- package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/UserInput.d.ts +3 -3
- package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +5 -5
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
- package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
- package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
- package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/index.d.ts +16 -17
- package/dist/api/resources/empathicVoice/types/index.js +16 -17
- package/dist/api/resources/tts/client/Client.d.ts +21 -23
- package/dist/api/resources/tts/client/Client.js +50 -58
- package/dist/api/resources/tts/client/index.d.ts +0 -1
- package/dist/api/resources/tts/client/index.js +0 -15
- package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
- package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
- package/dist/serialization/resources/empathicVoice/index.d.ts +0 -1
- package/dist/serialization/resources/empathicVoice/index.js +0 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -17
- package/dist/serialization/resources/empathicVoice/types/index.js +16 -17
- package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
- package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
- package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/dist/serialization/resources/tts/types/Snippet.js +3 -3
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +1 -1
- package/reference.md +24 -78
- package/serialization/resources/empathicVoice/index.d.ts +0 -1
- package/serialization/resources/empathicVoice/index.js +0 -1
- package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.js +1 -1
- package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.js +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
- package/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
- package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
- package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/serialization/resources/empathicVoice/types/index.d.ts +16 -17
- package/serialization/resources/empathicVoice/types/index.js +16 -17
- package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
- package/serialization/resources/tts/types/PostedTts.js +3 -3
- package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/serialization/resources/tts/types/Snippet.js +3 -3
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
- package/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/api/resources/empathicVoice/client/requests/index.js +0 -2
- package/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
- package/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
- package/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
- package/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
- package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
- package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
- package/api/resources/tts/client/requests/index.d.ts +0 -1
- package/api/resources/tts/client/requests/index.js +0 -2
- package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
- package/dist/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/dist/api/resources/empathicVoice/client/requests/index.js +0 -2
- package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
- package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
- package/dist/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
- package/dist/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
- package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
- package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
- package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
- package/dist/api/resources/tts/client/requests/index.js +0 -2
- package/dist/serialization/resources/empathicVoice/client/index.d.ts +0 -1
- package/dist/serialization/resources/empathicVoice/client/index.js +0 -17
- package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
- package/dist/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/dist/serialization/resources/empathicVoice/client/requests/index.js +0 -5
- package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
- package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
- package/serialization/resources/empathicVoice/client/index.d.ts +0 -1
- package/serialization/resources/empathicVoice/client/index.js +0 -17
- package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
- package/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/serialization/resources/empathicVoice/client/requests/index.js +0 -5
- package/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
- package/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
package/.mock/definition/empathic-voice/chat.yml
CHANGED
@@ -4,21 +4,6 @@ channel:
   auth: false
   docs: Chat with Empathic Voice Interface (EVI)
   query-parameters:
-    access_token:
-      type: optional<string>
-      default: ''
-      docs: >-
-        Access token used for authenticating the client. If not provided, an
-        `api_key` must be provided to authenticate.
-
-
-        The access token is generated using both an API key and a Secret key,
-        which provides an additional layer of security compared to using just an
-        API key.
-
-
-        For more details, refer to the [Authentication Strategies
-        Guide](/docs/introduction/api-key#authentication-strategies).
     config_id:
       type: optional<string>
       docs: >-
@@ -47,13 +32,6 @@ channel:

         Include this parameter to apply a specific version of an EVI
         configuration. If omitted, the latest version will be applied.
-    event_limit:
-      type: optional<integer>
-      docs: >-
-        The maximum number of chat events to return from chat history. By
-        default, the system returns up to 300 events (100 events per page × 3
-        pages). Set this parameter to a smaller value to limit the number of
-        events returned.
     resumed_chat_group_id:
       type: optional<string>
       docs: >-
@@ -98,6 +76,12 @@ channel:
         Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs
         of all Chat Groups associated with an API key. This endpoint returns a
         list of all available chat groups.
+    voice_id:
+      type: optional<string>
+      docs: >-
+        The name or ID of the voice from the `Voice Library` to be used as the
+        speaker for this EVI session. This will override the speaker set in the
+        selected configuration.
     verbose_transcription:
       type: optional<boolean>
       default: false
@@ -109,6 +93,28 @@ channel:
         field on a
         [UserMessage](/reference/empathic-voice-interface-evi/chat/chat#receive.UserMessage.type)
         denotes whether the message is "interim" or "final."
+    event_limit:
+      type: optional<integer>
+      docs: >-
+        The maximum number of chat events to return from chat history. By
+        default, the system returns up to 300 events (100 events per page × 3
+        pages). Set this parameter to a smaller value to limit the number of
+        events returned.
+    access_token:
+      type: optional<string>
+      default: ''
+      docs: >-
+        Access token used for authenticating the client. If not provided, an
+        `api_key` must be provided to authenticate.
+
+
+        The access token is generated using both an API key and a Secret key,
+        which provides an additional layer of security compared to using just an
+        API key.
+
+
+        For more details, refer to the [Authentication Strategies
+        Guide](/docs/introduction/api-key#authentication-strategies).
     api_key:
       type: optional<string>
       default: ''
@@ -130,8 +136,8 @@ channel:
     - messages:
         - type: publish
           body:
-            data: data
             type: audio_input
+            data: data
         - type: subscribe
           body:
             type: assistant_end
package/.mock/definition/empathic-voice/chatWebhooks.yml
CHANGED
@@ -9,13 +9,11 @@ webhooks:
     payload: root.WebhookEventChatEnded
     examples:
       - payload:
-          chat_group_id:
-          chat_id:
-
-          duration_seconds:
-          end_reason:
-          end_time: 1716244958546
-          event_name: chat_ended
+          chat_group_id: chat_group_id
+          chat_id: chat_id
+          end_time: 1
+          duration_seconds: 1
+          end_reason: ACTIVE
     docs: Sent when an EVI chat ends.
   chatStarted:
     audiences: []
@@ -25,10 +23,8 @@ webhooks:
     payload: root.WebhookEventChatStarted
     examples:
       - payload:
-          chat_group_id:
-          chat_id:
-
+          chat_group_id: chat_group_id
+          chat_id: chat_id
+          start_time: 1
           chat_start_type: new_chat_group
-          event_name: chat_started
-          start_time: 1716244940648
     docs: Sent when an EVI chat is started.
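The webhook payload examples above switch from empty values plus an `event_name` field to concrete placeholder values. A minimal TypeScript sketch of consuming these payloads; the union and camelCase fields come from the SDK's generated `WebhookEventChatEnded` / `WebhookEventChatStarted` types, while the handler function itself is hypothetical:

    import { Hume } from "hume";

    type EviWebhookEvent =
        | Hume.empathicVoice.WebhookEventChatEnded
        | Hume.empathicVoice.WebhookEventChatStarted;

    // Hypothetical handler: endTime only exists on chat_ended payloads,
    // so an `in` check narrows the union.
    function handleEviWebhook(event: EviWebhookEvent): void {
        if ("endTime" in event) {
            console.log(`chat ${event.chatId} ended after ${event.durationSeconds}s (${event.endReason})`);
        } else {
            console.log(`chat ${event.chatId} started at ${event.startTime} (${event.chatStartType})`);
        }
    }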
package/.mock/definition/empathic-voice/prompts.yml
CHANGED
@@ -126,7 +126,7 @@ service:
           text:
             type: string
             docs: >-
-              Instructions used to shape EVI’s behavior, responses, and style.
+              Instructions used to shape EVI's behavior, responses, and style.


              You can use the Prompt to define a specific goal or role for
@@ -278,7 +278,7 @@ service:
           text:
             type: string
             docs: >-
-              Instructions used to shape EVI’s behavior, responses, and style
+              Instructions used to shape EVI's behavior, responses, and style
               for this version of the Prompt.


package/.mock/definition/empathic-voice/tools.yml
CHANGED
@@ -145,7 +145,7 @@ service:
               the Tool.


-              These parameters define the inputs needed for the Tool’s
+              These parameters define the inputs needed for the Tool's
               execution, including the expected data type and description for
               each input field. Structured as a stringified JSON schema, this
               format ensures the Tool receives data in the expected format.
@@ -324,7 +324,7 @@ service:
               the Tool.


-              These parameters define the inputs needed for the Tool’s
+              These parameters define the inputs needed for the Tool's
               execution, including the expected data type and description for
               each input field. Structured as a stringified JSON schema, this
               format ensures the Tool receives data in the expected format.
package/.mock/definition/tts/__package__.yml
CHANGED
@@ -34,23 +34,6 @@ service:
       request:
         body:
           type: PostedTts
-        query-parameters:
-          access_token:
-            type: optional<string>
-            default: ''
-            docs: >-
-              Access token used for authenticating the client. If not provided,
-              an `api_key` must be provided to authenticate.
-
-
-              The access token is generated using both an API key and a Secret
-              key, which provides an additional layer of security compared to
-              using just an API key.
-
-
-              For more details, refer to the [Authentication Strategies
-              Guide](/docs/introduction/api-key#authentication-strategies).
-        name: SynthesizeJsonRequest
         content-type: application/json
       response:
         docs: Successful Response
@@ -137,26 +120,30 @@ service:
             format:
               type: mp3
             num_generations: 1
-    synthesize-
-      path: /v0/tts/stream/
+    synthesize-json-streaming:
+      path: /v0/tts/stream/json
       method: POST
       auth: true
       docs: >-
         Streams synthesized speech using the specified voice. If no voice is
         provided, a novel voice will be generated dynamically. Optionally,
         additional context can be included to influence the speech's style and
-        prosody.
+        prosody.
+
+
+        The response is a stream of JSON objects including audio encoded in
+        base64.
       source:
         openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed
+      display-name: Text-to-speech (Streamed JSON)
       request:
         body:
           type: PostedTts
         content-type: application/json
-      response:
-        docs:
-        type:
-
+      response-stream:
+        docs: Successful Response
+        type: SnippetAudioChunk
+        format: json
       errors:
         - UnprocessableEntityError
       examples:
@@ -168,30 +155,26 @@ service:
             voice:
               name: Male English Actor
               provider: HUME_AI
-    synthesize-
-      path: /v0/tts/stream/
+    synthesize-file-streaming:
+      path: /v0/tts/stream/file
       method: POST
       auth: true
       docs: >-
         Streams synthesized speech using the specified voice. If no voice is
         provided, a novel voice will be generated dynamically. Optionally,
         additional context can be included to influence the speech's style and
-        prosody.
-
-
-        The response is a stream of JSON objects including audio encoded in
-        base64.
+        prosody.
       source:
         openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed
+      display-name: Text-to-speech (Streamed File)
      request:
         body:
           type: PostedTts
         content-type: application/json
-      response:
-        docs:
-        type:
-
+      response:
+        docs: OK
+        type: file
+        status-code: 200
       errors:
         - UnprocessableEntityError
       examples:
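Taken together, the two hunks above split the old truncated streaming endpoint into two explicit routes: /v0/tts/stream/json, which streams SnippetAudioChunk JSON objects carrying base64 audio, and /v0/tts/stream/file, which returns a raw audio file stream. A usage sketch, assuming the generated client exposes these endpoints under the usual Fern naming as `synthesizeJsonStreaming` and `synthesizeFileStreaming`:

    import { HumeClient } from "hume";

    const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    // /v0/tts/stream/json: the response-stream above types each chunk
    // as a SnippetAudioChunk whose audio field is base64-encoded.
    const chunks = await hume.tts.synthesizeJsonStreaming({
        utterances: [{ text: "Streaming speech, chunk by chunk." }],
    });
    for await (const chunk of chunks) {
        const audio = Buffer.from(chunk.audio, "base64"); // decode base64 audio
        console.log(`received ${audio.length} bytes`);
    }

The file variant (`synthesizeFileStreaming`, per the `type: file` response) would instead yield binary audio suitable for piping straight to disk or a player.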
@@ -246,25 +229,25 @@ types:
       openapi: tts-openapi.yml
   ReturnGeneration:
     properties:
-      audio:
+      generation_id:
         type: string
         docs: >-
-          The generated audio output in the requested format, encoded as a
-          base64 string.
+          A unique ID associated with this TTS generation that can be used as
+          context for generating consistent speech style and prosody across
+          multiple requests.
       duration:
         type: double
         docs: Duration of the generated audio in seconds.
-      encoding:
-        type: AudioEncoding
       file_size:
         type: integer
         docs: Size of the generated audio in bytes.
-      generation_id:
+      encoding:
+        type: AudioEncoding
+      audio:
         type: string
         docs: >-
-          A unique ID associated with this TTS generation that can be used as
-          context for generating consistent speech style and prosody across
-          multiple requests.
+          The generated audio output in the requested format, encoded as a
+          base64 string.
       snippets:
         docs: >-
           A list of snippet groups where each group corresponds to an utterance
@@ -317,9 +300,18 @@ types:
           Utterances to use as context for generating consistent speech style
           and prosody across multiple requests. These will not be converted to
           speech output.
-      format:
-        type: optional<Format>
-        docs: Specifies the output audio file format.
+      utterances:
+        docs: >-
+          A list of **Utterances** to be converted to speech output.
+
+
+          An **Utterance** is a unit of input for
+          [Octave](/docs/text-to-speech-tts/overview), and includes input
+          `text`, an optional `description` to serve as the prompt for how the
+          speech should be delivered, an optional `voice` specification, and
+          additional controls to guide delivery for `speed` and
+          `trailing_silence`.
+        type: list<PostedUtterance>
       num_generations:
         type: optional<integer>
         docs: Number of generations of the audio to produce.
@@ -327,6 +319,9 @@ types:
         validation:
           min: 1
           max: 5
+      format:
+        type: optional<Format>
+        docs: Specifies the output audio file format.
       split_utterances:
         type: optional<boolean>
         docs: >-
@@ -355,18 +350,6 @@ types:
           if disabled, each chunk's audio will be its own audio file, each with
           its own headers (if applicable).
         default: false
-      utterances:
-        docs: >-
-          A list of **Utterances** to be converted to speech output.
-
-
-          An **Utterance** is a unit of input for
-          [Octave](/docs/text-to-speech-tts/overview), and includes input
-          `text`, an optional `description` to serve as the prompt for how the
-          speech should be delivered, an optional `voice` specification, and
-          additional controls to guide delivery for `speed` and
-          `trailing_silence`.
-        type: list<PostedUtterance>
       instant_mode:
         type: optional<boolean>
         docs: >-
@@ -393,14 +376,14 @@ types:
       openapi: tts-openapi.yml
   ReturnTts:
     properties:
-      generations:
-        type: list<ReturnGeneration>
       request_id:
         type: optional<string>
         docs: >-
           A unique ID associated with this request for tracking and
           troubleshooting. Use this ID when contacting [support](/support) for
           troubleshooting assistance.
+      generations:
+        type: list<ReturnGeneration>
     source:
       openapi: tts-openapi.yml
   ReturnVoice:
@@ -428,28 +411,28 @@ types:
       openapi: tts-openapi.yml
   Snippet:
     properties:
-      audio:
-        type: string
-        docs: >-
-          The segmented audio output in the requested format, encoded as a
-          base64 string.
-      generation_id:
-        type: string
-        docs: The generation ID this snippet corresponds to.
       id:
         type: string
         docs: A unique ID associated with this **Snippet**.
       text:
         type: string
         docs: The text for this **Snippet**.
+      generation_id:
+        type: string
+        docs: The generation ID this snippet corresponds to.
+      utterance_index:
+        type: optional<integer>
+        docs: The index of the utterance in the request this snippet corresponds to.
       transcribed_text:
         type: optional<string>
         docs: >-
           The transcribed text of the generated audio. It is only present if
           `instant_mode` is set to `false`.
-      utterance_index:
-        type: optional<integer>
-        docs: The index of the utterance in the request this snippet corresponds to.
+      audio:
+        type: string
+        docs: >-
+          The segmented audio output in the requested format, encoded as a
+          base64 string.
     source:
       openapi: tts-openapi.yml
   SnippetAudioChunk:
@@ -458,6 +441,11 @@ types:
       openapi: tts-openapi.yml
   PostedUtterance:
     properties:
+      text:
+        type: string
+        docs: The input text to be synthesized into speech.
+        validation:
+          maxLength: 5000
       description:
         type: optional<string>
         docs: >-
@@ -479,6 +467,14 @@ types:
           guide](/docs/text-to-speech-tts/prompting) for design tips.
         validation:
           maxLength: 1000
+      voice:
+        type: optional<PostedUtteranceVoice>
+        docs: >-
+          The `name` or `id` associated with a **Voice** from the **Voice
+          Library** to be used as the speaker for this and all subsequent
+          `utterances`, until the `voice` field is updated again.
+
+          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
       speed:
         type: optional<double>
         docs: >-
@@ -488,11 +484,6 @@ types:
         validation:
           min: 0.5
           max: 2
-      text:
-        type: string
-        docs: The input text to be synthesized into speech.
-        validation:
-          maxLength: 5000
       trailing_silence:
         type: optional<double>
         docs: Duration of trailing silence (in seconds) to add to this utterance
@@ -500,14 +491,6 @@ types:
         validation:
           min: 0
           max: 5
-      voice:
-        type: optional<PostedUtteranceVoice>
-        docs: >-
-          The `name` or `id` associated with a **Voice** from the **Voice
-          Library** to be used as the speaker for this and all subsequent
-          `utterances`, until the `voice` field is updated again.
-
-          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
     source:
       openapi: tts-openapi.yml
   ValidationErrorLocItem:
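Net effect of the type reshuffles above: `text` becomes the leading documented field of PostedUtterance, `voice` moves up next to `description`, and PostedTts gains an explicit `format` field after `num_generations`. A request sketch against the documented constraints (`synthesizeJson` is the assumed generated method name for the non-streaming JSON endpoint):

    import { HumeClient } from "hume";

    const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    const result = await hume.tts.synthesizeJson({
        utterances: [
            {
                text: "Welcome to Octave.",              // required, maxLength 5000
                description: "warm, unhurried narrator", // optional prompt, maxLength 1000
                voice: { name: "Male English Actor", provider: "HUME_AI" },
                speed: 1.0,                              // 0.5 to 2
                trailingSilence: 0.5,                    // 0 to 5 seconds
            },
        ],
        numGenerations: 1,                               // 1 to 5
        format: { type: "mp3" },                         // new explicit format field
    });

    // ReturnGeneration now leads with generation_id; reuse it as context
    // for prosody-consistent follow-up requests.
    console.log(result.generations[0].generationId);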
package/.mock/fern.config.json
CHANGED

package/api/resources/empathicVoice/client/index.js
CHANGED
@@ -1,17 +1,2 @@
 "use strict";
-var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    var desc = Object.getOwnPropertyDescriptor(m, k);
-    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-        desc = { enumerable: true, get: function() { return m[k]; } };
-    }
-    Object.defineProperty(o, k2, desc);
-}) : (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    o[k2] = m[k];
-}));
-var __exportStar = (this && this.__exportStar) || function(m, exports) {
-    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
-};
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./requests"), exports);
package/api/resources/empathicVoice/errors/index.js
CHANGED
@@ -14,5 +14,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./UnprocessableEntityError"), exports);
 __exportStar(require("./BadRequestError"), exports);
package/api/resources/empathicVoice/resources/chat/client/Client.d.ts
CHANGED
@@ -22,6 +22,8 @@ export declare namespace Chat {
         resumedChatGroupId?: string;
         /** A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/empathic-voice-interface-evi/chat/chat#receive.User%20Message.interim) field on a [UserMessage](/reference/empathic-voice-interface-evi/chat/chat#receive.User%20Message.type) denotes whether the message is "interim" or "final." */
         verboseTranscription?: boolean;
+        /** ID of the Voice to use for this chat. If specified, will override the voice set in the Config */
+        voiceId?: string;
         /** Extra query parameters sent at WebSocket connection */
         queryParams?: Record<string, string | string[] | object | object[]>;
     }
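These two added lines are the TypeScript surface of the new voice_id query parameter from chat.yml above. A connection sketch, assuming the socket API the SDK documents for `empathicVoice.chat.connect` (the IDs are placeholders):

    import { HumeClient } from "hume";

    const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    // voiceId maps to the voice_id query parameter and overrides the
    // speaker configured on the EVI config for this session only.
    const socket = hume.empathicVoice.chat.connect({
        configId: "<your-config-id>",
        voiceId: "<voice-name-or-id>",
    });

    socket.on("message", (msg) => {
        if (msg.type === "assistant_end") {
            socket.close();
        }
    });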
package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts
CHANGED
@@ -14,7 +14,7 @@ export interface PostedPrompt {
     /** An optional description of the Prompt version. */
     versionDescription?: string;
     /**
-     * Instructions used to shape EVI’s behavior, responses, and style.
+     * Instructions used to shape EVI's behavior, responses, and style.
      *
      * You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles.
      *
package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts
CHANGED
@@ -12,7 +12,7 @@ export interface PostedPromptVersion {
     /** An optional description of the Prompt version. */
     versionDescription?: string;
     /**
-     * Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt.
+     * Instructions used to shape EVI's behavior, responses, and style for this version of the Prompt.
      *
     * You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles.
      *
package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts
CHANGED
@@ -21,7 +21,7 @@ export interface PostedUserDefinedTool {
     /**
      * Stringified JSON defining the parameters used by this version of the Tool.
      *
-     * These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
+     * These parameters define the inputs needed for the Tool's execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
      */
     parameters: string;
     /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */
package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts
CHANGED
@@ -18,7 +18,7 @@ export interface PostedUserDefinedToolVersion {
     /**
      * Stringified JSON defining the parameters used by this version of the Tool.
      *
-     * These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
+     * These parameters define the inputs needed for the Tool's execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
      */
     parameters: string;
     /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */
package/api/resources/empathicVoice/types/AssistantEnd.d.ts
CHANGED
@@ -5,12 +5,12 @@
  * When provided, the output is an assistant end message.
  */
 export interface AssistantEnd {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; for an Assistant End message, this must be `assistant_end`.
      *
-     * This message indicates the conclusion of the assistant’s response, signaling that the assistant has finished speaking for the current conversational turn.
+     * This message indicates the conclusion of the assistant's response, signaling that the assistant has finished speaking for the current conversational turn.
      */
     type: "assistant_end";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
package/api/resources/empathicVoice/types/AssistantInput.d.ts
CHANGED
@@ -5,14 +5,14 @@
  * When provided, the input is spoken by EVI.
  */
 export interface AssistantInput {
+    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
+    type: "assistant_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
      * Assistant text to synthesize into spoken audio and insert into the conversation.
      *
-     * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the user’s expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
+     * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the user's expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
      */
     text: string;
-    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
-    type: "assistant_input";
 }
package/api/resources/empathicVoice/types/AssistantMessage.d.ts
CHANGED
@@ -6,20 +6,20 @@ import * as Hume from "../../../index";
  * When provided, the output is an assistant message.
  */
 export interface AssistantMessage {
+    /**
+     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
+     *
+     * This message contains both a transcript of the assistant's response and the expression measurement predictions of the assistant's audio output.
+     */
+    type: "assistant_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
-    fromText: boolean;
     /** ID of the assistant message. Allows the Assistant Message to be tracked and referenced. */
     id?: string;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
-    /**
-     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
-     *
-     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
-     */
-    type: "assistant_message";
+    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
+    fromText: boolean;
 }
package/api/resources/empathicVoice/types/AssistantProsody.d.ts
CHANGED
@@ -6,16 +6,16 @@ import * as Hume from "../../../index";
  * When provided, the output is an Assistant Prosody message.
  */
 export interface AssistantProsody {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unique identifier for the segment. */
-    id?: string;
-    /** Inference model results. */
-    models: Hume.empathicVoice.Inference;
     /**
      * The type of message sent through the socket; for an Assistant Prosody message, this must be `assistant_PROSODY`.
      *
      * This message the expression measurement predictions of the assistant's audio output.
      */
     type: "assistant_prosody";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Inference model results. */
+    models: Hume.empathicVoice.Inference;
+    /** Unique identifier for the segment. */
+    id?: string;
 }
package/api/resources/empathicVoice/types/AudioConfiguration.d.ts
CHANGED
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface AudioConfiguration {
-    /** Number of audio channels. */
-    channels: number;
     /** Encoding format of the audio input, such as `linear16`. */
     encoding: Hume.empathicVoice.Encoding;
+    /** Number of audio channels. */
+    channels: number;
     /** Audio sample rate. Number of samples per second in the audio input, measured in Hertz. */
     sampleRate: number;
 }
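The type hunks in this series (AssistantEnd through AudioConfiguration) only reorder interface members and swap curly apostrophes for straight ones in doc comments, so existing object literals and call sites compile unchanged. For reference, a sketch of where AudioConfiguration is actually supplied, assuming the socket's documented `sendSessionSettings` helper:

    import { HumeClient } from "hume";

    const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });
    const socket = hume.empathicVoice.chat.connect({ configId: "<your-config-id>" });

    socket.on("open", () => {
        // AudioConfiguration fields: encoding, channels, sampleRate.
        // Member order in the .d.ts changed; literals are order-independent.
        socket.sendSessionSettings({
            audio: { encoding: "linear16", channels: 1, sampleRate: 16000 },
        });
    });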