hume 0.12.2 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/empathic-voice/__package__.yml +667 -712
- package/.mock/definition/empathic-voice/chat.yml +29 -23
- package/.mock/definition/empathic-voice/chatWebhooks.yml +3 -3
- package/.mock/definition/tts/__package__.yml +70 -87
- package/.mock/fern.config.json +1 -1
- package/api/resources/empathicVoice/client/index.d.ts +0 -1
- package/api/resources/empathicVoice/client/index.js +0 -15
- package/api/resources/empathicVoice/errors/index.d.ts +0 -1
- package/api/resources/empathicVoice/errors/index.js +0 -1
- package/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
- package/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
- package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
- package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/api/resources/empathicVoice/types/Context.d.ts +8 -14
- package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
- package/api/resources/empathicVoice/types/ContextType.js +1 -2
- package/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
- package/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
- package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
- package/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
- package/api/resources/empathicVoice/types/Tool.d.ts +6 -6
- package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
- package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
- package/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
- package/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
- package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
- package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
- package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/api/resources/empathicVoice/types/index.d.ts +16 -17
- package/api/resources/empathicVoice/types/index.js +16 -17
- package/api/resources/tts/client/Client.d.ts +21 -23
- package/api/resources/tts/client/Client.js +50 -58
- package/api/resources/tts/client/index.d.ts +0 -1
- package/api/resources/tts/client/index.js +0 -15
- package/api/resources/tts/types/PostedTts.d.ts +8 -8
- package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/api/resources/tts/types/Snippet.d.ts +6 -6
- package/dist/api/resources/empathicVoice/client/index.d.ts +0 -1
- package/dist/api/resources/empathicVoice/client/index.js +0 -15
- package/dist/api/resources/empathicVoice/errors/index.d.ts +0 -1
- package/dist/api/resources/empathicVoice/errors/index.js +0 -1
- package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
- package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
- package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
- package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
- package/dist/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
- package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
- package/dist/api/resources/empathicVoice/types/Tool.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
- package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
- package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
- package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
- package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
- package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
- package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
- package/dist/api/resources/empathicVoice/types/index.d.ts +16 -17
- package/dist/api/resources/empathicVoice/types/index.js +16 -17
- package/dist/api/resources/tts/client/Client.d.ts +21 -23
- package/dist/api/resources/tts/client/Client.js +50 -58
- package/dist/api/resources/tts/client/index.d.ts +0 -1
- package/dist/api/resources/tts/client/index.js +0 -15
- package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
- package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
- package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
- package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
- package/dist/serialization/resources/empathicVoice/index.d.ts +0 -1
- package/dist/serialization/resources/empathicVoice/index.js +0 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -17
- package/dist/serialization/resources/empathicVoice/types/index.js +16 -17
- package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
- package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
- package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/dist/serialization/resources/tts/types/Snippet.js +3 -3
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +1 -1
- package/reference.md +24 -78
- package/serialization/resources/empathicVoice/index.d.ts +0 -1
- package/serialization/resources/empathicVoice/index.js +0 -1
- package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
- package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
- package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
- package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/Context.js +1 -1
- package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
- package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
- package/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
- package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
- package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
- package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/Tool.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
- package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
- package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
- package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
- package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
- package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
- package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
- package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
- package/serialization/resources/empathicVoice/types/index.d.ts +16 -17
- package/serialization/resources/empathicVoice/types/index.js +16 -17
- package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
- package/serialization/resources/tts/types/PostedTts.js +3 -3
- package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
- package/serialization/resources/tts/types/PostedUtterance.js +2 -2
- package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
- package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
- package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
- package/serialization/resources/tts/types/ReturnTts.js +1 -1
- package/serialization/resources/tts/types/Snippet.d.ts +3 -3
- package/serialization/resources/tts/types/Snippet.js +3 -3
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
- package/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/api/resources/empathicVoice/client/requests/index.js +0 -2
- package/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
- package/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
- package/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
- package/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
- package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
- package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
- package/api/resources/tts/client/requests/index.d.ts +0 -1
- package/api/resources/tts/client/requests/index.js +0 -2
- package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
- package/dist/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/dist/api/resources/empathicVoice/client/requests/index.js +0 -2
- package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
- package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
- package/dist/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
- package/dist/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
- package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
- package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
- package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
- package/dist/api/resources/tts/client/requests/index.js +0 -2
- package/dist/serialization/resources/empathicVoice/client/index.d.ts +0 -1
- package/dist/serialization/resources/empathicVoice/client/index.js +0 -17
- package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
- package/dist/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/dist/serialization/resources/empathicVoice/client/requests/index.js +0 -5
- package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
- package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
- package/serialization/resources/empathicVoice/client/index.d.ts +0 -1
- package/serialization/resources/empathicVoice/client/index.js +0 -17
- package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
- package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
- package/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
- package/serialization/resources/empathicVoice/client/requests/index.js +0 -5
- package/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
- package/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
package/.mock/definition/empathic-voice/chat.yml
CHANGED
```diff
@@ -4,21 +4,6 @@ channel:
   auth: false
   docs: Chat with Empathic Voice Interface (EVI)
   query-parameters:
-    access_token:
-      type: optional<string>
-      default: ''
-      docs: >-
-        Access token used for authenticating the client. If not provided, an
-        `api_key` must be provided to authenticate.
-
-
-        The access token is generated using both an API key and a Secret key,
-        which provides an additional layer of security compared to using just an
-        API key.
-
-
-        For more details, refer to the [Authentication Strategies
-        Guide](/docs/introduction/api-key#authentication-strategies).
     config_id:
       type: optional<string>
       docs: >-
@@ -47,13 +32,6 @@ channel:
 
         Include this parameter to apply a specific version of an EVI
         configuration. If omitted, the latest version will be applied.
-    event_limit:
-      type: optional<integer>
-      docs: >-
-        The maximum number of chat events to return from chat history. By
-        default, the system returns up to 300 events (100 events per page × 3
-        pages). Set this parameter to a smaller value to limit the number of
-        events returned.
     resumed_chat_group_id:
       type: optional<string>
       docs: >-
@@ -98,6 +76,12 @@ channel:
         Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs
         of all Chat Groups associated with an API key. This endpoint returns a
         list of all available chat groups.
+    voice_id:
+      type: optional<string>
+      docs: >-
+        The name or ID of the voice from the `Voice Library` to be used as the
+        speaker for this EVI session. This will override the speaker set in the
+        selected configuration.
     verbose_transcription:
       type: optional<boolean>
       default: false
@@ -109,6 +93,28 @@ channel:
         field on a
         [UserMessage](/reference/empathic-voice-interface-evi/chat/chat#receive.UserMessage.type)
         denotes whether the message is "interim" or "final."
+    event_limit:
+      type: optional<integer>
+      docs: >-
+        The maximum number of chat events to return from chat history. By
+        default, the system returns up to 300 events (100 events per page × 3
+        pages). Set this parameter to a smaller value to limit the number of
+        events returned.
+    access_token:
+      type: optional<string>
+      default: ''
+      docs: >-
+        Access token used for authenticating the client. If not provided, an
+        `api_key` must be provided to authenticate.
+
+
+        The access token is generated using both an API key and a Secret key,
+        which provides an additional layer of security compared to using just an
+        API key.
+
+
+        For more details, refer to the [Authentication Strategies
+        Guide](/docs/introduction/api-key#authentication-strategies).
     api_key:
       type: optional<string>
       default: ''
@@ -130,8 +136,8 @@ channel:
   - messages:
       - type: publish
         body:
-          data: data
           type: audio_input
+          data: data
      - type: subscribe
        body:
          type: assistant_end
```
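The net effect on the chat channel: `access_token` and `event_limit` move to the end of the query-parameter list, and a new `voice_id` parameter is introduced. A minimal sketch of opening the chat socket with it (the `wss://api.hume.ai/v0/evi/chat` URL comes from Hume's public docs rather than this hunk, and the voice and config values are placeholders):

```typescript
// Sketch: opening an EVI chat WebSocket with the new voice_id query
// parameter. The endpoint URL is assumed from Hume's public docs; the
// voice name/ID and config ID are placeholders.
const params = new URLSearchParams({
    api_key: process.env.HUME_API_KEY ?? "",
    config_id: "<your-evi-config-id>",
    // New in 0.13.0: overrides the speaker set in the selected configuration.
    voice_id: "<voice-name-or-id-from-the-voice-library>",
});
const socket = new WebSocket(`wss://api.hume.ai/v0/evi/chat?${params.toString()}`);
socket.addEventListener("open", () => console.log("EVI chat connected"));
```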
package/.mock/definition/empathic-voice/chatWebhooks.yml
CHANGED
```diff
@@ -12,10 +12,10 @@ webhooks:
           chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
           chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
           config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
+          event_name: chat_ended
+          end_time: 1716244958546
           duration_seconds: 180
           end_reason: USER_ENDED
-          end_time: 1716244958546
-          event_name: chat_ended
     docs: Sent when an EVI chat ends.
   chatStarted:
     audiences: []
@@ -28,7 +28,7 @@ webhooks:
           chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
           chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
           config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
-          chat_start_type: new_chat_group
           event_name: chat_started
           start_time: 1716244940648
+          chat_start_type: new_chat_group
     docs: Sent when an EVI chat is started.
```
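Only the example field order changed here, but it highlights that `event_name` is the discriminator a webhook receiver should branch on. A hedged sketch of the two payload shapes, with field types inferred from the example values above rather than from a published schema:

```typescript
// Payload shapes inferred from the chatEnded / chatStarted examples in
// this diff; types are read off the example values, not a schema.
interface ChatEndedPayload {
    event_name: "chat_ended";
    chat_group_id: string;
    chat_id: string;
    config_id: string;
    end_time: number; // epoch milliseconds, e.g. 1716244958546
    duration_seconds: number;
    end_reason: string; // e.g. "USER_ENDED"
}
interface ChatStartedPayload {
    event_name: "chat_started";
    chat_group_id: string;
    chat_id: string;
    config_id: string;
    start_time: number; // epoch milliseconds
    chat_start_type: string; // e.g. "new_chat_group"
}
type EviWebhookEvent = ChatEndedPayload | ChatStartedPayload;

function handleEviWebhook(event: EviWebhookEvent): void {
    if (event.event_name === "chat_ended") {
        console.log(`Chat ${event.chat_id} ended after ${event.duration_seconds}s (${event.end_reason})`);
    } else {
        console.log(`Chat ${event.chat_id} started (${event.chat_start_type})`);
    }
}
```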
package/.mock/definition/tts/__package__.yml
CHANGED
```diff
@@ -34,23 +34,6 @@ service:
       request:
         body:
           type: PostedTts
-        query-parameters:
-          access_token:
-            type: optional<string>
-            default: ''
-            docs: >-
-              Access token used for authenticating the client. If not provided,
-              an `api_key` must be provided to authenticate.
-
-
-              The access token is generated using both an API key and a Secret
-              key, which provides an additional layer of security compared to
-              using just an API key.
-
-
-              For more details, refer to the [Authentication Strategies
-              Guide](/docs/introduction/api-key#authentication-strategies).
-        name: SynthesizeJsonRequest
         content-type: application/json
       response:
         docs: Successful Response
```
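With the `access_token` query parameter gone from this route (and the `SynthesizeJsonRequest` wrapper name dropped), synthesis requests authenticate through headers alone. A minimal sketch; the `/v0/tts` path and `X-Hume-Api-Key` header name are taken from Hume's public docs rather than from this hunk:

```typescript
// Sketch: calling the JSON synthesize route with header-only auth, now
// that access_token is no longer accepted as a query parameter.
const res = await fetch("https://api.hume.ai/v0/tts", {
    method: "POST",
    headers: {
        "Content-Type": "application/json",
        "X-Hume-Api-Key": process.env.HUME_API_KEY ?? "",
    },
    body: JSON.stringify({
        utterances: [{ text: "Hello from Octave!" }],
    }),
});
const tts = await res.json(); // ReturnTts: request_id plus a list of generations
```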
@@ -137,26 +120,30 @@ service:
             format:
               type: mp3
             num_generations: 1
-    synthesize-
-      path: /v0/tts/stream/
+    synthesize-json-streaming:
+      path: /v0/tts/stream/json
       method: POST
       auth: true
       docs: >-
         Streams synthesized speech using the specified voice. If no voice is
         provided, a novel voice will be generated dynamically. Optionally,
         additional context can be included to influence the speech's style and
-        prosody.
+        prosody.
+
+
+        The response is a stream of JSON objects including audio encoded in
+        base64.
       source:
         openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed
+      display-name: Text-to-speech (Streamed JSON)
       request:
         body:
           type: PostedTts
         content-type: application/json
-      response:
-        docs:
-        type:
-
+      response-stream:
+        docs: Successful Response
+        type: SnippetAudioChunk
+        format: json
       errors:
         - UnprocessableEntityError
       examples:
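The streamed-JSON route is now named `synthesize-json-streaming` with an explicit `/v0/tts/stream/json` path and a typed `response-stream` of `SnippetAudioChunk` values. A hedged consumption sketch; newline-delimited JSON framing is an assumption on my part, since the definition only says the response is a stream of JSON objects:

```typescript
// Sketch: consuming /v0/tts/stream/json, documented above as a stream of
// JSON objects (SnippetAudioChunk) with base64-encoded audio. The
// newline-delimited framing is an assumption, not stated in this diff.
const res = await fetch("https://api.hume.ai/v0/tts/stream/json", {
    method: "POST",
    headers: {
        "Content-Type": "application/json",
        "X-Hume-Api-Key": process.env.HUME_API_KEY ?? "",
    },
    body: JSON.stringify({ utterances: [{ text: "Hello!" }] }),
});
const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffer = "";
for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let newline: number;
    while ((newline = buffer.indexOf("\n")) >= 0) {
        const line = buffer.slice(0, newline).trim();
        buffer = buffer.slice(newline + 1);
        if (!line) continue;
        const chunk = JSON.parse(line); // one SnippetAudioChunk
        const audioBytes = Buffer.from(chunk.audio, "base64");
        // ...queue audioBytes for playback or append to a file
    }
}
```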
@@ -168,30 +155,26 @@ service:
             voice:
               name: Male English Actor
               provider: HUME_AI
-    synthesize-
-      path: /v0/tts/stream/
+    synthesize-file-streaming:
+      path: /v0/tts/stream/file
       method: POST
       auth: true
       docs: >-
         Streams synthesized speech using the specified voice. If no voice is
         provided, a novel voice will be generated dynamically. Optionally,
         additional context can be included to influence the speech's style and
-        prosody.
-
-
-        The response is a stream of JSON objects including audio encoded in
-        base64.
+        prosody.
       source:
         openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed
+      display-name: Text-to-speech (Streamed File)
       request:
         body:
           type: PostedTts
         content-type: application/json
-      response
-        docs:
-        type:
-
+      response:
+        docs: OK
+        type: file
+        status-code: 200
       errors:
         - UnprocessableEntityError
       examples:
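By contrast, the `synthesize-file-streaming` route at `/v0/tts/stream/file` now declares a plain `file` response (status 200), so the body is raw audio rather than JSON. A minimal sketch of saving it to disk; the URL and header name are assumptions carried over from Hume's public docs:

```typescript
// Sketch: piping the raw audio stream from /v0/tts/stream/file to disk.
import { createWriteStream } from "node:fs";
import { Readable } from "node:stream";
import { pipeline } from "node:stream/promises";

const res = await fetch("https://api.hume.ai/v0/tts/stream/file", {
    method: "POST",
    headers: {
        "Content-Type": "application/json",
        "X-Hume-Api-Key": process.env.HUME_API_KEY ?? "",
    },
    body: JSON.stringify({
        utterances: [{ text: "Hello!" }],
        format: { type: "mp3" }, // the format field documented on PostedTts below
    }),
});
await pipeline(Readable.fromWeb(res.body as any), createWriteStream("speech.mp3"));
```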
@@ -246,25 +229,25 @@ types:
       openapi: tts-openapi.yml
   ReturnGeneration:
     properties:
-      audio:
+      generation_id:
         type: string
         docs: >-
-          The generated audio output in the requested format, encoded as a
-          base64 string.
+          A unique ID associated with this TTS generation that can be used as
+          context for generating consistent speech style and prosody across
+          multiple requests.
       duration:
         type: double
         docs: Duration of the generated audio in seconds.
-      encoding:
-        type: AudioEncoding
       file_size:
         type: integer
         docs: Size of the generated audio in bytes.
-      generation_id:
+      encoding:
+        type: AudioEncoding
+      audio:
         type: string
         docs: >-
-          A unique ID associated with this TTS generation that can be used as
-          context for generating consistent speech style and prosody across
-          multiple requests.
+          The generated audio output in the requested format, encoded as a
+          base64 string.
       snippets:
         docs: >-
           A list of snippet groups where each group corresponds to an utterance
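ReturnGeneration keeps the same fields but reorders them so `generation_id` leads and the base64 `audio` payload comes last. A small sketch of decoding one generation; the type is transcribed from the YAML above, with `encoding` and `snippets` typed loosely since `AudioEncoding` and `Snippet` are not expanded here:

```typescript
import { writeFileSync } from "node:fs";

// Transcribed from the ReturnGeneration definition above; encoding and
// snippets are typed loosely because their schemas are not shown here.
interface ReturnGeneration {
    generation_id: string;
    duration: number;      // seconds
    file_size: number;     // bytes
    encoding: unknown;     // AudioEncoding
    audio: string;         // base64, in the requested format
    snippets: unknown[][]; // snippet groups, one group per utterance
}

function saveGeneration(gen: ReturnGeneration, path: string): void {
    writeFileSync(path, Buffer.from(gen.audio, "base64"));
    console.log(`Saved ${gen.file_size} bytes (${gen.duration}s) for ${gen.generation_id}`);
}
```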
@@ -317,9 +300,18 @@ types:
           Utterances to use as context for generating consistent speech style
           and prosody across multiple requests. These will not be converted to
           speech output.
-
-
-
+      utterances:
+        docs: >-
+          A list of **Utterances** to be converted to speech output.
+
+
+          An **Utterance** is a unit of input for
+          [Octave](/docs/text-to-speech-tts/overview), and includes input
+          `text`, an optional `description` to serve as the prompt for how the
+          speech should be delivered, an optional `voice` specification, and
+          additional controls to guide delivery for `speed` and
+          `trailing_silence`.
+        type: list<PostedUtterance>
       num_generations:
         type: optional<integer>
         docs: Number of generations of the audio to produce.
@@ -327,6 +319,9 @@ types:
         validation:
           min: 1
           max: 5
+      format:
+        type: optional<Format>
+        docs: Specifies the output audio file format.
       split_utterances:
         type: optional<boolean>
         docs: >-
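Taken together, PostedTts now leads with the required `utterances` list and gains an explicit optional `format`. An illustrative request body built from the field docs and validation rules in this diff (all values are placeholders):

```typescript
// Illustrative PostedTts body assembled from the definitions in this diff.
const body = {
    utterances: [
        {
            text: "Welcome to Hume.",               // required; maxLength 5000
            description: "a warm, upbeat narrator", // optional prompt; maxLength 1000
            voice: { name: "Male English Actor", provider: "HUME_AI" },
            speed: 1.0,            // 0.5–2
            trailing_silence: 0.5, // 0–5 seconds
        },
    ],
    num_generations: 1,      // 1–5
    format: { type: "mp3" }, // new optional output-format field
    split_utterances: false,
};
```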
@@ -355,18 +350,6 @@ types:
           if disabled, each chunk's audio will be its own audio file, each with
           its own headers (if applicable).
         default: false
-      utterances:
-        docs: >-
-          A list of **Utterances** to be converted to speech output.
-
-
-          An **Utterance** is a unit of input for
-          [Octave](/docs/text-to-speech-tts/overview), and includes input
-          `text`, an optional `description` to serve as the prompt for how the
-          speech should be delivered, an optional `voice` specification, and
-          additional controls to guide delivery for `speed` and
-          `trailing_silence`.
-        type: list<PostedUtterance>
       instant_mode:
         type: optional<boolean>
         docs: >-
@@ -393,14 +376,14 @@ types:
       openapi: tts-openapi.yml
   ReturnTts:
     properties:
-      generations:
-        type: list<ReturnGeneration>
       request_id:
         type: optional<string>
         docs: >-
           A unique ID associated with this request for tracking and
           troubleshooting. Use this ID when contacting [support](/support) for
           troubleshooting assistance.
+      generations:
+        type: list<ReturnGeneration>
     source:
       openapi: tts-openapi.yml
   ReturnVoice:
@@ -428,28 +411,28 @@ types:
       openapi: tts-openapi.yml
   Snippet:
     properties:
-      audio:
-        type: string
-        docs: >-
-          The segmented audio output in the requested format, encoded as a
-          base64 string.
-      generation_id:
-        type: string
-        docs: The generation ID this snippet corresponds to.
       id:
         type: string
         docs: A unique ID associated with this **Snippet**.
       text:
         type: string
         docs: The text for this **Snippet**.
+      generation_id:
+        type: string
+        docs: The generation ID this snippet corresponds to.
+      utterance_index:
+        type: optional<integer>
+        docs: The index of the utterance in the request this snippet corresponds to.
       transcribed_text:
         type: optional<string>
         docs: >-
           The transcribed text of the generated audio. It is only present if
           `instant_mode` is set to `false`.
-      utterance_index:
-        type: optional<integer>
-        docs: The index of the utterance in the request this snippet corresponds to.
+      audio:
+        type: string
+        docs: >-
+          The segmented audio output in the requested format, encoded as a
+          base64 string.
     source:
       openapi: tts-openapi.yml
   SnippetAudioChunk:
@@ -458,6 +441,11 @@ types:
       openapi: tts-openapi.yml
   PostedUtterance:
     properties:
+      text:
+        type: string
+        docs: The input text to be synthesized into speech.
+        validation:
+          maxLength: 5000
       description:
         type: optional<string>
         docs: >-
@@ -479,6 +467,14 @@ types:
           guide](/docs/text-to-speech-tts/prompting) for design tips.
         validation:
           maxLength: 1000
+      voice:
+        type: optional<PostedUtteranceVoice>
+        docs: >-
+          The `name` or `id` associated with a **Voice** from the **Voice
+          Library** to be used as the speaker for this and all subsequent
+          `utterances`, until the `voice` field is updated again.
+
+          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
       speed:
         type: optional<double>
         docs: >-
@@ -488,11 +484,6 @@ types:
         validation:
           min: 0.5
           max: 2
-      text:
-        type: string
-        docs: The input text to be synthesized into speech.
-        validation:
-          maxLength: 5000
       trailing_silence:
         type: optional<double>
         docs: Duration of trailing silence (in seconds) to add to this utterance
@@ -500,14 +491,6 @@ types:
         validation:
           min: 0
           max: 5
-      voice:
-        type: optional<PostedUtteranceVoice>
-        docs: >-
-          The `name` or `id` associated with a **Voice** from the **Voice
-          Library** to be used as the speaker for this and all subsequent
-          `utterances`, until the `voice` field is updated again.
-
-          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
     source:
       openapi: tts-openapi.yml
   ValidationErrorLocItem:
package/.mock/fern.config.json
CHANGED

package/api/resources/empathicVoice/client/index.js
CHANGED
```diff
@@ -1,17 +1,2 @@
 "use strict";
-var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    var desc = Object.getOwnPropertyDescriptor(m, k);
-    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-      desc = { enumerable: true, get: function() { return m[k]; } };
-    }
-    Object.defineProperty(o, k2, desc);
-}) : (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    o[k2] = m[k];
-}));
-var __exportStar = (this && this.__exportStar) || function(m, exports) {
-    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
-};
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./requests"), exports);
```
package/api/resources/empathicVoice/errors/index.js
CHANGED
```diff
@@ -14,5 +14,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./UnprocessableEntityError"), exports);
 __exportStar(require("./BadRequestError"), exports);
```
package/api/resources/empathicVoice/types/AssistantEnd.d.ts
CHANGED
```diff
@@ -5,12 +5,12 @@
  * When provided, the output is an assistant end message.
  */
 export interface AssistantEnd {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; for an Assistant End message, this must be `assistant_end`.
      *
      * This message indicates the conclusion of the assistant’s response, signaling that the assistant has finished speaking for the current conversational turn.
      */
     type: "assistant_end";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
```
package/api/resources/empathicVoice/types/AssistantInput.d.ts
CHANGED
```diff
@@ -5,6 +5,8 @@
  * When provided, the input is spoken by EVI.
  */
 export interface AssistantInput {
+    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
+    type: "assistant_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -13,6 +15,4 @@ export interface AssistantInput {
      * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the user’s expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
      */
     text: string;
-    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
-    type: "assistant_input";
 }
```
package/api/resources/empathicVoice/types/AssistantMessage.d.ts
CHANGED
```diff
@@ -6,20 +6,20 @@ import * as Hume from "../../../index";
  * When provided, the output is an assistant message.
  */
 export interface AssistantMessage {
+    /**
+     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
+     *
+     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
+     */
+    type: "assistant_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
-    fromText: boolean;
     /** ID of the assistant message. Allows the Assistant Message to be tracked and referenced. */
     id?: string;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
-    /**
-     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
-     *
-     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
-     */
-    type: "assistant_message";
+    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
+    fromText: boolean;
 }
```
package/api/resources/empathicVoice/types/AssistantProsody.d.ts
CHANGED
```diff
@@ -6,16 +6,16 @@ import * as Hume from "../../../index";
  * When provided, the output is an Assistant Prosody message.
  */
 export interface AssistantProsody {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unique identifier for the segment. */
-    id?: string;
-    /** Inference model results. */
-    models: Hume.empathicVoice.Inference;
     /**
      * The type of message sent through the socket; for an Assistant Prosody message, this must be `assistant_PROSODY`.
      *
      * This message the expression measurement predictions of the assistant's audio output.
      */
     type: "assistant_prosody";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Inference model results. */
+    models: Hume.empathicVoice.Inference;
+    /** Unique identifier for the segment. */
+    id?: string;
 }
```
package/api/resources/empathicVoice/types/AudioConfiguration.d.ts
CHANGED
```diff
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface AudioConfiguration {
-    /** Number of audio channels. */
-    channels: number;
     /** Encoding format of the audio input, such as `linear16`. */
     encoding: Hume.empathicVoice.Encoding;
+    /** Number of audio channels. */
+    channels: number;
     /** Audio sample rate. Number of samples per second in the audio input, measured in Hertz. */
     sampleRate: number;
 }
```
package/api/resources/empathicVoice/types/AudioInput.d.ts
CHANGED
```diff
@@ -5,6 +5,12 @@
  * When provided, the input is audio.
  */
 export interface AudioInput {
+    /**
+     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
+     *
+     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
+     */
+    type: "audio_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -17,10 +23,4 @@ export interface AudioInput {
      * Hume recommends streaming audio with a buffer window of 20 milliseconds (ms), or 100 milliseconds (ms) for web applications.
      */
     data: string;
-    /**
-     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
-     *
-     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
-     */
-    type: "audio_input";
 }
```
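Functionally nothing changed here; the typings simply list the `type` discriminant first now. For orientation, a minimal sender sketch matching the AudioInput shape (the WebSocket handling is illustrative, not part of the package):

```typescript
// Sketch: sending an AudioInput message matching the shape above.
// Audio bytes must be base64-encoded; streaming in ~20 ms chunks
// (100 ms for web apps) is the guidance quoted in the typings.
function sendAudioInput(socket: WebSocket, audioChunk: Uint8Array): void {
    const message = {
        type: "audio_input" as const,
        data: Buffer.from(audioChunk).toString("base64"),
    };
    socket.send(JSON.stringify(message));
}
```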
package/api/resources/empathicVoice/types/AudioOutput.d.ts
CHANGED
```diff
@@ -5,14 +5,14 @@
  * The type of message sent through the socket; for an Audio Output message, this must be `audio_output`.
  */
 export interface AudioOutput {
+    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
+    type: "audio_output";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
-    data: string;
     /** ID of the audio output. Allows the Audio Output message to be tracked and referenced. */
     id: string;
     /** Index of the chunk of audio relative to the whole audio segment. */
     index: number;
-    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
-    type: "audio_output";
+    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
+    data: string;
 }
```
package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts
CHANGED
```diff
@@ -3,7 +3,7 @@
  */
 import * as Hume from "../../../index";
 export interface BuiltinToolConfig {
+    name: Hume.empathicVoice.BuiltInTool;
     /** Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. */
     fallbackContent?: string;
-    name: Hume.empathicVoice.BuiltInTool;
 }
```
package/api/resources/empathicVoice/types/ChatMessage.d.ts
CHANGED
```diff
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface ChatMessage {
-    /** Transcript of the message. */
-    content?: string;
     /** Role of who is providing the message. */
     role: Hume.empathicVoice.Role;
+    /** Transcript of the message. */
+    content?: string;
     /** Function call name and arguments. */
     toolCall?: Hume.empathicVoice.ToolCallMessage;
     /** Function call response from client. */
```
package/api/resources/empathicVoice/types/ChatMetadata.d.ts
CHANGED
```diff
@@ -5,6 +5,14 @@
  * When provided, the output is a chat metadata message.
  */
 export interface ChatMetadata {
+    /**
+     * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`.
+     *
+     * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session.
+     */
+    type: "chat_metadata";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
     /**
      * ID of the Chat Group.
      *
@@ -15,14 +23,6 @@ export interface ChatMetadata {
     chatGroupId: string;
     /** ID of the Chat session. Allows the Chat session to be tracked and referenced. */
     chatId: string;
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /** ID of the initiating request. */
     requestId?: string;
-    /**
-     * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`.
-     *
-     * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session.
-     */
-    type: "chat_metadata";
 }
```
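Across all of these message typings the pattern is the same: the `type` literal moves to the top of each interface, and it is the discriminant to switch on when handling socket traffic. A closing sketch using two of the shapes from this diff:

```typescript
// Sketch: narrowing received socket messages on the `type` discriminant,
// using two message shapes transcribed from the typings in this diff.
type ChatMetadataMsg = {
    type: "chat_metadata";
    customSessionId?: string;
    chatGroupId: string;
    chatId: string;
    requestId?: string;
};
type AssistantEndMsg = { type: "assistant_end"; customSessionId?: string };

function onSocketMessage(msg: ChatMetadataMsg | AssistantEndMsg): void {
    switch (msg.type) {
        case "chat_metadata":
            console.log(`Chat ${msg.chatId} joined group ${msg.chatGroupId}`);
            break;
        case "assistant_end":
            console.log("Assistant finished speaking");
            break;
    }
}
```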