hume 0.12.2 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (319)
  1. package/.mock/definition/empathic-voice/__package__.yml +688 -735
  2. package/.mock/definition/empathic-voice/chat.yml +29 -23
  3. package/.mock/definition/empathic-voice/chatWebhooks.yml +8 -12
  4. package/.mock/definition/empathic-voice/prompts.yml +2 -2
  5. package/.mock/definition/empathic-voice/tools.yml +2 -2
  6. package/.mock/definition/tts/__package__.yml +70 -87
  7. package/.mock/fern.config.json +1 -1
  8. package/api/resources/empathicVoice/client/index.d.ts +0 -1
  9. package/api/resources/empathicVoice/client/index.js +0 -15
  10. package/api/resources/empathicVoice/errors/index.d.ts +0 -1
  11. package/api/resources/empathicVoice/errors/index.js +0 -1
  12. package/api/resources/empathicVoice/resources/chat/client/Client.d.ts +2 -0
  13. package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts +1 -1
  14. package/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts +1 -1
  15. package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts +1 -1
  16. package/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts +1 -1
  17. package/api/resources/empathicVoice/types/AssistantEnd.d.ts +3 -3
  18. package/api/resources/empathicVoice/types/AssistantInput.d.ts +3 -3
  19. package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  20. package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  21. package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  22. package/api/resources/empathicVoice/types/AudioInput.d.ts +7 -7
  23. package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  24. package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  25. package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  26. package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  27. package/api/resources/empathicVoice/types/Context.d.ts +8 -14
  28. package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  29. package/api/resources/empathicVoice/types/ContextType.js +1 -2
  30. package/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
  31. package/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
  32. package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +3 -3
  33. package/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +1 -1
  34. package/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +2 -2
  35. package/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +2 -2
  36. package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  37. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
  38. package/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +1 -1
  39. package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +4 -6
  40. package/api/resources/empathicVoice/types/ReturnUserDefinedTool.d.ts +1 -1
  41. package/api/resources/empathicVoice/types/SessionSettings.d.ts +30 -30
  42. package/api/resources/empathicVoice/types/Tool.d.ts +7 -7
  43. package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +7 -7
  44. package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  45. package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  46. package/api/resources/empathicVoice/types/UserInput.d.ts +3 -3
  47. package/api/resources/empathicVoice/types/UserInterruption.d.ts +5 -5
  48. package/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
  49. package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  50. package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  51. package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  52. package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  53. package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  54. package/api/resources/empathicVoice/types/index.d.ts +16 -17
  55. package/api/resources/empathicVoice/types/index.js +16 -17
  56. package/api/resources/tts/client/Client.d.ts +21 -23
  57. package/api/resources/tts/client/Client.js +50 -58
  58. package/api/resources/tts/client/index.d.ts +0 -1
  59. package/api/resources/tts/client/index.js +0 -15
  60. package/api/resources/tts/types/PostedTts.d.ts +8 -8
  61. package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  62. package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  63. package/api/resources/tts/types/ReturnTts.d.ts +1 -1
  64. package/api/resources/tts/types/Snippet.d.ts +6 -6
  65. package/dist/api/resources/empathicVoice/client/index.d.ts +0 -1
  66. package/dist/api/resources/empathicVoice/client/index.js +0 -15
  67. package/dist/api/resources/empathicVoice/errors/index.d.ts +0 -1
  68. package/dist/api/resources/empathicVoice/errors/index.js +0 -1
  69. package/dist/api/resources/empathicVoice/resources/chat/client/Client.d.ts +2 -0
  70. package/dist/api/resources/empathicVoice/resources/prompts/client/requests/PostedPrompt.d.ts +1 -1
  71. package/dist/api/resources/empathicVoice/resources/prompts/client/requests/PostedPromptVersion.d.ts +1 -1
  72. package/dist/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedTool.d.ts +1 -1
  73. package/dist/api/resources/empathicVoice/resources/tools/client/requests/PostedUserDefinedToolVersion.d.ts +1 -1
  74. package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +3 -3
  75. package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +3 -3
  76. package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  77. package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  78. package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  79. package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +7 -7
  80. package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  81. package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  82. package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  83. package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  84. package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
  85. package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  86. package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
  87. package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
  88. package/dist/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
  89. package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +3 -3
  90. package/dist/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +1 -1
  91. package/dist/api/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +2 -2
  92. package/dist/api/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +2 -2
  93. package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  94. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
  95. package/dist/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +1 -1
  96. package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +4 -6
  97. package/dist/api/resources/empathicVoice/types/ReturnUserDefinedTool.d.ts +1 -1
  98. package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +30 -30
  99. package/dist/api/resources/empathicVoice/types/Tool.d.ts +7 -7
  100. package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +7 -7
  101. package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  102. package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  103. package/dist/api/resources/empathicVoice/types/UserInput.d.ts +3 -3
  104. package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +5 -5
  105. package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
  106. package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  107. package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  108. package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  109. package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  110. package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  111. package/dist/api/resources/empathicVoice/types/index.d.ts +16 -17
  112. package/dist/api/resources/empathicVoice/types/index.js +16 -17
  113. package/dist/api/resources/tts/client/Client.d.ts +21 -23
  114. package/dist/api/resources/tts/client/Client.js +50 -58
  115. package/dist/api/resources/tts/client/index.d.ts +0 -1
  116. package/dist/api/resources/tts/client/index.js +0 -15
  117. package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
  118. package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  119. package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  120. package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
  121. package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
  122. package/dist/serialization/resources/empathicVoice/index.d.ts +0 -1
  123. package/dist/serialization/resources/empathicVoice/index.js +0 -1
  124. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  125. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  126. package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  127. package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  128. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  129. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  130. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  131. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  132. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  133. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  134. package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  135. package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  136. package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  137. package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  138. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  139. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  140. package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  141. package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  142. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  143. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  144. package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  145. package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
  146. package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  147. package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  148. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  149. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
  150. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  151. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  152. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +1 -1
  153. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.js +1 -1
  154. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +1 -1
  155. package/dist/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.js +1 -1
  156. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  157. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  158. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
  159. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
  160. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  161. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  162. package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  163. package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  164. package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  165. package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
  166. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  167. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  168. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  169. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  170. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  171. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  172. package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  173. package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  174. package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  175. package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  176. package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  177. package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  178. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  179. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  180. package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  181. package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  182. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  183. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  184. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  185. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  186. package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -17
  187. package/dist/serialization/resources/empathicVoice/types/index.js +16 -17
  188. package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  189. package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
  190. package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  191. package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
  192. package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  193. package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  194. package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  195. package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
  196. package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
  197. package/dist/serialization/resources/tts/types/Snippet.js +3 -3
  198. package/dist/version.d.ts +1 -1
  199. package/dist/version.js +1 -1
  200. package/package.json +1 -1
  201. package/reference.md +24 -78
  202. package/serialization/resources/empathicVoice/index.d.ts +0 -1
  203. package/serialization/resources/empathicVoice/index.js +0 -1
  204. package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  205. package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  206. package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  207. package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  208. package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  209. package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  210. package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  211. package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  212. package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  213. package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  214. package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  215. package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  216. package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  217. package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  218. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  219. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  220. package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  221. package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  222. package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  223. package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  224. package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  225. package/serialization/resources/empathicVoice/types/Context.js +1 -1
  226. package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  227. package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  228. package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  229. package/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
  230. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  231. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  232. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.d.ts +1 -1
  233. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsInactivity.js +1 -1
  234. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.d.ts +1 -1
  235. package/serialization/resources/empathicVoice/types/PostedTimeoutSpecsMaxDuration.js +1 -1
  236. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  237. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  238. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
  239. package/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
  240. package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  241. package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  242. package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  243. package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  244. package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  245. package/serialization/resources/empathicVoice/types/Tool.js +3 -3
  246. package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  247. package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  248. package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  249. package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  250. package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  251. package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  252. package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  253. package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  254. package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  255. package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  256. package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  257. package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  258. package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  259. package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  260. package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  261. package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  262. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  263. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  264. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  265. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  266. package/serialization/resources/empathicVoice/types/index.d.ts +16 -17
  267. package/serialization/resources/empathicVoice/types/index.js +16 -17
  268. package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  269. package/serialization/resources/tts/types/PostedTts.js +3 -3
  270. package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  271. package/serialization/resources/tts/types/PostedUtterance.js +2 -2
  272. package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  273. package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  274. package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  275. package/serialization/resources/tts/types/ReturnTts.js +1 -1
  276. package/serialization/resources/tts/types/Snippet.d.ts +3 -3
  277. package/serialization/resources/tts/types/Snippet.js +3 -3
  278. package/version.d.ts +1 -1
  279. package/version.js +1 -1
  280. package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  281. package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
  282. package/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
  283. package/api/resources/empathicVoice/client/requests/index.js +0 -2
  284. package/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
  285. package/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
  286. package/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
  287. package/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
  288. package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  289. package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  290. package/api/resources/tts/client/requests/index.d.ts +0 -1
  291. package/api/resources/tts/client/requests/index.js +0 -2
  292. package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  293. package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
  294. package/dist/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
  295. package/dist/api/resources/empathicVoice/client/requests/index.js +0 -2
  296. package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
  297. package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
  298. package/dist/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
  299. package/dist/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
  300. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  301. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  302. package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
  303. package/dist/api/resources/tts/client/requests/index.js +0 -2
  304. package/dist/serialization/resources/empathicVoice/client/index.d.ts +0 -1
  305. package/dist/serialization/resources/empathicVoice/client/index.js +0 -17
  306. package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  307. package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
  308. package/dist/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
  309. package/dist/serialization/resources/empathicVoice/client/requests/index.js +0 -5
  310. package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
  311. package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
  312. package/serialization/resources/empathicVoice/client/index.d.ts +0 -1
  313. package/serialization/resources/empathicVoice/client/index.js +0 -17
  314. package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  315. package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
  316. package/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
  317. package/serialization/resources/empathicVoice/client/requests/index.js +0 -5
  318. package/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
  319. package/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
@@ -5,10 +5,16 @@ import * as Hume from "../../../index";
 export interface PostedTts {
     /** Utterances to use as context for generating consistent speech style and prosody across multiple requests. These will not be converted to speech output. */
     context?: Hume.tts.PostedContext;
-    /** Specifies the output audio file format. */
-    format?: Hume.tts.Format;
+    /**
+     * A list of **Utterances** to be converted to speech output.
+     *
+     * An **Utterance** is a unit of input for [Octave](/docs/text-to-speech-tts/overview), and includes input `text`, an optional `description` to serve as the prompt for how the speech should be delivered, an optional `voice` specification, and additional controls to guide delivery for `speed` and `trailing_silence`.
+     */
+    utterances: Hume.tts.PostedUtterance[];
     /** Number of generations of the audio to produce. */
     numGenerations?: number;
+    /** Specifies the output audio file format. */
+    format?: Hume.tts.Format;
     /**
      * Controls how audio output is segmented in the response.
      *
@@ -21,12 +27,6 @@ export interface PostedTts {
     splitUtterances?: boolean;
     /** If enabled, the audio for all the chunks of a generation, once concatenated together, will constitute a single audio file. Otherwise, if disabled, each chunk's audio will be its own audio file, each with its own headers (if applicable). */
     stripHeaders?: boolean;
-    /**
-     * A list of **Utterances** to be converted to speech output.
-     *
-     * An **Utterance** is a unit of input for [Octave](/docs/text-to-speech-tts/overview), and includes input `text`, an optional `description` to serve as the prompt for how the speech should be delivered, an optional `voice` specification, and additional controls to guide delivery for `speed` and `trailing_silence`.
-     */
-    utterances: Hume.tts.PostedUtterance[];
     /**
      * Enables ultra-low latency streaming, significantly reducing the time until the first audio chunk is received. Recommended for real-time applications requiring immediate audio playback. For further details, see our documentation on [instant mode](/docs/text-to-speech-tts/overview#ultra-low-latency-streaming-instant-mode).
      * - A [voice](/reference/text-to-speech-tts/synthesize-json-streaming#request.body.utterances.voice) must be specified when instant mode is enabled. Dynamic voice generation is not supported with this mode.
@@ -3,6 +3,8 @@
  */
 import * as Hume from "../../../index";
 export interface PostedUtterance {
+    /** The input text to be synthesized into speech. */
+    text: string;
     /**
      * Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent.
      *
@@ -11,16 +13,14 @@ export interface PostedUtterance {
      * - **Voice not specified**: the description will serve as a voice prompt for generating a voice. See our [prompting guide](/docs/text-to-speech-tts/prompting) for design tips.
      */
     description?: string;
-    /** Speed multiplier for the synthesized speech. Extreme values below 0.75 and above 1.5 may sometimes cause instability to the generated output. */
-    speed?: number;
-    /** The input text to be synthesized into speech. */
-    text: string;
-    /** Duration of trailing silence (in seconds) to add to this utterance */
-    trailingSilence?: number;
     /**
      * The `name` or `id` associated with a **Voice** from the **Voice Library** to be used as the speaker for this and all subsequent `utterances`, until the `voice` field is updated again.
      *
      * See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
      */
     voice?: Hume.tts.PostedUtteranceVoice;
+    /** Speed multiplier for the synthesized speech. Extreme values below 0.75 and above 1.5 may sometimes cause instability to the generated output. */
+    speed?: number;
+    /** Duration of trailing silence (in seconds) to add to this utterance */
+    trailingSilence?: number;
 }
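
For orientation, a minimal sketch of a synthesis request against the reordered `PostedTts` shape. It assumes the `HumeClient` entry point and the `tts.synthesizeJson` method exposed by this SDK; the `format` value is illustrative.

```typescript
import { HumeClient } from "hume";

const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

async function main(): Promise<void> {
    // `utterances` is the required field; `text` is now listed first on each utterance.
    const result = await hume.tts.synthesizeJson({
        utterances: [
            {
                text: "Welcome back! How can I help today?",
                description: "warm, unhurried, lightly upbeat",
                speed: 1.0, // multiplier; values below 0.75 or above 1.5 may be unstable
                trailingSilence: 0.5, // seconds of silence appended to this utterance
            },
        ],
        numGenerations: 1,
        format: { type: "mp3" }, // one of the Hume.tts.Format variants (assumed value)
    });
    console.log(result.generations[0]?.generationId);
}

main().catch(console.error);
```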
@@ -3,15 +3,15 @@
  */
 import * as Hume from "../../../index";
 export interface ReturnGeneration {
-    /** The generated audio output in the requested format, encoded as a base64 string. */
-    audio: string;
+    /** A unique ID associated with this TTS generation that can be used as context for generating consistent speech style and prosody across multiple requests. */
+    generationId: string;
     /** Duration of the generated audio in seconds. */
     duration: number;
-    encoding: Hume.tts.AudioEncoding;
     /** Size of the generated audio in bytes. */
     fileSize: number;
-    /** A unique ID associated with this TTS generation that can be used as context for generating consistent speech style and prosody across multiple requests. */
-    generationId: string;
+    encoding: Hume.tts.AudioEncoding;
+    /** The generated audio output in the requested format, encoded as a base64 string. */
+    audio: string;
     /** A list of snippet groups where each group corresponds to an utterance in the request. Each group contains segmented snippets that represent the original utterance divided into more natural-sounding units optimized for speech delivery. */
     snippets: Hume.tts.Snippet[][];
 }
@@ -3,7 +3,7 @@
  */
 import * as Hume from "../../../index";
 export interface ReturnTts {
-    generations: Hume.tts.ReturnGeneration[];
     /** A unique ID associated with this request for tracking and troubleshooting. Use this ID when contacting [support](/support) for troubleshooting assistance. */
     requestId?: string;
+    generations: Hume.tts.ReturnGeneration[];
 }
@@ -2,16 +2,16 @@
  * This file was auto-generated by Fern from our API Definition.
  */
 export interface Snippet {
-    /** The segmented audio output in the requested format, encoded as a base64 string. */
-    audio: string;
-    /** The generation ID this snippet corresponds to. */
-    generationId: string;
     /** A unique ID associated with this **Snippet**. */
     id: string;
     /** The text for this **Snippet**. */
     text: string;
-    /** The transcribed text of the generated audio. It is only present if `instant_mode` is set to `false`. */
-    transcribedText?: string;
+    /** The generation ID this snippet corresponds to. */
+    generationId: string;
     /** The index of the utterance in the request this snippet corresponds to. */
     utteranceIndex?: number;
+    /** The transcribed text of the generated audio. It is only present if `instant_mode` is set to `false`. */
+    transcribedText?: string;
+    /** The segmented audio output in the requested format, encoded as a base64 string. */
+    audio: string;
 }
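
The reordered response types now read top to bottom as IDs and metadata first, base64 payload last; the field set itself is unchanged. A hedged sketch of consuming them, assuming the `Hume` namespace export from this package:

```typescript
import { writeFileSync } from "node:fs";
import { Hume } from "hume"; // namespace export assumed from this SDK

// Persist each generation's audio and log its snippet groups.
function saveGenerations(response: Hume.tts.ReturnTts): void {
    for (const generation of response.generations) {
        // `audio` is base64 in the requested format; decode before writing to disk.
        writeFileSync(`${generation.generationId}.audio`, Buffer.from(generation.audio, "base64"));
        for (const [utteranceIndex, group] of generation.snippets.entries()) {
            for (const snippet of group) {
                console.log(utteranceIndex, snippet.id, snippet.text);
            }
        }
    }
}
```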
@@ -1,2 +1 @@
 export {};
-export * from "./requests";
@@ -1,17 +1,2 @@
 "use strict";
-var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    var desc = Object.getOwnPropertyDescriptor(m, k);
-    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-        desc = { enumerable: true, get: function() { return m[k]; } };
-    }
-    Object.defineProperty(o, k2, desc);
-}) : (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    o[k2] = m[k];
-}));
-var __exportStar = (this && this.__exportStar) || function(m, exports) {
-    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
-};
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./requests"), exports);
@@ -1,2 +1 @@
-export * from "./UnprocessableEntityError";
 export * from "./BadRequestError";
@@ -14,5 +14,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./UnprocessableEntityError"), exports);
 __exportStar(require("./BadRequestError"), exports);
@@ -22,6 +22,8 @@ export declare namespace Chat {
         resumedChatGroupId?: string;
         /** A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as interim UserMessage messages. The [interim](/reference/empathic-voice-interface-evi/chat/chat#receive.User%20Message.interim) field on a [UserMessage](/reference/empathic-voice-interface-evi/chat/chat#receive.User%20Message.type) denotes whether the message is "interim" or "final." */
         verboseTranscription?: boolean;
+        /** ID of the Voice to use for this chat. If specified, will override the voice set in the Config */
+        voiceId?: string;
         /** Extra query parameters sent at WebSocket connection */
         queryParams?: Record<string, string | string[] | object | object[]>;
     }
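
The new `voiceId` option lets one EVI Config serve different voices per connection. A minimal sketch, assuming this SDK's `empathicVoice.chat.connect` surface; the IDs are placeholders:

```typescript
import { HumeClient } from "hume";

const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

// `voiceId` (new in 0.13.x) overrides the voice set in the EVI Config for this chat only.
const socket = hume.empathicVoice.chat.connect({
    configId: "<your-evi-config-id>",     // placeholder
    voiceId: "<voice-id-to-use-instead>", // placeholder
    verboseTranscription: true,
});

socket.on("open", () => console.log("EVI chat connected"));
```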
@@ -14,7 +14,7 @@ export interface PostedPrompt {
     /** An optional description of the Prompt version. */
     versionDescription?: string;
     /**
-     * Instructions used to shape EVIs behavior, responses, and style.
+     * Instructions used to shape EVI's behavior, responses, and style.
      *
      * You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles.
      *
@@ -12,7 +12,7 @@ export interface PostedPromptVersion {
     /** An optional description of the Prompt version. */
     versionDescription?: string;
     /**
-     * Instructions used to shape EVIs behavior, responses, and style for this version of the Prompt.
+     * Instructions used to shape EVI's behavior, responses, and style for this version of the Prompt.
      *
      * You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles.
      *
@@ -21,7 +21,7 @@ export interface PostedUserDefinedTool {
     /**
      * Stringified JSON defining the parameters used by this version of the Tool.
      *
-     * These parameters define the inputs needed for the Tools execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
+     * These parameters define the inputs needed for the Tool's execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
      */
     parameters: string;
     /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */
@@ -18,7 +18,7 @@ export interface PostedUserDefinedToolVersion {
     /**
      * Stringified JSON defining the parameters used by this version of the Tool.
      *
-     * These parameters define the inputs needed for the Tools execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
+     * These parameters define the inputs needed for the Tool's execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
      */
     parameters: string;
     /** Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. */
@@ -5,12 +5,12 @@
  * When provided, the output is an assistant end message.
  */
 export interface AssistantEnd {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; for an Assistant End message, this must be `assistant_end`.
      *
-     * This message indicates the conclusion of the assistants response, signaling that the assistant has finished speaking for the current conversational turn.
+     * This message indicates the conclusion of the assistant's response, signaling that the assistant has finished speaking for the current conversational turn.
      */
     type: "assistant_end";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
@@ -5,14 +5,14 @@
  * When provided, the input is spoken by EVI.
  */
 export interface AssistantInput {
+    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
+    type: "assistant_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
      * Assistant text to synthesize into spoken audio and insert into the conversation.
      *
-     * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the users expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
+     * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the user's expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
      */
     text: string;
-    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
-    type: "assistant_input";
 }
@@ -6,20 +6,20 @@ import * as Hume from "../../../index";
  * When provided, the output is an assistant message.
  */
 export interface AssistantMessage {
+    /**
+     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
+     *
+     * This message contains both a transcript of the assistant's response and the expression measurement predictions of the assistant's audio output.
+     */
+    type: "assistant_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
-    fromText: boolean;
     /** ID of the assistant message. Allows the Assistant Message to be tracked and referenced. */
     id?: string;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
-    /**
-     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
-     *
-     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
-     */
-    type: "assistant_message";
+    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
+    fromText: boolean;
 }
@@ -6,16 +6,16 @@ import * as Hume from "../../../index";
  * When provided, the output is an Assistant Prosody message.
  */
 export interface AssistantProsody {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unique identifier for the segment. */
-    id?: string;
-    /** Inference model results. */
-    models: Hume.empathicVoice.Inference;
     /**
      * The type of message sent through the socket; for an Assistant Prosody message, this must be `assistant_PROSODY`.
      *
      * This message the expression measurement predictions of the assistant's audio output.
      */
     type: "assistant_prosody";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Inference model results. */
+    models: Hume.empathicVoice.Inference;
+    /** Unique identifier for the segment. */
+    id?: string;
 }
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface AudioConfiguration {
-    /** Number of audio channels. */
-    channels: number;
     /** Encoding format of the audio input, such as `linear16`. */
     encoding: Hume.empathicVoice.Encoding;
+    /** Number of audio channels. */
+    channels: number;
     /** Audio sample rate. Number of samples per second in the audio input, measured in Hertz. */
     sampleRate: number;
 }
@@ -5,22 +5,22 @@
  * When provided, the input is audio.
  */
 export interface AudioInput {
+    /**
+     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
+     *
+     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
+     */
+    type: "audio_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
      * Base64 encoded audio input to insert into the conversation.
      *
-     * The content of an Audio Input message is treated as the users speech to EVI and must be streamed continuously. Pre-recorded audio files are not supported.
+     * The content of an Audio Input message is treated as the user's speech to EVI and must be streamed continuously. Pre-recorded audio files are not supported.
      *
      * For optimal transcription quality, the audio data should be transmitted in small chunks.
      *
      * Hume recommends streaming audio with a buffer window of 20 milliseconds (ms), or 100 milliseconds (ms) for web applications.
     */
     data: string;
-    /**
-     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
-     *
-     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
-     */
-    type: "audio_input";
 }
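
Most of these message types only reorder fields (the `type` discriminant now leads), so the wire format is unchanged. As a sketch of producing an Audio Input message, assuming the `sendAudioInput` helper on this SDK's chat socket:

```typescript
// Stream microphone audio to EVI in small Base64-encoded chunks
// (~20 ms per chunk, or ~100 ms for web apps); pre-recorded files are not supported.
function streamChunk(
    socket: { sendAudioInput(message: { data: string }): void }, // structural stand-in for the SDK socket
    chunk: Uint8Array,
): void {
    socket.sendAudioInput({ data: Buffer.from(chunk).toString("base64") });
}
```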
@@ -5,14 +5,14 @@
  * The type of message sent through the socket; for an Audio Output message, this must be `audio_output`.
  */
 export interface AudioOutput {
+    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
+    type: "audio_output";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
-    data: string;
     /** ID of the audio output. Allows the Audio Output message to be tracked and referenced. */
     id: string;
     /** Index of the chunk of audio relative to the whole audio segment. */
     index: number;
-    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
-    type: "audio_output";
+    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
+    data: string;
 }
@@ -3,7 +3,7 @@
  */
 import * as Hume from "../../../index";
 export interface BuiltinToolConfig {
+    name: Hume.empathicVoice.BuiltInTool;
     /** Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. */
     fallbackContent?: string;
-    name: Hume.empathicVoice.BuiltInTool;
 }
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface ChatMessage {
-    /** Transcript of the message. */
-    content?: string;
     /** Role of who is providing the message. */
     role: Hume.empathicVoice.Role;
+    /** Transcript of the message. */
+    content?: string;
     /** Function call name and arguments. */
     toolCall?: Hume.empathicVoice.ToolCallMessage;
     /** Function call response from client. */
@@ -5,6 +5,14 @@
  * When provided, the output is a chat metadata message.
  */
 export interface ChatMetadata {
+    /**
+     * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`.
+     *
+     * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session.
+     */
+    type: "chat_metadata";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
     /**
      * ID of the Chat Group.
      *
@@ -15,14 +23,6 @@ export interface ChatMetadata {
     chatGroupId: string;
     /** ID of the Chat session. Allows the Chat session to be tracked and referenced. */
     chatId: string;
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /** ID of the initiating request. */
     requestId?: string;
-    /**
-     * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`.
-     *
-     * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session.
-     */
-    type: "chat_metadata";
 }
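
A sketch of consuming the metadata, assuming the `on("message", ...)` event surface from the connection sketch above; `chatGroupId` is what you would later pass as `resumedChatGroupId` to resume the conversation:

```typescript
let chatGroupId: string | undefined;

socket.on("message", (message) => {
    // Chat Metadata is the first message received after the connection is established.
    if (message.type === "chat_metadata") {
        chatGroupId = message.chatGroupId; // reusable via `resumedChatGroupId` on reconnect
        console.log(`chat ${message.chatId} in group ${message.chatGroupId}`);
    }
});
```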
@@ -3,24 +3,18 @@
  */
 import * as Hume from "../../../index";
 export interface Context {
-    /**
-     * The context to be injected into the conversation. Helps inform the LLM's response by providing relevant information about the ongoing conversation.
-     *
-     * This text will be appended to the end of user messages based on the chosen persistence level. For example, if you want to remind EVI of its role as a helpful weather assistant, the context you insert will be appended to the end of user messages as `{Context: You are a helpful weather assistant}`.
-     */
-    text: string;
     /**
      * The persistence level of the injected context. Specifies how long the injected context will remain active in the session.
      *
-     * There are three possible context types:
-     *
-     * - **Persistent**: The context is appended to all user messages for the duration of the session.
-     *
-     * - **Temporary**: The context is appended only to the next user message.
+     * - **Temporary**: Context that is only applied to the following assistant response.
      *
-     * - **Editable**: The original context is updated to reflect the new context.
-     *
-     * If the type is not specified, it will default to `temporary`.
+     * - **Persistent**: Context that is applied to all subsequent assistant responses for the remainder of the Chat.
      */
     type?: Hume.empathicVoice.ContextType;
+    /**
+     * The context to be injected into the conversation. Helps inform the LLM's response by providing relevant information about the ongoing conversation.
+     *
+     * This text will be appended to the end of [user_messages](/reference/speech-to-speech-evi/chat#receive.UserMessage.message.content) based on the chosen persistence level. For example, if you want to remind EVI of its role as a helpful weather assistant, the context you insert will be appended to the end of user messages as `{Context: You are a helpful weather assistant}`.
+     */
+    text: string;
 }
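
Note the breaking change here: the `editable` context type is gone, and `temporary` is now documented against the next assistant response rather than the next user message. A sketch of injecting context over the raw WebSocket, assuming `Context` still rides inside a `session_settings` message as documented for EVI:

```typescript
declare const ws: WebSocket; // the underlying chat WebSocket (stand-in)

ws.send(JSON.stringify({
    type: "session_settings",
    context: {
        type: "temporary", // applied only to the following assistant response
        text: "The user has just arrived at the support desk.",
    },
}));
```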
@@ -1,9 +1,8 @@
 /**
  * This file was auto-generated by Fern from our API Definition.
  */
-export type ContextType = "editable" | "persistent" | "temporary";
+export type ContextType = "temporary" | "persistent";
 export declare const ContextType: {
-    readonly Editable: "editable";
-    readonly Persistent: "persistent";
     readonly Temporary: "temporary";
+    readonly Persistent: "persistent";
 };
@@ -5,7 +5,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ContextType = void 0;
 exports.ContextType = {
-    Editable: "editable",
-    Persistent: "persistent",
     Temporary: "temporary",
+    Persistent: "persistent",
 };
@@ -1,7 +1,7 @@
 /**
  * This file was auto-generated by Fern from our API Definition.
  */
-export type LanguageModelType = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-sonnet-4-20250514" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gpt-oss-120b" | "qwen-3-235b-a22b" | "qwen-3-235b-a22b-instruct-2507" | "qwen-3-235b-a22b-thinking-2507" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gemini-2.5-flash" | "gemini-2.5-flash-preview-04-17" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gpt-4.1" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "moonshotai/kimi-k2-instruct" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "sonar" | "sonar-pro" | "sambanova" | "DeepSeek-R1-Distill-Llama-70B" | "Llama-4-Maverick-17B-128E-Instruct" | "Qwen3-32B" | "ellm" | "custom-language-model" | "hume-evi-3-web-search";
+export type LanguageModelType = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-sonnet-4-20250514" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gpt-oss-120b" | "qwen-3-235b-a22b" | "qwen-3-235b-a22b-instruct-2507" | "qwen-3-235b-a22b-thinking-2507" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gemini-2.5-flash" | "gemini-2.5-flash-preview-04-17" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gpt-4.1" | "gpt-5" | "gpt-5-mini" | "gpt-5-nano" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "moonshotai/kimi-k2-instruct" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "sonar" | "sonar-pro" | "sambanova" | "DeepSeek-R1-Distill-Llama-70B" | "Llama-4-Maverick-17B-128E-Instruct" | "Qwen3-32B" | "ellm" | "custom-language-model" | "hume-evi-3-web-search";
 export declare const LanguageModelType: {
     readonly Claude37SonnetLatest: "claude-3-7-sonnet-latest";
     readonly Claude35SonnetLatest: "claude-3-5-sonnet-latest";
@@ -32,6 +32,9 @@ export declare const LanguageModelType: {
     readonly Gpt4O: "gpt-4o";
     readonly Gpt4OMini: "gpt-4o-mini";
     readonly Gpt41: "gpt-4.1";
+    readonly Gpt5: "gpt-5";
+    readonly Gpt5Mini: "gpt-5-mini";
+    readonly Gpt5Nano: "gpt-5-nano";
     readonly Gemma7BIt: "gemma-7b-it";
     readonly Llama38B8192: "llama3-8b-8192";
     readonly Llama370B8192: "llama3-70b-8192";
@@ -34,6 +34,9 @@ exports.LanguageModelType = {
     Gpt4O: "gpt-4o",
     Gpt4OMini: "gpt-4o-mini",
     Gpt41: "gpt-4.1",
+    Gpt5: "gpt-5",
+    Gpt5Mini: "gpt-5-mini",
+    Gpt5Nano: "gpt-5-nano",
     Gemma7BIt: "gemma-7b-it",
     Llama38B8192: "llama3-8b-8192",
     Llama370B8192: "llama3-70b-8192",
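
The GPT-5 family is the only addition to this enum. A sketch of referencing it in a language model spec; the `modelProvider` string is an assumption (OpenAI-hosted models use `"OPEN_AI"` elsewhere in this API):

```typescript
import { Hume } from "hume"; // namespace export assumed

const languageModel: Hume.empathicVoice.PostedLanguageModel = {
    modelProvider: "OPEN_AI", // assumed provider value for the GPT-5 family
    modelResource: Hume.empathicVoice.LanguageModelType.Gpt5Mini, // "gpt-5-mini"
    temperature: 0.7, // 0 to 1 inclusive; lower is more focused and deterministic
};
```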
@@ -5,12 +5,12 @@
  * Pause responses from EVI. Chat history is still saved and sent after resuming.
  */
 export interface PauseAssistantMessage {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; must be `pause_assistant_message` for our server to correctly identify and process it as a Pause Assistant message.
      *
-     * Once this message is sent, EVI will not respond until a [Resume Assistant message](/reference/empathic-voice-interface-evi/chat/chat#send.ResumeAssistantMessage.type) is sent. When paused, EVI wont respond, but transcriptions of your audio inputs will still be recorded.
+     * Once this message is sent, EVI will not respond until a [Resume Assistant message](/reference/empathic-voice-interface-evi/chat/chat#send.ResumeAssistantMessage.type) is sent. When paused, EVI won't respond, but transcriptions of your audio inputs will still be recorded.
      */
     type: "pause_assistant_message";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
@@ -13,7 +13,7 @@ export interface PostedLanguageModel {
     /**
      * The model temperature, with values between 0 to 1 (inclusive).
      *
-     * Controls the randomness of the LLMs output, with values closer to 0 yielding focused, deterministic responses and values closer to 1 producing more creative, diverse responses.
+     * Controls the randomness of the LLM's output, with values closer to 0 yielding focused, deterministic responses and values closer to 1 producing more creative, diverse responses.
      */
     temperature?: number;
 }
@@ -7,12 +7,12 @@
  * Accepts a minimum value of 30 seconds and a maximum value of 1,800 seconds.
  */
 export interface PostedTimeoutSpecsInactivity {
+    /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */
+    durationSecs?: number;
     /**
      * Boolean indicating if this timeout is enabled.
      *
      * If set to false, EVI will not timeout due to a specified duration of user inactivity being reached. However, the conversation will eventually disconnect after 1,800 seconds (30 minutes), which is the maximum WebSocket duration limit for EVI.
      */
     enabled: boolean;
-    /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */
-    durationSecs?: number;
 }
@@ -7,12 +7,12 @@
  * Accepts a minimum value of 30 seconds and a maximum value of 1,800 seconds.
  */
 export interface PostedTimeoutSpecsMaxDuration {
+    /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */
+    durationSecs?: number;
     /**
      * Boolean indicating if this timeout is enabled.
      *
      * If set to false, EVI will not timeout due to a specified maximum duration being reached. However, the conversation will eventually disconnect after 1,800 seconds (30 minutes), which is the maximum WebSocket duration limit for EVI.
      */
     enabled: boolean;
-    /** Duration in seconds for the timeout (e.g. 600 seconds represents 10 minutes). */
-    durationSecs?: number;
 }
@@ -5,12 +5,12 @@
  * Resume responses from EVI. Chat history sent while paused will now be sent.
  */
 export interface ResumeAssistantMessage {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; must be `resume_assistant_message` for our server to correctly identify and process it as a Resume Assistant message.
      *
      * Upon resuming, if any audio input was sent during the pause, EVI will retain context from all messages sent but only respond to the last user message. (e.g., If you ask EVI two questions while paused and then send a `resume_assistant_message`, EVI will respond to the second question and have added the first question to its conversation context.)
      */
     type: "resume_assistant_message";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
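
Finally, a sketch of the pause/resume wire shapes defined above, sent over the raw WebSocket; the SDK's chat socket likely wraps these in typed helpers:

```typescript
declare const ws: WebSocket; // the underlying chat WebSocket (stand-in)

// Pause: EVI stays silent, but user audio is still transcribed and recorded.
ws.send(JSON.stringify({ type: "pause_assistant_message" }));

// ... send audio, ask questions ...

// Resume: EVI answers only the last user message sent during the pause,
// keeping the earlier ones as conversation context.
ws.send(JSON.stringify({ type: "resume_assistant_message" }));
```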