hume 0.12.1 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. package/.mock/definition/empathic-voice/__package__.yml +760 -711
  2. package/.mock/definition/empathic-voice/chat.yml +29 -23
  3. package/.mock/definition/empathic-voice/chatWebhooks.yml +3 -3
  4. package/.mock/definition/empathic-voice/configs.yml +10 -4
  5. package/.mock/definition/tts/__package__.yml +77 -125
  6. package/.mock/fern.config.json +1 -1
  7. package/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
  8. package/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
  9. package/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
  10. package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
  11. package/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
  12. package/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
  13. package/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
  14. package/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
  15. package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  16. package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  17. package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  18. package/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
  19. package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  20. package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  21. package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  22. package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  23. package/api/resources/empathicVoice/types/Context.d.ts +8 -14
  24. package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  25. package/api/resources/empathicVoice/types/ContextType.js +1 -2
  26. package/api/resources/empathicVoice/types/LanguageModelType.d.ts +20 -1
  27. package/api/resources/empathicVoice/types/LanguageModelType.js +19 -0
  28. package/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
  29. package/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  30. package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
  31. package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  32. package/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
  33. package/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
  34. package/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
  35. package/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  36. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +18 -14
  37. package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
  38. package/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
  39. package/api/resources/empathicVoice/types/Tool.d.ts +6 -6
  40. package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
  41. package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  42. package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  43. package/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
  44. package/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
  45. package/api/resources/empathicVoice/types/UserMessage.d.ts +14 -7
  46. package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  47. package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  48. package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  49. package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  50. package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  51. package/api/resources/empathicVoice/types/index.d.ts +16 -16
  52. package/api/resources/empathicVoice/types/index.js +16 -16
  53. package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
  54. package/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
  55. package/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
  56. package/api/resources/tts/client/Client.d.ts +21 -23
  57. package/api/resources/tts/client/Client.js +50 -58
  58. package/api/resources/tts/client/index.d.ts +1 -1
  59. package/api/resources/tts/client/index.js +0 -15
  60. package/api/resources/tts/resources/voices/client/index.d.ts +1 -0
  61. package/api/resources/tts/types/PostedTts.d.ts +8 -8
  62. package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  63. package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  64. package/api/resources/tts/types/ReturnTts.d.ts +1 -1
  65. package/api/resources/tts/types/Snippet.d.ts +6 -6
  66. package/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
  67. package/core/fetcher/BinaryResponse.d.ts +17 -0
  68. package/core/fetcher/BinaryResponse.js +14 -0
  69. package/core/fetcher/Fetcher.d.ts +1 -1
  70. package/core/fetcher/ResponseWithBody.d.ts +4 -0
  71. package/core/fetcher/ResponseWithBody.js +6 -0
  72. package/core/fetcher/getFetchFn.js +3 -3
  73. package/core/fetcher/getResponseBody.js +33 -32
  74. package/core/fetcher/index.d.ts +1 -0
  75. package/core/file.d.ts +1 -0
  76. package/core/form-data-utils/FormDataWrapper.d.ts +5 -52
  77. package/core/form-data-utils/FormDataWrapper.js +104 -124
  78. package/core/index.d.ts +1 -0
  79. package/core/index.js +1 -0
  80. package/dist/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
  81. package/dist/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
  82. package/dist/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
  83. package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
  84. package/dist/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
  85. package/dist/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
  86. package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
  87. package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
  88. package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  89. package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  90. package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  91. package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
  92. package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  93. package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  94. package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  95. package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  96. package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
  97. package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  98. package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
  99. package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +20 -1
  100. package/dist/api/resources/empathicVoice/types/LanguageModelType.js +19 -0
  101. package/dist/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
  102. package/dist/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  103. package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
  104. package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  105. package/dist/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
  106. package/dist/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
  107. package/dist/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
  108. package/dist/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  109. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +18 -14
  110. package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
  111. package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
  112. package/dist/api/resources/empathicVoice/types/Tool.d.ts +6 -6
  113. package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
  114. package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  115. package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  116. package/dist/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
  117. package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
  118. package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +14 -7
  119. package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  120. package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  121. package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  122. package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  123. package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  124. package/dist/api/resources/empathicVoice/types/index.d.ts +16 -16
  125. package/dist/api/resources/empathicVoice/types/index.js +16 -16
  126. package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
  127. package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
  128. package/dist/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
  129. package/dist/api/resources/tts/client/Client.d.ts +21 -23
  130. package/dist/api/resources/tts/client/Client.js +50 -58
  131. package/dist/api/resources/tts/client/index.d.ts +1 -1
  132. package/dist/api/resources/tts/client/index.js +0 -15
  133. package/dist/api/resources/tts/resources/voices/client/index.d.ts +1 -0
  134. package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
  135. package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  136. package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  137. package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
  138. package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
  139. package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
  140. package/dist/core/fetcher/BinaryResponse.d.ts +17 -0
  141. package/dist/core/fetcher/BinaryResponse.js +14 -0
  142. package/dist/core/fetcher/Fetcher.d.ts +1 -1
  143. package/dist/core/fetcher/ResponseWithBody.d.ts +4 -0
  144. package/dist/core/fetcher/ResponseWithBody.js +6 -0
  145. package/dist/core/fetcher/getFetchFn.js +3 -3
  146. package/dist/core/fetcher/getResponseBody.js +33 -32
  147. package/dist/core/fetcher/index.d.ts +1 -0
  148. package/dist/core/file.d.ts +1 -0
  149. package/dist/core/form-data-utils/FormDataWrapper.d.ts +5 -52
  150. package/dist/core/form-data-utils/FormDataWrapper.js +104 -124
  151. package/dist/core/index.d.ts +1 -0
  152. package/dist/core/index.js +1 -0
  153. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  154. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  155. package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  156. package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  157. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  158. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  159. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  160. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  161. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  162. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  163. package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  164. package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  165. package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  166. package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  167. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  168. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  169. package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  170. package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  171. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  172. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  173. package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  174. package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
  175. package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  176. package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  177. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  178. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +19 -0
  179. package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
  180. package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  181. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  182. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  183. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  184. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  185. package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
  186. package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  187. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +9 -9
  188. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +9 -9
  189. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  190. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  191. package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  192. package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  193. package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  194. package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
  195. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  196. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  197. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  198. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  199. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  200. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  201. package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  202. package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  203. package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  204. package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  205. package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  206. package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  207. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  208. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  209. package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  210. package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  211. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  212. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  213. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  214. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  215. package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -16
  216. package/dist/serialization/resources/empathicVoice/types/index.js +16 -16
  217. package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  218. package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
  219. package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  220. package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
  221. package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  222. package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  223. package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  224. package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
  225. package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
  226. package/dist/serialization/resources/tts/types/Snippet.js +3 -3
  227. package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
  228. package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
  229. package/dist/version.d.ts +1 -1
  230. package/dist/version.js +1 -1
  231. package/jest.browser.config.mjs +10 -0
  232. package/jest.config.mjs +1 -0
  233. package/package.json +6 -7
  234. package/reference.md +25 -27
  235. package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  236. package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  237. package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  238. package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  239. package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  240. package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  241. package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  242. package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  243. package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  244. package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  245. package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  246. package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  247. package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  248. package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  249. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  250. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  251. package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  252. package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  253. package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  254. package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  255. package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  256. package/serialization/resources/empathicVoice/types/Context.js +1 -1
  257. package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  258. package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  259. package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  260. package/serialization/resources/empathicVoice/types/LanguageModelType.js +19 -0
  261. package/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
  262. package/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  263. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  264. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  265. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  266. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  267. package/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
  268. package/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  269. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +9 -9
  270. package/serialization/resources/empathicVoice/types/ReturnConfig.js +9 -9
  271. package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  272. package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  273. package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  274. package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  275. package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  276. package/serialization/resources/empathicVoice/types/Tool.js +3 -3
  277. package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  278. package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  279. package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  280. package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  281. package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  282. package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  283. package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  284. package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  285. package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  286. package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  287. package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  288. package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  289. package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  290. package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  291. package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  292. package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  293. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  294. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  295. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  296. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  297. package/serialization/resources/empathicVoice/types/index.d.ts +16 -16
  298. package/serialization/resources/empathicVoice/types/index.js +16 -16
  299. package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  300. package/serialization/resources/tts/types/PostedTts.js +3 -3
  301. package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  302. package/serialization/resources/tts/types/PostedUtterance.js +2 -2
  303. package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  304. package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  305. package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  306. package/serialization/resources/tts/types/ReturnTts.js +1 -1
  307. package/serialization/resources/tts/types/Snippet.d.ts +3 -3
  308. package/serialization/resources/tts/types/Snippet.js +3 -3
  309. package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
  310. package/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
  311. package/version.d.ts +1 -1
  312. package/version.js +1 -1
  313. package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  314. package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  315. package/api/resources/tts/client/requests/index.d.ts +0 -1
  316. package/core/form-data-utils/toReadableStream.d.ts +0 -1
  317. package/core/form-data-utils/toReadableStream.js +0 -50
  318. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  319. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  320. package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
  321. package/dist/core/form-data-utils/toReadableStream.d.ts +0 -1
  322. package/dist/core/form-data-utils/toReadableStream.js +0 -50
  323. /package/{api/resources/tts/client/requests/index.js → core/file.js} +0 -0
  324. /package/dist/{api/resources/tts/client/requests/index.js → core/file.js} +0 -0
@@ -4,21 +4,6 @@ channel:
   auth: false
   docs: Chat with Empathic Voice Interface (EVI)
   query-parameters:
-    access_token:
-      type: optional<string>
-      default: ''
-      docs: >-
-        Access token used for authenticating the client. If not provided, an
-        `api_key` must be provided to authenticate.
-
-
-        The access token is generated using both an API key and a Secret key,
-        which provides an additional layer of security compared to using just an
-        API key.
-
-
-        For more details, refer to the [Authentication Strategies
-        Guide](/docs/introduction/api-key#authentication-strategies).
     config_id:
       type: optional<string>
       docs: >-
@@ -47,13 +32,6 @@ channel:

         Include this parameter to apply a specific version of an EVI
         configuration. If omitted, the latest version will be applied.
-    event_limit:
-      type: optional<integer>
-      docs: >-
-        The maximum number of chat events to return from chat history. By
-        default, the system returns up to 300 events (100 events per page × 3
-        pages). Set this parameter to a smaller value to limit the number of
-        events returned.
     resumed_chat_group_id:
       type: optional<string>
       docs: >-
@@ -98,6 +76,12 @@ channel:
         Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs
         of all Chat Groups associated with an API key. This endpoint returns a
         list of all available chat groups.
+    voice_id:
+      type: optional<string>
+      docs: >-
+        The name or ID of the voice from the `Voice Library` to be used as the
+        speaker for this EVI session. This will override the speaker set in the
+        selected configuration.
     verbose_transcription:
       type: optional<boolean>
       default: false
@@ -109,6 +93,28 @@ channel:
         field on a
         [UserMessage](/reference/empathic-voice-interface-evi/chat/chat#receive.UserMessage.type)
         denotes whether the message is "interim" or "final."
+    event_limit:
+      type: optional<integer>
+      docs: >-
+        The maximum number of chat events to return from chat history. By
+        default, the system returns up to 300 events (100 events per page × 3
+        pages). Set this parameter to a smaller value to limit the number of
+        events returned.
+    access_token:
+      type: optional<string>
+      default: ''
+      docs: >-
+        Access token used for authenticating the client. If not provided, an
+        `api_key` must be provided to authenticate.
+
+
+        The access token is generated using both an API key and a Secret key,
+        which provides an additional layer of security compared to using just an
+        API key.
+
+
+        For more details, refer to the [Authentication Strategies
+        Guide](/docs/introduction/api-key#authentication-strategies).
     api_key:
       type: optional<string>
       default: ''
@@ -130,8 +136,8 @@ channel:
     - messages:
         - type: publish
           body:
-            data: data
             type: audio_input
+            data: data
         - type: subscribe
           body:
             type: assistant_end
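The practical upshot of the `voice_id` parameter added above is a per-session speaker override when opening the chat socket. A minimal TypeScript sketch, assuming the generated connect options expose the new query parameter as `voiceId` (verify against the installed 0.13.0 typings; the IDs below are placeholders):

import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

// `voiceId` is assumed to mirror the new `voice_id` query parameter; it
// overrides the speaker set in the selected EVI configuration.
const socket = client.empathicVoice.chat.connect({
    configId: "<your-config-id>",   // placeholder
    voiceId: "<voice-name-or-id>",  // placeholder; a name or ID from the Voice Library
});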
@@ -12,10 +12,10 @@ webhooks:
           chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
           chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
           config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
+          event_name: chat_ended
+          end_time: 1716244958546
           duration_seconds: 180
           end_reason: USER_ENDED
-          end_time: 1716244958546
-          event_name: chat_ended
     docs: Sent when an EVI chat ends.
   chatStarted:
     audiences: []
@@ -28,7 +28,7 @@ webhooks:
           chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
           chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
           config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
-          chat_start_type: new_chat_group
           event_name: chat_started
           start_time: 1716244940648
+          chat_start_type: new_chat_group
     docs: Sent when an EVI chat is started.
@@ -141,10 +141,16 @@ service:
         evi_version:
           type: string
           docs: >-
-            Specifies the EVI version to use. Use `"1"` for version 1, or
-            `"2"` for the latest enhanced version. For a detailed comparison
-            of the two versions, refer to our
-            [guide](/docs/speech-to-speech-evi/configuration/evi-version).
+            Specifies the EVI version to use. See our [EVI Version
+            Guide](/docs/speech-to-speech-evi/configuration/evi-version) for
+            differences between versions.
+
+
+            **We're officially sunsetting EVI versions 1 and 2 on August 30,
+            2025**. To keep things running smoothly, be sure to [migrate to
+            EVI
+            3](/docs/speech-to-speech-evi/configuration/evi-version#migrating-to-evi-3)
+            before then.
         name:
           type: string
           docs: Name applied to all versions of a particular Config.
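Given the sunset notice above, configs created through the SDK should pin `evi_version` to `"3"`. A minimal sketch against the documented create-config endpoint (the config name is a placeholder; field names follow the generated camelCase types):

import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

// EVI 1 and 2 are sunset on August 30, 2025; new configs should target EVI 3.
const config = await client.empathicVoice.configs.createConfig({
    eviVersion: "3",
    name: "my-evi3-config", // placeholder name
});
console.log(config.id);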
@@ -34,23 +34,6 @@ service:
       request:
         body:
           type: PostedTts
-        query-parameters:
-          access_token:
-            type: optional<string>
-            default: ''
-            docs: >-
-              Access token used for authenticating the client. If not provided,
-              an `api_key` must be provided to authenticate.
-
-
-              The access token is generated using both an API key and a Secret
-              key, which provides an additional layer of security compared to
-              using just an API key.
-
-
-              For more details, refer to the [Authentication Strategies
-              Guide](/docs/introduction/api-key#authentication-strategies).
-        name: SynthesizeJsonRequest
         content-type: application/json
       response:
         docs: Successful Response
@@ -137,26 +120,30 @@ service:
            format:
              type: mp3
            num_generations: 1
-    synthesize-file-streaming:
-      path: /v0/tts/stream/file
+    synthesize-json-streaming:
+      path: /v0/tts/stream/json
       method: POST
       auth: true
       docs: >-
         Streams synthesized speech using the specified voice. If no voice is
         provided, a novel voice will be generated dynamically. Optionally,
         additional context can be included to influence the speech's style and
-        prosody.
+        prosody.
+
+
+        The response is a stream of JSON objects including audio encoded in
+        base64.
       source:
         openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed File)
+      display-name: Text-to-speech (Streamed JSON)
       request:
         body:
           type: PostedTts
         content-type: application/json
-      response:
-        docs: OK
-        type: file
-        status-code: 200
+      response-stream:
+        docs: Successful Response
+        type: SnippetAudioChunk
+        format: json
       errors:
         - UnprocessableEntityError
       examples:
@@ -168,30 +155,26 @@ service:
            voice:
              name: Male English Actor
              provider: HUME_AI
-    synthesize-json-streaming:
-      path: /v0/tts/stream/json
+    synthesize-file-streaming:
+      path: /v0/tts/stream/file
       method: POST
       auth: true
       docs: >-
         Streams synthesized speech using the specified voice. If no voice is
         provided, a novel voice will be generated dynamically. Optionally,
         additional context can be included to influence the speech's style and
-        prosody.
-
-
-        The response is a stream of JSON objects including audio encoded in
-        base64.
+        prosody.
       source:
         openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed JSON)
+      display-name: Text-to-speech (Streamed File)
       request:
         body:
           type: PostedTts
         content-type: application/json
-      response-stream:
-        docs: Successful Response
-        type: SnippetAudioChunk
-        format: json
+      response:
+        docs: OK
+        type: file
+        status-code: 200
       errors:
         - UnprocessableEntityError
       examples:
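The endpoint swap above is ordering only, but it is a useful reminder that `/v0/tts/stream/json` yields a stream of `SnippetAudioChunk` objects carrying base64 audio, while `/v0/tts/stream/file` returns raw bytes. A minimal consumption sketch, assuming the generated method name `synthesizeJsonStreaming`; note that 0.13.0 leaves `SnippetAudioChunk` untyped (`properties: {}`, see the types section below), so the chunk shape below comes from the wire format rather than the typings:

import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

const stream = await client.tts.synthesizeJsonStreaming({
    utterances: [{ text: "Hello from Octave.", description: "calm, warm" }],
});

for await (const chunk of stream) {
    // Each streamed JSON object carries base64-encoded audio; the cast is
    // needed because the 0.13.0 definition no longer types the chunk.
    const audio = Buffer.from((chunk as { audio: string }).audio, "base64");
    console.log(`received ${audio.length} bytes`);
}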
@@ -246,25 +229,25 @@ types:
       openapi: tts-openapi.yml
   ReturnGeneration:
     properties:
-      audio:
+      generation_id:
         type: string
         docs: >-
-          The generated audio output in the requested format, encoded as a
-          base64 string.
+          A unique ID associated with this TTS generation that can be used as
+          context for generating consistent speech style and prosody across
+          multiple requests.
       duration:
         type: double
         docs: Duration of the generated audio in seconds.
-      encoding:
-        type: AudioEncoding
       file_size:
         type: integer
         docs: Size of the generated audio in bytes.
-      generation_id:
+      encoding:
+        type: AudioEncoding
+      audio:
         type: string
         docs: >-
-          A unique ID associated with this TTS generation that can be used as
-          context for generating consistent speech style and prosody across
-          multiple requests.
+          The generated audio output in the requested format, encoded as a
+          base64 string.
       snippets:
         docs: >-
           A list of snippet groups where each group corresponds to an utterance
@@ -317,9 +300,18 @@ types:
          Utterances to use as context for generating consistent speech style
          and prosody across multiple requests. These will not be converted to
          speech output.
-      format:
-        type: optional<Format>
-        docs: Specifies the output audio file format.
+      utterances:
+        docs: >-
+          A list of **Utterances** to be converted to speech output.
+
+
+          An **Utterance** is a unit of input for
+          [Octave](/docs/text-to-speech-tts/overview), and includes input
+          `text`, an optional `description` to serve as the prompt for how the
+          speech should be delivered, an optional `voice` specification, and
+          additional controls to guide delivery for `speed` and
+          `trailing_silence`.
+        type: list<PostedUtterance>
       num_generations:
         type: optional<integer>
         docs: Number of generations of the audio to produce.
@@ -327,6 +319,9 @@ types:
        validation:
          min: 1
          max: 5
+      format:
+        type: optional<Format>
+        docs: Specifies the output audio file format.
       split_utterances:
         type: optional<boolean>
         docs: >-
@@ -355,18 +350,6 @@ types:
          if disabled, each chunk's audio will be its own audio file, each with
          its own headers (if applicable).
        default: false
-      utterances:
-        docs: >-
-          A list of **Utterances** to be converted to speech output.
-
-
-          An **Utterance** is a unit of input for
-          [Octave](/docs/text-to-speech-tts/overview), and includes input
-          `text`, an optional `description` to serve as the prompt for how the
-          speech should be delivered, an optional `voice` specification, and
-          additional controls to guide delivery for `speed` and
-          `trailing_silence`.
-        type: list<PostedUtterance>
       instant_mode:
         type: optional<boolean>
         docs: >-
@@ -393,14 +376,14 @@ types:
       openapi: tts-openapi.yml
   ReturnTts:
     properties:
-      generations:
-        type: list<ReturnGeneration>
       request_id:
         type: optional<string>
         docs: >-
           A unique ID associated with this request for tracking and
           troubleshooting. Use this ID when contacting [support](/support) for
           troubleshooting assistance.
+      generations:
+        type: list<ReturnGeneration>
     source:
       openapi: tts-openapi.yml
   ReturnVoice:
@@ -428,69 +411,41 @@ types:
       openapi: tts-openapi.yml
   Snippet:
     properties:
-      audio:
-        type: string
-        docs: >-
-          The segmented audio output in the requested format, encoded as a
-          base64 string.
-      generation_id:
-        type: string
-        docs: The generation ID this snippet corresponds to.
       id:
         type: string
         docs: A unique ID associated with this **Snippet**.
       text:
         type: string
         docs: The text for this **Snippet**.
+      generation_id:
+        type: string
+        docs: The generation ID this snippet corresponds to.
+      utterance_index:
+        type: optional<integer>
+        docs: The index of the utterance in the request this snippet corresponds to.
       transcribed_text:
         type: optional<string>
         docs: >-
           The transcribed text of the generated audio. It is only present if
           `instant_mode` is set to `false`.
-      utterance_index:
-        type: optional<integer>
-        docs: The index of the utterance in the request this snippet corresponds to.
-    source:
-      openapi: tts-openapi.yml
-  SnippetAudioChunk:
-    properties:
       audio:
         type: string
-        docs: The generated audio output chunk in the requested format.
-      chunk_index:
-        type: integer
-        docs: The index of the audio chunk in the snippet.
-      generation_id:
-        type: string
-        docs: >-
-          The generation ID of the parent snippet that this chunk corresponds
-          to.
-      is_last_chunk:
-        type: boolean
        docs: >-
-          Whether or not this is the last chunk streamed back from the decoder
-          for one input snippet.
-      snippet_id:
-        type: string
-        docs: The ID of the parent snippet that this chunk corresponds to.
-      text:
-        type: string
-        docs: The text of the parent snippet that this chunk corresponds to.
-      transcribed_text:
-        type: optional<string>
-        docs: >-
-          The transcribed text of the generated audio of the parent snippet that
-          this chunk corresponds to. It is only present if `instant_mode` is set
-          to `false`.
-      utterance_index:
-        type: optional<integer>
-        docs: >-
-          The index of the utterance in the request that the parent snippet of
-          this chunk corresponds to.
+          The segmented audio output in the requested format, encoded as a
+          base64 string.
+    source:
+      openapi: tts-openapi.yml
+  SnippetAudioChunk:
+    properties: {}
     source:
       openapi: tts-openapi.yml
   PostedUtterance:
     properties:
+      text:
+        type: string
+        docs: The input text to be synthesized into speech.
+        validation:
+          maxLength: 5000
       description:
         type: optional<string>
         docs: >-
@@ -512,33 +467,30 @@ types:
          guide](/docs/text-to-speech-tts/prompting) for design tips.
        validation:
          maxLength: 1000
+      voice:
+        type: optional<PostedUtteranceVoice>
+        docs: >-
+          The `name` or `id` associated with a **Voice** from the **Voice
+          Library** to be used as the speaker for this and all subsequent
+          `utterances`, until the `voice` field is updated again.
+
+          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
       speed:
         type: optional<double>
-        docs: Speed multiplier for the synthesized speech.
+        docs: >-
+          Speed multiplier for the synthesized speech. Extreme values below 0.75
+          and above 1.5 may sometimes cause instability to the generated output.
         default: 1
         validation:
-          min: 0.25
-          max: 3
-      text:
-        type: string
-        docs: The input text to be synthesized into speech.
-        validation:
-          maxLength: 5000
+          min: 0.5
+          max: 2
       trailing_silence:
         type: optional<double>
         docs: Duration of trailing silence (in seconds) to add to this utterance
-        default: 0.35
+        default: 0
         validation:
           min: 0
           max: 5
-      voice:
-        type: optional<PostedUtteranceVoice>
-        docs: >-
-          The `name` or `id` associated with a **Voice** from the **Voice
-          Library** to be used as the speaker for this and all subsequent
-          `utterances`, until the `voice` field is updated again.
-
-          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
     source:
       openapi: tts-openapi.yml
   ValidationErrorLocItem:
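To make the `speed` and `trailing_silence` changes concrete: the accepted speed range narrows from [0.25, 3] to [0.5, 2], and the default trailing silence drops from 0.35 s to 0 s. A request sketch within the new bounds (voice name copied from the examples above; field names per the generated camelCase types):

import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

const result = await client.tts.synthesizeJson({
    utterances: [
        {
            text: "Slow and deliberate.",
            voice: { name: "Male English Actor", provider: "HUME_AI" },
            speed: 0.75,          // values below 0.75 may destabilize output per the new docs
            trailingSilence: 1.5, // seconds; the default is now 0, not 0.35
        },
    ],
});
console.log(result.generations[0].duration);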
@@ -1,4 +1,4 @@
 {
   "organization" : "hume",
-  "version" : "0.64.10"
+  "version" : "0.65.42"
 }
@@ -1 +1,2 @@
+export {};
 export * from "./requests";
@@ -1 +1,2 @@
+export {};
 export * from "./requests";
@@ -1 +1,2 @@
+export {};
 export * from "./requests";
@@ -37,7 +37,11 @@ import * as Hume from "../../../../../../index";
  * }
  */
 export interface PostedConfig {
-    /** Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/speech-to-speech-evi/configuration/evi-version). */
+    /**
+     * Specifies the EVI version to use. See our [EVI Version Guide](/docs/speech-to-speech-evi/configuration/evi-version) for differences between versions.
+     *
+     * **We're officially sunsetting EVI versions 1 and 2 on August 30, 2025**. To keep things running smoothly, be sure to [migrate to EVI 3](/docs/speech-to-speech-evi/configuration/evi-version#migrating-to-evi-3) before then.
+     */
     eviVersion: string;
     /** Name applied to all versions of a particular Config. */
     name: string;
@@ -1 +1,2 @@
+export {};
 export * from "./requests";
@@ -1 +1,2 @@
+export {};
 export * from "./requests";
@@ -5,12 +5,12 @@
  * When provided, the output is an assistant end message.
  */
 export interface AssistantEnd {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; for an Assistant End message, this must be `assistant_end`.
      *
      * This message indicates the conclusion of the assistant’s response, signaling that the assistant has finished speaking for the current conversational turn.
      */
     type: "assistant_end";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
@@ -5,6 +5,8 @@
  * When provided, the input is spoken by EVI.
  */
 export interface AssistantInput {
+    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
+    type: "assistant_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -13,6 +15,4 @@ export interface AssistantInput {
      * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the user’s expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
      */
     text: string;
-    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
-    type: "assistant_input";
 }
@@ -6,20 +6,20 @@ import * as Hume from "../../../index";
  * When provided, the output is an assistant message.
  */
 export interface AssistantMessage {
+    /**
+     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
+     *
+     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
+     */
+    type: "assistant_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
-    fromText: boolean;
     /** ID of the assistant message. Allows the Assistant Message to be tracked and referenced. */
     id?: string;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
-    /**
-     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
-     *
-     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
-     */
-    type: "assistant_message";
+    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
+    fromText: boolean;
 }
@@ -6,16 +6,16 @@ import * as Hume from "../../../index";
  * When provided, the output is an Assistant Prosody message.
  */
 export interface AssistantProsody {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unique identifier for the segment. */
-    id?: string;
-    /** Inference model results. */
-    models: Hume.empathicVoice.Inference;
     /**
      * The type of message sent through the socket; for an Assistant Prosody message, this must be `assistant_PROSODY`.
      *
      * This message the expression measurement predictions of the assistant's audio output.
      */
     type: "assistant_prosody";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Inference model results. */
+    models: Hume.empathicVoice.Inference;
+    /** Unique identifier for the segment. */
+    id?: string;
 }
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface AudioConfiguration {
-    /** Number of audio channels. */
-    channels: number;
     /** Encoding format of the audio input, such as `linear16`. */
     encoding: Hume.empathicVoice.Encoding;
+    /** Number of audio channels. */
+    channels: number;
     /** Audio sample rate. Number of samples per second in the audio input, measured in Hertz. */
     sampleRate: number;
 }
@@ -5,6 +5,12 @@
  * When provided, the input is audio.
  */
 export interface AudioInput {
+    /**
+     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
+     *
+     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
+     */
+    type: "audio_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -17,10 +23,4 @@ export interface AudioInput {
      * Hume recommends streaming audio with a buffer window of 20 milliseconds (ms), or 100 milliseconds (ms) for web applications.
      */
     data: string;
-    /**
-     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
-     *
-     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
-     */
-    type: "audio_input";
 }
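The buffering guidance in the `AudioInput` docs above translates to sending each ~20 ms capture buffer (100 ms on web) as base64 over the socket. A minimal Node-side sketch, assuming the chat socket's `sendAudioInput` helper and a linear16 capture source feeding `onChunk` (the capture wiring itself is out of scope here):

import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });
const socket = client.empathicVoice.chat.connect({ configId: "<config-id>" }); // placeholder ID

// Called with each ~20 ms linear16 buffer from the microphone capture.
function onChunk(pcm: Uint8Array): void {
    socket.sendAudioInput({ data: Buffer.from(pcm).toString("base64") });
}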
@@ -5,14 +5,14 @@
  * The type of message sent through the socket; for an Audio Output message, this must be `audio_output`.
  */
 export interface AudioOutput {
+    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
+    type: "audio_output";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
-    data: string;
     /** ID of the audio output. Allows the Audio Output message to be tracked and referenced. */
     id: string;
     /** Index of the chunk of audio relative to the whole audio segment. */
     index: number;
-    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
-    type: "audio_output";
+    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
+    data: string;
 }
@@ -3,7 +3,7 @@
  */
 import * as Hume from "../../../index";
 export interface BuiltinToolConfig {
+    name: Hume.empathicVoice.BuiltInTool;
     /** Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. */
     fallbackContent?: string;
-    name: Hume.empathicVoice.BuiltInTool;
 }