hume 0.12.2 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (289)
  1. package/.mock/definition/empathic-voice/__package__.yml +667 -712
  2. package/.mock/definition/empathic-voice/chat.yml +29 -23
  3. package/.mock/definition/empathic-voice/chatWebhooks.yml +3 -3
  4. package/.mock/definition/tts/__package__.yml +70 -87
  5. package/.mock/fern.config.json +1 -1
  6. package/api/resources/empathicVoice/client/index.d.ts +0 -1
  7. package/api/resources/empathicVoice/client/index.js +0 -15
  8. package/api/resources/empathicVoice/errors/index.d.ts +0 -1
  9. package/api/resources/empathicVoice/errors/index.js +0 -1
  10. package/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
  11. package/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
  12. package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  13. package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  14. package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  15. package/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
  16. package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  17. package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  18. package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  19. package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  20. package/api/resources/empathicVoice/types/Context.d.ts +8 -14
  21. package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  22. package/api/resources/empathicVoice/types/ContextType.js +1 -2
  23. package/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
  24. package/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
  25. package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
  26. package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  27. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
  28. package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
  29. package/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
  30. package/api/resources/empathicVoice/types/Tool.d.ts +6 -6
  31. package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
  32. package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  33. package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  34. package/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
  35. package/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
  36. package/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
  37. package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  38. package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  39. package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  40. package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  41. package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  42. package/api/resources/empathicVoice/types/index.d.ts +16 -17
  43. package/api/resources/empathicVoice/types/index.js +16 -17
  44. package/api/resources/tts/client/Client.d.ts +21 -23
  45. package/api/resources/tts/client/Client.js +50 -58
  46. package/api/resources/tts/client/index.d.ts +0 -1
  47. package/api/resources/tts/client/index.js +0 -15
  48. package/api/resources/tts/types/PostedTts.d.ts +8 -8
  49. package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  50. package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  51. package/api/resources/tts/types/ReturnTts.d.ts +1 -1
  52. package/api/resources/tts/types/Snippet.d.ts +6 -6
  53. package/dist/api/resources/empathicVoice/client/index.d.ts +0 -1
  54. package/dist/api/resources/empathicVoice/client/index.js +0 -15
  55. package/dist/api/resources/empathicVoice/errors/index.d.ts +0 -1
  56. package/dist/api/resources/empathicVoice/errors/index.js +0 -1
  57. package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
  58. package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
  59. package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  60. package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  61. package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  62. package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
  63. package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  64. package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  65. package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  66. package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  67. package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
  68. package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  69. package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
  70. package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +4 -1
  71. package/dist/api/resources/empathicVoice/types/LanguageModelType.js +3 -0
  72. package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
  73. package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  74. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +7 -7
  75. package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
  76. package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
  77. package/dist/api/resources/empathicVoice/types/Tool.d.ts +6 -6
  78. package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
  79. package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  80. package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  81. package/dist/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
  82. package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
  83. package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +12 -12
  84. package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  85. package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  86. package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  87. package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  88. package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  89. package/dist/api/resources/empathicVoice/types/index.d.ts +16 -17
  90. package/dist/api/resources/empathicVoice/types/index.js +16 -17
  91. package/dist/api/resources/tts/client/Client.d.ts +21 -23
  92. package/dist/api/resources/tts/client/Client.js +50 -58
  93. package/dist/api/resources/tts/client/index.d.ts +0 -1
  94. package/dist/api/resources/tts/client/index.js +0 -15
  95. package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
  96. package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  97. package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  98. package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
  99. package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
  100. package/dist/serialization/resources/empathicVoice/index.d.ts +0 -1
  101. package/dist/serialization/resources/empathicVoice/index.js +0 -1
  102. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  103. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  104. package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  105. package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  106. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  107. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  108. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  109. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  110. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  111. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  112. package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  113. package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  114. package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  115. package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  116. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  117. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  118. package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  119. package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  120. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  121. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  122. package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  123. package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
  124. package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  125. package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  126. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  127. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
  128. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  129. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  130. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  131. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  132. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
  133. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
  134. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  135. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  136. package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  137. package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  138. package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  139. package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
  140. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  141. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  142. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  143. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  144. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  145. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  146. package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  147. package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  148. package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  149. package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  150. package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  151. package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  152. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  153. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  154. package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  155. package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  156. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  157. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  158. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  159. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  160. package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -17
  161. package/dist/serialization/resources/empathicVoice/types/index.js +16 -17
  162. package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  163. package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
  164. package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  165. package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
  166. package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  167. package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  168. package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  169. package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
  170. package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
  171. package/dist/serialization/resources/tts/types/Snippet.js +3 -3
  172. package/dist/version.d.ts +1 -1
  173. package/dist/version.js +1 -1
  174. package/package.json +1 -1
  175. package/reference.md +24 -78
  176. package/serialization/resources/empathicVoice/index.d.ts +0 -1
  177. package/serialization/resources/empathicVoice/index.js +0 -1
  178. package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  179. package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  180. package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  181. package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  182. package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  183. package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  184. package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  185. package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  186. package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  187. package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  188. package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  189. package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  190. package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  191. package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  192. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  193. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  194. package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  195. package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  196. package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  197. package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  198. package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  199. package/serialization/resources/empathicVoice/types/Context.js +1 -1
  200. package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  201. package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  202. package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  203. package/serialization/resources/empathicVoice/types/LanguageModelType.js +3 -0
  204. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  205. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  206. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  207. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  208. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +8 -8
  209. package/serialization/resources/empathicVoice/types/ReturnConfig.js +8 -8
  210. package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  211. package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  212. package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  213. package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  214. package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  215. package/serialization/resources/empathicVoice/types/Tool.js +3 -3
  216. package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  217. package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  218. package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  219. package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  220. package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  221. package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  222. package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  223. package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  224. package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  225. package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  226. package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  227. package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  228. package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  229. package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  230. package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  231. package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  232. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  233. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  234. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  235. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  236. package/serialization/resources/empathicVoice/types/index.d.ts +16 -17
  237. package/serialization/resources/empathicVoice/types/index.js +16 -17
  238. package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  239. package/serialization/resources/tts/types/PostedTts.js +3 -3
  240. package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  241. package/serialization/resources/tts/types/PostedUtterance.js +2 -2
  242. package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  243. package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  244. package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  245. package/serialization/resources/tts/types/ReturnTts.js +1 -1
  246. package/serialization/resources/tts/types/Snippet.d.ts +3 -3
  247. package/serialization/resources/tts/types/Snippet.js +3 -3
  248. package/version.d.ts +1 -1
  249. package/version.js +1 -1
  250. package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  251. package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
  252. package/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
  253. package/api/resources/empathicVoice/client/requests/index.js +0 -2
  254. package/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
  255. package/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
  256. package/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
  257. package/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
  258. package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  259. package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  260. package/api/resources/tts/client/requests/index.d.ts +0 -1
  261. package/api/resources/tts/client/requests/index.js +0 -2
  262. package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  263. package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -5
  264. package/dist/api/resources/empathicVoice/client/requests/index.d.ts +0 -1
  265. package/dist/api/resources/empathicVoice/client/requests/index.js +0 -2
  266. package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +0 -9
  267. package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.js +0 -52
  268. package/dist/api/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -7
  269. package/dist/api/resources/empathicVoice/types/SupportsToolUse.js +0 -5
  270. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  271. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  272. package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
  273. package/dist/api/resources/tts/client/requests/index.js +0 -2
  274. package/dist/serialization/resources/empathicVoice/client/index.d.ts +0 -1
  275. package/dist/serialization/resources/empathicVoice/client/index.js +0 -17
  276. package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  277. package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
  278. package/dist/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
  279. package/dist/serialization/resources/empathicVoice/client/requests/index.js +0 -5
  280. package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
  281. package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
  282. package/serialization/resources/empathicVoice/client/index.d.ts +0 -1
  283. package/serialization/resources/empathicVoice/client/index.js +0 -17
  284. package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +0 -12
  285. package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +0 -43
  286. package/serialization/resources/empathicVoice/client/requests/index.d.ts +0 -1
  287. package/serialization/resources/empathicVoice/client/requests/index.js +0 -5
  288. package/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +0 -13
  289. package/serialization/resources/empathicVoice/types/SupportsToolUse.js +0 -44
@@ -4,21 +4,6 @@ channel:
   auth: false
   docs: Chat with Empathic Voice Interface (EVI)
   query-parameters:
-    access_token:
-      type: optional<string>
-      default: ''
-      docs: >-
-        Access token used for authenticating the client. If not provided, an
-        `api_key` must be provided to authenticate.
-
-
-        The access token is generated using both an API key and a Secret key,
-        which provides an additional layer of security compared to using just an
-        API key.
-
-
-        For more details, refer to the [Authentication Strategies
-        Guide](/docs/introduction/api-key#authentication-strategies).
    config_id:
      type: optional<string>
      docs: >-
@@ -47,13 +32,6 @@ channel:
 
        Include this parameter to apply a specific version of an EVI
        configuration. If omitted, the latest version will be applied.
-    event_limit:
-      type: optional<integer>
-      docs: >-
-        The maximum number of chat events to return from chat history. By
-        default, the system returns up to 300 events (100 events per page × 3
-        pages). Set this parameter to a smaller value to limit the number of
-        events returned.
    resumed_chat_group_id:
      type: optional<string>
      docs: >-
@@ -98,6 +76,12 @@ channel:
        Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs
        of all Chat Groups associated with an API key. This endpoint returns a
        list of all available chat groups.
+    voice_id:
+      type: optional<string>
+      docs: >-
+        The name or ID of the voice from the `Voice Library` to be used as the
+        speaker for this EVI session. This will override the speaker set in the
+        selected configuration.
    verbose_transcription:
      type: optional<boolean>
      default: false
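
The `voice_id` query parameter is the notable addition to the chat channel: a per-session override of the configured speaker. A minimal sketch of how this might surface in the generated TypeScript client, assuming the parameter is exposed as a camelCased `voiceId` connect option (the exact option name depends on the generated signature):

    import { HumeClient } from "hume";

    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY });

    // Hypothetical: override the voice set on the EVI config for this session only.
    const socket = client.empathicVoice.chat.connect({
        configId: "1b60e1a0-cc59-424a-8d2c-189d354db3f3", // config ID reused from the webhook examples below
        voiceId: "Male English Actor", // name or ID of a Voice Library voice (illustrative)
    });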
@@ -109,6 +93,28 @@ channel:
        field on a
        [UserMessage](/reference/empathic-voice-interface-evi/chat/chat#receive.UserMessage.type)
        denotes whether the message is "interim" or "final."
+    event_limit:
+      type: optional<integer>
+      docs: >-
+        The maximum number of chat events to return from chat history. By
+        default, the system returns up to 300 events (100 events per page × 3
+        pages). Set this parameter to a smaller value to limit the number of
+        events returned.
+    access_token:
+      type: optional<string>
+      default: ''
+      docs: >-
+        Access token used for authenticating the client. If not provided, an
+        `api_key` must be provided to authenticate.
+
+
+        The access token is generated using both an API key and a Secret key,
+        which provides an additional layer of security compared to using just an
+        API key.
+
+
+        For more details, refer to the [Authentication Strategies
+        Guide](/docs/introduction/api-key#authentication-strategies).
    api_key:
      type: optional<string>
      default: ''
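
The `event_limit` and `access_token` parameters are unchanged; they only move below `verbose_transcription` in the definition. As a reminder of the flow `access_token` describes, a sketch using the SDK's token helper (assuming the `fetchAccessToken` export and its option names stay as they are today):

    import { fetchAccessToken } from "hume";

    // Mint a short-lived token from the API key + Secret key pair; it is then
    // passed as the `access_token` query parameter when opening the chat socket.
    const accessToken = await fetchAccessToken({
        apiKey: process.env.HUME_API_KEY!,
        secretKey: process.env.HUME_SECRET_KEY!,
    });
    const ws = new WebSocket(`wss://api.hume.ai/v0/evi/chat?access_token=${accessToken}`);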
@@ -130,8 +136,8 @@ channel:
   - messages:
       - type: publish
         body:
-          data: data
           type: audio_input
+          data: data
       - type: subscribe
         body:
           type: assistant_end
@@ -12,10 +12,10 @@ webhooks:
          chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
          chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
          config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
+          event_name: chat_ended
+          end_time: 1716244958546
          duration_seconds: 180
          end_reason: USER_ENDED
-          end_time: 1716244958546
-          event_name: chat_ended
    docs: Sent when an EVI chat ends.
  chatStarted:
    audiences: []
@@ -28,7 +28,7 @@ webhooks:
          chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f
          chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0
          config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3
-          chat_start_type: new_chat_group
          event_name: chat_started
          start_time: 1716244940648
+          chat_start_type: new_chat_group
    docs: Sent when an EVI chat is started.
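
Both webhook examples only reorder their payload fields. For orientation, a sketch of a receiver that branches on `event_name`, the field both payloads carry (Express is an assumed setup here, not something the package prescribes):

    import express from "express";

    const app = express();
    app.use(express.json());

    // Branch on event_name; the field names match the example payloads above.
    app.post("/hume-webhook", (req, res) => {
        const event = req.body;
        if (event.event_name === "chat_started") {
            console.log("chat started:", event.chat_id, event.chat_start_type);
        } else if (event.event_name === "chat_ended") {
            console.log("chat ended:", event.chat_id, event.end_reason, `${event.duration_seconds}s`);
        }
        res.sendStatus(200);
    });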
@@ -34,23 +34,6 @@ service:
      request:
        body:
          type: PostedTts
-        query-parameters:
-          access_token:
-            type: optional<string>
-            default: ''
-            docs: >-
-              Access token used for authenticating the client. If not provided,
-              an `api_key` must be provided to authenticate.
-
-
-              The access token is generated using both an API key and a Secret
-              key, which provides an additional layer of security compared to
-              using just an API key.
-
-
-              For more details, refer to the [Authentication Strategies
-              Guide](/docs/introduction/api-key#authentication-strategies).
-        name: SynthesizeJsonRequest
        content-type: application/json
      response:
        docs: Successful Response
@@ -137,26 +120,30 @@ service:
            format:
              type: mp3
            num_generations: 1
-    synthesize-file-streaming:
-      path: /v0/tts/stream/file
+    synthesize-json-streaming:
+      path: /v0/tts/stream/json
      method: POST
      auth: true
      docs: >-
        Streams synthesized speech using the specified voice. If no voice is
        provided, a novel voice will be generated dynamically. Optionally,
        additional context can be included to influence the speech's style and
-        prosody.
+        prosody.
+
+
+        The response is a stream of JSON objects including audio encoded in
+        base64.
      source:
        openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed File)
+      display-name: Text-to-speech (Streamed JSON)
      request:
        body:
          type: PostedTts
        content-type: application/json
-      response:
-        docs: OK
-        type: file
-        status-code: 200
+      response-stream:
+        docs: Successful Response
+        type: SnippetAudioChunk
+        format: json
      errors:
        - UnprocessableEntityError
      examples:
@@ -168,30 +155,26 @@ service:
            voice:
              name: Male English Actor
              provider: HUME_AI
-    synthesize-json-streaming:
-      path: /v0/tts/stream/json
+    synthesize-file-streaming:
+      path: /v0/tts/stream/file
      method: POST
      auth: true
      docs: >-
        Streams synthesized speech using the specified voice. If no voice is
        provided, a novel voice will be generated dynamically. Optionally,
        additional context can be included to influence the speech's style and
-        prosody.
-
-
-        The response is a stream of JSON objects including audio encoded in
-        base64.
+        prosody.
      source:
        openapi: tts-openapi.yml
-      display-name: Text-to-speech (Streamed JSON)
+      display-name: Text-to-speech (Streamed File)
      request:
        body:
          type: PostedTts
        content-type: application/json
-      response-stream:
-        docs: Successful Response
-        type: SnippetAudioChunk
-        format: json
+      response:
+        docs: OK
+        type: file
+        status-code: 200
      errors:
        - UnprocessableEntityError
      examples:
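
The two streaming endpoints swap positions but keep their behavior: `/v0/tts/stream/json` yields a stream of `SnippetAudioChunk` objects, while `/v0/tts/stream/file` returns raw audio. A sketch of consuming the JSON stream from the generated client, assuming Fern's usual kebab-to-camel method naming (`synthesizeJsonStreaming`):

    import { HumeClient } from "hume";

    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY });

    // Each chunk is a SnippetAudioChunk whose `audio` field is base64-encoded.
    const stream = await client.tts.synthesizeJsonStreaming({
        utterances: [{ text: "Streaming speech, chunk by chunk." }],
    });
    for await (const chunk of stream) {
        const bytes = Buffer.from(chunk.audio, "base64");
        console.log(`received ${bytes.length} bytes`);
    }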
@@ -246,25 +229,25 @@ types:
      openapi: tts-openapi.yml
  ReturnGeneration:
    properties:
-      audio:
+      generation_id:
        type: string
        docs: >-
-          The generated audio output in the requested format, encoded as a
-          base64 string.
+          A unique ID associated with this TTS generation that can be used as
+          context for generating consistent speech style and prosody across
+          multiple requests.
      duration:
        type: double
        docs: Duration of the generated audio in seconds.
-      encoding:
-        type: AudioEncoding
      file_size:
        type: integer
        docs: Size of the generated audio in bytes.
-      generation_id:
+      encoding:
+        type: AudioEncoding
+      audio:
        type: string
        docs: >-
-          A unique ID associated with this TTS generation that can be used as
-          context for generating consistent speech style and prosody across
-          multiple requests.
+          The generated audio output in the requested format, encoded as a
+          base64 string.
      snippets:
        docs: >-
          A list of snippet groups where each group corresponds to an utterance
@@ -317,9 +300,18 @@ types:
          Utterances to use as context for generating consistent speech style
          and prosody across multiple requests. These will not be converted to
          speech output.
-      format:
-        type: optional<Format>
-        docs: Specifies the output audio file format.
+      utterances:
+        docs: >-
+          A list of **Utterances** to be converted to speech output.
+
+
+          An **Utterance** is a unit of input for
+          [Octave](/docs/text-to-speech-tts/overview), and includes input
+          `text`, an optional `description` to serve as the prompt for how the
+          speech should be delivered, an optional `voice` specification, and
+          additional controls to guide delivery for `speed` and
+          `trailing_silence`.
+        type: list<PostedUtterance>
      num_generations:
        type: optional<integer>
        docs: Number of generations of the audio to produce.
@@ -327,6 +319,9 @@ types:
        validation:
          min: 1
          max: 5
+      format:
+        type: optional<Format>
+        docs: Specifies the output audio file format.
      split_utterances:
        type: optional<boolean>
        docs: >-
@@ -355,18 +350,6 @@ types:
          if disabled, each chunk's audio will be its own audio file, each with
          its own headers (if applicable).
        default: false
-      utterances:
-        docs: >-
-          A list of **Utterances** to be converted to speech output.
-
-
-          An **Utterance** is a unit of input for
-          [Octave](/docs/text-to-speech-tts/overview), and includes input
-          `text`, an optional `description` to serve as the prompt for how the
-          speech should be delivered, an optional `voice` specification, and
-          additional controls to guide delivery for `speed` and
-          `trailing_silence`.
-        type: list<PostedUtterance>
      instant_mode:
        type: optional<boolean>
        docs: >-
@@ -393,14 +376,14 @@ types:
      openapi: tts-openapi.yml
  ReturnTts:
    properties:
-      generations:
-        type: list<ReturnGeneration>
      request_id:
        type: optional<string>
        docs: >-
          A unique ID associated with this request for tracking and
          troubleshooting. Use this ID when contacting [support](/support) for
          troubleshooting assistance.
+      generations:
+        type: list<ReturnGeneration>
    source:
      openapi: tts-openapi.yml
  ReturnVoice:
@@ -428,28 +411,28 @@ types:
      openapi: tts-openapi.yml
  Snippet:
    properties:
-      audio:
-        type: string
-        docs: >-
-          The segmented audio output in the requested format, encoded as a
-          base64 string.
-      generation_id:
-        type: string
-        docs: The generation ID this snippet corresponds to.
      id:
        type: string
        docs: A unique ID associated with this **Snippet**.
      text:
        type: string
        docs: The text for this **Snippet**.
+      generation_id:
+        type: string
+        docs: The generation ID this snippet corresponds to.
+      utterance_index:
+        type: optional<integer>
+        docs: The index of the utterance in the request this snippet corresponds to.
      transcribed_text:
        type: optional<string>
        docs: >-
          The transcribed text of the generated audio. It is only present if
          `instant_mode` is set to `false`.
-      utterance_index:
-        type: optional<integer>
-        docs: The index of the utterance in the request this snippet corresponds to.
+      audio:
+        type: string
+        docs: >-
+          The segmented audio output in the requested format, encoded as a
+          base64 string.
    source:
      openapi: tts-openapi.yml
  SnippetAudioChunk:
@@ -458,6 +441,11 @@ types:
      openapi: tts-openapi.yml
  PostedUtterance:
    properties:
+      text:
+        type: string
+        docs: The input text to be synthesized into speech.
+        validation:
+          maxLength: 5000
      description:
        type: optional<string>
        docs: >-
@@ -479,6 +467,14 @@ types:
          guide](/docs/text-to-speech-tts/prompting) for design tips.
        validation:
          maxLength: 1000
+      voice:
+        type: optional<PostedUtteranceVoice>
+        docs: >-
+          The `name` or `id` associated with a **Voice** from the **Voice
+          Library** to be used as the speaker for this and all subsequent
+          `utterances`, until the `voice` field is updated again.
+
+          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
      speed:
        type: optional<double>
        docs: >-
@@ -488,11 +484,6 @@ types:
        validation:
          min: 0.5
          max: 2
-      text:
-        type: string
-        docs: The input text to be synthesized into speech.
-        validation:
-          maxLength: 5000
      trailing_silence:
        type: optional<double>
        docs: Duration of trailing silence (in seconds) to add to this utterance
@@ -500,14 +491,6 @@ types:
        validation:
          min: 0
          max: 5
-      voice:
-        type: optional<PostedUtteranceVoice>
-        docs: >-
-          The `name` or `id` associated with a **Voice** from the **Voice
-          Library** to be used as the speaker for this and all subsequent
-          `utterances`, until the `voice` field is updated again.
-
-          See our [voices guide](/docs/text-to-speech-tts/voices) for more details on generating and specifying **Voices**.
    source:
      openapi: tts-openapi.yml
  ValidationErrorLocItem:
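
`PostedUtterance` and `PostedTts` only reorder their fields (`text`, `utterances`, and `voice` move up), but since several hunks touch them, a sketch of a full request body may be useful; camelCased names follow the generated client's convention, and the voice name is illustrative:

    import { HumeClient } from "hume";

    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY });

    const result = await client.tts.synthesizeJson({
        utterances: [
            {
                text: "Welcome back.", // required, max 5000 characters
                description: "calm and reassuring", // optional delivery prompt, max 1000 characters
                voice: { name: "Male English Actor", provider: "HUME_AI" },
                speed: 1.1, // allowed range: 0.5–2
                trailingSilence: 0.5, // allowed range: 0–5 seconds
            },
        ],
        numGenerations: 1, // allowed range: 1–5
    });
    console.log(result.generations[0].generationId);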
@@ -1,4 +1,4 @@
 {
   "organization" : "hume",
-  "version" : "0.64.10"
+  "version" : "0.65.42"
 }
@@ -1,2 +1 @@
 export {};
-export * from "./requests";
@@ -1,17 +1,2 @@
 "use strict";
-var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    var desc = Object.getOwnPropertyDescriptor(m, k);
-    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-        desc = { enumerable: true, get: function() { return m[k]; } };
-    }
-    Object.defineProperty(o, k2, desc);
-}) : (function(o, m, k, k2) {
-    if (k2 === undefined) k2 = k;
-    o[k2] = m[k];
-}));
-var __exportStar = (this && this.__exportStar) || function(m, exports) {
-    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
-};
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./requests"), exports);
@@ -1,2 +1 @@
-export * from "./UnprocessableEntityError";
 export * from "./BadRequestError";
@@ -14,5 +14,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./UnprocessableEntityError"), exports);
 __exportStar(require("./BadRequestError"), exports);
@@ -5,12 +5,12 @@
  * When provided, the output is an assistant end message.
  */
 export interface AssistantEnd {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /**
      * The type of message sent through the socket; for an Assistant End message, this must be `assistant_end`.
      *
      * This message indicates the conclusion of the assistant’s response, signaling that the assistant has finished speaking for the current conversational turn.
      */
     type: "assistant_end";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
 }
@@ -5,6 +5,8 @@
  * When provided, the input is spoken by EVI.
  */
 export interface AssistantInput {
+    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
+    type: "assistant_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -13,6 +15,4 @@ export interface AssistantInput {
      * EVI uses this text to generate spoken audio using our proprietary expressive text-to-speech model. Our model adds appropriate emotional inflections and tones to the text based on the user’s expressions and the context of the conversation. The synthesized audio is streamed back to the user as an [Assistant Message](/reference/empathic-voice-interface-evi/chat/chat#receive.AssistantMessage.type).
      */
     text: string;
-    /** The type of message sent through the socket; must be `assistant_input` for our server to correctly identify and process it as an Assistant Input message. */
-    type: "assistant_input";
 }
@@ -6,20 +6,20 @@ import * as Hume from "../../../index";
  * When provided, the output is an assistant message.
  */
 export interface AssistantMessage {
+    /**
+     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
+     *
+     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
+     */
+    type: "assistant_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
-    fromText: boolean;
     /** ID of the assistant message. Allows the Assistant Message to be tracked and referenced. */
     id?: string;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
-    /**
-     * The type of message sent through the socket; for an Assistant Message, this must be `assistant_message`.
-     *
-     * This message contains both a transcript of the assistant’s response and the expression measurement predictions of the assistant’s audio output.
-     */
-    type: "assistant_message";
+    /** Indicates if this message was inserted into the conversation as text from an [Assistant Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AssistantInput.text). */
+    fromText: boolean;
 }
@@ -6,16 +6,16 @@ import * as Hume from "../../../index";
  * When provided, the output is an Assistant Prosody message.
  */
 export interface AssistantProsody {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unique identifier for the segment. */
-    id?: string;
-    /** Inference model results. */
-    models: Hume.empathicVoice.Inference;
     /**
      * The type of message sent through the socket; for an Assistant Prosody message, this must be `assistant_PROSODY`.
      *
      * This message the expression measurement predictions of the assistant's audio output.
      */
     type: "assistant_prosody";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Inference model results. */
+    models: Hume.empathicVoice.Inference;
+    /** Unique identifier for the segment. */
+    id?: string;
 }
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface AudioConfiguration {
-    /** Number of audio channels. */
-    channels: number;
     /** Encoding format of the audio input, such as `linear16`. */
     encoding: Hume.empathicVoice.Encoding;
+    /** Number of audio channels. */
+    channels: number;
     /** Audio sample rate. Number of samples per second in the audio input, measured in Hertz. */
     sampleRate: number;
 }
@@ -5,6 +5,12 @@
  * When provided, the input is audio.
  */
 export interface AudioInput {
+    /**
+     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
+     *
+     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
+     */
+    type: "audio_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -17,10 +23,4 @@ export interface AudioInput {
      * Hume recommends streaming audio with a buffer window of 20 milliseconds (ms), or 100 milliseconds (ms) for web applications.
      */
     data: string;
-    /**
-     * The type of message sent through the socket; must be `audio_input` for our server to correctly identify and process it as an Audio Input message.
-     *
-     * This message is used for sending audio input data to EVI for processing and expression measurement. Audio data should be sent as a continuous stream, encoded in Base64.
-     */
-    type: "audio_input";
 }
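
Moving `type` to the top of `AudioInput` does not change the wire shape; clients still send Base64-encoded chunks. A sketch of pushing audio through the SDK's socket wrapper, assuming its `sendAudioInput` helper keeps that name:

    import { HumeClient } from "hume";

    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY });
    const socket = client.empathicVoice.chat.connect({});

    // Send linear16 PCM in small (~20 ms) Base64-encoded chunks, as the
    // interface docs above recommend.
    function sendAudioChunk(pcm: Buffer): void {
        socket.sendAudioInput({ data: pcm.toString("base64") });
    }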
@@ -5,14 +5,14 @@
  * The type of message sent through the socket; for an Audio Output message, this must be `audio_output`.
  */
 export interface AudioOutput {
+    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
+    type: "audio_output";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
-    data: string;
     /** ID of the audio output. Allows the Audio Output message to be tracked and referenced. */
     id: string;
     /** Index of the chunk of audio relative to the whole audio segment. */
     index: number;
-    /** The type of message sent through the socket; for an Audio Output message, this must be `audio_output`. */
-    type: "audio_output";
+    /** Base64 encoded audio output. This encoded audio is transmitted to the client, where it can be decoded and played back as part of the user interaction. */
+    data: string;
 }
@@ -3,7 +3,7 @@
  */
 import * as Hume from "../../../index";
 export interface BuiltinToolConfig {
+    name: Hume.empathicVoice.BuiltInTool;
     /** Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. */
     fallbackContent?: string;
-    name: Hume.empathicVoice.BuiltInTool;
 }
@@ -3,10 +3,10 @@
  */
 import * as Hume from "../../../index";
 export interface ChatMessage {
-    /** Transcript of the message. */
-    content?: string;
     /** Role of who is providing the message. */
     role: Hume.empathicVoice.Role;
+    /** Transcript of the message. */
+    content?: string;
     /** Function call name and arguments. */
     toolCall?: Hume.empathicVoice.ToolCallMessage;
     /** Function call response from client. */
@@ -5,6 +5,14 @@
  * When provided, the output is a chat metadata message.
  */
 export interface ChatMetadata {
+    /**
+     * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`.
+     *
+     * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session.
+     */
+    type: "chat_metadata";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
     /**
      * ID of the Chat Group.
      *
@@ -15,14 +23,6 @@ export interface ChatMetadata {
     chatGroupId: string;
     /** ID of the Chat session. Allows the Chat session to be tracked and referenced. */
     chatId: string;
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
     /** ID of the initiating request. */
     requestId?: string;
-    /**
-     * The type of message sent through the socket; for a Chat Metadata message, this must be `chat_metadata`.
-     *
-     * The Chat Metadata message is the first message you receive after establishing a connection with EVI and contains important identifiers for the current Chat session.
-     */
-    type: "chat_metadata";
 }
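
The pattern running through all of these interface diffs is that the `type` literal moves to the first position. TypeScript narrows on the discriminant's value, not its position, so existing handlers are unaffected; a sketch:

    import * as Hume from "hume";

    declare function playAudio(buf: Buffer): void; // placeholder for app-specific playback

    // Narrowing on `type` works the same before and after the reordering.
    function handle(msg: Hume.empathicVoice.ChatMetadata | Hume.empathicVoice.AudioOutput): void {
        switch (msg.type) {
            case "chat_metadata":
                console.log("chat", msg.chatId, "in group", msg.chatGroupId);
                break;
            case "audio_output":
                playAudio(Buffer.from(msg.data, "base64"));
                break;
        }
    }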