hume 0.12.1 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (324)
  1. package/.mock/definition/empathic-voice/__package__.yml +760 -711
  2. package/.mock/definition/empathic-voice/chat.yml +29 -23
  3. package/.mock/definition/empathic-voice/chatWebhooks.yml +3 -3
  4. package/.mock/definition/empathic-voice/configs.yml +10 -4
  5. package/.mock/definition/tts/__package__.yml +77 -125
  6. package/.mock/fern.config.json +1 -1
  7. package/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
  8. package/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
  9. package/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
  10. package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
  11. package/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
  12. package/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
  13. package/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
  14. package/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
  15. package/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  16. package/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  17. package/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  18. package/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
  19. package/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  20. package/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  21. package/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  22. package/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  23. package/api/resources/empathicVoice/types/Context.d.ts +8 -14
  24. package/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  25. package/api/resources/empathicVoice/types/ContextType.js +1 -2
  26. package/api/resources/empathicVoice/types/LanguageModelType.d.ts +20 -1
  27. package/api/resources/empathicVoice/types/LanguageModelType.js +19 -0
  28. package/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
  29. package/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  30. package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
  31. package/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  32. package/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
  33. package/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
  34. package/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
  35. package/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  36. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +18 -14
  37. package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
  38. package/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
  39. package/api/resources/empathicVoice/types/Tool.d.ts +6 -6
  40. package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
  41. package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  42. package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  43. package/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
  44. package/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
  45. package/api/resources/empathicVoice/types/UserMessage.d.ts +14 -7
  46. package/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  47. package/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  48. package/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  49. package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  50. package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  51. package/api/resources/empathicVoice/types/index.d.ts +16 -16
  52. package/api/resources/empathicVoice/types/index.js +16 -16
  53. package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
  54. package/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
  55. package/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
  56. package/api/resources/tts/client/Client.d.ts +21 -23
  57. package/api/resources/tts/client/Client.js +50 -58
  58. package/api/resources/tts/client/index.d.ts +1 -1
  59. package/api/resources/tts/client/index.js +0 -15
  60. package/api/resources/tts/resources/voices/client/index.d.ts +1 -0
  61. package/api/resources/tts/types/PostedTts.d.ts +8 -8
  62. package/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  63. package/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  64. package/api/resources/tts/types/ReturnTts.d.ts +1 -1
  65. package/api/resources/tts/types/Snippet.d.ts +6 -6
  66. package/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
  67. package/core/fetcher/BinaryResponse.d.ts +17 -0
  68. package/core/fetcher/BinaryResponse.js +14 -0
  69. package/core/fetcher/Fetcher.d.ts +1 -1
  70. package/core/fetcher/ResponseWithBody.d.ts +4 -0
  71. package/core/fetcher/ResponseWithBody.js +6 -0
  72. package/core/fetcher/getFetchFn.js +3 -3
  73. package/core/fetcher/getResponseBody.js +33 -32
  74. package/core/fetcher/index.d.ts +1 -0
  75. package/core/file.d.ts +1 -0
  76. package/core/form-data-utils/FormDataWrapper.d.ts +5 -52
  77. package/core/form-data-utils/FormDataWrapper.js +104 -124
  78. package/core/index.d.ts +1 -0
  79. package/core/index.js +1 -0
  80. package/dist/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
  81. package/dist/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
  82. package/dist/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
  83. package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
  84. package/dist/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
  85. package/dist/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
  86. package/dist/api/resources/empathicVoice/types/AssistantEnd.d.ts +2 -2
  87. package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +2 -2
  88. package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +8 -8
  89. package/dist/api/resources/empathicVoice/types/AssistantProsody.d.ts +6 -6
  90. package/dist/api/resources/empathicVoice/types/AudioConfiguration.d.ts +2 -2
  91. package/dist/api/resources/empathicVoice/types/AudioInput.d.ts +6 -6
  92. package/dist/api/resources/empathicVoice/types/AudioOutput.d.ts +4 -4
  93. package/dist/api/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  94. package/dist/api/resources/empathicVoice/types/ChatMessage.d.ts +2 -2
  95. package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +8 -8
  96. package/dist/api/resources/empathicVoice/types/Context.d.ts +8 -14
  97. package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -3
  98. package/dist/api/resources/empathicVoice/types/ContextType.js +1 -2
  99. package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +20 -1
  100. package/dist/api/resources/empathicVoice/types/LanguageModelType.js +19 -0
  101. package/dist/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
  102. package/dist/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  103. package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +2 -2
  104. package/dist/api/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +2 -2
  105. package/dist/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
  106. package/dist/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
  107. package/dist/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
  108. package/dist/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  109. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +18 -14
  110. package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
  111. package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +29 -29
  112. package/dist/api/resources/empathicVoice/types/Tool.d.ts +6 -6
  113. package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +6 -6
  114. package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +16 -16
  115. package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +8 -8
  116. package/dist/api/resources/empathicVoice/types/UserInput.d.ts +2 -2
  117. package/dist/api/resources/empathicVoice/types/UserInterruption.d.ts +4 -4
  118. package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +14 -7
  119. package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +2 -1
  120. package/dist/api/resources/empathicVoice/types/VoiceProvider.js +1 -0
  121. package/dist/api/resources/empathicVoice/types/WebSocketError.d.ts +10 -10
  122. package/dist/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +8 -8
  123. package/dist/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +6 -6
  124. package/dist/api/resources/empathicVoice/types/index.d.ts +16 -16
  125. package/dist/api/resources/empathicVoice/types/index.js +16 -16
  126. package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
  127. package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
  128. package/dist/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
  129. package/dist/api/resources/tts/client/Client.d.ts +21 -23
  130. package/dist/api/resources/tts/client/Client.js +50 -58
  131. package/dist/api/resources/tts/client/index.d.ts +1 -1
  132. package/dist/api/resources/tts/client/index.js +0 -15
  133. package/dist/api/resources/tts/resources/voices/client/index.d.ts +1 -0
  134. package/dist/api/resources/tts/types/PostedTts.d.ts +8 -8
  135. package/dist/api/resources/tts/types/PostedUtterance.d.ts +6 -6
  136. package/dist/api/resources/tts/types/ReturnGeneration.d.ts +5 -5
  137. package/dist/api/resources/tts/types/ReturnTts.d.ts +1 -1
  138. package/dist/api/resources/tts/types/Snippet.d.ts +6 -6
  139. package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
  140. package/dist/core/fetcher/BinaryResponse.d.ts +17 -0
  141. package/dist/core/fetcher/BinaryResponse.js +14 -0
  142. package/dist/core/fetcher/Fetcher.d.ts +1 -1
  143. package/dist/core/fetcher/ResponseWithBody.d.ts +4 -0
  144. package/dist/core/fetcher/ResponseWithBody.js +6 -0
  145. package/dist/core/fetcher/getFetchFn.js +3 -3
  146. package/dist/core/fetcher/getResponseBody.js +33 -32
  147. package/dist/core/fetcher/index.d.ts +1 -0
  148. package/dist/core/file.d.ts +1 -0
  149. package/dist/core/form-data-utils/FormDataWrapper.d.ts +5 -52
  150. package/dist/core/form-data-utils/FormDataWrapper.js +104 -124
  151. package/dist/core/index.d.ts +1 -0
  152. package/dist/core/index.js +1 -0
  153. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  154. package/dist/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  155. package/dist/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  156. package/dist/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  157. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  158. package/dist/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  159. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  160. package/dist/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  161. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  162. package/dist/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  163. package/dist/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  164. package/dist/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  165. package/dist/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  166. package/dist/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  167. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  168. package/dist/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  169. package/dist/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  170. package/dist/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  171. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  172. package/dist/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  173. package/dist/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  174. package/dist/serialization/resources/empathicVoice/types/Context.js +1 -1
  175. package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  176. package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  177. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  178. package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +19 -0
  179. package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
  180. package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  181. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  182. package/dist/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  183. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  184. package/dist/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  185. package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
  186. package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  187. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +9 -9
  188. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +9 -9
  189. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  190. package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  191. package/dist/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  192. package/dist/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  193. package/dist/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  194. package/dist/serialization/resources/empathicVoice/types/Tool.js +3 -3
  195. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  196. package/dist/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  197. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  198. package/dist/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  199. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  200. package/dist/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  201. package/dist/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  202. package/dist/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  203. package/dist/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  204. package/dist/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  205. package/dist/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  206. package/dist/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  207. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  208. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  209. package/dist/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  210. package/dist/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  211. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  212. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  213. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  214. package/dist/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  215. package/dist/serialization/resources/empathicVoice/types/index.d.ts +16 -16
  216. package/dist/serialization/resources/empathicVoice/types/index.js +16 -16
  217. package/dist/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  218. package/dist/serialization/resources/tts/types/PostedTts.js +3 -3
  219. package/dist/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  220. package/dist/serialization/resources/tts/types/PostedUtterance.js +2 -2
  221. package/dist/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  222. package/dist/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  223. package/dist/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  224. package/dist/serialization/resources/tts/types/ReturnTts.js +1 -1
  225. package/dist/serialization/resources/tts/types/Snippet.d.ts +3 -3
  226. package/dist/serialization/resources/tts/types/Snippet.js +3 -3
  227. package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
  228. package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
  229. package/dist/version.d.ts +1 -1
  230. package/dist/version.js +1 -1
  231. package/jest.browser.config.mjs +10 -0
  232. package/jest.config.mjs +1 -0
  233. package/package.json +6 -7
  234. package/reference.md +25 -27
  235. package/serialization/resources/empathicVoice/types/AssistantEnd.d.ts +1 -1
  236. package/serialization/resources/empathicVoice/types/AssistantEnd.js +1 -1
  237. package/serialization/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  238. package/serialization/resources/empathicVoice/types/AssistantInput.js +1 -1
  239. package/serialization/resources/empathicVoice/types/AssistantMessage.d.ts +2 -2
  240. package/serialization/resources/empathicVoice/types/AssistantMessage.js +2 -2
  241. package/serialization/resources/empathicVoice/types/AssistantProsody.d.ts +2 -2
  242. package/serialization/resources/empathicVoice/types/AssistantProsody.js +2 -2
  243. package/serialization/resources/empathicVoice/types/AudioConfiguration.d.ts +1 -1
  244. package/serialization/resources/empathicVoice/types/AudioConfiguration.js +1 -1
  245. package/serialization/resources/empathicVoice/types/AudioInput.d.ts +1 -1
  246. package/serialization/resources/empathicVoice/types/AudioInput.js +1 -1
  247. package/serialization/resources/empathicVoice/types/AudioOutput.d.ts +2 -2
  248. package/serialization/resources/empathicVoice/types/AudioOutput.js +2 -2
  249. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.d.ts +1 -1
  250. package/serialization/resources/empathicVoice/types/BuiltinToolConfig.js +1 -1
  251. package/serialization/resources/empathicVoice/types/ChatMessage.d.ts +1 -1
  252. package/serialization/resources/empathicVoice/types/ChatMessage.js +1 -1
  253. package/serialization/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  254. package/serialization/resources/empathicVoice/types/ChatMetadata.js +2 -2
  255. package/serialization/resources/empathicVoice/types/Context.d.ts +1 -1
  256. package/serialization/resources/empathicVoice/types/Context.js +1 -1
  257. package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  258. package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  259. package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
  260. package/serialization/resources/empathicVoice/types/LanguageModelType.js +19 -0
  261. package/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
  262. package/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
  263. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  264. package/serialization/resources/empathicVoice/types/PauseAssistantMessage.js +1 -1
  265. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.d.ts +1 -1
  266. package/serialization/resources/empathicVoice/types/ResumeAssistantMessage.js +1 -1
  267. package/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
  268. package/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
  269. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +9 -9
  270. package/serialization/resources/empathicVoice/types/ReturnConfig.js +9 -9
  271. package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
  272. package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
  273. package/serialization/resources/empathicVoice/types/SessionSettings.d.ts +8 -8
  274. package/serialization/resources/empathicVoice/types/SessionSettings.js +8 -8
  275. package/serialization/resources/empathicVoice/types/Tool.d.ts +3 -3
  276. package/serialization/resources/empathicVoice/types/Tool.js +3 -3
  277. package/serialization/resources/empathicVoice/types/ToolCallMessage.d.ts +3 -3
  278. package/serialization/resources/empathicVoice/types/ToolCallMessage.js +3 -3
  279. package/serialization/resources/empathicVoice/types/ToolErrorMessage.d.ts +6 -6
  280. package/serialization/resources/empathicVoice/types/ToolErrorMessage.js +6 -6
  281. package/serialization/resources/empathicVoice/types/ToolResponseMessage.d.ts +2 -2
  282. package/serialization/resources/empathicVoice/types/ToolResponseMessage.js +2 -2
  283. package/serialization/resources/empathicVoice/types/UserInput.d.ts +1 -1
  284. package/serialization/resources/empathicVoice/types/UserInput.js +1 -1
  285. package/serialization/resources/empathicVoice/types/UserInterruption.d.ts +1 -1
  286. package/serialization/resources/empathicVoice/types/UserInterruption.js +1 -1
  287. package/serialization/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  288. package/serialization/resources/empathicVoice/types/UserMessage.js +3 -3
  289. package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  290. package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  291. package/serialization/resources/empathicVoice/types/WebSocketError.d.ts +3 -3
  292. package/serialization/resources/empathicVoice/types/WebSocketError.js +3 -3
  293. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.d.ts +4 -4
  294. package/serialization/resources/empathicVoice/types/WebhookEventChatEnded.js +4 -4
  295. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.d.ts +3 -3
  296. package/serialization/resources/empathicVoice/types/WebhookEventChatStarted.js +3 -3
  297. package/serialization/resources/empathicVoice/types/index.d.ts +16 -16
  298. package/serialization/resources/empathicVoice/types/index.js +16 -16
  299. package/serialization/resources/tts/types/PostedTts.d.ts +3 -3
  300. package/serialization/resources/tts/types/PostedTts.js +3 -3
  301. package/serialization/resources/tts/types/PostedUtterance.d.ts +2 -2
  302. package/serialization/resources/tts/types/PostedUtterance.js +2 -2
  303. package/serialization/resources/tts/types/ReturnGeneration.d.ts +3 -3
  304. package/serialization/resources/tts/types/ReturnGeneration.js +3 -3
  305. package/serialization/resources/tts/types/ReturnTts.d.ts +1 -1
  306. package/serialization/resources/tts/types/ReturnTts.js +1 -1
  307. package/serialization/resources/tts/types/Snippet.d.ts +3 -3
  308. package/serialization/resources/tts/types/Snippet.js +3 -3
  309. package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
  310. package/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
  311. package/version.d.ts +1 -1
  312. package/version.js +1 -1
  313. package/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  314. package/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  315. package/api/resources/tts/client/requests/index.d.ts +0 -1
  316. package/core/form-data-utils/toReadableStream.d.ts +0 -1
  317. package/core/form-data-utils/toReadableStream.js +0 -50
  318. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.d.ts +0 -36
  319. package/dist/api/resources/tts/client/requests/SynthesizeJsonRequest.js +0 -5
  320. package/dist/api/resources/tts/client/requests/index.d.ts +0 -1
  321. package/dist/core/form-data-utils/toReadableStream.d.ts +0 -1
  322. package/dist/core/form-data-utils/toReadableStream.js +0 -50
  323. /package/{api/resources/tts/client/requests/index.js → core/file.js} +0 -0
  324. /package/dist/{api/resources/tts/client/requests/index.js → core/file.js} +0 -0
package/api/resources/empathicVoice/types/UserInput.d.ts
@@ -7,6 +7,8 @@
  * Expression measurement results are not available for User Input messages, as the prosody model relies on audio input and cannot process text alone.
  */
 export interface UserInput {
+    /** The type of message sent through the socket; must be `user_input` for our server to correctly identify and process it as a User Input message. */
+    type: "user_input";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
     /**
@@ -15,6 +17,4 @@ export interface UserInput {
      * Expression measurement results are not available for User Input messages, as the prosody model relies on audio input and cannot process text alone.
      */
     text: string;
-    /** The type of message sent through the socket; must be `user_input` for our server to correctly identify and process it as a User Input message. */
-    type: "user_input";
 }
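The `UserInput` change is declaration-order only, so payloads remain wire-compatible across 0.12.x and 0.13.0. A minimal sketch of constructing the message against the new declaration (the `Hume` namespace import is the package's root export; the transport is assumed to be a connected EVI chat WebSocket, which is not shown in this diff):

```ts
import { Hume } from "hume";

// Shape taken from the reordered interface above; only declaration order changed.
const input: Hume.empathicVoice.UserInput = {
    type: "user_input", // must be "user_input" for the server to route it as a User Input message
    customSessionId: "my-session-id", // optional; correlates frontend and backend state
    text: "Tell me about David Hume.",
};

// Assumed transport: serialize and send over a connected EVI chat socket.
// socket.send(JSON.stringify(input));
```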
package/api/resources/empathicVoice/types/UserInterruption.d.ts
@@ -5,14 +5,14 @@
  * When provided, the output is an interruption.
  */
 export interface UserInterruption {
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Unix timestamp of the detected user interruption. */
-    time: number;
     /**
      * The type of message sent through the socket; for a User Interruption message, this must be `user_interruption`.
      *
      * This message indicates the user has interrupted the assistant’s response. EVI detects the interruption in real-time and sends this message to signal the interruption event. This message allows the system to stop the current audio playback, clear the audio queue, and prepare to handle new user input.
      */
     type: "user_interruption";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Unix timestamp of the detected user interruption. */
+    time: number;
 }
package/api/resources/empathicVoice/types/UserMessage.d.ts
@@ -6,22 +6,29 @@ import * as Hume from "../../../index";
  * When provided, the output is a user message.
  */
 export interface UserMessage {
+    /**
+     * The type of message sent through the socket; for a User Message, this must be `user_message`.
+     *
+     * This message contains both a transcript of the user’s input and the expression measurement predictions if the input was sent as an [Audio Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AudioInput.type). Expression measurement predictions are not provided for a [User Input message](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.type), as the prosody model relies on audio input and cannot process text alone.
+     */
+    type: "user_message";
     /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
     customSessionId?: string;
-    /** Indicates if this message was inserted into the conversation as text from a [User Input](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.text) message. */
-    fromText: boolean;
-    /** Indicates if this message contains an immediate and unfinalized transcript of the user’s audio input. If it does, words may be repeated across successive `UserMessage` messages as our transcription model becomes more confident about what was said with additional context. Interim messages are useful to detect if the user is interrupting during audio playback on the client. Even without a finalized transcription, along with [UserInterrupt](/reference/empathic-voice-interface-evi/chat/chat#receive.UserInterruption.type) messages, interim `UserMessages` are useful for detecting if the user is interrupting during audio playback on the client, signaling to stop playback in your application. Interim `UserMessages` will only be received if the [verbose_transcription](/reference/empathic-voice-interface-evi/chat/chat#request.query.verbose_transcription) query parameter is set to `true` in the handshake request. */
-    interim: boolean;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
     /** Inference model results. */
     models: Hume.empathicVoice.Inference;
     /** Start and End time of user message. */
     time: Hume.empathicVoice.MillisecondInterval;
+    /** Indicates if this message was inserted into the conversation as text from a [User Input](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.text) message. */
+    fromText: boolean;
     /**
-     * The type of message sent through the socket; for a User Message, this must be `user_message`.
+     * Indicates whether this `UserMessage` contains an interim (unfinalized) transcript.
      *
-     * This message contains both a transcript of the user’s input and the expression measurement predictions if the input was sent as an [Audio Input message](/reference/empathic-voice-interface-evi/chat/chat#send.AudioInput.type). Expression measurement predictions are not provided for a [User Input message](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.type), as the prosody model relies on audio input and cannot process text alone.
+     * - `true`: the transcript is provisional; words may be repeated or refined in subsequent `UserMessage` responses as additional audio is processed.
+     * - `false`: the transcript is final and complete.
+     *
+     * Interim transcripts are only sent when the [`verbose_transcription`](/reference/empathic-voice-interface-evi/chat/chat#request.query.verbose_transcription) query parameter is set to `true` in the initial handshake.
      */
-    type: "user_message";
+    interim: boolean;
 }
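The rewritten `interim` docs suggest a simple client pattern: treat any interim `UserMessage` as a signal to halt assistant playback, and only act on finalized transcripts. A sketch under those assumptions (`stopPlayback` is a hypothetical helper in your app; `message.content` follows the `ChatMessage` shape exported by this package, and interim messages only arrive when the socket was opened with `verbose_transcription=true`):

```ts
import { Hume } from "hume";

declare function stopPlayback(): void; // hypothetical playback control in your app

function handleUserMessage(msg: Hume.empathicVoice.UserMessage): void {
    if (msg.interim) {
        // Provisional transcript: the user is speaking over the assistant,
        // so stop audio playback and wait for the finalized message.
        stopPlayback();
        return;
    }
    // Final transcript: safe to act on.
    console.log(msg.message.content, msg.fromText ? "(from UserInput text)" : "(from audio)");
}
```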
package/api/resources/empathicVoice/types/VoiceProvider.d.ts
@@ -1,8 +1,9 @@
 /**
  * This file was auto-generated by Fern from our API Definition.
  */
-export type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE";
+export type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE" | "OCTAVE_COMBINED";
 export declare const VoiceProvider: {
     readonly HumeAi: "HUME_AI";
     readonly CustomVoice: "CUSTOM_VOICE";
+    readonly OctaveCombined: "OCTAVE_COMBINED";
 };
package/api/resources/empathicVoice/types/VoiceProvider.js
@@ -7,4 +7,5 @@ exports.VoiceProvider = void 0;
 exports.VoiceProvider = {
     HumeAi: "HUME_AI",
     CustomVoice: "CUSTOM_VOICE",
+    OctaveCombined: "OCTAVE_COMBINED",
 };
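The new `OCTAVE_COMBINED` member is purely additive, so existing `HUME_AI`/`CUSTOM_VOICE` code keeps compiling. A usage sketch against the updated declarations (assuming the standard re-export under the `Hume.empathicVoice` namespace, per the `index.d.ts` hunk further down):

```ts
import { Hume } from "hume";

// The const object mirrors the widened string-literal union.
const provider: Hume.empathicVoice.VoiceProvider =
    Hume.empathicVoice.VoiceProvider.OctaveCombined; // "OCTAVE_COMBINED"
```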
package/api/resources/empathicVoice/types/WebSocketError.d.ts
@@ -5,20 +5,20 @@
  * When provided, the output is an error message.
  */
 export interface WebSocketError {
-    /** Error code. Identifies the type of error encountered. */
-    code: string;
-    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
-    customSessionId?: string;
-    /** Detailed description of the error. */
-    message: string;
-    /** ID of the initiating request. */
-    requestId?: string;
-    /** Short, human-readable identifier and description for the error. See a complete list of error slugs on the [Errors page](/docs/resources/errors). */
-    slug: string;
     /**
      * The type of message sent through the socket; for a Web Socket Error message, this must be `error`.
      *
      * This message indicates a disruption in the WebSocket connection, such as an unexpected disconnection, protocol error, or data transmission issue.
      */
     type: "error";
+    /** Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. */
+    customSessionId?: string;
+    /** Error code. Identifies the type of error encountered. */
+    code: string;
+    /** Short, human-readable identifier and description for the error. See a complete list of error slugs on the [Errors page](/docs/resources/errors). */
+    slug: string;
+    /** Detailed description of the error. */
+    message: string;
+    /** ID of the initiating request. */
+    requestId?: string;
 }
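Field order in a `.d.ts` has no runtime effect, so an error handler written against 0.12.x continues to work unchanged. A minimal sketch over the reordered fields:

```ts
import { Hume } from "hume";

function handleSocketError(err: Hume.empathicVoice.WebSocketError): void {
    // `code` and `slug` identify the error class; `message` carries the detail.
    console.error(`EVI error ${err.code} (${err.slug}): ${err.message}`, {
        requestId: err.requestId,
        customSessionId: err.customSessionId,
    });
}
```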
package/api/resources/empathicVoice/types/WebhookEventChatEnded.d.ts
@@ -3,16 +3,16 @@
  */
 import * as Hume from "../../../index";
 export interface WebhookEventChatEnded extends Hume.empathicVoice.WebhookEventBase {
-    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
-    callerNumber?: string;
-    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
-    customSessionId?: string;
+    /** Always `chat_ended`. */
+    eventName?: "chat_ended";
+    /** Unix timestamp (in milliseconds) indicating when the session ended. */
+    endTime: number;
     /** Total duration of the session in seconds. */
     durationSeconds: number;
     /** Reason for the session's termination. */
     endReason: Hume.empathicVoice.WebhookEventChatStatus;
-    /** Unix timestamp (in milliseconds) indicating when the session ended. */
-    endTime: number;
-    /** Always `chat_ended`. */
-    eventName?: "chat_ended";
+    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
+    callerNumber?: string;
+    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
+    customSessionId?: string;
 }
package/api/resources/empathicVoice/types/WebhookEventChatStarted.d.ts
@@ -3,14 +3,14 @@
  */
 import * as Hume from "../../../index";
 export interface WebhookEventChatStarted extends Hume.empathicVoice.WebhookEventBase {
-    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
-    callerNumber?: string;
-    /** Indicates whether the chat is the first in a new Chat Group (`new_chat_group`) or the continuation of an existing chat group (`resumed_chat_group`). */
-    chatStartType: Hume.empathicVoice.WebhookEventChatStartType;
-    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
-    customSessionId?: string;
     /** Always `chat_started`. */
     eventName?: "chat_started";
     /** Unix timestamp (in milliseconds) indicating when the session started. */
     startTime: number;
+    /** Indicates whether the chat is the first in a new Chat Group (`new_chat_group`) or the continuation of an existing chat group (`resumed_chat_group`). */
+    chatStartType: Hume.empathicVoice.WebhookEventChatStartType;
+    /** Phone number of the caller in E.164 format (e.g., `+12223333333`). This field is included only if the Chat was created via the [Twilio phone calling](/docs/empathic-voice-interface-evi/phone-calling) integration. */
+    callerNumber?: string;
+    /** User-defined session ID. Relevant only when employing a [custom language model](/docs/empathic-voice-interface-evi/custom-language-model) in the EVI Config. */
+    customSessionId?: string;
 }
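Both webhook payloads keep the same fields, only reordered. A sketch of dispatching on `eventName` (parsing and signature verification of the incoming request are assumed to happen upstream):

```ts
import { Hume } from "hume";

type ChatLifecycleEvent =
    | Hume.empathicVoice.WebhookEventChatStarted
    | Hume.empathicVoice.WebhookEventChatEnded;

function onChatLifecycleEvent(event: ChatLifecycleEvent): void {
    if (event.eventName === "chat_started") {
        console.log(`chat started at ${event.startTime} (${event.chatStartType})`);
    } else if (event.eventName === "chat_ended") {
        console.log(`chat ended at ${event.endTime} after ${event.durationSeconds}s: ${event.endReason}`);
    }
}
```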
package/api/resources/empathicVoice/types/index.d.ts
@@ -1,35 +1,35 @@
-export * from "./AssistantEnd";
 export * from "./AssistantInput";
-export * from "./AssistantMessage";
-export * from "./AssistantProsody";
 export * from "./AudioConfiguration";
 export * from "./AudioInput";
-export * from "./AudioOutput";
 export * from "./BuiltInTool";
 export * from "./BuiltinToolConfig";
-export * from "./ChatMessageToolResult";
-export * from "./ChatMessage";
-export * from "./ChatMetadata";
 export * from "./Context";
 export * from "./ContextType";
-export * from "./EmotionScores";
 export * from "./Encoding";
-export * from "./WebSocketError";
 export * from "./ErrorLevel";
-export * from "./Inference";
-export * from "./MillisecondInterval";
 export * from "./PauseAssistantMessage";
-export * from "./ProsodyInference";
 export * from "./ResumeAssistantMessage";
-export * from "./Role";
 export * from "./SessionSettingsVariablesValue";
 export * from "./SessionSettings";
 export * from "./Tool";
-export * from "./ToolCallMessage";
 export * from "./ToolErrorMessage";
 export * from "./ToolResponseMessage";
 export * from "./ToolType";
 export * from "./UserInput";
+export * from "./AssistantEnd";
+export * from "./AssistantMessage";
+export * from "./AssistantProsody";
+export * from "./AudioOutput";
+export * from "./ChatMessageToolResult";
+export * from "./ChatMessage";
+export * from "./ChatMetadata";
+export * from "./EmotionScores";
+export * from "./WebSocketError";
+export * from "./Inference";
+export * from "./MillisecondInterval";
+export * from "./ProsodyInference";
+export * from "./Role";
+export * from "./ToolCallMessage";
 export * from "./UserInterruption";
 export * from "./UserMessage";
 export * from "./JsonMessage";
@@ -38,12 +38,12 @@ export * from "./LanguageModelType";
 export * from "./ModelProviderEnum";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
-export * from "./WebhookEventBase";
-export * from "./WebhookEvent";
 export * from "./WebhookEventChatEnded";
 export * from "./WebhookEventChatStartType";
 export * from "./WebhookEventChatStarted";
 export * from "./WebhookEventChatStatus";
+export * from "./WebhookEvent";
+export * from "./WebhookEventBase";
 export * from "./ErrorResponse";
 export * from "./ReturnPagedUserDefinedTools";
 export * from "./ReturnUserDefinedToolToolType";
package/api/resources/empathicVoice/types/index.js
@@ -14,38 +14,38 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./AssistantEnd"), exports);
 __exportStar(require("./AssistantInput"), exports);
-__exportStar(require("./AssistantMessage"), exports);
-__exportStar(require("./AssistantProsody"), exports);
 __exportStar(require("./AudioConfiguration"), exports);
 __exportStar(require("./AudioInput"), exports);
-__exportStar(require("./AudioOutput"), exports);
 __exportStar(require("./BuiltInTool"), exports);
 __exportStar(require("./BuiltinToolConfig"), exports);
-__exportStar(require("./ChatMessageToolResult"), exports);
-__exportStar(require("./ChatMessage"), exports);
-__exportStar(require("./ChatMetadata"), exports);
 __exportStar(require("./Context"), exports);
 __exportStar(require("./ContextType"), exports);
-__exportStar(require("./EmotionScores"), exports);
 __exportStar(require("./Encoding"), exports);
-__exportStar(require("./WebSocketError"), exports);
 __exportStar(require("./ErrorLevel"), exports);
-__exportStar(require("./Inference"), exports);
-__exportStar(require("./MillisecondInterval"), exports);
 __exportStar(require("./PauseAssistantMessage"), exports);
-__exportStar(require("./ProsodyInference"), exports);
 __exportStar(require("./ResumeAssistantMessage"), exports);
-__exportStar(require("./Role"), exports);
 __exportStar(require("./SessionSettingsVariablesValue"), exports);
 __exportStar(require("./SessionSettings"), exports);
 __exportStar(require("./Tool"), exports);
-__exportStar(require("./ToolCallMessage"), exports);
 __exportStar(require("./ToolErrorMessage"), exports);
 __exportStar(require("./ToolResponseMessage"), exports);
 __exportStar(require("./ToolType"), exports);
 __exportStar(require("./UserInput"), exports);
+__exportStar(require("./AssistantEnd"), exports);
+__exportStar(require("./AssistantMessage"), exports);
+__exportStar(require("./AssistantProsody"), exports);
+__exportStar(require("./AudioOutput"), exports);
+__exportStar(require("./ChatMessageToolResult"), exports);
+__exportStar(require("./ChatMessage"), exports);
+__exportStar(require("./ChatMetadata"), exports);
+__exportStar(require("./EmotionScores"), exports);
+__exportStar(require("./WebSocketError"), exports);
+__exportStar(require("./Inference"), exports);
+__exportStar(require("./MillisecondInterval"), exports);
+__exportStar(require("./ProsodyInference"), exports);
+__exportStar(require("./Role"), exports);
+__exportStar(require("./ToolCallMessage"), exports);
 __exportStar(require("./UserInterruption"), exports);
 __exportStar(require("./UserMessage"), exports);
 __exportStar(require("./JsonMessage"), exports);
@@ -54,12 +54,12 @@ __exportStar(require("./LanguageModelType"), exports);
 __exportStar(require("./ModelProviderEnum"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
-__exportStar(require("./WebhookEventBase"), exports);
-__exportStar(require("./WebhookEvent"), exports);
 __exportStar(require("./WebhookEventChatEnded"), exports);
 __exportStar(require("./WebhookEventChatStartType"), exports);
 __exportStar(require("./WebhookEventChatStarted"), exports);
 __exportStar(require("./WebhookEventChatStatus"), exports);
+__exportStar(require("./WebhookEvent"), exports);
+__exportStar(require("./WebhookEventBase"), exports);
 __exportStar(require("./ErrorResponse"), exports);
 __exportStar(require("./ReturnPagedUserDefinedTools"), exports);
 __exportStar(require("./ReturnUserDefinedToolToolType"), exports);
package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts
@@ -5,8 +5,6 @@ import * as environments from "../../../../../../environments";
 import * as core from "../../../../../../core";
 import * as Hume from "../../../../../index";
 import * as stream from "stream";
-import * as fs from "fs";
-import { Blob } from "buffer";
 export declare namespace Batch {
     interface Options {
         environment?: core.Supplier<environments.HumeEnvironment | string>;
@@ -86,14 +84,14 @@ export declare class Batch {
     /**
      * Start a new batch inference job.
      *
-     * @param {File[] | fs.ReadStream[] | Blob[]} file
+     * @param {core.FileLike[]} file
     * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request
     * @param {Batch.RequestOptions} requestOptions - Request-specific configuration.
     *
     * @example
     * await client.expressionMeasurement.batch.startInferenceJobFromLocalFile([fs.createReadStream("/path/to/your/file")], {})
     */
-    startInferenceJobFromLocalFile(file: File[] | fs.ReadStream[] | Blob[], request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, requestOptions?: Batch.RequestOptions): core.HttpResponsePromise<Hume.expressionMeasurement.batch.JobId>;
+    startInferenceJobFromLocalFile(file: core.FileLike[], request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, requestOptions?: Batch.RequestOptions): core.HttpResponsePromise<Hume.expressionMeasurement.batch.JobId>;
     private __startInferenceJobFromLocalFile;
     protected _getCustomAuthorizationHeaders(): Promise<{
         "X-Hume-Api-Key": string | undefined;
package/api/resources/expressionMeasurement/resources/batch/client/Client.js
@@ -384,7 +384,7 @@
     /**
      * Start a new batch inference job.
      *
-     * @param {File[] | fs.ReadStream[] | Blob[]} file
+     * @param {core.FileLike[]} file
     * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request
     * @param {Batch.RequestOptions} requestOptions - Request-specific configuration.
     *
package/api/resources/expressionMeasurement/resources/batch/client/index.d.ts
@@ -1 +1,2 @@
+export {};
 export * from "./requests";
package/api/resources/tts/client/Client.d.ts
@@ -37,32 +37,30 @@ export declare class Tts {
      *
      * The response includes the base64-encoded audio and metadata in JSON format.
      *
-     * @param {Hume.tts.SynthesizeJsonRequest} request
+     * @param {Hume.tts.PostedTts} request
     * @param {Tts.RequestOptions} requestOptions - Request-specific configuration.
     *
     * @throws {@link Hume.tts.UnprocessableEntityError}
     *
     * @example
     *     await client.tts.synthesizeJson({
-     *         body: {
+     *         utterances: [{
+     *                 text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
+     *                 description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
+     *             }],
+     *         context: {
     *             utterances: [{
-     *                     text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
-     *                     description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
-     *                 }],
-     *             context: {
-     *                 utterances: [{
-     *                         text: "How can people see beauty so differently?",
-     *                         description: "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question."
-     *                     }]
-     *             },
-     *             format: {
-     *                 type: "mp3"
-     *             },
-     *             numGenerations: 1
-     *         }
+     *                 text: "How can people see beauty so differently?",
+     *                 description: "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question."
+     *             }]
+     *         },
+     *         format: {
+     *             type: "mp3"
+     *         },
+     *         numGenerations: 1
     *     })
     */
-    synthesizeJson(request: Hume.tts.SynthesizeJsonRequest, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<Hume.tts.ReturnTts>;
+    synthesizeJson(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<Hume.tts.ReturnTts>;
     private __synthesizeJson;
     /**
      * Synthesizes one or more input texts into speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
@@ -72,12 +70,6 @@ export declare class Tts {
      */
     synthesizeFile(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
     private __synthesizeFile;
-    /**
-     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     * @throws {@link Hume.tts.UnprocessableEntityError}
-     */
-    synthesizeFileStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
-    private __synthesizeFileStreaming;
     /**
      * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
      *
@@ -85,6 +77,12 @@ export declare class Tts {
      */
     synthesizeJsonStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<core.Stream<Hume.tts.SnippetAudioChunk>>;
     private __synthesizeJsonStreaming;
+    /**
+     * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
+     * @throws {@link Hume.tts.UnprocessableEntityError}
+     */
+    synthesizeFileStreaming(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<stream.Readable>;
+    private __synthesizeFileStreaming;
     protected _getCustomAuthorizationHeaders(): Promise<{
         "X-Hume-Api-Key": string | undefined;
     }>;
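The practical upshot of the `SynthesizeJsonRequest` → `PostedTts` change: the request is no longer wrapped under a `body` key, and the `accessToken` query-parameter wrapper is gone (see the `Client.js` hunk below). The updated call, reproduced from the generated `@example`:

```ts
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function synthesize(): Promise<void> {
    // 0.13.0: pass the PostedTts fields at the top level, not under `body`.
    const result = await client.tts.synthesizeJson({
        utterances: [{
            text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
            description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
        }],
        format: { type: "mp3" },
        numGenerations: 1,
    });
    console.log(result); // ReturnTts: base64-encoded audio plus metadata
}
```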
package/api/resources/tts/client/Client.js
@@ -70,29 +70,27 @@
      *
      * The response includes the base64-encoded audio and metadata in JSON format.
      *
-     * @param {Hume.tts.SynthesizeJsonRequest} request
+     * @param {Hume.tts.PostedTts} request
     * @param {Tts.RequestOptions} requestOptions - Request-specific configuration.
     *
     * @throws {@link Hume.tts.UnprocessableEntityError}
     *
     * @example
     *     await client.tts.synthesizeJson({
-     *         body: {
+     *         utterances: [{
+     *                 text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
+     *                 description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
+     *             }],
+     *         context: {
     *             utterances: [{
-     *                     text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
-     *                     description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
-     *                 }],
-     *             context: {
-     *                 utterances: [{
-     *                         text: "How can people see beauty so differently?",
-     *                         description: "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question."
-     *                     }]
-     *             },
-     *             format: {
-     *                 type: "mp3"
-     *             },
-     *             numGenerations: 1
-     *         }
+     *                 text: "How can people see beauty so differently?",
+     *                 description: "A curious student with a clear and respectful tone, seeking clarification on Hume's ideas with a straightforward question."
+     *             }]
+     *         },
+     *         format: {
+     *             type: "mp3"
+     *         },
+     *         numGenerations: 1
     *     })
     */
     synthesizeJson(request, requestOptions) {
@@ -101,19 +99,13 @@
     __synthesizeJson(request, requestOptions) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
-            const { accessToken, body: _body } = request;
-            const _queryParams = {};
-            if (accessToken != null) {
-                _queryParams["access_token"] = accessToken;
-            }
             const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
                 url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts"),
                 method: "POST",
                 headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                 contentType: "application/json",
-                queryParameters: _queryParams,
                 requestType: "json",
-                body: serializers.tts.PostedTts.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }),
+                body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
                 timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                 maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                 abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
@@ -226,28 +218,47 @@
     }
     /**
      * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     * @throws {@link Hume.tts.UnprocessableEntityError}
+     *
+     * The response is a stream of JSON objects including audio encoded in base64.
     */
-    synthesizeFileStreaming(request, requestOptions) {
-        return core.HttpResponsePromise.fromPromise(this.__synthesizeFileStreaming(request, requestOptions));
+    synthesizeJsonStreaming(request, requestOptions) {
+        return core.HttpResponsePromise.fromPromise(this.__synthesizeJsonStreaming(request, requestOptions));
     }
-    __synthesizeFileStreaming(request, requestOptions) {
+    __synthesizeJsonStreaming(request, requestOptions) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
             const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
-                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/file"),
+                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/json"),
                 method: "POST",
                 headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                 contentType: "application/json",
                 requestType: "json",
                 body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
-                responseType: "streaming",
+                responseType: "sse",
                 timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                 maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                 abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
             });
             if (_response.ok) {
-                return { data: _response.body, rawResponse: _response.rawResponse };
+                return {
+                    data: new core.Stream({
+                        stream: _response.body,
+                        parse: (data) => __awaiter(this, void 0, void 0, function* () {
+                            return serializers.tts.SnippetAudioChunk.parseOrThrow(data, {
+                                unrecognizedObjectKeys: "passthrough",
+                                allowUnrecognizedUnionMembers: true,
+                                allowUnrecognizedEnumValues: true,
+                                breadcrumbsPrefix: ["response"],
+                            });
+                        }),
+                        signal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
+                        eventShape: {
+                            type: "json",
+                            messageTerminator: "\n",
+                        },
+                    }),
+                    rawResponse: _response.rawResponse,
+                };
             }
             if (_response.error.reason === "status-code") {
                 switch (_response.error.statusCode) {
@@ -274,7 +285,7 @@
                         rawResponse: _response.rawResponse,
                     });
                 case "timeout":
-                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/file.");
+                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/json.");
                 case "unknown":
                     throw new errors.HumeError({
                         message: _response.error.errorMessage,
@@ -285,47 +296,28 @@
     }
     /**
      * Streams synthesized speech using the specified voice. If no voice is provided, a novel voice will be generated dynamically. Optionally, additional context can be included to influence the speech's style and prosody.
-     *
-     * The response is a stream of JSON objects including audio encoded in base64.
+     * @throws {@link Hume.tts.UnprocessableEntityError}
     */
-    synthesizeJsonStreaming(request, requestOptions) {
-        return core.HttpResponsePromise.fromPromise(this.__synthesizeJsonStreaming(request, requestOptions));
+    synthesizeFileStreaming(request, requestOptions) {
+        return core.HttpResponsePromise.fromPromise(this.__synthesizeFileStreaming(request, requestOptions));
     }
-    __synthesizeJsonStreaming(request, requestOptions) {
+    __synthesizeFileStreaming(request, requestOptions) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
             const _response = yield ((_a = this._options.fetcher) !== null && _a !== void 0 ? _a : core.fetcher)({
-                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/json"),
+                url: (0, url_join_1.default)((_c = (_b = (yield core.Supplier.get(this._options.baseUrl))) !== null && _b !== void 0 ? _b : (yield core.Supplier.get(this._options.environment))) !== null && _c !== void 0 ? _c : environments.HumeEnvironment.Production, "v0/tts/stream/file"),
                 method: "POST",
                 headers: (0, headers_js_1.mergeHeaders)((_d = this._options) === null || _d === void 0 ? void 0 : _d.headers, (0, headers_js_1.mergeOnlyDefinedHeaders)(Object.assign({}, (yield this._getCustomAuthorizationHeaders()))), requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.headers),
                 contentType: "application/json",
                 requestType: "json",
                 body: serializers.tts.PostedTts.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
-                responseType: "sse",
+                responseType: "streaming",
                 timeoutMs: (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeoutInSeconds) != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
                 maxRetries: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.maxRetries,
                 abortSignal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
             });
             if (_response.ok) {
-                return {
-                    data: new core.Stream({
-                        stream: _response.body,
-                        parse: (data) => __awaiter(this, void 0, void 0, function* () {
-                            return serializers.tts.SnippetAudioChunk.parseOrThrow(data, {
-                                unrecognizedObjectKeys: "passthrough",
-                                allowUnrecognizedUnionMembers: true,
-                                allowUnrecognizedEnumValues: true,
-                                breadcrumbsPrefix: ["response"],
-                            });
-                        }),
-                        signal: requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.abortSignal,
-                        eventShape: {
-                            type: "json",
-                            messageTerminator: "\n",
-                        },
-                    }),
-                    rawResponse: _response.rawResponse,
-                };
+                return { data: _response.body, rawResponse: _response.rawResponse };
             }
             if (_response.error.reason === "status-code") {
                 switch (_response.error.statusCode) {
@@ -352,7 +344,7 @@
                         rawResponse: _response.rawResponse,
                     });
                 case "timeout":
-                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/json.");
+                    throw new errors.HumeTimeoutError("Timeout exceeded when calling POST /v0/tts/stream/file.");
                 case "unknown":
                     throw new errors.HumeError({
                         message: _response.error.errorMessage,
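With `responseType: "sse"` and a newline message terminator, `synthesizeJsonStreaming` now yields parsed `SnippetAudioChunk` values through `core.Stream` rather than a raw byte stream; Fern-generated `core.Stream` objects are async-iterable, which is assumed in this consumption sketch:

```ts
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function streamSpeech(): Promise<void> {
    const chunks = await client.tts.synthesizeJsonStreaming({
        utterances: [{ text: "Hello from the 0.13.0 streaming endpoint." }],
    });
    for await (const chunk of chunks) {
        // Each newline-delimited JSON event parses into a SnippetAudioChunk
        // carrying base64-encoded audio; decoding and playback are up to the caller.
        console.log("received audio chunk", chunk);
    }
}
```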
package/api/resources/tts/client/index.d.ts
@@ -1 +1 @@
-export * from "./requests";
+export {};