hume 0.13.2 → 0.13.4

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (67)
  1. package/.mock/definition/empathic-voice/__package__.yml +25 -27
  2. package/.mock/definition/empathic-voice/chat.yml +10 -10
  3. package/.mock/definition/empathic-voice/configs.yml +1 -11
  4. package/.mock/definition/tts/__package__.yml +169 -120
  5. package/.mock/definition/tts/streamInput.yml +56 -0
  6. package/.mock/fern.config.json +1 -1
  7. package/api/resources/empathicVoice/resources/chat/client/Client.js +3 -0
  8. package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +1 -5
  9. package/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  10. package/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
  11. package/api/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  12. package/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  13. package/api/resources/empathicVoice/types/SessionSettings.d.ts +7 -7
  14. package/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
  15. package/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +2 -2
  16. package/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +3 -3
  17. package/api/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  18. package/api/resources/tts/types/PublishTts.d.ts +23 -0
  19. package/api/resources/tts/types/PublishTts.js +5 -0
  20. package/api/resources/tts/types/SnippetAudioChunk.d.ts +1 -1
  21. package/api/resources/tts/types/index.d.ts +7 -6
  22. package/api/resources/tts/types/index.js +7 -6
  23. package/dist/api/resources/empathicVoice/resources/chat/client/Client.js +3 -0
  24. package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +1 -5
  25. package/dist/api/resources/empathicVoice/types/AssistantInput.d.ts +1 -1
  26. package/dist/api/resources/empathicVoice/types/AssistantMessage.d.ts +1 -1
  27. package/dist/api/resources/empathicVoice/types/ChatMetadata.d.ts +2 -2
  28. package/dist/api/resources/empathicVoice/types/PauseAssistantMessage.d.ts +1 -1
  29. package/dist/api/resources/empathicVoice/types/SessionSettings.d.ts +7 -7
  30. package/dist/api/resources/empathicVoice/types/ToolCallMessage.d.ts +1 -1
  31. package/dist/api/resources/empathicVoice/types/ToolErrorMessage.d.ts +2 -2
  32. package/dist/api/resources/empathicVoice/types/ToolResponseMessage.d.ts +3 -3
  33. package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +3 -3
  34. package/dist/api/resources/tts/types/PublishTts.d.ts +23 -0
  35. package/dist/api/resources/tts/types/PublishTts.js +5 -0
  36. package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +1 -1
  37. package/dist/api/resources/tts/types/index.d.ts +7 -6
  38. package/dist/api/resources/tts/types/index.js +7 -6
  39. package/dist/serialization/resources/tts/types/PublishTts.d.ts +19 -0
  40. package/dist/serialization/resources/tts/types/PublishTts.js +50 -0
  41. package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +1 -1
  42. package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +1 -1
  43. package/dist/serialization/resources/tts/types/index.d.ts +7 -6
  44. package/dist/serialization/resources/tts/types/index.js +7 -6
  45. package/dist/version.d.ts +1 -1
  46. package/dist/version.js +1 -1
  47. package/dist/wrapper/EVIWebAudioPlayer.d.ts +6 -7
  48. package/dist/wrapper/EVIWebAudioPlayer.js +237 -73
  49. package/dist/wrapper/convertFrequencyScale.d.ts +1 -0
  50. package/dist/wrapper/convertFrequencyScale.js +28 -0
  51. package/dist/wrapper/generateEmptyFft.d.ts +1 -0
  52. package/dist/wrapper/generateEmptyFft.js +6 -0
  53. package/package.json +2 -1
  54. package/serialization/resources/tts/types/PublishTts.d.ts +19 -0
  55. package/serialization/resources/tts/types/PublishTts.js +50 -0
  56. package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +1 -1
  57. package/serialization/resources/tts/types/SnippetAudioChunk.js +1 -1
  58. package/serialization/resources/tts/types/index.d.ts +7 -6
  59. package/serialization/resources/tts/types/index.js +7 -6
  60. package/version.d.ts +1 -1
  61. package/version.js +1 -1
  62. package/wrapper/EVIWebAudioPlayer.d.ts +6 -7
  63. package/wrapper/EVIWebAudioPlayer.js +237 -73
  64. package/wrapper/convertFrequencyScale.d.ts +1 -0
  65. package/wrapper/convertFrequencyScale.js +28 -0
  66. package/wrapper/generateEmptyFft.d.ts +1 -0
  67. package/wrapper/generateEmptyFft.js +6 -0
@@ -19,10 +19,12 @@ var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (
  if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it");
  return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;
  };
- var _EVIWebAudioPlayer_instances, _a, _EVIWebAudioPlayer_DEFAULT_WORKLET_URL, _EVIWebAudioPlayer_DEFAULT_FFT_SIZE, _EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL, _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES, _EVIWebAudioPlayer_BYTE_MAX, _EVIWebAudioPlayer_ctx, _EVIWebAudioPlayer_workletNode, _EVIWebAudioPlayer_analyserNode, _EVIWebAudioPlayer_gainNode, _EVIWebAudioPlayer_initialized, _EVIWebAudioPlayer_playing, _EVIWebAudioPlayer_muted, _EVIWebAudioPlayer_volume, _EVIWebAudioPlayer_fft, _EVIWebAudioPlayer_fftTimer, _EVIWebAudioPlayer_fftOptions, _EVIWebAudioPlayer_linearHzToBark, _EVIWebAudioPlayer_startAnalyserPollingIfEnabled, _EVIWebAudioPlayer_emitError;
+ var _EVIWebAudioPlayer_instances, _a, _EVIWebAudioPlayer_DEFAULT_WORKLET_URL, _EVIWebAudioPlayer_DEFAULT_FFT_SIZE, _EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL, _EVIWebAudioPlayer_ctx, _EVIWebAudioPlayer_workletNode, _EVIWebAudioPlayer_analyserNode, _EVIWebAudioPlayer_gainNode, _EVIWebAudioPlayer_initialized, _EVIWebAudioPlayer_playing, _EVIWebAudioPlayer_muted, _EVIWebAudioPlayer_volume, _EVIWebAudioPlayer_disableAudioWorklet, _EVIWebAudioPlayer_fft, _EVIWebAudioPlayer_fftTimer, _EVIWebAudioPlayer_fftOptions, _EVIWebAudioPlayer_clipQueue, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, _EVIWebAudioPlayer_isProcessing, _EVIWebAudioPlayer_lastQueuedChunk, _EVIWebAudioPlayer_chunkBufferQueues, _EVIWebAudioPlayer_startAnalyserPollingIfEnabled, _EVIWebAudioPlayer_emitError, _EVIWebAudioPlayer_convertToAudioBuffer, _EVIWebAudioPlayer_getNextAudioBuffers, _EVIWebAudioPlayer_playNextClip;
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.EVIWebAudioPlayer = void 0;
  const convertBase64ToBlob_1 = require("./convertBase64ToBlob");
+ const convertFrequencyScale_1 = require("./convertFrequencyScale");
+ const generateEmptyFft_1 = require("./generateEmptyFft");
  /**
  * A sequential, glitch-free Web-Audio player for **EVI** audio output.
  *
@@ -51,7 +53,7 @@ class EVIWebAudioPlayer extends EventTarget {
  return __classPrivateFieldGet(this, _EVIWebAudioPlayer_fft, "f");
  }
  constructor(opts = {}) {
- var _b, _c;
+ var _b, _c, _d;
  super();
  _EVIWebAudioPlayer_instances.add(this);
  this.opts = opts;
@@ -63,29 +65,30 @@ class EVIWebAudioPlayer extends EventTarget {
  _EVIWebAudioPlayer_playing.set(this, false);
  _EVIWebAudioPlayer_muted.set(this, false);
  _EVIWebAudioPlayer_volume.set(this, void 0);
- _EVIWebAudioPlayer_fft.set(this, _a.emptyFft());
+ _EVIWebAudioPlayer_disableAudioWorklet.set(this, void 0);
+ _EVIWebAudioPlayer_fft.set(this, (0, generateEmptyFft_1.generateEmptyFft)());
  _EVIWebAudioPlayer_fftTimer.set(this, null);
  _EVIWebAudioPlayer_fftOptions.set(this, null);
+ _EVIWebAudioPlayer_clipQueue.set(this, []);
+ _EVIWebAudioPlayer_currentlyPlayingAudioBuffer.set(this, null);
+ _EVIWebAudioPlayer_isProcessing.set(this, false);
+ // chunkBufferQueues and #lastQueuedChunk are used to make sure that
+ // we don't play chunks out of order. #chunkBufferQueues is NOT the
+ // audio playback queue.
+ _EVIWebAudioPlayer_lastQueuedChunk.set(this, null);
+ _EVIWebAudioPlayer_chunkBufferQueues.set(this, {});
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_volume, (_b = opts.volume) !== null && _b !== void 0 ? _b : 1.0, "f");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_disableAudioWorklet, (_c = opts.disableAudioWorklet) !== null && _c !== void 0 ? _c : false, "f");
  // Resolve FFT options if enabled
- if ((_c = opts.fft) === null || _c === void 0 ? void 0 : _c.enabled) {
+ if ((_d = opts.fft) === null || _d === void 0 ? void 0 : _d.enabled) {
  const { size, interval, transform } = opts.fft;
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_fftOptions, {
  size: size !== null && size !== void 0 ? size : __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_FFT_SIZE),
  interval: interval !== null && interval !== void 0 ? interval : __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL),
- transform: transform !== null && transform !== void 0 ? transform : __classPrivateFieldGet(_a, _a, "m", _EVIWebAudioPlayer_linearHzToBark),
+ transform: transform !== null && transform !== void 0 ? transform : ((bins, sampleRate) => (0, convertFrequencyScale_1.convertLinearFrequenciesToBark)(bins, sampleRate)),
  }, "f");
  }
  }
- /**
- * Generate an empty FFT frame array.
- * Useful as an initial or placeholder FFT dataset before any real analysis.
- *
- * @returns A number[] filled with zeros, length equal to the Bark band count (24).
- */
- static emptyFft() {
- return Array(__classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES).length).fill(0);
- }
  /**
  * * Subscribes to a player event and returns `this` for chaining.
  *
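The compiled constructor above introduces a `disableAudioWorklet` option alongside the existing `volume` and `fft` options. A minimal usage sketch, assuming the flag is exposed on the public options type and that `EVIWebAudioPlayer` is importable from the package root (neither is shown in this diff):

    import { EVIWebAudioPlayer } from "hume"; // assumed import path

    const player = new EVIWebAudioPlayer({
        volume: 0.8,
        disableAudioWorklet: true, // opt into the Regular Buffer Mode fallback
        fft: { enabled: true },    // size/interval/transform fall back to the defaults resolved above
    });
    await player.init();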
@@ -128,9 +131,8 @@ class EVIWebAudioPlayer extends EventTarget {
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_ctx, new AudioContext(), "f");
  // Fail fast if AudioWorklet isn’t supported
  if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").audioWorklet) {
- const msg = "AudioWorklet is not supported in this browser";
- __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, msg);
- throw new Error(msg);
+ console.warn("AudioWorklet is not supported in this browser. Falling back to Regular Buffer Mode.");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_disableAudioWorklet, true, "f");
  }
  try {
  // Build GainNode
@@ -141,26 +143,42 @@ class EVIWebAudioPlayer extends EventTarget {
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_analyserNode, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").createAnalyser(), "f");
  __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f").fftSize = __classPrivateFieldGet(this, _EVIWebAudioPlayer_fftOptions, "f").size;
  }
- // Loads the AudioWorklet processor module.
- yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").audioWorklet.addModule(__classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_WORKLET_URL));
- // Build AudioWorkletNode
- __classPrivateFieldSet(this, _EVIWebAudioPlayer_workletNode, new AudioWorkletNode(__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f"), "audio-processor"), "f");
- // When the worklet posts { type: "ended" }, mark playback stopped and emit a `'stop'` event.
- __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f").port.onmessage = (e) => {
- if (e.data.type === "ended") {
- __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
- this.dispatchEvent(new CustomEvent("stop", { detail: { id: "stream" } }));
- }
- };
- // Audio graph nodes
- const workletNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f"); // AudioWorkletNode (PCM processor)
- const analyserNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f"); // Optional AnalyserNode (FFT)
- const gainNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f"); // GainNode (volume control)
- const destination = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").destination; // AudioDestinationNode (speakers)
- // Analyser node is filtered out of audio graph if null (FFT disabled)
- const audioGraph = [workletNode, analyserNode, gainNode, destination].filter(Boolean);
- // Wire nodes: AudioWorkletNode → (AnalyserNode?) → GainNode → AudioDestinationNode
- audioGraph.reduce((prev, next) => (prev.connect(next), next));
+ else {
+ // Always create AnalyserNode, even if FFT is disabled, to avoid null checks in Buffer Mode
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_analyserNode, __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").createAnalyser(), "f");
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f").fftSize = __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_FFT_SIZE);
+ }
+ if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_disableAudioWorklet, "f")) {
+ // Loads the AudioWorklet processor module.
+ yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").audioWorklet.addModule(__classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_DEFAULT_WORKLET_URL));
+ // Build AudioWorkletNode
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_workletNode, new AudioWorkletNode(__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f"), "audio-processor"), "f");
+ // When the worklet posts { type: "ended" }, mark playback stopped and emit a `'stop'` event.
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f").port.onmessage = (e) => {
+ if (e.data.type === "ended") {
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
+ this.dispatchEvent(new CustomEvent("stop", { detail: { id: "stream" } }));
+ }
+ };
+ // Audio graph nodes
+ const workletNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f"); // AudioWorkletNode (PCM processor)
+ const analyserNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f"); // Optional AnalyserNode (FFT)
+ const gainNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f"); // GainNode (volume control)
+ const destination = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").destination; // AudioDestinationNode (speakers)
+ // Analyser node is filtered out of audio graph if null (FFT disabled)
+ const audioGraph = [workletNode, analyserNode, gainNode, destination].filter(Boolean);
+ // Wire nodes: AudioWorkletNode → (AnalyserNode?) → GainNode → AudioDestinationNode
+ audioGraph.reduce((prev, next) => (prev.connect(next), next));
+ }
+ else {
+ // Regular Buffer Mode
+ const analyserNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f");
+ const gainNode = __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f");
+ const destination = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").destination;
+ // Wire nodes: (AnalyserNode?) → GainNode → AudioDestinationNode
+ const audioGraph = [analyserNode, gainNode, destination].filter(Boolean);
+ audioGraph.reduce((prev, next) => (prev.connect(next), next));
+ }
  // If an analyser is configured, begin polling it at the resolved interval and dispatching `'fft'` events for each frame.
  __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_startAnalyserPollingIfEnabled).call(this);
  // Resume the AudioContext now that the audio graph is fully wired.
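Both wiring branches use the same reduce-over-connect idiom to chain the audio graph. A standalone sketch of the pattern with placeholder node names (not SDK code):

    // Connect a chain of Web Audio nodes left to right, skipping any that are null.
    const nodes = [sourceNode, analyserNode, gainNode, ctx.destination]
        .filter((n): n is AudioNode => Boolean(n));
    nodes.reduce((prev, next) => (prev.connect(next), next));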
@@ -191,19 +209,49 @@ class EVIWebAudioPlayer extends EventTarget {
  __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, "Audio player is not initialized");
  return;
  }
- try {
- const { data, id } = message;
- const blob = (0, convertBase64ToBlob_1.convertBase64ToBlob)(data);
- const buffer = yield blob.arrayBuffer();
- const audio = yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").decodeAudioData(buffer);
- const pcmData = audio.getChannelData(0);
- __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f").port.postMessage({ type: "audio", data: pcmData });
- __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, true, "f");
- this.dispatchEvent(new CustomEvent("play", { detail: { id } }));
+ if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_disableAudioWorklet, "f")) {
+ try {
+ const { data, id } = message;
+ const blob = (0, convertBase64ToBlob_1.convertBase64ToBlob)(data);
+ const buffer = yield blob.arrayBuffer();
+ const audio = yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").decodeAudioData(buffer);
+ const pcmData = audio.getChannelData(0);
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f").port.postMessage({ type: "audio", data: pcmData });
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, true, "f");
+ this.dispatchEvent(new CustomEvent("play", { detail: { id } }));
+ }
+ catch (err) {
+ const msg = err instanceof Error ? err.message : "Unknown error";
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Failed to queue clip: ${msg}`);
+ }
  }
- catch (err) {
- const msg = err instanceof Error ? err.message : "Unknown error";
- __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Failed to queue clip: ${msg}`);
+ else {
+ // Regular Buffer Mode
+ try {
+ const audioBuffer = yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_convertToAudioBuffer).call(this, message);
+ if (!audioBuffer) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, "Failed to convert data to audio buffer");
+ return;
+ }
+ const playableBuffers = __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_getNextAudioBuffers).call(this, message, audioBuffer);
+ if (playableBuffers.length === 0) {
+ return;
+ }
+ for (const nextAudioBufferToPlay of playableBuffers) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_clipQueue, "f").push({
+ id: nextAudioBufferToPlay.id,
+ buffer: nextAudioBufferToPlay.buffer,
+ index: nextAudioBufferToPlay.index,
+ });
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_clipQueue, "f").length === 1) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_playNextClip).call(this);
+ }
+ }
+ }
+ catch (e) {
+ const eMessage = e instanceof Error ? e.message : "Unknown error";
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Failed to add clip to queue: ${eMessage}`);
+ }
  }
  });
  }
@@ -212,8 +260,20 @@ class EVIWebAudioPlayer extends EventTarget {
  */
  stop() {
  var _b;
- // Clear buffered audio from the worklet queue
- (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _b === void 0 ? void 0 : _b.port.postMessage({ type: "fadeAndClear" });
+ if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_disableAudioWorklet, "f")) {
+ // Clear buffered audio from the worklet queue
+ (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _b === void 0 ? void 0 : _b.port.postMessage({ type: "fadeAndClear" });
+ }
+ else {
+ // Regular Buffer mode
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, "f")) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, "f").stop();
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, "f").disconnect();
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, null, "f");
+ }
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_clipQueue, [], "f");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_isProcessing, false, "f");
+ }
  // Restart analyser polling so fft events continue after stopping or clearing the queue
  __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_startAnalyserPollingIfEnabled).call(this);
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
@@ -262,29 +322,32 @@ class EVIWebAudioPlayer extends EventTarget {
  clearInterval(__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f"));
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_fftTimer, null, "f");
  }
- (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _b === void 0 ? void 0 : _b.port.postMessage({ type: "fadeAndClear" });
- (_c = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _c === void 0 ? void 0 : _c.port.postMessage({ type: "end" });
- (_d = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _d === void 0 ? void 0 : _d.port.close();
- (_e = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _e === void 0 ? void 0 : _e.disconnect();
+ if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_disableAudioWorklet, "f")) {
+ (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _b === void 0 ? void 0 : _b.port.postMessage({ type: "fadeAndClear" });
+ (_c = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _c === void 0 ? void 0 : _c.port.postMessage({ type: "end" });
+ (_d = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _d === void 0 ? void 0 : _d.port.close();
+ (_e = __classPrivateFieldGet(this, _EVIWebAudioPlayer_workletNode, "f")) === null || _e === void 0 ? void 0 : _e.disconnect();
+ }
+ else {
+ // Regular Buffer mode
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, "f")) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, "f").stop();
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, "f").disconnect();
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, null, "f");
+ }
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_clipQueue, [], "f");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_isProcessing, false, "f");
+ }
  (_f = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f")) === null || _f === void 0 ? void 0 : _f.disconnect();
  (_g = __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f")) === null || _g === void 0 ? void 0 : _g.disconnect();
  (_h = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f")) === null || _h === void 0 ? void 0 : _h.close().catch(() => void 0);
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_initialized, false, "f");
  __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
- __classPrivateFieldSet(this, _EVIWebAudioPlayer_fft, _a.emptyFft(), "f");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_fft, (0, generateEmptyFft_1.generateEmptyFft)(), "f");
  }
  }
  exports.EVIWebAudioPlayer = EVIWebAudioPlayer;
- _a = EVIWebAudioPlayer, _EVIWebAudioPlayer_ctx = new WeakMap(), _EVIWebAudioPlayer_workletNode = new WeakMap(), _EVIWebAudioPlayer_analyserNode = new WeakMap(), _EVIWebAudioPlayer_gainNode = new WeakMap(), _EVIWebAudioPlayer_initialized = new WeakMap(), _EVIWebAudioPlayer_playing = new WeakMap(), _EVIWebAudioPlayer_muted = new WeakMap(), _EVIWebAudioPlayer_volume = new WeakMap(), _EVIWebAudioPlayer_fft = new WeakMap(), _EVIWebAudioPlayer_fftTimer = new WeakMap(), _EVIWebAudioPlayer_fftOptions = new WeakMap(), _EVIWebAudioPlayer_instances = new WeakSet(), _EVIWebAudioPlayer_linearHzToBark = function _EVIWebAudioPlayer_linearHzToBark(linearData, sampleRate) {
- const maxFrequency = sampleRate / 2;
- const frequencyResolution = maxFrequency / linearData.length;
- return __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES).map((barkFreq) => {
- var _b;
- const linearDataIndex = Math.round(barkFreq / frequencyResolution);
- const magnitude = (_b = linearData[linearDataIndex]) !== null && _b !== void 0 ? _b : 0;
- return (magnitude / __classPrivateFieldGet(_a, _a, "f", _EVIWebAudioPlayer_BYTE_MAX)) * 2;
- });
- }, _EVIWebAudioPlayer_startAnalyserPollingIfEnabled = function _EVIWebAudioPlayer_startAnalyserPollingIfEnabled() {
+ _a = EVIWebAudioPlayer, _EVIWebAudioPlayer_ctx = new WeakMap(), _EVIWebAudioPlayer_workletNode = new WeakMap(), _EVIWebAudioPlayer_analyserNode = new WeakMap(), _EVIWebAudioPlayer_gainNode = new WeakMap(), _EVIWebAudioPlayer_initialized = new WeakMap(), _EVIWebAudioPlayer_playing = new WeakMap(), _EVIWebAudioPlayer_muted = new WeakMap(), _EVIWebAudioPlayer_volume = new WeakMap(), _EVIWebAudioPlayer_disableAudioWorklet = new WeakMap(), _EVIWebAudioPlayer_fft = new WeakMap(), _EVIWebAudioPlayer_fftTimer = new WeakMap(), _EVIWebAudioPlayer_fftOptions = new WeakMap(), _EVIWebAudioPlayer_clipQueue = new WeakMap(), _EVIWebAudioPlayer_currentlyPlayingAudioBuffer = new WeakMap(), _EVIWebAudioPlayer_isProcessing = new WeakMap(), _EVIWebAudioPlayer_lastQueuedChunk = new WeakMap(), _EVIWebAudioPlayer_chunkBufferQueues = new WeakMap(), _EVIWebAudioPlayer_instances = new WeakSet(), _EVIWebAudioPlayer_startAnalyserPollingIfEnabled = function _EVIWebAudioPlayer_startAnalyserPollingIfEnabled() {
  if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftOptions, "f") || !__classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f"))
  return;
  if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f"))
@@ -298,6 +361,114 @@ _a = EVIWebAudioPlayer, _EVIWebAudioPlayer_ctx = new WeakMap(), _EVIWebAudioPlay
  }, interval), "f");
  }, _EVIWebAudioPlayer_emitError = function _EVIWebAudioPlayer_emitError(message) {
  this.dispatchEvent(new CustomEvent("error", { detail: { message } }));
+ }, _EVIWebAudioPlayer_convertToAudioBuffer = function _EVIWebAudioPlayer_convertToAudioBuffer(message) {
+ return __awaiter(this, void 0, void 0, function* () {
+ if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_initialized, "f") || !__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f")) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, "Audio player has not been initialized");
+ return;
+ }
+ const blob = (0, convertBase64ToBlob_1.convertBase64ToBlob)(message.data);
+ const arrayBuffer = yield blob.arrayBuffer();
+ const audioBuffer = yield __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").decodeAudioData(arrayBuffer);
+ return audioBuffer;
+ });
+ }, _EVIWebAudioPlayer_getNextAudioBuffers = function _EVIWebAudioPlayer_getNextAudioBuffers(message, audioBuffer) {
+ var _b, _c;
+ // Prevent prototype pollution by restricting dangerous property names.
+ if (message.id === "__proto__" || message.id === "constructor" || message.id === "prototype") {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Attempted to use a dangerous property name as message ID: ${message.id}`);
+ return [];
+ }
+ // 1. Add the current buffer to the queue
+ if (!__classPrivateFieldGet(this, _EVIWebAudioPlayer_chunkBufferQueues, "f")[message.id]) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_chunkBufferQueues, "f")[message.id] = [];
+ }
+ // Ensure message.index is a safe, non-negative integer to prevent prototype pollution.
+ if (!Number.isInteger(message.index) || message.index < 0) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, `Attempted to use an invalid index: ${message.index}`);
+ return [];
+ }
+ const queueForCurrMessage = __classPrivateFieldGet(this, _EVIWebAudioPlayer_chunkBufferQueues, "f")[message.id] || [];
+ queueForCurrMessage[message.index] = audioBuffer;
+ // 2. Now collect buffers that are ready to be played
+ const lastId = (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_lastQueuedChunk, "f")) === null || _b === void 0 ? void 0 : _b.id;
+ const buffers = [];
+ // If the current message ID is different from the last one that was added
+ // to the queue, that means that we're playing a new message now, so the first chunk
+ // we play needs to be at index 0.
+ if (message.id !== lastId) {
+ if (queueForCurrMessage[0]) {
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_lastQueuedChunk, { id: message.id, index: 0 }, "f");
+ buffers.push({
+ id: message.id,
+ index: 0,
+ buffer: queueForCurrMessage[0],
+ });
+ queueForCurrMessage[0] = undefined;
+ }
+ else {
+ return [];
+ }
+ }
+ // Drain the queue - basically if any chunks were received out of order previously,
+ // and they're now ready to be played because the earlier chunks
+ // have been received, we can add them to the buffers array.
+ let nextIdx = (((_c = __classPrivateFieldGet(this, _EVIWebAudioPlayer_lastQueuedChunk, "f")) === null || _c === void 0 ? void 0 : _c.index) || 0) + 1;
+ let nextBuf = queueForCurrMessage[nextIdx];
+ while (nextBuf) {
+ buffers.push({ index: nextIdx, buffer: nextBuf, id: message.id });
+ queueForCurrMessage[nextIdx] = undefined;
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_lastQueuedChunk, { id: message.id, index: nextIdx }, "f");
+ nextIdx += 1;
+ nextBuf = queueForCurrMessage[nextIdx];
+ }
+ return buffers;
+ }, _EVIWebAudioPlayer_playNextClip = function _EVIWebAudioPlayer_playNextClip() {
+ var _b, _c;
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_clipQueue, "f").length === 0 || __classPrivateFieldGet(this, _EVIWebAudioPlayer_isProcessing, "f")) {
+ return;
+ }
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f") === null || __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f") === null) {
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_emitError).call(this, "Audio player is not initialized");
+ return;
+ }
+ const nextClip = __classPrivateFieldGet(this, _EVIWebAudioPlayer_clipQueue, "f").shift();
+ if (!nextClip) {
+ return;
+ }
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_isProcessing, true, "f");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, true, "f");
+ const bufferSource = __classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").createBufferSource();
+ bufferSource.buffer = nextClip.buffer;
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f")) {
+ bufferSource.connect(__classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f"));
+ }
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f")) {
+ (_b = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f")) === null || _b === void 0 ? void 0 : _b.connect(__classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f"));
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_gainNode, "f").connect(__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").destination);
+ }
+ else {
+ (_c = __classPrivateFieldGet(this, _EVIWebAudioPlayer_analyserNode, "f")) === null || _c === void 0 ? void 0 : _c.connect(__classPrivateFieldGet(this, _EVIWebAudioPlayer_ctx, "f").destination);
+ }
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, bufferSource, "f");
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_startAnalyserPollingIfEnabled).call(this);
+ bufferSource.start(0);
+ if (nextClip.index === 0) {
+ this.dispatchEvent(new CustomEvent("play", { detail: { id: nextClip.id } }));
+ }
+ bufferSource.onended = () => {
+ if (__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f")) {
+ clearInterval(__classPrivateFieldGet(this, _EVIWebAudioPlayer_fftTimer, "f"));
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_fftTimer, null, "f");
+ }
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_fft, (0, generateEmptyFft_1.generateEmptyFft)(), "f");
+ bufferSource.disconnect();
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_isProcessing, false, "f");
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_playing, false, "f");
+ this.dispatchEvent(new CustomEvent("stop", { detail: { id: nextClip.id } }));
+ __classPrivateFieldSet(this, _EVIWebAudioPlayer_currentlyPlayingAudioBuffer, null, "f");
+ __classPrivateFieldGet(this, _EVIWebAudioPlayer_instances, "m", _EVIWebAudioPlayer_playNextClip).call(this);
+ };
  };
  /** Default URL of the `audio-worklet.js` processor module, fetched from Hume AI’s CDN. */
  _EVIWebAudioPlayer_DEFAULT_WORKLET_URL = { value: "https://storage.googleapis.com/evi-react-sdk-assets/audio-worklet-20250506.js" };
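The new `#getNextAudioBuffers` helper above holds decoded chunks until every lower index for the same message ID has arrived, then drains them in order. A simplified model of that rule (illustrative only, not the SDK's API):

    // Release buffered chunks strictly in index order, starting after the
    // last index that was handed to the playback queue.
    function drainInOrder(pending: Map<number, AudioBuffer>, lastQueuedIndex: number): AudioBuffer[] {
        const ready: AudioBuffer[] = [];
        let next = lastQueuedIndex + 1;
        while (pending.has(next)) {
            ready.push(pending.get(next)!);
            pending.delete(next);
            next += 1;
        }
        return ready;
    }

With chunks arriving as indices 1, 0, 2, the drain yields [] (index 0 still missing), then [0, 1], then [2].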
@@ -305,10 +476,3 @@ _EVIWebAudioPlayer_DEFAULT_WORKLET_URL = { value: "https://storage.googleapis.co
  _EVIWebAudioPlayer_DEFAULT_FFT_SIZE = { value: 2048 };
  /** Default analyser poll interval (16 ms). */
  _EVIWebAudioPlayer_DEFAULT_FFT_INTERVAL = { value: 16 };
- /** Bark‑scale center frequencies (hz) used by the default transform. https://en.wikipedia.org/wiki/Bark_scale */
- _EVIWebAudioPlayer_BARK_CENTER_FREQUENCIES = { value: [
- 50, 150, 250, 350, 450, 570, 700, 840, 1000, 1170, 1370, 1600, 1850, 2150, 2500, 2900, 3400, 4000, 4800, 5800,
- 7000, 8500, 10500, 13500,
- ] };
- /** Max byte magnitude (255) returned by `AnalyserNode.getByteFrequencyData`. */
- _EVIWebAudioPlayer_BYTE_MAX = { value: 255 };
@@ -0,0 +1 @@
+ export declare function convertLinearFrequenciesToBark(linearData: Uint8Array, sampleRate: number): number[];
@@ -0,0 +1,28 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.convertLinearFrequenciesToBark = convertLinearFrequenciesToBark;
+ // This function converts linear-scaled frequency decibels from an AnalyserNode's frequency data to Bark scale [https://en.wikipedia.org/wiki/Bark_scale]
+ // This implementation uses a simple approach of mapping indices in the linear-scaled array to the closest
+ // Bark scale center frequency and is not intended to be an accurate representation, but rather "close-enough" for visualization purposes
+ const barkCenterFrequencies = [
+ 50, 150, 250, 350, 450, 570, 700, 840, 1000, 1170, 1370, 1600, 1850, 2150, 2500, 2900, 3400, 4000, 4800, 5800, 7000,
+ 8500, 10500, 13500,
+ ]; // Center frequency value in Hz
+ // Min/max values from https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getByteFrequencyData
+ const minValue = 0;
+ const maxValue = 255;
+ function convertLinearFrequenciesToBark(linearData, sampleRate) {
+ const maxFrequency = sampleRate / 2;
+ const frequencyResolution = maxFrequency / linearData.length;
+ const barkFrequencies = barkCenterFrequencies.map((barkFreq) => {
+ var _a;
+ const linearDataIndex = Math.round(barkFreq / frequencyResolution);
+ if (linearDataIndex >= 0 && linearDataIndex < linearData.length) {
+ return ((((_a = linearData[linearDataIndex]) !== null && _a !== void 0 ? _a : 0) - minValue) / (maxValue - minValue)) * 2;
+ }
+ else {
+ return 0;
+ }
+ });
+ return barkFrequencies;
+ }
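The transform maps an `AnalyserNode`'s byte-frequency bins onto the 24 Bark bands. A minimal polling sketch (the deep-import path is an assumption; within the SDK the function backs EVIWebAudioPlayer's default FFT transform):

    import { convertLinearFrequenciesToBark } from "hume/wrapper/convertFrequencyScale"; // assumed path

    const ctx = new AudioContext();
    const analyser = ctx.createAnalyser();
    analyser.fftSize = 2048;
    const bins = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(bins);
    const fft = convertLinearFrequenciesToBark(bins, ctx.sampleRate); // 24 magnitudes scaled to roughly [0, 2]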
@@ -0,0 +1 @@
+ export declare function generateEmptyFft(): number[];
@@ -0,0 +1,6 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.generateEmptyFft = generateEmptyFft;
+ function generateEmptyFft() {
+ return Array.from({ length: 24 }).map(() => 0);
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "hume",
- "version": "0.13.2",
+ "version": "0.13.4",
  "private": false,
  "repository": "https://github.com/HumeAI/hume-typescript-sdk",
  "main": "./index.js",
@@ -20,6 +20,7 @@
  "qs": "^6.13.1",
  "readable-stream": "^4.5.2",
  "ws": "^8.14.2",
+ "isomorphic-ws": "^5.0.0",
  "uuid": "9.0.1",
  "zod": "^3.23.8"
  },
@@ -0,0 +1,19 @@
+ /**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+ import * as serializers from "../../../index";
+ import * as Hume from "../../../../api/index";
+ import * as core from "../../../../core";
+ import { PostedUtteranceVoice } from "./PostedUtteranceVoice";
+ export declare const PublishTts: core.serialization.ObjectSchema<serializers.tts.PublishTts.Raw, Hume.tts.PublishTts>;
+ export declare namespace PublishTts {
+ interface Raw {
+ text?: string | null;
+ description?: string | null;
+ voice?: PostedUtteranceVoice.Raw | null;
+ speed?: number | null;
+ trailing_silence?: number | null;
+ flush?: boolean | null;
+ close?: boolean | null;
+ }
+ }
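For reference, an illustrative wire payload matching the `PublishTts.Raw` shape above (field values are made up; `trailing_silence` is assumed to be in seconds):

    const message = {
        text: "Hello from the TTS input stream.",
        speed: 1.0,
        trailing_silence: 0.35, // assumed unit: seconds
        flush: true,            // request synthesis of everything buffered so far
    };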
@@ -0,0 +1,50 @@
+ "use strict";
+ /**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.PublishTts = void 0;
+ const core = __importStar(require("../../../../core"));
+ const PostedUtteranceVoice_1 = require("./PostedUtteranceVoice");
+ exports.PublishTts = core.serialization.object({
+ text: core.serialization.string().optional(),
+ description: core.serialization.string().optional(),
+ voice: PostedUtteranceVoice_1.PostedUtteranceVoice.optional(),
+ speed: core.serialization.number().optional(),
+ trailingSilence: core.serialization.property("trailing_silence", core.serialization.number().optional()),
+ flush: core.serialization.boolean().optional(),
+ close: core.serialization.boolean().optional(),
+ });
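The schema maps the SDK-side camelCase field `trailingSilence` to the snake_case wire property `trailing_silence`. A hedged serialization sketch using Fern's generated schema methods (`jsonOrThrow` is standard on Fern core.serialization schemas; the import path is an assumption):

    import * as serializers from "hume/serialization"; // assumed path

    const raw = serializers.tts.PublishTts.jsonOrThrow({
        text: "Hi",
        trailingSilence: 0.35, // emitted on the wire as trailing_silence
    });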
@@ -18,6 +18,6 @@ export declare namespace SnippetAudioChunk {
  audio_format: AudioFormatType.Raw;
  is_last_chunk: boolean;
  utterance_index?: number | null;
- snippet: Snippet.Raw;
+ snippet?: Snippet.Raw | null;
  }
  }
@@ -50,5 +50,5 @@ exports.SnippetAudioChunk = core.serialization.object({
  audioFormat: core.serialization.property("audio_format", AudioFormatType_1.AudioFormatType),
  isLastChunk: core.serialization.property("is_last_chunk", core.serialization.boolean()),
  utteranceIndex: core.serialization.property("utterance_index", core.serialization.number().optional()),
- snippet: Snippet_1.Snippet,
+ snippet: Snippet_1.Snippet.optional(),
  });
@@ -1,7 +1,13 @@
+ export * from "./PublishTts";
+ export * from "./PostedUtteranceVoiceWithId";
+ export * from "./PostedUtteranceVoiceWithName";
+ export * from "./VoiceProvider";
+ export * from "./PostedUtteranceVoice";
+ export * from "./AudioFormatType";
+ export * from "./SnippetAudioChunk";
  export * from "./PostedContextWithGenerationId";
  export * from "./PostedContextWithUtterances";
  export * from "./AudioEncoding";
- export * from "./AudioFormatType";
  export * from "./ReturnGeneration";
  export * from "./HttpValidationError";
  export * from "./FormatMp3";
@@ -12,14 +18,9 @@ export * from "./ReturnTts";
  export * from "./ReturnVoice";
  export * from "./FormatPcm";
  export * from "./Snippet";
- export * from "./SnippetAudioChunk";
  export * from "./PostedUtterance";
  export * from "./ValidationErrorLocItem";
  export * from "./ValidationError";
- export * from "./PostedUtteranceVoiceWithId";
- export * from "./PostedUtteranceVoiceWithName";
- export * from "./VoiceProvider";
- export * from "./PostedUtteranceVoice";
  export * from "./FormatWav";
  export * from "./ErrorResponse";
  export * from "./ReturnPagedVoices";
@@ -14,10 +14,16 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
+ __exportStar(require("./PublishTts"), exports);
+ __exportStar(require("./PostedUtteranceVoiceWithId"), exports);
+ __exportStar(require("./PostedUtteranceVoiceWithName"), exports);
+ __exportStar(require("./VoiceProvider"), exports);
+ __exportStar(require("./PostedUtteranceVoice"), exports);
+ __exportStar(require("./AudioFormatType"), exports);
+ __exportStar(require("./SnippetAudioChunk"), exports);
  __exportStar(require("./PostedContextWithGenerationId"), exports);
  __exportStar(require("./PostedContextWithUtterances"), exports);
  __exportStar(require("./AudioEncoding"), exports);
- __exportStar(require("./AudioFormatType"), exports);
  __exportStar(require("./ReturnGeneration"), exports);
  __exportStar(require("./HttpValidationError"), exports);
  __exportStar(require("./FormatMp3"), exports);
@@ -28,14 +34,9 @@ __exportStar(require("./ReturnTts"), exports);
  __exportStar(require("./ReturnVoice"), exports);
  __exportStar(require("./FormatPcm"), exports);
  __exportStar(require("./Snippet"), exports);
- __exportStar(require("./SnippetAudioChunk"), exports);
  __exportStar(require("./PostedUtterance"), exports);
  __exportStar(require("./ValidationErrorLocItem"), exports);
  __exportStar(require("./ValidationError"), exports);
- __exportStar(require("./PostedUtteranceVoiceWithId"), exports);
- __exportStar(require("./PostedUtteranceVoiceWithName"), exports);
- __exportStar(require("./VoiceProvider"), exports);
- __exportStar(require("./PostedUtteranceVoice"), exports);
  __exportStar(require("./FormatWav"), exports);
  __exportStar(require("./ErrorResponse"), exports);
  __exportStar(require("./ReturnPagedVoices"), exports);
package/version.d.ts CHANGED
@@ -1 +1 @@
- export declare const SDK_VERSION = "0.13.2";
+ export declare const SDK_VERSION = "0.13.4";