hume 0.13.1 → 0.13.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/.mock/definition/empathic-voice/__package__.yml +39 -30
  2. package/.mock/definition/tts/__package__.yml +85 -47
  3. package/.mock/definition/tts/voices.yml +9 -9
  4. package/api/resources/empathicVoice/types/ContextType.d.ts +2 -2
  5. package/api/resources/empathicVoice/types/ContextType.js +1 -1
  6. package/api/resources/empathicVoice/types/JsonMessage.d.ts +1 -1
  7. package/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  8. package/api/resources/empathicVoice/types/ReturnVoice.d.ts +12 -0
  9. package/api/resources/empathicVoice/types/ReturnVoice.js +5 -0
  10. package/api/resources/empathicVoice/types/VoiceProvider.d.ts +1 -2
  11. package/api/resources/empathicVoice/types/VoiceProvider.js +0 -1
  12. package/api/resources/empathicVoice/types/index.d.ts +4 -3
  13. package/api/resources/empathicVoice/types/index.js +4 -3
  14. package/api/resources/tts/client/Client.d.ts +5 -5
  15. package/api/resources/tts/client/Client.js +5 -5
  16. package/api/resources/tts/types/SnippetAudioChunk.d.ts +20 -0
  17. package/dist/api/resources/empathicVoice/types/ContextType.d.ts +2 -2
  18. package/dist/api/resources/empathicVoice/types/ContextType.js +1 -1
  19. package/dist/api/resources/empathicVoice/types/JsonMessage.d.ts +1 -1
  20. package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  21. package/dist/api/resources/empathicVoice/types/ReturnVoice.d.ts +12 -0
  22. package/dist/api/resources/empathicVoice/types/ReturnVoice.js +5 -0
  23. package/dist/api/resources/empathicVoice/types/VoiceProvider.d.ts +1 -2
  24. package/dist/api/resources/empathicVoice/types/VoiceProvider.js +0 -1
  25. package/dist/api/resources/empathicVoice/types/index.d.ts +4 -3
  26. package/dist/api/resources/empathicVoice/types/index.js +4 -3
  27. package/dist/api/resources/tts/client/Client.d.ts +5 -5
  28. package/dist/api/resources/tts/client/Client.js +5 -5
  29. package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +20 -0
  30. package/dist/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  31. package/dist/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  32. package/dist/serialization/resources/empathicVoice/types/JsonMessage.d.ts +2 -2
  33. package/dist/serialization/resources/empathicVoice/types/JsonMessage.js +2 -2
  34. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  35. package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +2 -1
  36. package/dist/serialization/resources/empathicVoice/types/ReturnVoice.d.ts +15 -0
  37. package/dist/serialization/resources/empathicVoice/types/ReturnVoice.js +46 -0
  38. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  39. package/dist/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  40. package/dist/serialization/resources/empathicVoice/types/index.d.ts +4 -3
  41. package/dist/serialization/resources/empathicVoice/types/index.js +4 -3
  42. package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +12 -0
  43. package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +14 -1
  44. package/dist/version.d.ts +1 -1
  45. package/dist/version.js +1 -1
  46. package/package.json +1 -1
  47. package/reference.md +14 -14
  48. package/serialization/resources/empathicVoice/types/ContextType.d.ts +1 -1
  49. package/serialization/resources/empathicVoice/types/ContextType.js +1 -1
  50. package/serialization/resources/empathicVoice/types/JsonMessage.d.ts +2 -2
  51. package/serialization/resources/empathicVoice/types/JsonMessage.js +2 -2
  52. package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +2 -1
  53. package/serialization/resources/empathicVoice/types/ReturnConfig.js +2 -1
  54. package/serialization/resources/empathicVoice/types/ReturnVoice.d.ts +15 -0
  55. package/serialization/resources/empathicVoice/types/ReturnVoice.js +46 -0
  56. package/serialization/resources/empathicVoice/types/VoiceProvider.d.ts +1 -1
  57. package/serialization/resources/empathicVoice/types/VoiceProvider.js +1 -1
  58. package/serialization/resources/empathicVoice/types/index.d.ts +4 -3
  59. package/serialization/resources/empathicVoice/types/index.js +4 -3
  60. package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +12 -0
  61. package/serialization/resources/tts/types/SnippetAudioChunk.js +14 -1
  62. package/version.d.ts +1 -1
  63. package/version.js +1 -1
@@ -1,5 +1,25 @@
1
1
  /**
2
2
  * This file was auto-generated by Fern from our API Definition.
3
3
  */
4
+ import * as Hume from "../../../index";
4
5
  export interface SnippetAudioChunk {
6
+ /** The generation ID of the parent snippet that this chunk corresponds to. */
7
+ generationId: string;
8
+ /** The ID of the parent snippet that this chunk corresponds to. */
9
+ snippetId: string;
10
+ /** The text of the parent snippet that this chunk corresponds to. */
11
+ text: string;
12
+ /** The transcribed text of the generated audio of the parent snippet that this chunk corresponds to. It is only present if `instant_mode` is set to `false`. */
13
+ transcribedText?: string;
14
+ /** The index of the audio chunk in the snippet. */
15
+ chunkIndex: number;
16
+ /** The generated audio output chunk in the requested format. */
17
+ audio: string;
18
+ /** The generated audio output format. */
19
+ audioFormat: Hume.tts.AudioFormatType;
20
+ /** Whether or not this is the last chunk streamed back from the decoder for one input snippet. */
21
+ isLastChunk: boolean;
22
+ /** The index of the utterance in the request that the parent snippet of this chunk corresponds to. */
23
+ utteranceIndex?: number;
24
+ snippet: Hume.tts.Snippet;
5
25
  }
@@ -1,8 +1,8 @@
1
1
  /**
2
2
  * This file was auto-generated by Fern from our API Definition.
3
3
  */
4
- export type ContextType = "temporary" | "persistent";
4
+ export type ContextType = "persistent" | "temporary";
5
5
  export declare const ContextType: {
6
- readonly Temporary: "temporary";
7
6
  readonly Persistent: "persistent";
7
+ readonly Temporary: "temporary";
8
8
  };
@@ -5,6 +5,6 @@
5
5
  Object.defineProperty(exports, "__esModule", { value: true });
6
6
  exports.ContextType = void 0;
7
7
  exports.ContextType = {
8
- Temporary: "temporary",
9
8
  Persistent: "persistent",
9
+ Temporary: "temporary",
10
10
  };
@@ -2,4 +2,4 @@
2
2
  * This file was auto-generated by Fern from our API Definition.
3
3
  */
4
4
  import * as Hume from "../../../index";
5
- export type JsonMessage = Hume.empathicVoice.AssistantEnd | Hume.empathicVoice.AssistantMessage | Hume.empathicVoice.ChatMetadata | Hume.empathicVoice.WebSocketError | Hume.empathicVoice.UserInterruption | Hume.empathicVoice.UserMessage | Hume.empathicVoice.ToolCallMessage | Hume.empathicVoice.ToolResponseMessage | Hume.empathicVoice.ToolErrorMessage | Hume.empathicVoice.AssistantProsody;
5
+ export type JsonMessage = Hume.empathicVoice.AssistantEnd | Hume.empathicVoice.AssistantMessage | Hume.empathicVoice.AssistantProsody | Hume.empathicVoice.ChatMetadata | Hume.empathicVoice.WebSocketError | Hume.empathicVoice.UserInterruption | Hume.empathicVoice.UserMessage | Hume.empathicVoice.ToolCallMessage | Hume.empathicVoice.ToolResponseMessage | Hume.empathicVoice.ToolErrorMessage;
@@ -45,7 +45,8 @@ export interface ReturnConfig {
45
45
  * Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody.
46
46
  */
47
47
  ellmModel?: Hume.empathicVoice.ReturnEllmModel;
48
- voice?: unknown;
48
+ /** A voice specification associated with this Config. */
49
+ voice?: Hume.empathicVoice.ReturnVoice;
49
50
  prompt?: Hume.empathicVoice.ReturnPrompt;
50
51
  /** Map of webhooks associated with this config. */
51
52
  webhooks?: (Hume.empathicVoice.ReturnWebhookSpec | undefined)[];
@@ -0,0 +1,12 @@
1
+ /**
2
+ * This file was auto-generated by Fern from our API Definition.
3
+ */
4
+ import * as Hume from "../../../index";
5
+ /**
6
+ * An Octave voice available for text-to-speech
7
+ */
8
+ export interface ReturnVoice {
9
+ id?: string;
10
+ name?: string;
11
+ provider?: Hume.empathicVoice.VoiceProvider;
12
+ }
@@ -0,0 +1,5 @@
1
+ "use strict";
2
+ /**
3
+ * This file was auto-generated by Fern from our API Definition.
4
+ */
5
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -1,9 +1,8 @@
1
1
  /**
2
2
  * This file was auto-generated by Fern from our API Definition.
3
3
  */
4
- export type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE" | "OCTAVE_COMBINED";
4
+ export type VoiceProvider = "HUME_AI" | "CUSTOM_VOICE";
5
5
  export declare const VoiceProvider: {
6
6
  readonly HumeAi: "HUME_AI";
7
7
  readonly CustomVoice: "CUSTOM_VOICE";
8
- readonly OctaveCombined: "OCTAVE_COMBINED";
9
8
  };
@@ -7,5 +7,4 @@ exports.VoiceProvider = void 0;
7
7
  exports.VoiceProvider = {
8
8
  HumeAi: "HUME_AI",
9
9
  CustomVoice: "CUSTOM_VOICE",
10
- OctaveCombined: "OCTAVE_COMBINED",
11
10
  };
@@ -38,6 +38,8 @@ export * from "./LanguageModelType";
38
38
  export * from "./ModelProviderEnum";
39
39
  export * from "./ValidationErrorLocItem";
40
40
  export * from "./ValidationError";
41
+ export * from "./VoiceId";
42
+ export * from "./VoiceName";
41
43
  export * from "./WebhookEventChatEnded";
42
44
  export * from "./WebhookEventChatStartType";
43
45
  export * from "./WebhookEventChatStarted";
@@ -102,7 +104,6 @@ export * from "./PostedEventMessageSpec";
102
104
  export * from "./PostedTimeoutSpec";
103
105
  export * from "./ReturnEventMessageSpec";
104
106
  export * from "./ReturnTimeoutSpec";
105
- export * from "./VoiceProvider";
106
- export * from "./VoiceId";
107
- export * from "./VoiceName";
108
107
  export * from "./VoiceRef";
108
+ export * from "./ReturnVoice";
109
+ export * from "./VoiceProvider";
@@ -54,6 +54,8 @@ __exportStar(require("./LanguageModelType"), exports);
54
54
  __exportStar(require("./ModelProviderEnum"), exports);
55
55
  __exportStar(require("./ValidationErrorLocItem"), exports);
56
56
  __exportStar(require("./ValidationError"), exports);
57
+ __exportStar(require("./VoiceId"), exports);
58
+ __exportStar(require("./VoiceName"), exports);
57
59
  __exportStar(require("./WebhookEventChatEnded"), exports);
58
60
  __exportStar(require("./WebhookEventChatStartType"), exports);
59
61
  __exportStar(require("./WebhookEventChatStarted"), exports);
@@ -118,7 +120,6 @@ __exportStar(require("./PostedEventMessageSpec"), exports);
118
120
  __exportStar(require("./PostedTimeoutSpec"), exports);
119
121
  __exportStar(require("./ReturnEventMessageSpec"), exports);
120
122
  __exportStar(require("./ReturnTimeoutSpec"), exports);
121
- __exportStar(require("./VoiceProvider"), exports);
122
- __exportStar(require("./VoiceId"), exports);
123
- __exportStar(require("./VoiceName"), exports);
124
123
  __exportStar(require("./VoiceRef"), exports);
124
+ __exportStar(require("./ReturnVoice"), exports);
125
+ __exportStar(require("./VoiceProvider"), exports);
@@ -44,10 +44,6 @@ export declare class Tts {
44
44
  *
45
45
  * @example
46
46
  * await client.tts.synthesizeJson({
47
- * utterances: [{
48
- * text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
49
- * description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
50
- * }],
51
47
  * context: {
52
48
  * utterances: [{
53
49
  * text: "How can people see beauty so differently?",
@@ -57,7 +53,11 @@ export declare class Tts {
57
53
  * format: {
58
54
  * type: "mp3"
59
55
  * },
60
- * numGenerations: 1
56
+ * numGenerations: 1,
57
+ * utterances: [{
58
+ * text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
59
+ * description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
60
+ * }]
61
61
  * })
62
62
  */
63
63
  synthesizeJson(request: Hume.tts.PostedTts, requestOptions?: Tts.RequestOptions): core.HttpResponsePromise<Hume.tts.ReturnTts>;
@@ -77,10 +77,6 @@ class Tts {
77
77
  *
78
78
  * @example
79
79
  * await client.tts.synthesizeJson({
80
- * utterances: [{
81
- * text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
82
- * description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
83
- * }],
84
80
  * context: {
85
81
  * utterances: [{
86
82
  * text: "How can people see beauty so differently?",
@@ -90,7 +86,11 @@ class Tts {
90
86
  * format: {
91
87
  * type: "mp3"
92
88
  * },
93
- * numGenerations: 1
89
+ * numGenerations: 1,
90
+ * utterances: [{
91
+ * text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
92
+ * description: "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality."
93
+ * }]
94
94
  * })
95
95
  */
96
96
  synthesizeJson(request, requestOptions) {
@@ -1,5 +1,25 @@
1
1
  /**
2
2
  * This file was auto-generated by Fern from our API Definition.
3
3
  */
4
+ import * as Hume from "../../../index";
4
5
  export interface SnippetAudioChunk {
6
+ /** The generation ID of the parent snippet that this chunk corresponds to. */
7
+ generationId: string;
8
+ /** The ID of the parent snippet that this chunk corresponds to. */
9
+ snippetId: string;
10
+ /** The text of the parent snippet that this chunk corresponds to. */
11
+ text: string;
12
+ /** The transcribed text of the generated audio of the parent snippet that this chunk corresponds to. It is only present if `instant_mode` is set to `false`. */
13
+ transcribedText?: string;
14
+ /** The index of the audio chunk in the snippet. */
15
+ chunkIndex: number;
16
+ /** The generated audio output chunk in the requested format. */
17
+ audio: string;
18
+ /** The generated audio output format. */
19
+ audioFormat: Hume.tts.AudioFormatType;
20
+ /** Whether or not this is the last chunk streamed back from the decoder for one input snippet. */
21
+ isLastChunk: boolean;
22
+ /** The index of the utterance in the request that the parent snippet of this chunk corresponds to. */
23
+ utteranceIndex?: number;
24
+ snippet: Hume.tts.Snippet;
5
25
  }
@@ -6,5 +6,5 @@ import * as Hume from "../../../../api/index";
6
6
  import * as core from "../../../../core";
7
7
  export declare const ContextType: core.serialization.Schema<serializers.empathicVoice.ContextType.Raw, Hume.empathicVoice.ContextType>;
8
8
  export declare namespace ContextType {
9
- type Raw = "temporary" | "persistent";
9
+ type Raw = "persistent" | "temporary";
10
10
  }
@@ -38,4 +38,4 @@ var __importStar = (this && this.__importStar) || (function () {
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
39
  exports.ContextType = void 0;
40
40
  const core = __importStar(require("../../../../core"));
41
- exports.ContextType = core.serialization.enum_(["temporary", "persistent"]);
41
+ exports.ContextType = core.serialization.enum_(["persistent", "temporary"]);
@@ -6,6 +6,7 @@ import * as Hume from "../../../../api/index";
6
6
  import * as core from "../../../../core";
7
7
  import { AssistantEnd } from "./AssistantEnd";
8
8
  import { AssistantMessage } from "./AssistantMessage";
9
+ import { AssistantProsody } from "./AssistantProsody";
9
10
  import { ChatMetadata } from "./ChatMetadata";
10
11
  import { WebSocketError } from "./WebSocketError";
11
12
  import { UserInterruption } from "./UserInterruption";
@@ -13,8 +14,7 @@ import { UserMessage } from "./UserMessage";
13
14
  import { ToolCallMessage } from "./ToolCallMessage";
14
15
  import { ToolResponseMessage } from "./ToolResponseMessage";
15
16
  import { ToolErrorMessage } from "./ToolErrorMessage";
16
- import { AssistantProsody } from "./AssistantProsody";
17
17
  export declare const JsonMessage: core.serialization.Schema<serializers.empathicVoice.JsonMessage.Raw, Hume.empathicVoice.JsonMessage>;
18
18
  export declare namespace JsonMessage {
19
- type Raw = AssistantEnd.Raw | AssistantMessage.Raw | ChatMetadata.Raw | WebSocketError.Raw | UserInterruption.Raw | UserMessage.Raw | ToolCallMessage.Raw | ToolResponseMessage.Raw | ToolErrorMessage.Raw | AssistantProsody.Raw;
19
+ type Raw = AssistantEnd.Raw | AssistantMessage.Raw | AssistantProsody.Raw | ChatMetadata.Raw | WebSocketError.Raw | UserInterruption.Raw | UserMessage.Raw | ToolCallMessage.Raw | ToolResponseMessage.Raw | ToolErrorMessage.Raw;
20
20
  }
@@ -40,6 +40,7 @@ exports.JsonMessage = void 0;
40
40
  const core = __importStar(require("../../../../core"));
41
41
  const AssistantEnd_1 = require("./AssistantEnd");
42
42
  const AssistantMessage_1 = require("./AssistantMessage");
43
+ const AssistantProsody_1 = require("./AssistantProsody");
43
44
  const ChatMetadata_1 = require("./ChatMetadata");
44
45
  const WebSocketError_1 = require("./WebSocketError");
45
46
  const UserInterruption_1 = require("./UserInterruption");
@@ -47,10 +48,10 @@ const UserMessage_1 = require("./UserMessage");
47
48
  const ToolCallMessage_1 = require("./ToolCallMessage");
48
49
  const ToolResponseMessage_1 = require("./ToolResponseMessage");
49
50
  const ToolErrorMessage_1 = require("./ToolErrorMessage");
50
- const AssistantProsody_1 = require("./AssistantProsody");
51
51
  exports.JsonMessage = core.serialization.undiscriminatedUnion([
52
52
  AssistantEnd_1.AssistantEnd,
53
53
  AssistantMessage_1.AssistantMessage,
54
+ AssistantProsody_1.AssistantProsody,
54
55
  ChatMetadata_1.ChatMetadata,
55
56
  WebSocketError_1.WebSocketError,
56
57
  UserInterruption_1.UserInterruption,
@@ -58,5 +59,4 @@ exports.JsonMessage = core.serialization.undiscriminatedUnion([
58
59
  ToolCallMessage_1.ToolCallMessage,
59
60
  ToolResponseMessage_1.ToolResponseMessage,
60
61
  ToolErrorMessage_1.ToolErrorMessage,
61
- AssistantProsody_1.AssistantProsody,
62
62
  ]);
@@ -11,6 +11,7 @@ import { ReturnTimeoutSpecs } from "./ReturnTimeoutSpecs";
11
11
  import { ReturnNudgeSpec } from "./ReturnNudgeSpec";
12
12
  import { ReturnEventMessageSpecs } from "./ReturnEventMessageSpecs";
13
13
  import { ReturnEllmModel } from "./ReturnEllmModel";
14
+ import { ReturnVoice } from "./ReturnVoice";
14
15
  import { ReturnPrompt } from "./ReturnPrompt";
15
16
  import { ReturnWebhookSpec } from "./ReturnWebhookSpec";
16
17
  export declare const ReturnConfig: core.serialization.ObjectSchema<serializers.empathicVoice.ReturnConfig.Raw, Hume.empathicVoice.ReturnConfig>;
@@ -28,7 +29,7 @@ export declare namespace ReturnConfig {
28
29
  nudges?: ReturnNudgeSpec.Raw | null;
29
30
  event_messages?: ReturnEventMessageSpecs.Raw | null;
30
31
  ellm_model?: ReturnEllmModel.Raw | null;
31
- voice?: unknown | null;
32
+ voice?: ReturnVoice.Raw | null;
32
33
  prompt?: ReturnPrompt.Raw | null;
33
34
  webhooks?: (ReturnWebhookSpec.Raw | null | undefined)[] | null;
34
35
  created_on?: number | null;
@@ -45,6 +45,7 @@ const ReturnTimeoutSpecs_1 = require("./ReturnTimeoutSpecs");
45
45
  const ReturnNudgeSpec_1 = require("./ReturnNudgeSpec");
46
46
  const ReturnEventMessageSpecs_1 = require("./ReturnEventMessageSpecs");
47
47
  const ReturnEllmModel_1 = require("./ReturnEllmModel");
48
+ const ReturnVoice_1 = require("./ReturnVoice");
48
49
  const ReturnPrompt_1 = require("./ReturnPrompt");
49
50
  const ReturnWebhookSpec_1 = require("./ReturnWebhookSpec");
50
51
  exports.ReturnConfig = core.serialization.object({
@@ -60,7 +61,7 @@ exports.ReturnConfig = core.serialization.object({
60
61
  nudges: ReturnNudgeSpec_1.ReturnNudgeSpec.optional(),
61
62
  eventMessages: core.serialization.property("event_messages", ReturnEventMessageSpecs_1.ReturnEventMessageSpecs.optional()),
62
63
  ellmModel: core.serialization.property("ellm_model", ReturnEllmModel_1.ReturnEllmModel.optional()),
63
- voice: core.serialization.unknown().optional(),
64
+ voice: ReturnVoice_1.ReturnVoice.optional(),
64
65
  prompt: ReturnPrompt_1.ReturnPrompt.optional(),
65
66
  webhooks: core.serialization.list(ReturnWebhookSpec_1.ReturnWebhookSpec.optional()).optional(),
66
67
  createdOn: core.serialization.property("created_on", core.serialization.number().optional()),
@@ -0,0 +1,15 @@
1
+ /**
2
+ * This file was auto-generated by Fern from our API Definition.
3
+ */
4
+ import * as serializers from "../../../index";
5
+ import * as Hume from "../../../../api/index";
6
+ import * as core from "../../../../core";
7
+ import { VoiceProvider } from "./VoiceProvider";
8
+ export declare const ReturnVoice: core.serialization.ObjectSchema<serializers.empathicVoice.ReturnVoice.Raw, Hume.empathicVoice.ReturnVoice>;
9
+ export declare namespace ReturnVoice {
10
+ interface Raw {
11
+ id?: string | null;
12
+ name?: string | null;
13
+ provider?: VoiceProvider.Raw | null;
14
+ }
15
+ }
@@ -0,0 +1,46 @@
1
+ "use strict";
2
+ /**
3
+ * This file was auto-generated by Fern from our API Definition.
4
+ */
5
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
6
+ if (k2 === undefined) k2 = k;
7
+ var desc = Object.getOwnPropertyDescriptor(m, k);
8
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
9
+ desc = { enumerable: true, get: function() { return m[k]; } };
10
+ }
11
+ Object.defineProperty(o, k2, desc);
12
+ }) : (function(o, m, k, k2) {
13
+ if (k2 === undefined) k2 = k;
14
+ o[k2] = m[k];
15
+ }));
16
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
17
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
18
+ }) : function(o, v) {
19
+ o["default"] = v;
20
+ });
21
+ var __importStar = (this && this.__importStar) || (function () {
22
+ var ownKeys = function(o) {
23
+ ownKeys = Object.getOwnPropertyNames || function (o) {
24
+ var ar = [];
25
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
26
+ return ar;
27
+ };
28
+ return ownKeys(o);
29
+ };
30
+ return function (mod) {
31
+ if (mod && mod.__esModule) return mod;
32
+ var result = {};
33
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
34
+ __setModuleDefault(result, mod);
35
+ return result;
36
+ };
37
+ })();
38
+ Object.defineProperty(exports, "__esModule", { value: true });
39
+ exports.ReturnVoice = void 0;
40
+ const core = __importStar(require("../../../../core"));
41
+ const VoiceProvider_1 = require("./VoiceProvider");
42
+ exports.ReturnVoice = core.serialization.object({
43
+ id: core.serialization.string().optional(),
44
+ name: core.serialization.string().optional(),
45
+ provider: VoiceProvider_1.VoiceProvider.optional(),
46
+ });
@@ -6,5 +6,5 @@ import * as Hume from "../../../../api/index";
6
6
  import * as core from "../../../../core";
7
7
  export declare const VoiceProvider: core.serialization.Schema<serializers.empathicVoice.VoiceProvider.Raw, Hume.empathicVoice.VoiceProvider>;
8
8
  export declare namespace VoiceProvider {
9
- type Raw = "HUME_AI" | "CUSTOM_VOICE" | "OCTAVE_COMBINED";
9
+ type Raw = "HUME_AI" | "CUSTOM_VOICE";
10
10
  }
@@ -38,4 +38,4 @@ var __importStar = (this && this.__importStar) || (function () {
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
39
  exports.VoiceProvider = void 0;
40
40
  const core = __importStar(require("../../../../core"));
41
- exports.VoiceProvider = core.serialization.enum_(["HUME_AI", "CUSTOM_VOICE", "OCTAVE_COMBINED"]);
41
+ exports.VoiceProvider = core.serialization.enum_(["HUME_AI", "CUSTOM_VOICE"]);
@@ -38,6 +38,8 @@ export * from "./LanguageModelType";
38
38
  export * from "./ModelProviderEnum";
39
39
  export * from "./ValidationErrorLocItem";
40
40
  export * from "./ValidationError";
41
+ export * from "./VoiceId";
42
+ export * from "./VoiceName";
41
43
  export * from "./WebhookEventChatEnded";
42
44
  export * from "./WebhookEventChatStartType";
43
45
  export * from "./WebhookEventChatStarted";
@@ -102,7 +104,6 @@ export * from "./PostedEventMessageSpec";
102
104
  export * from "./PostedTimeoutSpec";
103
105
  export * from "./ReturnEventMessageSpec";
104
106
  export * from "./ReturnTimeoutSpec";
105
- export * from "./VoiceProvider";
106
- export * from "./VoiceId";
107
- export * from "./VoiceName";
108
107
  export * from "./VoiceRef";
108
+ export * from "./ReturnVoice";
109
+ export * from "./VoiceProvider";
@@ -54,6 +54,8 @@ __exportStar(require("./LanguageModelType"), exports);
54
54
  __exportStar(require("./ModelProviderEnum"), exports);
55
55
  __exportStar(require("./ValidationErrorLocItem"), exports);
56
56
  __exportStar(require("./ValidationError"), exports);
57
+ __exportStar(require("./VoiceId"), exports);
58
+ __exportStar(require("./VoiceName"), exports);
57
59
  __exportStar(require("./WebhookEventChatEnded"), exports);
58
60
  __exportStar(require("./WebhookEventChatStartType"), exports);
59
61
  __exportStar(require("./WebhookEventChatStarted"), exports);
@@ -118,7 +120,6 @@ __exportStar(require("./PostedEventMessageSpec"), exports);
118
120
  __exportStar(require("./PostedTimeoutSpec"), exports);
119
121
  __exportStar(require("./ReturnEventMessageSpec"), exports);
120
122
  __exportStar(require("./ReturnTimeoutSpec"), exports);
121
- __exportStar(require("./VoiceProvider"), exports);
122
- __exportStar(require("./VoiceId"), exports);
123
- __exportStar(require("./VoiceName"), exports);
124
123
  __exportStar(require("./VoiceRef"), exports);
124
+ __exportStar(require("./ReturnVoice"), exports);
125
+ __exportStar(require("./VoiceProvider"), exports);
@@ -4,8 +4,20 @@
4
4
  import * as serializers from "../../../index";
5
5
  import * as Hume from "../../../../api/index";
6
6
  import * as core from "../../../../core";
7
+ import { AudioFormatType } from "./AudioFormatType";
8
+ import { Snippet } from "./Snippet";
7
9
  export declare const SnippetAudioChunk: core.serialization.ObjectSchema<serializers.tts.SnippetAudioChunk.Raw, Hume.tts.SnippetAudioChunk>;
8
10
  export declare namespace SnippetAudioChunk {
9
11
  interface Raw {
12
+ generation_id: string;
13
+ snippet_id: string;
14
+ text: string;
15
+ transcribed_text?: string | null;
16
+ chunk_index: number;
17
+ audio: string;
18
+ audio_format: AudioFormatType.Raw;
19
+ is_last_chunk: boolean;
20
+ utterance_index?: number | null;
21
+ snippet: Snippet.Raw;
10
22
  }
11
23
  }
@@ -38,4 +38,17 @@ var __importStar = (this && this.__importStar) || (function () {
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
39
  exports.SnippetAudioChunk = void 0;
40
40
  const core = __importStar(require("../../../../core"));
41
- exports.SnippetAudioChunk = core.serialization.object({});
41
+ const AudioFormatType_1 = require("./AudioFormatType");
42
+ const Snippet_1 = require("./Snippet");
43
+ exports.SnippetAudioChunk = core.serialization.object({
44
+ generationId: core.serialization.property("generation_id", core.serialization.string()),
45
+ snippetId: core.serialization.property("snippet_id", core.serialization.string()),
46
+ text: core.serialization.string(),
47
+ transcribedText: core.serialization.property("transcribed_text", core.serialization.string().optional()),
48
+ chunkIndex: core.serialization.property("chunk_index", core.serialization.number()),
49
+ audio: core.serialization.string(),
50
+ audioFormat: core.serialization.property("audio_format", AudioFormatType_1.AudioFormatType),
51
+ isLastChunk: core.serialization.property("is_last_chunk", core.serialization.boolean()),
52
+ utteranceIndex: core.serialization.property("utterance_index", core.serialization.number().optional()),
53
+ snippet: Snippet_1.Snippet,
54
+ });
package/dist/version.d.ts CHANGED
@@ -1 +1 @@
1
- export declare const SDK_VERSION = "0.13.1";
1
+ export declare const SDK_VERSION = "0.13.2";
package/dist/version.js CHANGED
@@ -1,4 +1,4 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.SDK_VERSION = void 0;
4
- exports.SDK_VERSION = "0.13.1";
4
+ exports.SDK_VERSION = "0.13.2";
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "hume",
3
- "version": "0.13.1",
3
+ "version": "0.13.2",
4
4
  "private": false,
5
5
  "repository": "https://github.com/HumeAI/hume-typescript-sdk",
6
6
  "main": "./index.js",
package/reference.md CHANGED
@@ -33,13 +33,6 @@ The response includes the base64-encoded audio and metadata in JSON format.
33
33
 
34
34
  ```typescript
35
35
  await client.tts.synthesizeJson({
36
- utterances: [
37
- {
38
- text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
39
- description:
40
- "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
41
- },
42
- ],
43
36
  context: {
44
37
  utterances: [
45
38
  {
@@ -53,6 +46,13 @@ await client.tts.synthesizeJson({
53
46
  type: "mp3",
54
47
  },
55
48
  numGenerations: 1,
49
+ utterances: [
50
+ {
51
+ text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
52
+ description:
53
+ "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
54
+ },
55
+ ],
56
56
  });
57
57
  ```
58
58
 
@@ -119,13 +119,6 @@ The response contains the generated audio file in the requested format.
119
119
 
120
120
  ```typescript
121
121
  await client.tts.synthesizeFile({
122
- utterances: [
123
- {
124
- text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
125
- description:
126
- "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
127
- },
128
- ],
129
122
  context: {
130
123
  generationId: "09ad914d-8e7f-40f8-a279-e34f07f7dab2",
131
124
  },
@@ -133,6 +126,13 @@ await client.tts.synthesizeFile({
133
126
  type: "mp3",
134
127
  },
135
128
  numGenerations: 1,
129
+ utterances: [
130
+ {
131
+ text: "Beauty is no quality in things themselves: It exists merely in the mind which contemplates them.",
132
+ description:
133
+ "Middle-aged masculine voice with a clear, rhythmic Scots lilt, rounded vowels, and a warm, steady tone with an articulate, academic quality.",
134
+ },
135
+ ],
136
136
  });
137
137
  ```
138
138
 
@@ -6,5 +6,5 @@ import * as Hume from "../../../../api/index";
6
6
  import * as core from "../../../../core";
7
7
  export declare const ContextType: core.serialization.Schema<serializers.empathicVoice.ContextType.Raw, Hume.empathicVoice.ContextType>;
8
8
  export declare namespace ContextType {
9
- type Raw = "temporary" | "persistent";
9
+ type Raw = "persistent" | "temporary";
10
10
  }
@@ -38,4 +38,4 @@ var __importStar = (this && this.__importStar) || (function () {
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
39
  exports.ContextType = void 0;
40
40
  const core = __importStar(require("../../../../core"));
41
- exports.ContextType = core.serialization.enum_(["temporary", "persistent"]);
41
+ exports.ContextType = core.serialization.enum_(["persistent", "temporary"]);
@@ -6,6 +6,7 @@ import * as Hume from "../../../../api/index";
6
6
  import * as core from "../../../../core";
7
7
  import { AssistantEnd } from "./AssistantEnd";
8
8
  import { AssistantMessage } from "./AssistantMessage";
9
+ import { AssistantProsody } from "./AssistantProsody";
9
10
  import { ChatMetadata } from "./ChatMetadata";
10
11
  import { WebSocketError } from "./WebSocketError";
11
12
  import { UserInterruption } from "./UserInterruption";
@@ -13,8 +14,7 @@ import { UserMessage } from "./UserMessage";
13
14
  import { ToolCallMessage } from "./ToolCallMessage";
14
15
  import { ToolResponseMessage } from "./ToolResponseMessage";
15
16
  import { ToolErrorMessage } from "./ToolErrorMessage";
16
- import { AssistantProsody } from "./AssistantProsody";
17
17
  export declare const JsonMessage: core.serialization.Schema<serializers.empathicVoice.JsonMessage.Raw, Hume.empathicVoice.JsonMessage>;
18
18
  export declare namespace JsonMessage {
19
- type Raw = AssistantEnd.Raw | AssistantMessage.Raw | ChatMetadata.Raw | WebSocketError.Raw | UserInterruption.Raw | UserMessage.Raw | ToolCallMessage.Raw | ToolResponseMessage.Raw | ToolErrorMessage.Raw | AssistantProsody.Raw;
19
+ type Raw = AssistantEnd.Raw | AssistantMessage.Raw | AssistantProsody.Raw | ChatMetadata.Raw | WebSocketError.Raw | UserInterruption.Raw | UserMessage.Raw | ToolCallMessage.Raw | ToolResponseMessage.Raw | ToolErrorMessage.Raw;
20
20
  }
@@ -40,6 +40,7 @@ exports.JsonMessage = void 0;
40
40
  const core = __importStar(require("../../../../core"));
41
41
  const AssistantEnd_1 = require("./AssistantEnd");
42
42
  const AssistantMessage_1 = require("./AssistantMessage");
43
+ const AssistantProsody_1 = require("./AssistantProsody");
43
44
  const ChatMetadata_1 = require("./ChatMetadata");
44
45
  const WebSocketError_1 = require("./WebSocketError");
45
46
  const UserInterruption_1 = require("./UserInterruption");
@@ -47,10 +48,10 @@ const UserMessage_1 = require("./UserMessage");
47
48
  const ToolCallMessage_1 = require("./ToolCallMessage");
48
49
  const ToolResponseMessage_1 = require("./ToolResponseMessage");
49
50
  const ToolErrorMessage_1 = require("./ToolErrorMessage");
50
- const AssistantProsody_1 = require("./AssistantProsody");
51
51
  exports.JsonMessage = core.serialization.undiscriminatedUnion([
52
52
  AssistantEnd_1.AssistantEnd,
53
53
  AssistantMessage_1.AssistantMessage,
54
+ AssistantProsody_1.AssistantProsody,
54
55
  ChatMetadata_1.ChatMetadata,
55
56
  WebSocketError_1.WebSocketError,
56
57
  UserInterruption_1.UserInterruption,
@@ -58,5 +59,4 @@ exports.JsonMessage = core.serialization.undiscriminatedUnion([
58
59
  ToolCallMessage_1.ToolCallMessage,
59
60
  ToolResponseMessage_1.ToolResponseMessage,
60
61
  ToolErrorMessage_1.ToolErrorMessage,
61
- AssistantProsody_1.AssistantProsody,
62
62
  ]);
@@ -11,6 +11,7 @@ import { ReturnTimeoutSpecs } from "./ReturnTimeoutSpecs";
11
11
  import { ReturnNudgeSpec } from "./ReturnNudgeSpec";
12
12
  import { ReturnEventMessageSpecs } from "./ReturnEventMessageSpecs";
13
13
  import { ReturnEllmModel } from "./ReturnEllmModel";
14
+ import { ReturnVoice } from "./ReturnVoice";
14
15
  import { ReturnPrompt } from "./ReturnPrompt";
15
16
  import { ReturnWebhookSpec } from "./ReturnWebhookSpec";
16
17
  export declare const ReturnConfig: core.serialization.ObjectSchema<serializers.empathicVoice.ReturnConfig.Raw, Hume.empathicVoice.ReturnConfig>;
@@ -28,7 +29,7 @@ export declare namespace ReturnConfig {
28
29
  nudges?: ReturnNudgeSpec.Raw | null;
29
30
  event_messages?: ReturnEventMessageSpecs.Raw | null;
30
31
  ellm_model?: ReturnEllmModel.Raw | null;
31
- voice?: unknown | null;
32
+ voice?: ReturnVoice.Raw | null;
32
33
  prompt?: ReturnPrompt.Raw | null;
33
34
  webhooks?: (ReturnWebhookSpec.Raw | null | undefined)[] | null;
34
35
  created_on?: number | null;