hume 0.9.18 → 0.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.mock/definition/empathic-voice/__package__.yml +151 -224
- package/.mock/definition/empathic-voice/chat.yml +16 -16
- package/.mock/definition/empathic-voice/chatGroups.yml +10 -4
- package/.mock/definition/empathic-voice/chats.yml +3 -3
- package/.mock/definition/empathic-voice/configs.yml +23 -17
- package/.mock/definition/empathic-voice/customVoices.yml +9 -6
- package/.mock/definition/empathic-voice/prompts.yml +9 -9
- package/.mock/definition/empathic-voice/tools.yml +12 -9
- package/.mock/definition/expression-measurement/stream/stream.yml +206 -206
- package/.mock/definition/tts/__package__.yml +32 -3
- package/.mock/definition/tts/voices.yml +4 -2
- package/.mock/fern.config.json +1 -1
- package/api/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
- package/api/resources/empathicVoice/resources/chat/types/index.js +1 -1
- package/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts +2 -2
- package/api/resources/empathicVoice/resources/chatGroups/client/Client.js +139 -115
- package/api/resources/empathicVoice/resources/chats/client/Client.js +3 -3
- package/api/resources/empathicVoice/resources/configs/client/Client.d.ts +4 -4
- package/api/resources/empathicVoice/resources/configs/client/Client.js +146 -122
- package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +1 -1
- package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.d.ts +1 -1
- package/api/resources/empathicVoice/resources/customVoices/client/Client.d.ts +1 -1
- package/api/resources/empathicVoice/resources/customVoices/client/Client.js +72 -60
- package/api/resources/empathicVoice/resources/prompts/client/Client.js +9 -9
- package/api/resources/empathicVoice/resources/tools/client/Client.d.ts +1 -1
- package/api/resources/empathicVoice/resources/tools/client/Client.js +75 -63
- package/api/resources/empathicVoice/types/{ReturnLanguageModelModelResource.d.ts → LanguageModelType.d.ts} +3 -6
- package/api/resources/empathicVoice/types/{ReturnLanguageModelModelResource.js → LanguageModelType.js} +3 -3
- package/api/resources/empathicVoice/types/{PostedLanguageModelModelProvider.d.ts → ModelProviderEnum.d.ts} +2 -5
- package/{dist/api/resources/empathicVoice/types/PostedLanguageModelModelProvider.js → api/resources/empathicVoice/types/ModelProviderEnum.js} +2 -2
- package/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +2 -2
- package/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +2 -2
- package/api/resources/empathicVoice/types/index.d.ts +6 -8
- package/api/resources/empathicVoice/types/index.js +6 -8
- package/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
- package/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
- package/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
- package/api/resources/tts/client/Client.js +4 -4
- package/api/resources/tts/resources/voices/client/Client.d.ts +1 -1
- package/api/resources/tts/resources/voices/client/Client.js +72 -60
- package/api/resources/tts/types/PostedTts.d.ts +7 -0
- package/api/resources/tts/types/SnippetAudioChunk.d.ts +6 -2
- package/dist/api/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/chat/types/index.js +1 -1
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.d.ts +2 -2
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/Client.js +139 -115
- package/dist/api/resources/empathicVoice/resources/chats/client/Client.js +3 -3
- package/dist/api/resources/empathicVoice/resources/configs/client/Client.d.ts +4 -4
- package/dist/api/resources/empathicVoice/resources/configs/client/Client.js +146 -122
- package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/customVoices/client/Client.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/customVoices/client/Client.js +72 -60
- package/dist/api/resources/empathicVoice/resources/prompts/client/Client.js +9 -9
- package/dist/api/resources/empathicVoice/resources/tools/client/Client.d.ts +1 -1
- package/dist/api/resources/empathicVoice/resources/tools/client/Client.js +75 -63
- package/dist/api/resources/empathicVoice/types/{ReturnLanguageModelModelResource.d.ts → LanguageModelType.d.ts} +3 -6
- package/dist/api/resources/empathicVoice/types/{ReturnLanguageModelModelResource.js → LanguageModelType.js} +3 -3
- package/dist/api/resources/empathicVoice/types/{PostedLanguageModelModelProvider.d.ts → ModelProviderEnum.d.ts} +2 -5
- package/{api/resources/empathicVoice/types/PostedLanguageModelModelProvider.js → dist/api/resources/empathicVoice/types/ModelProviderEnum.js} +2 -2
- package/dist/api/resources/empathicVoice/types/PostedLanguageModel.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/ReturnLanguageModel.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/index.d.ts +6 -8
- package/dist/api/resources/empathicVoice/types/index.js +6 -8
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +6 -6
- package/dist/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
- package/dist/api/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
- package/dist/api/resources/tts/client/Client.js +4 -4
- package/dist/api/resources/tts/resources/voices/client/Client.d.ts +1 -1
- package/dist/api/resources/tts/resources/voices/client/Client.js +72 -60
- package/dist/api/resources/tts/types/PostedTts.d.ts +7 -0
- package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +6 -2
- package/dist/serialization/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/resources/chat/types/index.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +10 -0
- package/{serialization/resources/empathicVoice/types/ReturnLanguageModelModelResource.js → dist/serialization/resources/empathicVoice/types/LanguageModelType.js} +3 -3
- package/dist/serialization/resources/empathicVoice/types/{PostedLanguageModelModelProvider.d.ts → ModelProviderEnum.d.ts} +2 -2
- package/dist/serialization/resources/empathicVoice/types/{ReturnLanguageModelModelProvider.js → ModelProviderEnum.js} +2 -2
- package/dist/serialization/resources/empathicVoice/types/PostedLanguageModel.d.ts +4 -4
- package/dist/serialization/resources/empathicVoice/types/PostedLanguageModel.js +4 -4
- package/dist/serialization/resources/empathicVoice/types/ReturnLanguageModel.d.ts +4 -4
- package/dist/serialization/resources/empathicVoice/types/ReturnLanguageModel.js +4 -4
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +6 -8
- package/dist/serialization/resources/empathicVoice/types/index.js +6 -8
- package/dist/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
- package/dist/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
- package/dist/serialization/resources/tts/types/PostedTts.d.ts +1 -0
- package/dist/serialization/resources/tts/types/PostedTts.js +1 -0
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +2 -0
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +2 -0
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/package.json +1 -1
- package/reference.md +93 -16
- package/serialization/resources/empathicVoice/resources/chat/types/index.d.ts +1 -1
- package/serialization/resources/empathicVoice/resources/chat/types/index.js +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +10 -0
- package/{dist/serialization/resources/empathicVoice/types/ReturnLanguageModelModelResource.js → serialization/resources/empathicVoice/types/LanguageModelType.js} +3 -3
- package/{dist/serialization/resources/empathicVoice/types/ReturnLanguageModelModelProvider.d.ts → serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts} +2 -2
- package/{dist/serialization/resources/empathicVoice/types/PostedLanguageModelModelProvider.js → serialization/resources/empathicVoice/types/ModelProviderEnum.js} +2 -2
- package/serialization/resources/empathicVoice/types/PostedLanguageModel.d.ts +4 -4
- package/serialization/resources/empathicVoice/types/PostedLanguageModel.js +4 -4
- package/serialization/resources/empathicVoice/types/ReturnLanguageModel.d.ts +4 -4
- package/serialization/resources/empathicVoice/types/ReturnLanguageModel.js +4 -4
- package/serialization/resources/empathicVoice/types/index.d.ts +6 -8
- package/serialization/resources/empathicVoice/types/index.js +6 -8
- package/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.d.ts +4 -4
- package/serialization/resources/expressionMeasurement/resources/stream/resources/stream/types/index.js +4 -4
- package/serialization/resources/tts/types/PostedTts.d.ts +1 -0
- package/serialization/resources/tts/types/PostedTts.js +1 -0
- package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +2 -0
- package/serialization/resources/tts/types/SnippetAudioChunk.js +2 -0
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/api/resources/empathicVoice/types/PostedLanguageModelModelResource.d.ts +0 -34
- package/api/resources/empathicVoice/types/PostedLanguageModelModelResource.js +0 -33
- package/api/resources/empathicVoice/types/ReturnLanguageModelModelProvider.d.ts +0 -17
- package/api/resources/empathicVoice/types/ReturnLanguageModelModelProvider.js +0 -16
- package/dist/api/resources/empathicVoice/types/PostedLanguageModelModelResource.d.ts +0 -34
- package/dist/api/resources/empathicVoice/types/PostedLanguageModelModelResource.js +0 -33
- package/dist/api/resources/empathicVoice/types/ReturnLanguageModelModelProvider.d.ts +0 -17
- package/dist/api/resources/empathicVoice/types/ReturnLanguageModelModelProvider.js +0 -16
- package/dist/serialization/resources/empathicVoice/types/PostedLanguageModelModelResource.d.ts +0 -10
- package/dist/serialization/resources/empathicVoice/types/PostedLanguageModelModelResource.js +0 -67
- package/dist/serialization/resources/empathicVoice/types/ReturnLanguageModelModelResource.d.ts +0 -10
- package/serialization/resources/empathicVoice/types/PostedLanguageModelModelProvider.d.ts +0 -10
- package/serialization/resources/empathicVoice/types/PostedLanguageModelModelProvider.js +0 -50
- package/serialization/resources/empathicVoice/types/PostedLanguageModelModelResource.d.ts +0 -10
- package/serialization/resources/empathicVoice/types/PostedLanguageModelModelResource.js +0 -67
- package/serialization/resources/empathicVoice/types/ReturnLanguageModelModelProvider.d.ts +0 -10
- package/serialization/resources/empathicVoice/types/ReturnLanguageModelModelProvider.js +0 -50
- package/serialization/resources/empathicVoice/types/ReturnLanguageModelModelResource.d.ts +0 -10
@@ -36,6 +36,8 @@ export * from "./TtsInput";
 export * from "./TextInput";
 export * from "./FunctionCallResponseInput";
 export * from "./HttpValidationError";
+export * from "./LanguageModelType";
+export * from "./ModelProviderEnum";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
 export * from "./WebhookEventBase";

@@ -77,10 +79,6 @@ export * from "./ReturnChatGroupPagedEvents";
 export * from "./ReturnChatGroupPagedAudioReconstructionsPaginationDirection";
 export * from "./ReturnChatGroupPagedAudioReconstructions";
 export * from "./PostedConfigPromptSpec";
-export * from "./PostedVoiceProvider";
-export * from "./PostedVoice";
-export * from "./PostedLanguageModelModelProvider";
-export * from "./PostedLanguageModelModelResource";
 export * from "./PostedLanguageModel";
 export * from "./PostedEllmModel";
 export * from "./PostedUserDefinedToolSpec";

@@ -92,10 +90,6 @@ export * from "./PostedTimeoutSpecsMaxDuration";
 export * from "./PostedTimeoutSpecs";
 export * from "./PostedWebhookEventType";
 export * from "./PostedWebhookSpec";
-export * from "./ReturnVoiceProvider";
-export * from "./ReturnVoice";
-export * from "./ReturnLanguageModelModelProvider";
-export * from "./ReturnLanguageModelModelResource";
 export * from "./ReturnLanguageModel";
 export * from "./ReturnEllmModel";
 export * from "./ReturnBuiltinToolToolType";

@@ -116,3 +110,7 @@ export * from "./PostedTimeoutSpec";
 export * from "./ReturnEventMessageSpec";
 export * from "./ReturnTimeoutSpec";
 export * from "./PostedPromptSpec";
+export * from "./PostedVoiceProvider";
+export * from "./PostedVoice";
+export * from "./ReturnVoiceProvider";
+export * from "./ReturnVoice";
@@ -52,6 +52,8 @@ __exportStar(require("./TtsInput"), exports);
 __exportStar(require("./TextInput"), exports);
 __exportStar(require("./FunctionCallResponseInput"), exports);
 __exportStar(require("./HttpValidationError"), exports);
+__exportStar(require("./LanguageModelType"), exports);
+__exportStar(require("./ModelProviderEnum"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
 __exportStar(require("./WebhookEventBase"), exports);

@@ -93,10 +95,6 @@ __exportStar(require("./ReturnChatGroupPagedEvents"), exports);
 __exportStar(require("./ReturnChatGroupPagedAudioReconstructionsPaginationDirection"), exports);
 __exportStar(require("./ReturnChatGroupPagedAudioReconstructions"), exports);
 __exportStar(require("./PostedConfigPromptSpec"), exports);
-__exportStar(require("./PostedVoiceProvider"), exports);
-__exportStar(require("./PostedVoice"), exports);
-__exportStar(require("./PostedLanguageModelModelProvider"), exports);
-__exportStar(require("./PostedLanguageModelModelResource"), exports);
 __exportStar(require("./PostedLanguageModel"), exports);
 __exportStar(require("./PostedEllmModel"), exports);
 __exportStar(require("./PostedUserDefinedToolSpec"), exports);

@@ -108,10 +106,6 @@ __exportStar(require("./PostedTimeoutSpecsMaxDuration"), exports);
 __exportStar(require("./PostedTimeoutSpecs"), exports);
 __exportStar(require("./PostedWebhookEventType"), exports);
 __exportStar(require("./PostedWebhookSpec"), exports);
-__exportStar(require("./ReturnVoiceProvider"), exports);
-__exportStar(require("./ReturnVoice"), exports);
-__exportStar(require("./ReturnLanguageModelModelProvider"), exports);
-__exportStar(require("./ReturnLanguageModelModelResource"), exports);
 __exportStar(require("./ReturnLanguageModel"), exports);
 __exportStar(require("./ReturnEllmModel"), exports);
 __exportStar(require("./ReturnBuiltinToolToolType"), exports);

@@ -132,3 +126,7 @@ __exportStar(require("./PostedTimeoutSpec"), exports);
 __exportStar(require("./ReturnEventMessageSpec"), exports);
 __exportStar(require("./ReturnTimeoutSpec"), exports);
 __exportStar(require("./PostedPromptSpec"), exports);
+__exportStar(require("./PostedVoiceProvider"), exports);
+__exportStar(require("./PostedVoice"), exports);
+__exportStar(require("./ReturnVoiceProvider"), exports);
+__exportStar(require("./ReturnVoice"), exports);
@@ -1,3 +1,7 @@
+export * from "./StreamFace";
+export * from "./StreamLanguage";
+export * from "./Config";
+export * from "./StreamModelsEndpointPayload";
 export * from "./StreamModelPredictionsJobDetails";
 export * from "./StreamModelPredictionsBurstPredictionsItem";
 export * from "./StreamModelPredictionsBurst";

@@ -15,7 +19,3 @@ export * from "./StreamErrorMessage";
 export * from "./StreamWarningMessageJobDetails";
 export * from "./StreamWarningMessage";
 export * from "./SubscribeEvent";
-export * from "./StreamFace";
-export * from "./StreamLanguage";
-export * from "./Config";
-export * from "./StreamModelsEndpointPayload";
@@ -14,6 +14,10 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
 for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./StreamFace"), exports);
+__exportStar(require("./StreamLanguage"), exports);
+__exportStar(require("./Config"), exports);
+__exportStar(require("./StreamModelsEndpointPayload"), exports);
 __exportStar(require("./StreamModelPredictionsJobDetails"), exports);
 __exportStar(require("./StreamModelPredictionsBurstPredictionsItem"), exports);
 __exportStar(require("./StreamModelPredictionsBurst"), exports);

@@ -31,7 +35,3 @@ __exportStar(require("./StreamErrorMessage"), exports);
 __exportStar(require("./StreamWarningMessageJobDetails"), exports);
 __exportStar(require("./StreamWarningMessage"), exports);
 __exportStar(require("./SubscribeEvent"), exports);
-__exportStar(require("./StreamFace"), exports);
-__exportStar(require("./StreamLanguage"), exports);
-__exportStar(require("./Config"), exports);
-__exportStar(require("./StreamModelsEndpointPayload"), exports);
@@ -47,4 +47,5 @@ exports.PostedTts = core.serialization.object({
     numGenerations: core.serialization.property("num_generations", core.serialization.number().optional()),
     splitUtterances: core.serialization.property("split_utterances", core.serialization.boolean().optional()),
     utterances: core.serialization.list(PostedUtterance_1.PostedUtterance),
+    instantMode: core.serialization.property("instant_mode", core.serialization.boolean().optional()),
 });
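The serializer change above exposes the new `instant_mode` TTS request flag as an optional `instantMode` boolean on the SDK side. A minimal sketch of sending it, assuming the `HumeClient` entry point and a `tts.synthesizeJson` request method (the method name, client options, and env var are assumptions, not taken from this diff):

```typescript
import { HumeClient } from "hume";

// Assumed client setup; the env var name is illustrative.
const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function speakInstantly(): Promise<void> {
    const result = await hume.tts.synthesizeJson({
        utterances: [{ text: "Hello from instant mode." }],
        // `instantMode` is serialized to the new `instant_mode` wire field shown above.
        // Depending on API-side requirements, a specific voice may also need to be set.
        instantMode: true,
    });
    console.log(result);
}

void speakInstantly();
```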
@@ -43,5 +43,7 @@ exports.SnippetAudioChunk = core.serialization.object({
     chunkIndex: core.serialization.property("chunk_index", core.serialization.number()),
     generationId: core.serialization.property("generation_id", core.serialization.string()),
     isLastChunk: core.serialization.property("is_last_chunk", core.serialization.boolean()),
+    snippetId: core.serialization.property("snippet_id", core.serialization.string()),
+    text: core.serialization.string(),
     utteranceIndex: core.serialization.property("utterance_index", core.serialization.number().optional()),
 });
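`SnippetAudioChunk` now carries `snippet_id` and `text` alongside the existing chunk metadata. A sketch of reading the new fields from a streaming synthesis call; the `tts.synthesizeJsonStreaming` method name and its async-iterable return are assumptions, only the chunk fields come from the serializer change above:

```typescript
import { HumeClient } from "hume";

const hume = new HumeClient({ apiKey: process.env.HUME_API_KEY });

async function streamSpeech(): Promise<void> {
    // Assumed streaming entry point; expected to yield SnippetAudioChunk items.
    const stream = await hume.tts.synthesizeJsonStreaming({
        utterances: [{ text: "Streaming synthesis example." }],
    });
    for await (const chunk of stream) {
        // snippetId and text are the 0.10.x additions (snippet_id / text on the wire).
        console.log(chunk.snippetId, chunk.text, chunk.isLastChunk);
    }
}

void streamSpeech();
```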
package/dist/version.d.ts CHANGED

@@ -1 +1 @@
-export declare const SDK_VERSION = "0.9.18";
+export declare const SDK_VERSION = "0.10.1";

package/dist/version.js CHANGED
package/package.json CHANGED
package/reference.md CHANGED
@@ -336,7 +336,7 @@ for await (const item of response) {
 
 ## Tts Voices
 
-<details><summary><code>client.tts.voices.<a href="/src/api/resources/tts/resources/voices/client/Client.ts">list</a>({ ...params }) -> Hume.
+<details><summary><code>client.tts.voices.<a href="/src/api/resources/tts/resources/voices/client/Client.ts">list</a>({ ...params }) -> core.Page<Hume.ReturnVoice></code></summary>
 <dl>
 <dd>
 

@@ -364,9 +364,20 @@ Lists voices in your **Voice Library**. Set provider to `HUME_AI` to list Hume's
 <dd>
 
 ```typescript
-await client.tts.voices.list({
+const response = await client.tts.voices.list({
     provider: "CUSTOM_VOICE",
 });
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.tts.voices.list({
+    provider: "CUSTOM_VOICE",
+});
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>

@@ -686,7 +697,7 @@ await client.empathicVoice.tools.createTool({
 </dl>
 </details>
 
-<details><summary><code>client.empathicVoice.tools.<a href="/src/api/resources/empathicVoice/resources/tools/client/Client.ts">listToolVersions</a>(id, { ...params }) -> Hume.
+<details><summary><code>client.empathicVoice.tools.<a href="/src/api/resources/empathicVoice/resources/tools/client/Client.ts">listToolVersions</a>(id, { ...params }) -> core.Page<Hume.ReturnUserDefinedTool | undefined></code></summary>
 <dl>
 <dd>
 

@@ -716,7 +727,16 @@ Refer to our [tool use](/docs/empathic-voice-interface-evi/features/tool-use#fun
 <dd>
 
 ```typescript
-await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea");
+const response = await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea");
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea");
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>

@@ -1914,7 +1934,7 @@ Version numbers are integer values representing different iterations of the Prom
 
 ## EmpathicVoice CustomVoices
 
-<details><summary><code>client.empathicVoice.customVoices.<a href="/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts">listCustomVoices</a>({ ...params }) -> Hume.
+<details><summary><code>client.empathicVoice.customVoices.<a href="/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts">listCustomVoices</a>({ ...params }) -> core.Page<Hume.ReturnCustomVoice></code></summary>
 <dl>
 <dd>
 

@@ -1944,7 +1964,16 @@ Refer to our [voices guide](/docs/empathic-voice-interface-evi/configuration/voi
 <dd>
 
 ```typescript
-await client.empathicVoice.customVoices.listCustomVoices();
+const response = await client.empathicVoice.customVoices.listCustomVoices();
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.empathicVoice.customVoices.listCustomVoices();
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>

@@ -2332,7 +2361,7 @@ await client.empathicVoice.customVoices.updateCustomVoiceName("id", {
 
 ## EmpathicVoice Configs
 
-<details><summary><code>client.empathicVoice.configs.<a href="/src/api/resources/empathicVoice/resources/configs/client/Client.ts">listConfigs</a>({ ...params }) -> Hume.
+<details><summary><code>client.empathicVoice.configs.<a href="/src/api/resources/empathicVoice/resources/configs/client/Client.ts">listConfigs</a>({ ...params }) -> core.Page<Hume.ReturnConfig></code></summary>
 <dl>
 <dd>
 

@@ -2362,10 +2391,22 @@ For more details on configuration options and how to configure EVI, see our [con
 <dd>
 
 ```typescript
-await client.empathicVoice.configs.listConfigs({
+const response = await client.empathicVoice.configs.listConfigs({
     pageNumber: 0,
     pageSize: 1,
 });
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.empathicVoice.configs.listConfigs({
+    pageNumber: 0,
+    pageSize: 1,
+});
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>

@@ -2443,7 +2484,7 @@ await client.empathicVoice.configs.createConfig({
     },
     languageModel: {
         modelProvider: "ANTHROPIC",
-        modelResource: "claude-3-7-sonnet",
+        modelResource: "claude-3-7-sonnet-latest",
         temperature: 1,
     },
     eventMessages: {

@@ -2495,7 +2536,7 @@ await client.empathicVoice.configs.createConfig({
 </dl>
 </details>
 
-<details><summary><code>client.empathicVoice.configs.<a href="/src/api/resources/empathicVoice/resources/configs/client/Client.ts">listConfigVersions</a>(id, { ...params }) -> Hume.
+<details><summary><code>client.empathicVoice.configs.<a href="/src/api/resources/empathicVoice/resources/configs/client/Client.ts">listConfigVersions</a>(id, { ...params }) -> core.Page<Hume.ReturnConfig></code></summary>
 <dl>
 <dd>
 

@@ -2525,7 +2566,16 @@ For more details on configuration options and how to configure EVI, see our [con
 <dd>
 
 ```typescript
-await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3");
+const response = await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3");
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3");
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>

@@ -2611,7 +2661,7 @@ await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-
     },
     languageModel: {
         modelProvider: "ANTHROPIC",
-        modelResource: "claude-3-7-sonnet",
+        modelResource: "claude-3-7-sonnet-latest",
         temperature: 1,
     },
     ellmModel: {

@@ -3296,7 +3346,7 @@ await client.empathicVoice.chats.getAudio("470a49f6-1dec-4afe-8b61-035d3b2d63b0"
 
 ## EmpathicVoice ChatGroups
 
-<details><summary><code>client.empathicVoice.chatGroups.<a href="/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts">listChatGroups</a>({ ...params }) -> Hume.
+<details><summary><code>client.empathicVoice.chatGroups.<a href="/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts">listChatGroups</a>({ ...params }) -> core.Page<Hume.ReturnChatGroup></code></summary>
 <dl>
 <dd>
 

@@ -3324,12 +3374,26 @@ Fetches a paginated list of **Chat Groups**.
 <dd>
 
 ```typescript
-await client.empathicVoice.chatGroups.listChatGroups({
+const response = await client.empathicVoice.chatGroups.listChatGroups({
     pageNumber: 0,
     pageSize: 1,
     ascendingOrder: true,
     configId: "1b60e1a0-cc59-424a-8d2c-189d354db3f3",
 });
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.empathicVoice.chatGroups.listChatGroups({
+    pageNumber: 0,
+    pageSize: 1,
+    ascendingOrder: true,
+    configId: "1b60e1a0-cc59-424a-8d2c-189d354db3f3",
+});
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>

@@ -3439,7 +3503,7 @@ await client.empathicVoice.chatGroups.getChatGroup("697056f0-6c7e-487d-9bd8-9c19
 </dl>
 </details>
 
-<details><summary><code>client.empathicVoice.chatGroups.<a href="/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts">listChatGroupEvents</a>(id, { ...params }) -> Hume.
+<details><summary><code>client.empathicVoice.chatGroups.<a href="/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts">listChatGroupEvents</a>(id, { ...params }) -> core.Page<Hume.ReturnChatEvent></code></summary>
 <dl>
 <dd>
 

@@ -3467,11 +3531,24 @@ Fetches a paginated list of **Chat** events associated with a **Chat Group**.
 <dd>
 
 ```typescript
-await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", {
+const response = await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", {
     pageNumber: 0,
     pageSize: 3,
     ascendingOrder: true,
 });
+for await (const item of response) {
+    console.log(item);
+}
+
+// Or you can manually iterate page-by-page
+const page = await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", {
+    pageNumber: 0,
+    pageSize: 3,
+    ascendingOrder: true,
+});
+while (page.hasNextPage()) {
+    page = page.getNextPage();
+}
 ```
 
 </dd>
@@ -14,5 +14,5 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
 for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./SubscribeEvent"), exports);
 __exportStar(require("./PublishEvent"), exports);
+__exportStar(require("./SubscribeEvent"), exports);
@@ -0,0 +1,10 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+import * as serializers from "../../../index";
+import * as Hume from "../../../../api/index";
+import * as core from "../../../../core";
+export declare const LanguageModelType: core.serialization.Schema<serializers.empathicVoice.LanguageModelType.Raw, Hume.empathicVoice.LanguageModelType>;
+export declare namespace LanguageModelType {
+    type Raw = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "ellm" | "custom-language-model";
+}
@@ -36,10 +36,10 @@ var __importStar = (this && this.__importStar) || (function () {
 };
 })();
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ReturnLanguageModelModelResource = void 0;
+exports.LanguageModelType = void 0;
 const core = __importStar(require("../../../../core"));
-exports.ReturnLanguageModelModelResource = core.serialization.enum_([
-    "claude-3-7-sonnet",
+exports.LanguageModelType = core.serialization.enum_([
+    "claude-3-7-sonnet-latest",
     "claude-3-5-sonnet-latest",
     "claude-3-5-haiku-latest",
     "claude-3-5-sonnet-20240620",
@@ -4,7 +4,7 @@
 import * as serializers from "../../../index";
 import * as Hume from "../../../../api/index";
 import * as core from "../../../../core";
-export declare const
-export declare namespace
+export declare const ModelProviderEnum: core.serialization.Schema<serializers.empathicVoice.ModelProviderEnum.Raw, Hume.empathicVoice.ModelProviderEnum>;
+export declare namespace ModelProviderEnum {
     type Raw = "GROQ" | "OPEN_AI" | "FIREWORKS" | "ANTHROPIC" | "CUSTOM_LANGUAGE_MODEL" | "GOOGLE" | "HUME_AI" | "AMAZON_BEDROCK";
 }
@@ -36,9 +36,9 @@ var __importStar = (this && this.__importStar) || (function () {
 };
 })();
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
+exports.ModelProviderEnum = void 0;
 const core = __importStar(require("../../../../core"));
-exports.
+exports.ModelProviderEnum = core.serialization.enum_([
     "GROQ",
     "OPEN_AI",
     "FIREWORKS",
@@ -4,13 +4,13 @@
 import * as serializers from "../../../index";
 import * as Hume from "../../../../api/index";
 import * as core from "../../../../core";
-import { PostedLanguageModelModelProvider } from "./PostedLanguageModelModelProvider";
-import { PostedLanguageModelModelResource } from "./PostedLanguageModelModelResource";
+import { ModelProviderEnum } from "./ModelProviderEnum";
+import { LanguageModelType } from "./LanguageModelType";
 export declare const PostedLanguageModel: core.serialization.ObjectSchema<serializers.empathicVoice.PostedLanguageModel.Raw, Hume.empathicVoice.PostedLanguageModel>;
 export declare namespace PostedLanguageModel {
     interface Raw {
-        model_provider?: PostedLanguageModelModelProvider.Raw | null;
-        model_resource?: PostedLanguageModelModelResource.Raw | null;
+        model_provider?: ModelProviderEnum.Raw | null;
+        model_resource?: LanguageModelType.Raw | null;
         temperature?: number | null;
     }
 }
@@ -38,10 +38,10 @@ var __importStar = (this && this.__importStar) || (function () {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PostedLanguageModel = void 0;
 const core = __importStar(require("../../../../core"));
-const PostedLanguageModelModelProvider_1 = require("./PostedLanguageModelModelProvider");
-const PostedLanguageModelModelResource_1 = require("./PostedLanguageModelModelResource");
+const ModelProviderEnum_1 = require("./ModelProviderEnum");
+const LanguageModelType_1 = require("./LanguageModelType");
 exports.PostedLanguageModel = core.serialization.object({
-    modelProvider: core.serialization.property("model_provider", PostedLanguageModelModelProvider_1.PostedLanguageModelModelProvider.optional()),
-    modelResource: core.serialization.property("model_resource", PostedLanguageModelModelResource_1.PostedLanguageModelModelResource.optional()),
+    modelProvider: core.serialization.property("model_provider", ModelProviderEnum_1.ModelProviderEnum.optional()),
+    modelResource: core.serialization.property("model_resource", LanguageModelType_1.LanguageModelType.optional()),
     temperature: core.serialization.number().optional(),
 });
@@ -4,13 +4,13 @@
 import * as serializers from "../../../index";
 import * as Hume from "../../../../api/index";
 import * as core from "../../../../core";
-import { ReturnLanguageModelModelProvider } from "./ReturnLanguageModelModelProvider";
-import { ReturnLanguageModelModelResource } from "./ReturnLanguageModelModelResource";
+import { ModelProviderEnum } from "./ModelProviderEnum";
+import { LanguageModelType } from "./LanguageModelType";
 export declare const ReturnLanguageModel: core.serialization.ObjectSchema<serializers.empathicVoice.ReturnLanguageModel.Raw, Hume.empathicVoice.ReturnLanguageModel>;
 export declare namespace ReturnLanguageModel {
     interface Raw {
-        model_provider?: ReturnLanguageModelModelProvider.Raw | null;
-        model_resource?: ReturnLanguageModelModelResource.Raw | null;
+        model_provider?: ModelProviderEnum.Raw | null;
+        model_resource?: LanguageModelType.Raw | null;
         temperature?: number | null;
     }
 }
@@ -38,10 +38,10 @@ var __importStar = (this && this.__importStar) || (function () {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ReturnLanguageModel = void 0;
 const core = __importStar(require("../../../../core"));
-const ReturnLanguageModelModelProvider_1 = require("./ReturnLanguageModelModelProvider");
-const ReturnLanguageModelModelResource_1 = require("./ReturnLanguageModelModelResource");
+const ModelProviderEnum_1 = require("./ModelProviderEnum");
+const LanguageModelType_1 = require("./LanguageModelType");
 exports.ReturnLanguageModel = core.serialization.object({
-    modelProvider: core.serialization.property("model_provider", ReturnLanguageModelModelProvider_1.ReturnLanguageModelModelProvider.optional()),
-    modelResource: core.serialization.property("model_resource", ReturnLanguageModelModelResource_1.ReturnLanguageModelModelResource.optional()),
+    modelProvider: core.serialization.property("model_provider", ModelProviderEnum_1.ModelProviderEnum.optional()),
+    modelResource: core.serialization.property("model_resource", LanguageModelType_1.LanguageModelType.optional()),
     temperature: core.serialization.number().optional(),
 });
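The removed `PostedLanguageModelModelProvider`/`PostedLanguageModelModelResource` and `ReturnLanguageModelModelProvider`/`ReturnLanguageModelModelResource` types are consolidated into the shared `ModelProviderEnum` and `LanguageModelType` unions shown above. A sketch of referencing the new names from application code, assuming the package re-exports its API types under a `Hume` namespace (the namespace path mirrors the `Hume.empathicVoice.*` references in the serializer declarations; the import style is an assumption):

```typescript
import { Hume } from "hume";

// 0.9.x annotations used the Posted*/Return* model provider and resource types;
// 0.10.x replaces them with the shared unions renamed above.
const modelProvider: Hume.empathicVoice.ModelProviderEnum = "ANTHROPIC";
const modelResource: Hume.empathicVoice.LanguageModelType = "claude-3-7-sonnet-latest";

// Both Posted and Return language-model payloads now reference the same unions.
const languageModel: Hume.empathicVoice.PostedLanguageModel = {
    modelProvider,
    modelResource,
    temperature: 1,
};

console.log(languageModel);
```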
@@ -36,6 +36,8 @@ export * from "./TtsInput";
 export * from "./TextInput";
 export * from "./FunctionCallResponseInput";
 export * from "./HttpValidationError";
+export * from "./LanguageModelType";
+export * from "./ModelProviderEnum";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
 export * from "./WebhookEventBase";

@@ -77,10 +79,6 @@ export * from "./ReturnChatGroupPagedEvents";
 export * from "./ReturnChatGroupPagedAudioReconstructionsPaginationDirection";
 export * from "./ReturnChatGroupPagedAudioReconstructions";
 export * from "./PostedConfigPromptSpec";
-export * from "./PostedVoiceProvider";
-export * from "./PostedVoice";
-export * from "./PostedLanguageModelModelProvider";
-export * from "./PostedLanguageModelModelResource";
 export * from "./PostedLanguageModel";
 export * from "./PostedEllmModel";
 export * from "./PostedUserDefinedToolSpec";

@@ -92,10 +90,6 @@ export * from "./PostedTimeoutSpecsMaxDuration";
 export * from "./PostedTimeoutSpecs";
 export * from "./PostedWebhookEventType";
 export * from "./PostedWebhookSpec";
-export * from "./ReturnVoiceProvider";
-export * from "./ReturnVoice";
-export * from "./ReturnLanguageModelModelProvider";
-export * from "./ReturnLanguageModelModelResource";
 export * from "./ReturnLanguageModel";
 export * from "./ReturnEllmModel";
 export * from "./ReturnBuiltinToolToolType";

@@ -116,3 +110,7 @@ export * from "./PostedTimeoutSpec";
 export * from "./ReturnEventMessageSpec";
 export * from "./ReturnTimeoutSpec";
 export * from "./PostedPromptSpec";
+export * from "./PostedVoiceProvider";
+export * from "./PostedVoice";
+export * from "./ReturnVoiceProvider";
+export * from "./ReturnVoice";
@@ -52,6 +52,8 @@ __exportStar(require("./TtsInput"), exports);
 __exportStar(require("./TextInput"), exports);
 __exportStar(require("./FunctionCallResponseInput"), exports);
 __exportStar(require("./HttpValidationError"), exports);
+__exportStar(require("./LanguageModelType"), exports);
+__exportStar(require("./ModelProviderEnum"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
 __exportStar(require("./WebhookEventBase"), exports);

@@ -93,10 +95,6 @@ __exportStar(require("./ReturnChatGroupPagedEvents"), exports);
 __exportStar(require("./ReturnChatGroupPagedAudioReconstructionsPaginationDirection"), exports);
 __exportStar(require("./ReturnChatGroupPagedAudioReconstructions"), exports);
 __exportStar(require("./PostedConfigPromptSpec"), exports);
-__exportStar(require("./PostedVoiceProvider"), exports);
-__exportStar(require("./PostedVoice"), exports);
-__exportStar(require("./PostedLanguageModelModelProvider"), exports);
-__exportStar(require("./PostedLanguageModelModelResource"), exports);
 __exportStar(require("./PostedLanguageModel"), exports);
 __exportStar(require("./PostedEllmModel"), exports);
 __exportStar(require("./PostedUserDefinedToolSpec"), exports);

@@ -108,10 +106,6 @@ __exportStar(require("./PostedTimeoutSpecsMaxDuration"), exports);
 __exportStar(require("./PostedTimeoutSpecs"), exports);
 __exportStar(require("./PostedWebhookEventType"), exports);
 __exportStar(require("./PostedWebhookSpec"), exports);
-__exportStar(require("./ReturnVoiceProvider"), exports);
-__exportStar(require("./ReturnVoice"), exports);
-__exportStar(require("./ReturnLanguageModelModelProvider"), exports);
-__exportStar(require("./ReturnLanguageModelModelResource"), exports);
 __exportStar(require("./ReturnLanguageModel"), exports);
 __exportStar(require("./ReturnEllmModel"), exports);
 __exportStar(require("./ReturnBuiltinToolToolType"), exports);

@@ -132,3 +126,7 @@ __exportStar(require("./PostedTimeoutSpec"), exports);
 __exportStar(require("./ReturnEventMessageSpec"), exports);
 __exportStar(require("./ReturnTimeoutSpec"), exports);
 __exportStar(require("./PostedPromptSpec"), exports);
+__exportStar(require("./PostedVoiceProvider"), exports);
+__exportStar(require("./PostedVoice"), exports);
+__exportStar(require("./ReturnVoiceProvider"), exports);
+__exportStar(require("./ReturnVoice"), exports);
@@ -1,3 +1,7 @@
+export * from "./StreamFace";
+export * from "./StreamLanguage";
+export * from "./Config";
+export * from "./StreamModelsEndpointPayload";
 export * from "./StreamModelPredictionsJobDetails";
 export * from "./StreamModelPredictionsBurstPredictionsItem";
 export * from "./StreamModelPredictionsBurst";

@@ -15,7 +19,3 @@ export * from "./StreamErrorMessage";
 export * from "./StreamWarningMessageJobDetails";
 export * from "./StreamWarningMessage";
 export * from "./SubscribeEvent";
-export * from "./StreamFace";
-export * from "./StreamLanguage";
-export * from "./Config";
-export * from "./StreamModelsEndpointPayload";