hume 0.12.1 → 0.12.2
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/.mock/definition/empathic-voice/__package__.yml +169 -75
- package/.mock/definition/empathic-voice/configs.yml +10 -4
- package/.mock/definition/tts/__package__.yml +7 -38
- package/api/resources/empathicVoice/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/client/index.js +15 -0
- package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +12 -0
- package/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +5 -0
- package/api/resources/empathicVoice/client/requests/index.d.ts +1 -0
- package/api/resources/empathicVoice/client/requests/index.js +2 -0
- package/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +9 -0
- package/{core/form-data-utils/toReadableStream.js → api/resources/empathicVoice/errors/UnprocessableEntityError.js} +16 -14
- package/api/resources/empathicVoice/errors/index.d.ts +1 -0
- package/api/resources/empathicVoice/errors/index.js +1 -0
- package/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
- package/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
- package/api/resources/empathicVoice/types/LanguageModelType.d.ts +17 -1
- package/api/resources/empathicVoice/types/LanguageModelType.js +16 -0
- package/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
- package/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
- package/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
- package/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
- package/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/api/resources/empathicVoice/types/ReturnConfig.d.ts +21 -17
- package/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
- package/api/resources/empathicVoice/types/SupportsToolUse.d.ts +7 -0
- package/api/resources/empathicVoice/types/SupportsToolUse.js +5 -0
- package/api/resources/empathicVoice/types/UserMessage.d.ts +8 -1
- package/api/resources/empathicVoice/types/index.d.ts +1 -0
- package/api/resources/empathicVoice/types/index.js +1 -0
- package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
- package/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
- package/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
- package/api/resources/tts/client/index.d.ts +1 -0
- package/api/resources/tts/resources/voices/client/index.d.ts +1 -0
- package/api/resources/tts/types/PostedUtterance.d.ts +1 -1
- package/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
- package/core/fetcher/BinaryResponse.d.ts +17 -0
- package/core/fetcher/BinaryResponse.js +14 -0
- package/core/fetcher/Fetcher.d.ts +1 -1
- package/core/fetcher/ResponseWithBody.d.ts +4 -0
- package/core/fetcher/ResponseWithBody.js +6 -0
- package/core/fetcher/getFetchFn.js +3 -3
- package/core/fetcher/getResponseBody.js +33 -32
- package/core/fetcher/index.d.ts +1 -0
- package/core/file.d.ts +1 -0
- package/core/file.js +2 -0
- package/core/form-data-utils/FormDataWrapper.d.ts +5 -52
- package/core/form-data-utils/FormDataWrapper.js +104 -124
- package/core/index.d.ts +1 -0
- package/core/index.js +1 -0
- package/dist/api/resources/empathicVoice/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/client/index.js +15 -0
- package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +12 -0
- package/dist/api/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +5 -0
- package/dist/api/resources/empathicVoice/client/requests/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/client/requests/index.js +2 -0
- package/dist/api/resources/empathicVoice/errors/UnprocessableEntityError.d.ts +9 -0
- package/dist/{core/form-data-utils/toReadableStream.js → api/resources/empathicVoice/errors/UnprocessableEntityError.js} +16 -14
- package/dist/api/resources/empathicVoice/errors/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/errors/index.js +1 -0
- package/dist/api/resources/empathicVoice/resources/chatGroups/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/chats/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/configs/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.d.ts +5 -1
- package/dist/api/resources/empathicVoice/resources/prompts/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/resources/tools/client/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/types/LanguageModelType.d.ts +17 -1
- package/dist/api/resources/empathicVoice/types/LanguageModelType.js +16 -0
- package/dist/api/resources/empathicVoice/types/ModelProviderEnum.d.ts +4 -1
- package/dist/api/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/dist/api/resources/empathicVoice/types/ReturnChatEvent.d.ts +12 -17
- package/dist/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts +0 -4
- package/dist/api/resources/empathicVoice/types/ReturnChatEventType.d.ts +22 -18
- package/dist/api/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/dist/api/resources/empathicVoice/types/ReturnConfig.d.ts +21 -17
- package/dist/api/resources/empathicVoice/types/ReturnPrompt.d.ts +2 -2
- package/dist/api/resources/empathicVoice/types/SupportsToolUse.d.ts +7 -0
- package/dist/api/resources/empathicVoice/types/SupportsToolUse.js +5 -0
- package/dist/api/resources/empathicVoice/types/UserMessage.d.ts +8 -1
- package/dist/api/resources/empathicVoice/types/index.d.ts +1 -0
- package/dist/api/resources/empathicVoice/types/index.js +1 -0
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts +2 -4
- package/dist/api/resources/expressionMeasurement/resources/batch/client/Client.js +1 -1
- package/dist/api/resources/expressionMeasurement/resources/batch/client/index.d.ts +1 -0
- package/dist/api/resources/tts/client/index.d.ts +1 -0
- package/dist/api/resources/tts/resources/voices/client/index.d.ts +1 -0
- package/dist/api/resources/tts/types/PostedUtterance.d.ts +1 -1
- package/dist/api/resources/tts/types/SnippetAudioChunk.d.ts +0 -16
- package/dist/core/fetcher/BinaryResponse.d.ts +17 -0
- package/dist/core/fetcher/BinaryResponse.js +14 -0
- package/dist/core/fetcher/Fetcher.d.ts +1 -1
- package/dist/core/fetcher/ResponseWithBody.d.ts +4 -0
- package/dist/core/fetcher/ResponseWithBody.js +6 -0
- package/dist/core/fetcher/getFetchFn.js +3 -3
- package/dist/core/fetcher/getResponseBody.js +33 -32
- package/dist/core/fetcher/index.d.ts +1 -0
- package/dist/core/file.d.ts +1 -0
- package/dist/core/file.js +2 -0
- package/dist/core/form-data-utils/FormDataWrapper.d.ts +5 -52
- package/dist/core/form-data-utils/FormDataWrapper.js +104 -124
- package/dist/core/index.d.ts +1 -0
- package/dist/core/index.js +1 -0
- package/dist/serialization/resources/empathicVoice/client/index.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/client/index.js +17 -0
- package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +12 -0
- package/dist/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +43 -0
- package/dist/serialization/resources/empathicVoice/client/requests/index.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/client/requests/index.js +5 -0
- package/dist/serialization/resources/empathicVoice/index.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/index.js +1 -0
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/LanguageModelType.js +16 -0
- package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +13 -13
- package/dist/serialization/resources/empathicVoice/types/ReturnConfig.js +13 -13
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/dist/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +13 -0
- package/dist/serialization/resources/empathicVoice/types/SupportsToolUse.js +44 -0
- package/dist/serialization/resources/empathicVoice/types/index.d.ts +1 -0
- package/dist/serialization/resources/empathicVoice/types/index.js +1 -0
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
- package/dist/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
- package/dist/version.d.ts +1 -1
- package/dist/version.js +1 -1
- package/jest.browser.config.mjs +10 -0
- package/jest.config.mjs +1 -0
- package/package.json +6 -7
- package/reference.md +53 -1
- package/serialization/resources/empathicVoice/client/index.d.ts +1 -0
- package/serialization/resources/empathicVoice/client/index.js +17 -0
- package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.d.ts +12 -0
- package/serialization/resources/empathicVoice/client/requests/BodyCustomLanguageModelSupportsToolUseV0EviCustomLanguageModelSupportsToolUsePost.js +43 -0
- package/serialization/resources/empathicVoice/client/requests/index.d.ts +1 -0
- package/serialization/resources/empathicVoice/client/requests/index.js +5 -0
- package/serialization/resources/empathicVoice/index.d.ts +1 -0
- package/serialization/resources/empathicVoice/index.js +1 -0
- package/serialization/resources/empathicVoice/types/LanguageModelType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/LanguageModelType.js +16 -0
- package/serialization/resources/empathicVoice/types/ModelProviderEnum.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ModelProviderEnum.js +3 -0
- package/serialization/resources/empathicVoice/types/ReturnChatEventType.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnChatEventType.js +9 -4
- package/serialization/resources/empathicVoice/types/ReturnConfig.d.ts +13 -13
- package/serialization/resources/empathicVoice/types/ReturnConfig.js +13 -13
- package/serialization/resources/empathicVoice/types/ReturnPrompt.d.ts +1 -1
- package/serialization/resources/empathicVoice/types/ReturnPrompt.js +1 -1
- package/serialization/resources/empathicVoice/types/SupportsToolUse.d.ts +13 -0
- package/serialization/resources/empathicVoice/types/SupportsToolUse.js +44 -0
- package/serialization/resources/empathicVoice/types/index.d.ts +1 -0
- package/serialization/resources/empathicVoice/types/index.js +1 -0
- package/serialization/resources/tts/types/SnippetAudioChunk.d.ts +0 -8
- package/serialization/resources/tts/types/SnippetAudioChunk.js +1 -10
- package/version.d.ts +1 -1
- package/version.js +1 -1
- package/core/form-data-utils/toReadableStream.d.ts +0 -1
- package/dist/core/form-data-utils/toReadableStream.d.ts +0 -1
package/api/resources/empathicVoice/types/LanguageModelType.d.ts

@@ -1,7 +1,7 @@
 /**
  * This file was auto-generated by Fern from our API Definition.
  */
-export type LanguageModelType = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "ellm" | "custom-language-model";
+export type LanguageModelType = "claude-3-7-sonnet-latest" | "claude-3-5-sonnet-latest" | "claude-3-5-haiku-latest" | "claude-3-5-sonnet-20240620" | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-sonnet-4-20250514" | "us.anthropic.claude-3-5-haiku-20241022-v1:0" | "us.anthropic.claude-3-5-sonnet-20240620-v1:0" | "us.anthropic.claude-3-haiku-20240307-v1:0" | "gpt-oss-120b" | "qwen-3-235b-a22b" | "qwen-3-235b-a22b-instruct-2507" | "qwen-3-235b-a22b-thinking-2507" | "gemini-1.5-pro" | "gemini-1.5-flash" | "gemini-1.5-pro-002" | "gemini-1.5-flash-002" | "gemini-2.0-flash" | "gemini-2.5-flash" | "gemini-2.5-flash-preview-04-17" | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo" | "gpt-4o" | "gpt-4o-mini" | "gpt-4.1" | "gemma-7b-it" | "llama3-8b-8192" | "llama3-70b-8192" | "llama-3.1-70b-versatile" | "llama-3.3-70b-versatile" | "llama-3.1-8b-instant" | "moonshotai/kimi-k2-instruct" | "accounts/fireworks/models/mixtral-8x7b-instruct" | "accounts/fireworks/models/llama-v3p1-405b-instruct" | "accounts/fireworks/models/llama-v3p1-70b-instruct" | "accounts/fireworks/models/llama-v3p1-8b-instruct" | "sonar" | "sonar-pro" | "sambanova" | "DeepSeek-R1-Distill-Llama-70B" | "Llama-4-Maverick-17B-128E-Instruct" | "Qwen3-32B" | "ellm" | "custom-language-model" | "hume-evi-3-web-search";
 export declare const LanguageModelType: {
     readonly Claude37SonnetLatest: "claude-3-7-sonnet-latest";
     readonly Claude35SonnetLatest: "claude-3-5-sonnet-latest";
@@ -10,30 +10,46 @@ export declare const LanguageModelType: {
     readonly Claude3Opus20240229: "claude-3-opus-20240229";
     readonly Claude3Sonnet20240229: "claude-3-sonnet-20240229";
     readonly Claude3Haiku20240307: "claude-3-haiku-20240307";
+    readonly ClaudeSonnet420250514: "claude-sonnet-4-20250514";
     readonly UsAnthropicClaude35Haiku20241022V10: "us.anthropic.claude-3-5-haiku-20241022-v1:0";
     readonly UsAnthropicClaude35Sonnet20240620V10: "us.anthropic.claude-3-5-sonnet-20240620-v1:0";
     readonly UsAnthropicClaude3Haiku20240307V10: "us.anthropic.claude-3-haiku-20240307-v1:0";
+    readonly GptOss120B: "gpt-oss-120b";
+    readonly Qwen3235Ba22B: "qwen-3-235b-a22b";
+    readonly Qwen3235Ba22BInstruct2507: "qwen-3-235b-a22b-instruct-2507";
+    readonly Qwen3235Ba22BThinking2507: "qwen-3-235b-a22b-thinking-2507";
     readonly Gemini15Pro: "gemini-1.5-pro";
     readonly Gemini15Flash: "gemini-1.5-flash";
     readonly Gemini15Pro002: "gemini-1.5-pro-002";
     readonly Gemini15Flash002: "gemini-1.5-flash-002";
     readonly Gemini20Flash: "gemini-2.0-flash";
+    readonly Gemini25Flash: "gemini-2.5-flash";
+    readonly Gemini25FlashPreview0417: "gemini-2.5-flash-preview-04-17";
     readonly Gpt4Turbo: "gpt-4-turbo";
     readonly Gpt4TurboPreview: "gpt-4-turbo-preview";
     readonly Gpt35Turbo0125: "gpt-3.5-turbo-0125";
     readonly Gpt35Turbo: "gpt-3.5-turbo";
     readonly Gpt4O: "gpt-4o";
     readonly Gpt4OMini: "gpt-4o-mini";
+    readonly Gpt41: "gpt-4.1";
     readonly Gemma7BIt: "gemma-7b-it";
     readonly Llama38B8192: "llama3-8b-8192";
     readonly Llama370B8192: "llama3-70b-8192";
     readonly Llama3170BVersatile: "llama-3.1-70b-versatile";
     readonly Llama3370BVersatile: "llama-3.3-70b-versatile";
     readonly Llama318BInstant: "llama-3.1-8b-instant";
+    readonly MoonshotaiKimiK2Instruct: "moonshotai/kimi-k2-instruct";
     readonly AccountsFireworksModelsMixtral8X7BInstruct: "accounts/fireworks/models/mixtral-8x7b-instruct";
     readonly AccountsFireworksModelsLlamaV3P1405BInstruct: "accounts/fireworks/models/llama-v3p1-405b-instruct";
     readonly AccountsFireworksModelsLlamaV3P170BInstruct: "accounts/fireworks/models/llama-v3p1-70b-instruct";
     readonly AccountsFireworksModelsLlamaV3P18BInstruct: "accounts/fireworks/models/llama-v3p1-8b-instruct";
+    readonly Sonar: "sonar";
+    readonly SonarPro: "sonar-pro";
+    readonly Sambanova: "sambanova";
+    readonly DeepSeekR1DistillLlama70B: "DeepSeek-R1-Distill-Llama-70B";
+    readonly Llama4Maverick17B128EInstruct: "Llama-4-Maverick-17B-128E-Instruct";
+    readonly Qwen332B: "Qwen3-32B";
     readonly Ellm: "ellm";
     readonly CustomLanguageModel: "custom-language-model";
+    readonly HumeEvi3WebSearch: "hume-evi-3-web-search";
 };
package/api/resources/empathicVoice/types/LanguageModelType.js

@@ -12,30 +12,46 @@ exports.LanguageModelType = {
     Claude3Opus20240229: "claude-3-opus-20240229",
     Claude3Sonnet20240229: "claude-3-sonnet-20240229",
     Claude3Haiku20240307: "claude-3-haiku-20240307",
+    ClaudeSonnet420250514: "claude-sonnet-4-20250514",
     UsAnthropicClaude35Haiku20241022V10: "us.anthropic.claude-3-5-haiku-20241022-v1:0",
     UsAnthropicClaude35Sonnet20240620V10: "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
     UsAnthropicClaude3Haiku20240307V10: "us.anthropic.claude-3-haiku-20240307-v1:0",
+    GptOss120B: "gpt-oss-120b",
+    Qwen3235Ba22B: "qwen-3-235b-a22b",
+    Qwen3235Ba22BInstruct2507: "qwen-3-235b-a22b-instruct-2507",
+    Qwen3235Ba22BThinking2507: "qwen-3-235b-a22b-thinking-2507",
     Gemini15Pro: "gemini-1.5-pro",
     Gemini15Flash: "gemini-1.5-flash",
     Gemini15Pro002: "gemini-1.5-pro-002",
     Gemini15Flash002: "gemini-1.5-flash-002",
     Gemini20Flash: "gemini-2.0-flash",
+    Gemini25Flash: "gemini-2.5-flash",
+    Gemini25FlashPreview0417: "gemini-2.5-flash-preview-04-17",
     Gpt4Turbo: "gpt-4-turbo",
     Gpt4TurboPreview: "gpt-4-turbo-preview",
     Gpt35Turbo0125: "gpt-3.5-turbo-0125",
     Gpt35Turbo: "gpt-3.5-turbo",
     Gpt4O: "gpt-4o",
     Gpt4OMini: "gpt-4o-mini",
+    Gpt41: "gpt-4.1",
     Gemma7BIt: "gemma-7b-it",
     Llama38B8192: "llama3-8b-8192",
     Llama370B8192: "llama3-70b-8192",
     Llama3170BVersatile: "llama-3.1-70b-versatile",
     Llama3370BVersatile: "llama-3.3-70b-versatile",
     Llama318BInstant: "llama-3.1-8b-instant",
+    MoonshotaiKimiK2Instruct: "moonshotai/kimi-k2-instruct",
     AccountsFireworksModelsMixtral8X7BInstruct: "accounts/fireworks/models/mixtral-8x7b-instruct",
     AccountsFireworksModelsLlamaV3P1405BInstruct: "accounts/fireworks/models/llama-v3p1-405b-instruct",
     AccountsFireworksModelsLlamaV3P170BInstruct: "accounts/fireworks/models/llama-v3p1-70b-instruct",
     AccountsFireworksModelsLlamaV3P18BInstruct: "accounts/fireworks/models/llama-v3p1-8b-instruct",
+    Sonar: "sonar",
+    SonarPro: "sonar-pro",
+    Sambanova: "sambanova",
+    DeepSeekR1DistillLlama70B: "DeepSeek-R1-Distill-Llama-70B",
+    Llama4Maverick17B128EInstruct: "Llama-4-Maverick-17B-128E-Instruct",
+    Qwen332B: "Qwen3-32B",
     Ellm: "ellm",
     CustomLanguageModel: "custom-language-model",
+    HumeEvi3WebSearch: "hume-evi-3-web-search",
 };
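The widened union is mirrored one-for-one by the generated constant object, so the identifiers added in 0.12.2 can be referenced without magic strings. A minimal sketch, assuming the package's usual `Hume` type-namespace export (the variable names are illustrative):

```typescript
import { Hume } from "hume";

// The generated constant object mirrors the string-literal union, so the
// models added in 0.12.2 can be referenced by name rather than raw string.
const model: Hume.empathicVoice.LanguageModelType =
    Hume.empathicVoice.LanguageModelType.ClaudeSonnet420250514; // "claude-sonnet-4-20250514"

// Plain string literals still type-check against the widened union.
const searchModel: Hume.empathicVoice.LanguageModelType = "hume-evi-3-web-search";
```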
package/api/resources/empathicVoice/types/ModelProviderEnum.d.ts

@@ -1,7 +1,7 @@
 /**
  * This file was auto-generated by Fern from our API Definition.
  */
-export type ModelProviderEnum = "GROQ" | "OPEN_AI" | "FIREWORKS" | "ANTHROPIC" | "CUSTOM_LANGUAGE_MODEL" | "GOOGLE" | "HUME_AI" | "AMAZON_BEDROCK";
+export type ModelProviderEnum = "GROQ" | "OPEN_AI" | "FIREWORKS" | "ANTHROPIC" | "CUSTOM_LANGUAGE_MODEL" | "GOOGLE" | "HUME_AI" | "AMAZON_BEDROCK" | "PERPLEXITY" | "SAMBANOVA" | "CEREBRAS";
 export declare const ModelProviderEnum: {
     readonly Groq: "GROQ";
     readonly OpenAi: "OPEN_AI";
@@ -11,4 +11,7 @@ export declare const ModelProviderEnum: {
     readonly Google: "GOOGLE";
     readonly HumeAi: "HUME_AI";
     readonly AmazonBedrock: "AMAZON_BEDROCK";
+    readonly Perplexity: "PERPLEXITY";
+    readonly Sambanova: "SAMBANOVA";
+    readonly Cerebras: "CEREBRAS";
 };
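The new provider values pair with the model identifiers added above. A hedged sketch of selecting one when creating an EVI config; `createConfig` and the `languageModel` field follow the EVI configs API, but treat the exact payload shape as an assumption rather than a verified contract:

```typescript
import { HumeClient } from "hume";

async function createPerplexityConfig(): Promise<void> {
    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    // Both "PERPLEXITY" and "sonar-pro" are new in 0.12.2.
    const config = await client.empathicVoice.configs.createConfig({
        name: "perplexity-config", // illustrative name
        eviVersion: "3",
        languageModel: {
            modelProvider: "PERPLEXITY",
            modelResource: "sonar-pro",
        },
    });
    console.log("created config", config.id);
}
```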
package/api/resources/empathicVoice/types/ReturnChatEvent.d.ts (the deleted doc-comment lines were truncated by the diff viewer and are kept as extracted)

@@ -14,30 +14,25 @@ export interface ReturnChatEvent {
     timestamp: number;
     /**
      * The role of the entity which generated the Chat Event. There are four possible values:
-     *
      * - `USER`: The user, capable of sending user messages and interruptions.
-     *
      * - `AGENT`: The assistant, capable of sending agent messages.
-     *
      * - `SYSTEM`: The backend server, capable of transmitting errors.
-     *
      * - `TOOL`: The function calling mechanism.
      */
     role: Hume.empathicVoice.ReturnChatEventRole;
     /**
-     * Type of Chat Event. There are
-     *
-     * - `
-     *
-     * - `USER_MESSAGE`:
-     *
-     * - `
-     *
-     * - `
-     *
-     * - `
-     *
-     * - `FUNCTION_CALL_RESPONSE`: Contains the tool response.
+     * Type of Chat Event. There are eleven Chat Event types:
+     * - `SYSTEM_PROMPT`: The system prompt used to initialize the session.
+     * - `CHAT_START_MESSAGE`: Marks the beginning of the chat session.
+     * - `USER_RECORDING_START_MESSAGE`: Marks when the client began streaming audio and the start of audio processing.
+     * - `USER_MESSAGE`: A message sent by the user.
+     * - `USER_INTERRUPTION`: A user-initiated interruption while the assistant is speaking.
+     * - `AGENT_MESSAGE`: A response generated by the assistant.
+     * - `FUNCTION_CALL`: A record of a tool invocation by the assistant.
+     * - `FUNCTION_CALL_RESPONSE`: The result of a previously invoked function or tool.
+     * - `PAUSE_ONSET`: Marks when the client sent a `pause_assistant_message` to pause the assistant.
+     * - `RESUME_ONSET`: Marks when the client sent a `resume_assistant_message` to resume the assistant.
+     * - `CHAT_END_MESSAGE`: Indicates the end of the chat session.
      */
     type: Hume.empathicVoice.ReturnChatEventType;
     /** The text of the Chat Event. This field contains the message content for each event type listed in the `type` field. */
package/api/resources/empathicVoice/types/ReturnChatEventRole.d.ts

@@ -3,13 +3,9 @@
  */
 /**
  * The role of the entity which generated the Chat Event. There are four possible values:
- *
  * - `USER`: The user, capable of sending user messages and interruptions.
- *
  * - `AGENT`: The assistant, capable of sending agent messages.
- *
  * - `SYSTEM`: The backend server, capable of transmitting errors.
- *
  * - `TOOL`: The function calling mechanism.
  */
 export type ReturnChatEventRole = "USER" | "AGENT" | "SYSTEM" | "TOOL";
package/api/resources/empathicVoice/types/ReturnChatEventType.d.ts (deleted lines again kept as the viewer truncated them)

@@ -2,26 +2,30 @@
  * This file was auto-generated by Fern from our API Definition.
  */
 /**
- * Type of Chat Event. There are
- *
- * - `
- *
- * - `USER_MESSAGE`:
- *
- * - `
- *
- * - `
- *
- * - `
- *
- * - `FUNCTION_CALL_RESPONSE`: Contains the tool response.
+ * Type of Chat Event. There are eleven Chat Event types:
+ * - `SYSTEM_PROMPT`: The system prompt used to initialize the session.
+ * - `CHAT_START_MESSAGE`: Marks the beginning of the chat session.
+ * - `USER_RECORDING_START_MESSAGE`: Marks when the client began streaming audio and the start of audio processing.
+ * - `USER_MESSAGE`: A message sent by the user.
+ * - `USER_INTERRUPTION`: A user-initiated interruption while the assistant is speaking.
+ * - `AGENT_MESSAGE`: A response generated by the assistant.
+ * - `FUNCTION_CALL`: A record of a tool invocation by the assistant.
+ * - `FUNCTION_CALL_RESPONSE`: The result of a previously invoked function or tool.
+ * - `PAUSE_ONSET`: Marks when the client sent a `pause_assistant_message` to pause the assistant.
+ * - `RESUME_ONSET`: Marks when the client sent a `resume_assistant_message` to resume the assistant.
+ * - `CHAT_END_MESSAGE`: Indicates the end of the chat session.
  */
-export type ReturnChatEventType = "SYSTEM_PROMPT" | "
+export type ReturnChatEventType = "FUNCTION_CALL" | "FUNCTION_CALL_RESPONSE" | "CHAT_END_MESSAGE" | "AGENT_MESSAGE" | "SYSTEM_PROMPT" | "USER_RECORDING_START_MESSAGE" | "RESUME_ONSET" | "USER_INTERRUPTION" | "CHAT_START_MESSAGE" | "PAUSE_ONSET" | "USER_MESSAGE";
 export declare const ReturnChatEventType: {
-    readonly SystemPrompt: "SYSTEM_PROMPT";
-    readonly UserMessage: "USER_MESSAGE";
-    readonly UserInterruption: "USER_INTERRUPTION";
-    readonly AgentMessage: "AGENT_MESSAGE";
     readonly FunctionCall: "FUNCTION_CALL";
     readonly FunctionCallResponse: "FUNCTION_CALL_RESPONSE";
+    readonly ChatEndMessage: "CHAT_END_MESSAGE";
+    readonly AgentMessage: "AGENT_MESSAGE";
+    readonly SystemPrompt: "SYSTEM_PROMPT";
+    readonly UserRecordingStartMessage: "USER_RECORDING_START_MESSAGE";
+    readonly ResumeOnset: "RESUME_ONSET";
+    readonly UserInterruption: "USER_INTERRUPTION";
+    readonly ChatStartMessage: "CHAT_START_MESSAGE";
+    readonly PauseOnset: "PAUSE_ONSET";
+    readonly UserMessage: "USER_MESSAGE";
 };
package/api/resources/empathicVoice/types/ReturnChatEventType.js

@@ -5,10 +5,15 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ReturnChatEventType = void 0;
 exports.ReturnChatEventType = {
-    SystemPrompt: "SYSTEM_PROMPT",
-    UserMessage: "USER_MESSAGE",
-    UserInterruption: "USER_INTERRUPTION",
-    AgentMessage: "AGENT_MESSAGE",
     FunctionCall: "FUNCTION_CALL",
     FunctionCallResponse: "FUNCTION_CALL_RESPONSE",
+    ChatEndMessage: "CHAT_END_MESSAGE",
+    AgentMessage: "AGENT_MESSAGE",
+    SystemPrompt: "SYSTEM_PROMPT",
+    UserRecordingStartMessage: "USER_RECORDING_START_MESSAGE",
+    ResumeOnset: "RESUME_ONSET",
+    UserInterruption: "USER_INTERRUPTION",
+    ChatStartMessage: "CHAT_START_MESSAGE",
+    PauseOnset: "PAUSE_ONSET",
+    UserMessage: "USER_MESSAGE",
 };
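Because `ReturnChatEventType` is an exhaustive union, downstream code can branch on the event types surfaced in this release. A sketch using the chats client's `listChatEvents` pager (the method predates this release; the handling below is illustrative):

```typescript
import { Hume, HumeClient } from "hume";

async function summarizeChat(chatId: string): Promise<void> {
    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    // listChatEvents pages through ReturnChatEvent records; the switch
    // covers types newly exposed in 0.12.2, such as the pause/resume onsets.
    const events = await client.empathicVoice.chats.listChatEvents(chatId);
    for await (const event of events) {
        switch (event.type) {
            case Hume.empathicVoice.ReturnChatEventType.PauseOnset:
                console.log("assistant paused at", event.timestamp);
                break;
            case Hume.empathicVoice.ReturnChatEventType.ResumeOnset:
                console.log("assistant resumed at", event.timestamp);
                break;
            case Hume.empathicVoice.ReturnChatEventType.UserRecordingStartMessage:
                console.log("audio streaming began at", event.timestamp);
                break;
        }
    }
}
```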
package/api/resources/empathicVoice/types/ReturnConfig.d.ts (several deleted lines surfaced with no content in the diff viewer and are shown as bare removals)

@@ -18,35 +18,39 @@ export interface ReturnConfig {
     * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
      */
     version?: number;
-    /**
+    /**
+     * The supplemental language model associated with this Config.
+     *
+     * This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI.
+     */
+    languageModel?: Hume.empathicVoice.ReturnLanguageModel;
+    /** List of built-in tools associated with this Config. */
+    builtinTools?: (Hume.empathicVoice.ReturnBuiltinTool | undefined)[];
+    /**
+     * Specifies the EVI version to use. See our [EVI Version Guide](/docs/speech-to-speech-evi/configuration/evi-version) for differences between versions.
+     *
+     * **We're officially sunsetting EVI versions 1 and 2 on August 30, 2025**. To keep things running smoothly, be sure to [migrate to EVI 3](/docs/speech-to-speech-evi/configuration/evi-version#migrating-to-evi-3) before then.
+     */
     eviVersion?: string;
     timeouts?: Hume.empathicVoice.ReturnTimeoutSpecs;
-
+    eventMessages?: Hume.empathicVoice.ReturnEventMessageSpecs;
     /**
      * The eLLM setup associated with this Config.
      *
      * Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody.
      */
     ellmModel?: Hume.empathicVoice.ReturnEllmModel;
-    voice?: unknown;
-    prompt?: Hume.empathicVoice.ReturnPrompt;
-    /** List of user-defined tools associated with this Config. */
-    tools?: (Hume.empathicVoice.ReturnUserDefinedTool | undefined)[];
     /** Map of webhooks associated with this config. */
     webhooks?: (Hume.empathicVoice.ReturnWebhookSpec | undefined)[];
+    /** An optional description of the Config version. */
+    versionDescription?: string;
     /** Time at which the Config was created. Measured in seconds since the Unix epoch. */
     createdOn?: number;
     /** Time at which the Config was last modified. Measured in seconds since the Unix epoch. */
     modifiedOn?: number;
-
-
-
-
-
-    languageModel?: Hume.empathicVoice.ReturnLanguageModel;
-    /** List of built-in tools associated with this Config. */
-    builtinTools?: (Hume.empathicVoice.ReturnBuiltinTool | undefined)[];
-    eventMessages?: Hume.empathicVoice.ReturnEventMessageSpecs;
-    /** An optional description of the Config version. */
-    versionDescription?: string;
+    nudges?: Hume.empathicVoice.ReturnNudgeSpec;
+    voice?: unknown;
+    prompt?: Hume.empathicVoice.ReturnPrompt;
+    /** List of user-defined tools associated with this Config. */
+    tools?: (Hume.empathicVoice.ReturnUserDefinedTool | undefined)[];
 }
package/api/resources/empathicVoice/types/ReturnPrompt.d.ts

@@ -28,10 +28,10 @@ export interface ReturnPrompt {
     version: number;
     /** Versioning method for a Prompt. Either `FIXED` for using a fixed version number or `LATEST` for auto-updating to the latest version. */
     versionType: Hume.empathicVoice.ReturnPromptVersionType;
+    /** An optional description of the Prompt version. */
+    versionDescription?: string;
     /** Time at which the Prompt was created. Measured in seconds since the Unix epoch. */
     createdOn: number;
     /** Time at which the Prompt was last modified. Measured in seconds since the Unix epoch. */
     modifiedOn: number;
-    /** An optional description of the Prompt version. */
-    versionDescription?: string;
 }
package/api/resources/empathicVoice/types/UserMessage.d.ts

@@ -10,7 +10,14 @@ export interface UserMessage {
     customSessionId?: string;
     /** Indicates if this message was inserted into the conversation as text from a [User Input](/reference/empathic-voice-interface-evi/chat/chat#send.UserInput.text) message. */
     fromText: boolean;
-    /**
+    /**
+     * Indicates whether this `UserMessage` contains an interim (unfinalized) transcript.
+     *
+     * - `true`: the transcript is provisional; words may be repeated or refined in subsequent `UserMessage` responses as additional audio is processed.
+     * - `false`: the transcript is final and complete.
+     *
+     * Interim transcripts are only sent when the [`verbose_transcription`](/reference/empathic-voice-interface-evi/chat/chat#request.query.verbose_transcription) query parameter is set to `true` in the initial handshake.
+     */
     interim: boolean;
     /** Transcript of the message. */
     message: Hume.empathicVoice.ChatMessage;
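The `interim` flag only carries information when verbose transcription was requested during the WebSocket handshake. A sketch of consuming both transcript states; the `verboseTranscription` connect-option name is an assumption (the documented wire-level query parameter is `verbose_transcription`):

```typescript
import { HumeClient } from "hume";

const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

// Assumption: the connect options map the verbose_transcription query
// parameter to a camelCase `verboseTranscription` field.
const socket = client.empathicVoice.chat.connect({
    configId: "<your-evi-config-id>",
    verboseTranscription: true,
});

socket.on("message", (message) => {
    if (message.type !== "user_message") return;
    if (message.interim) {
        // Provisional transcript: words may repeat or be refined in later
        // user_message events as more audio is processed.
        console.log("interim:", message.message.content);
    } else {
        // Final, complete transcript for this stretch of speech.
        console.log("final:", message.message.content);
    }
});
```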
package/api/resources/empathicVoice/types/index.d.ts

@@ -36,6 +36,7 @@ export * from "./JsonMessage";
 export * from "./HttpValidationError";
 export * from "./LanguageModelType";
 export * from "./ModelProviderEnum";
+export * from "./SupportsToolUse";
 export * from "./ValidationErrorLocItem";
 export * from "./ValidationError";
 export * from "./WebhookEventBase";
package/api/resources/empathicVoice/types/index.js

@@ -52,6 +52,7 @@ __exportStar(require("./JsonMessage"), exports);
 __exportStar(require("./HttpValidationError"), exports);
 __exportStar(require("./LanguageModelType"), exports);
 __exportStar(require("./ModelProviderEnum"), exports);
+__exportStar(require("./SupportsToolUse"), exports);
 __exportStar(require("./ValidationErrorLocItem"), exports);
 __exportStar(require("./ValidationError"), exports);
 __exportStar(require("./WebhookEventBase"), exports);
package/api/resources/expressionMeasurement/resources/batch/client/Client.d.ts (deleted fragments kept as the viewer truncated them)

@@ -5,8 +5,6 @@ import * as environments from "../../../../../../environments";
 import * as core from "../../../../../../core";
 import * as Hume from "../../../../../index";
 import * as stream from "stream";
-import * as fs from "fs";
-import { Blob } from "buffer";
 export declare namespace Batch {
     interface Options {
         environment?: core.Supplier<environments.HumeEnvironment | string>;
@@ -86,14 +84,14 @@ export declare class Batch {
     /**
      * Start a new batch inference job.
      *
-     * @param {
+     * @param {core.FileLike[]} file
      * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request
      * @param {Batch.RequestOptions} requestOptions - Request-specific configuration.
      *
     * @example
      * await client.expressionMeasurement.batch.startInferenceJobFromLocalFile([fs.createReadStream("/path/to/your/file")], {})
      */
-    startInferenceJobFromLocalFile(file:
+    startInferenceJobFromLocalFile(file: core.FileLike[], request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, requestOptions?: Batch.RequestOptions): core.HttpResponsePromise<Hume.expressionMeasurement.batch.JobId>;
     private __startInferenceJobFromLocalFile;
     protected _getCustomAuthorizationHeaders(): Promise<{
         "X-Hume-Api-Key": string | undefined;
package/api/resources/expressionMeasurement/resources/batch/client/Client.js

@@ -384,7 +384,7 @@ class Batch {
     /**
      * Start a new batch inference job.
      *
-     * @param {
+     * @param {core.FileLike[]} file
      * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request
      * @param {Batch.RequestOptions} requestOptions - Request-specific configuration.
      *
package/api/resources/tts/types/PostedUtterance.d.ts

@@ -11,7 +11,7 @@ export interface PostedUtterance {
     * - **Voice not specified**: the description will serve as a voice prompt for generating a voice. See our [prompting guide](/docs/text-to-speech-tts/prompting) for design tips.
      */
     description?: string;
-    /** Speed multiplier for the synthesized speech. */
+    /** Speed multiplier for the synthesized speech. Extreme values below 0.75 and above 1.5 may sometimes cause instability to the generated output. */
     speed?: number;
     /** The input text to be synthesized into speech. */
     text: string;
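A short sketch of honoring the newly documented stable range for `speed`, assuming the TTS client's `synthesizeJson` method and a `generations` array on its response:

```typescript
import { HumeClient } from "hume";

async function speak(): Promise<void> {
    const client = new HumeClient({ apiKey: process.env.HUME_API_KEY! });

    // Per the updated doc comment, values below 0.75 or above 1.5 may
    // destabilize the generated audio, so clamp to the safe range.
    const speed = Math.min(1.5, Math.max(0.75, 1.25));

    const result = await client.tts.synthesizeJson({
        utterances: [{ text: "Welcome back! Let's pick up where we left off.", speed }],
    });
    console.log(`received ${result.generations.length} generation(s)`);
}
```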
package/api/resources/tts/types/SnippetAudioChunk.d.ts

@@ -2,20 +2,4 @@
  * This file was auto-generated by Fern from our API Definition.
  */
 export interface SnippetAudioChunk {
-    /** The generated audio output chunk in the requested format. */
-    audio: string;
-    /** The index of the audio chunk in the snippet. */
-    chunkIndex: number;
-    /** The generation ID of the parent snippet that this chunk corresponds to. */
-    generationId: string;
-    /** Whether or not this is the last chunk streamed back from the decoder for one input snippet. */
-    isLastChunk: boolean;
-    /** The ID of the parent snippet that this chunk corresponds to. */
-    snippetId: string;
-    /** The text of the parent snippet that this chunk corresponds to. */
-    text: string;
-    /** The transcribed text of the generated audio of the parent snippet that this chunk corresponds to. It is only present if `instant_mode` is set to `false`. */
-    transcribedText?: string;
-    /** The index of the utterance in the request that the parent snippet of this chunk corresponds to. */
-    utteranceIndex?: number;
 }
package/core/fetcher/BinaryResponse.d.ts (new file)

@@ -0,0 +1,17 @@
+import { ResponseWithBody } from "./ResponseWithBody.js";
+export interface BinaryResponse {
+    /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bodyUsed) */
+    bodyUsed: boolean;
+    /**
+     * Returns a ReadableStream of the response body.
+     * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/body)
+     */
+    stream: () => ReadableStream<Uint8Array>;
+    /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/arrayBuffer) */
+    arrayBuffer: () => Promise<ArrayBuffer>;
+    /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/blob) */
+    blob: () => Promise<Blob>;
+    /** [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bytes) */
+    bytes(): Promise<Uint8Array>;
+}
+export declare function getBinaryResponse(response: ResponseWithBody): BinaryResponse;
package/core/fetcher/BinaryResponse.js (new file)

@@ -0,0 +1,14 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getBinaryResponse = getBinaryResponse;
+function getBinaryResponse(response) {
+    return {
+        get bodyUsed() {
+            return response.bodyUsed;
+        },
+        stream: () => response.body,
+        arrayBuffer: response.arrayBuffer.bind(response),
+        blob: response.blob.bind(response),
+        bytes: response.bytes.bind(response),
+    };
+}
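`getBinaryResponse` is a thin wrapper that defers body consumption to the caller and exposes each `Response`-style accessor. A sketch of draining it once, built from a plain `fetch` `Response` on the assumption that it structurally satisfies `ResponseWithBody` (whose definition is not shown in this diff, hence the cast; the import path is likewise an assumption based on the file layout above):

```typescript
// Assumption: internal module path inferred from the file list above.
import { getBinaryResponse } from "hume/core/fetcher";

async function download(url: string): Promise<Uint8Array> {
    const response = await fetch(url);

    // A fetch Response carries bodyUsed/body/arrayBuffer/blob/bytes, which
    // is what ResponseWithBody appears to require; the cast hedges the fact
    // that its exact definition is not part of this diff.
    const binary = getBinaryResponse(response as any);

    console.log(binary.bodyUsed); // false until a consumer runs
    // Drains the body exactly once; requires a runtime whose Response
    // implements bytes() (recent Node.js or browsers).
    return binary.bytes();
}
```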
package/core/fetcher/Fetcher.d.ts

@@ -14,7 +14,7 @@ export declare namespace Fetcher {
         withCredentials?: boolean;
         abortSignal?: AbortSignal;
         requestType?: "json" | "file" | "bytes";
-        responseType?: "json" | "blob" | "sse" | "streaming" | "text" | "arrayBuffer";
+        responseType?: "json" | "blob" | "sse" | "streaming" | "text" | "arrayBuffer" | "binary-response";
         duplex?: "half";
     }
     type Error = FailedStatusCodeError | NonJsonError | TimeoutError | UnknownError;
package/core/fetcher/getFetchFn.js (deleted fragments kept as the viewer truncated them)

@@ -43,18 +43,18 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.getFetchFn = getFetchFn;
-const
+const index_1 = require("../runtime/index");
 /**
  * Returns a fetch function based on the runtime
  */
 function getFetchFn() {
     return __awaiter(this, void 0, void 0, function* () {
         // In Node.js 18+ environments, use native fetch
-        if (
+        if (index_1.RUNTIME.type === "node" && index_1.RUNTIME.parsedVersion != null && index_1.RUNTIME.parsedVersion >= 18) {
             return fetch;
         }
         // In Node.js 18 or lower environments, the SDK always uses`node-fetch`.
-        if (
+        if (index_1.RUNTIME.type === "node") {
             return (yield Promise.resolve().then(() => __importStar(require("node-fetch")))).default;
         }
         // Otherwise the SDK uses global fetch if available,