@ai-sdk/hume 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,7 @@
1
+ # @ai-sdk/hume
2
+
3
+ ## 0.0.1
4
+
5
+ ### Patch Changes
6
+
7
+ - 69e8344: feat(providers/hume): add speech
package/LICENSE ADDED
@@ -0,0 +1,13 @@
1
+ Copyright 2023 Vercel, Inc.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
package/README.md ADDED
@@ -0,0 +1,36 @@
1
+ # AI SDK - Hume Provider
2
+
3
+ The **[Hume provider](https://sdk.vercel.ai/providers/ai-sdk-providers/hume)** for the [AI SDK](https://sdk.vercel.ai/docs)
4
+ contains support for the Hume API.
5
+
6
+ ## Setup
7
+
8
+ The Hume provider is available in the `@ai-sdk/hume` module. You can install it with
9
+
10
+ ```bash
11
+ npm i @ai-sdk/hume
12
+ ```
13
+
14
+ ## Provider Instance
15
+
16
+ You can import the default provider instance `hume` from `@ai-sdk/hume`:
17
+
18
+ ```ts
19
+ import { hume } from '@ai-sdk/hume';
20
+ ```
21
+
22
+ ## Example
23
+
24
+ ```ts
25
+ import { hume } from '@ai-sdk/hume';
26
+ import { experimental_generateSpeech as generateSpeech } from 'ai';
27
+
28
+ const result = await generateSpeech({
29
+ model: hume.speech(),
30
+ text: 'Hello, world!',
31
+ });
32
+ ```
33
+
34
+ ## Documentation
35
+
36
+ Please check out the **[Hume provider documentation](https://sdk.vercel.ai/providers/ai-sdk-providers/hume)** for more information.
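Beyond the basic call above, the call-options schema in the `dist` output below accepts Hume-specific settings under `providerOptions.hume`, such as a `context` that reuses an earlier generation. A minimal sketch, assuming the `experimental_generateSpeech` helper from the `ai` package as in the README example; the generation ID is a placeholder:

```ts
import { hume } from '@ai-sdk/hume';
import { experimental_generateSpeech as generateSpeech } from 'ai';

// Continue from the acoustic context of a previous generation.
// 'previous-generation-id' is a placeholder, not a real ID.
const result = await generateSpeech({
  model: hume.speech(),
  text: 'Hello again, world!',
  providerOptions: {
    hume: {
      context: { generationId: 'previous-generation-id' },
    },
  },
});
```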
package/dist/index.d.mts ADDED
@@ -0,0 +1,63 @@
1
+ import { SpeechModelV1, ProviderV1 } from '@ai-sdk/provider';
2
+ import { FetchFunction } from '@ai-sdk/provider-utils';
3
+
4
+ type HumeConfig = {
5
+ provider: string;
6
+ url: (options: {
7
+ modelId: string;
8
+ path: string;
9
+ }) => string;
10
+ headers: () => Record<string, string | undefined>;
11
+ fetch?: FetchFunction;
12
+ generateId?: () => string;
13
+ };
14
+
15
+ interface HumeSpeechModelConfig extends HumeConfig {
16
+ _internal?: {
17
+ currentDate?: () => Date;
18
+ };
19
+ }
20
+ declare class HumeSpeechModel implements SpeechModelV1 {
21
+ readonly modelId: '';
22
+ private readonly config;
23
+ readonly specificationVersion = "v1";
24
+ get provider(): string;
25
+ constructor(modelId: '', config: HumeSpeechModelConfig);
26
+ private getArgs;
27
+ doGenerate(options: Parameters<SpeechModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV1['doGenerate']>>>;
28
+ }
29
+
30
+ interface HumeProvider extends Pick<ProviderV1, 'speechModel'> {
31
+ (settings?: {}): {
32
+ speech: HumeSpeechModel;
33
+ };
34
+ /**
35
+ Creates a model for speech synthesis.
36
+ */
37
+ speech(): SpeechModelV1;
38
+ }
39
+ interface HumeProviderSettings {
40
+ /**
41
+ API key for authenticating requests.
42
+ */
43
+ apiKey?: string;
44
+ /**
45
+ Custom headers to include in the requests.
46
+ */
47
+ headers?: Record<string, string>;
48
+ /**
49
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
50
+ or to provide a custom fetch implementation for e.g. testing.
51
+ */
52
+ fetch?: FetchFunction;
53
+ }
54
+ /**
55
+ Create a Hume provider instance.
56
+ */
57
+ declare function createHume(options?: HumeProviderSettings): HumeProvider;
58
+ /**
59
+ Default Hume provider instance.
60
+ */
61
+ declare const hume: HumeProvider;
62
+
63
+ export { type HumeProvider, type HumeProviderSettings, createHume, hume };
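The `HumeProviderSettings` interface above also allows configuring the provider explicitly instead of relying on the `HUME_API_KEY` environment variable. A minimal sketch; the environment variable name and extra header are placeholders:

```ts
import { createHume } from '@ai-sdk/hume';

// Explicit configuration instead of the HUME_API_KEY env var.
const humeProvider = createHume({
  apiKey: process.env.MY_HUME_KEY, // placeholder variable name
  headers: { 'X-Request-Source': 'docs-example' }, // placeholder header
});

// Returns a HumeSpeechModel instance, same as the default provider's speech().
const model = humeProvider.speech();
```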
package/dist/index.d.ts ADDED
@@ -0,0 +1,63 @@
1
+ import { SpeechModelV1, ProviderV1 } from '@ai-sdk/provider';
2
+ import { FetchFunction } from '@ai-sdk/provider-utils';
3
+
4
+ type HumeConfig = {
5
+ provider: string;
6
+ url: (options: {
7
+ modelId: string;
8
+ path: string;
9
+ }) => string;
10
+ headers: () => Record<string, string | undefined>;
11
+ fetch?: FetchFunction;
12
+ generateId?: () => string;
13
+ };
14
+
15
+ interface HumeSpeechModelConfig extends HumeConfig {
16
+ _internal?: {
17
+ currentDate?: () => Date;
18
+ };
19
+ }
20
+ declare class HumeSpeechModel implements SpeechModelV1 {
21
+ readonly modelId: '';
22
+ private readonly config;
23
+ readonly specificationVersion = "v1";
24
+ get provider(): string;
25
+ constructor(modelId: '', config: HumeSpeechModelConfig);
26
+ private getArgs;
27
+ doGenerate(options: Parameters<SpeechModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV1['doGenerate']>>>;
28
+ }
29
+
30
+ interface HumeProvider extends Pick<ProviderV1, 'speechModel'> {
31
+ (settings?: {}): {
32
+ speech: HumeSpeechModel;
33
+ };
34
+ /**
35
+ Creates a model for speech synthesis.
36
+ */
37
+ speech(): SpeechModelV1;
38
+ }
39
+ interface HumeProviderSettings {
40
+ /**
41
+ API key for authenticating requests.
42
+ */
43
+ apiKey?: string;
44
+ /**
45
+ Custom headers to include in the requests.
46
+ */
47
+ headers?: Record<string, string>;
48
+ /**
49
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
50
+ or to provide a custom fetch implementation for e.g. testing.
51
+ */
52
+ fetch?: FetchFunction;
53
+ }
54
+ /**
55
+ Create a Hume provider instance.
56
+ */
57
+ declare function createHume(options?: HumeProviderSettings): HumeProvider;
58
+ /**
59
+ Default Hume provider instance.
60
+ */
61
+ declare const hume: HumeProvider;
62
+
63
+ export { type HumeProvider, type HumeProviderSettings, createHume, hume };
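As the doc comment above notes, the `fetch` setting can act as middleware for intercepting requests (for example, for logging or stubbing in tests). A sketch assuming the standard global `fetch` signature:

```ts
import { createHume } from '@ai-sdk/hume';

// Wrap fetch to log outgoing TTS requests, forwarding everything unchanged.
const humeWithLogging = createHume({
  fetch: async (input, init) => {
    console.log('Hume request:', input);
    return fetch(input, init);
  },
});
```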
package/dist/index.js ADDED
@@ -0,0 +1,260 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var src_exports = {};
22
+ __export(src_exports, {
23
+ createHume: () => createHume,
24
+ hume: () => hume
25
+ });
26
+ module.exports = __toCommonJS(src_exports);
27
+
28
+ // src/hume-provider.ts
29
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
30
+
31
+ // src/hume-speech-model.ts
32
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
33
+ var import_zod2 = require("zod");
34
+
35
+ // src/hume-error.ts
36
+ var import_zod = require("zod");
37
+ var import_provider_utils = require("@ai-sdk/provider-utils");
38
+ var humeErrorDataSchema = import_zod.z.object({
39
+ error: import_zod.z.object({
40
+ message: import_zod.z.string(),
41
+ code: import_zod.z.number()
42
+ })
43
+ });
44
+ var humeFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
45
+ errorSchema: humeErrorDataSchema,
46
+ errorToMessage: (data) => data.error.message
47
+ });
48
+
49
+ // src/hume-speech-model.ts
50
+ var humeSpeechCallOptionsSchema = import_zod2.z.object({
51
+ /**
52
+ * Context for the speech synthesis request.
53
+ * Can be either a generationId for retrieving a previous generation,
54
+ * or a list of utterances to synthesize.
55
+ */
56
+ context: import_zod2.z.object({
57
+ /**
58
+ * ID of a previously generated speech synthesis to retrieve.
59
+ */
60
+ generationId: import_zod2.z.string()
61
+ }).or(
62
+ import_zod2.z.object({
63
+ /**
64
+ * List of utterances to synthesize into speech.
65
+ */
66
+ utterances: import_zod2.z.array(
67
+ import_zod2.z.object({
68
+ /**
69
+ * The text content to convert to speech.
70
+ */
71
+ text: import_zod2.z.string(),
72
+ /**
73
+ * Optional description or instructions for how the text should be spoken.
74
+ */
75
+ description: import_zod2.z.string().optional(),
76
+ /**
77
+ * Optional speech rate multiplier.
78
+ */
79
+ speed: import_zod2.z.number().optional(),
80
+ /**
81
+ * Optional duration of silence to add after the utterance in seconds.
82
+ */
83
+ trailingSilence: import_zod2.z.number().optional(),
84
+ /**
85
+ * Voice configuration for the utterance.
86
+ * Can be specified by ID or name.
87
+ */
88
+ voice: import_zod2.z.object({
89
+ /**
90
+ * ID of the voice to use.
91
+ */
92
+ id: import_zod2.z.string(),
93
+ /**
94
+ * Provider of the voice, either Hume's built-in voices or a custom voice.
95
+ */
96
+ provider: import_zod2.z.enum(["HUME_AI", "CUSTOM_VOICE"]).optional()
97
+ }).or(
98
+ import_zod2.z.object({
99
+ /**
100
+ * Name of the voice to use.
101
+ */
102
+ name: import_zod2.z.string(),
103
+ /**
104
+ * Provider of the voice, either Hume's built-in voices or a custom voice.
105
+ */
106
+ provider: import_zod2.z.enum(["HUME_AI", "CUSTOM_VOICE"]).optional()
107
+ })
108
+ ).optional()
109
+ })
110
+ )
111
+ })
112
+ ).nullish()
113
+ });
114
+ var HumeSpeechModel = class {
115
+ constructor(modelId, config) {
116
+ this.modelId = modelId;
117
+ this.config = config;
118
+ this.specificationVersion = "v1";
119
+ }
120
+ get provider() {
121
+ return this.config.provider;
122
+ }
123
+ getArgs({
124
+ text,
125
+ voice = "d8ab67c6-953d-4bd8-9370-8fa53a0f1453",
126
+ outputFormat = "mp3",
127
+ speed,
128
+ instructions,
129
+ providerOptions
130
+ }) {
131
+ const warnings = [];
132
+ const humeOptions = (0, import_provider_utils2.parseProviderOptions)({
133
+ provider: "hume",
134
+ providerOptions,
135
+ schema: humeSpeechCallOptionsSchema
136
+ });
137
+ const requestBody = {
138
+ utterances: [
139
+ {
140
+ text,
141
+ speed,
142
+ description: instructions,
143
+ voice: {
144
+ id: voice,
145
+ provider: "HUME_AI"
146
+ }
147
+ }
148
+ ],
149
+ format: { type: "mp3" }
150
+ };
151
+ if (outputFormat) {
152
+ if (["mp3", "pcm", "wav"].includes(outputFormat)) {
153
+ requestBody.format = { type: outputFormat };
154
+ } else {
155
+ warnings.push({
156
+ type: "unsupported-setting",
157
+ setting: "outputFormat",
158
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
159
+ });
160
+ }
161
+ }
162
+ if (humeOptions) {
163
+ const speechModelOptions = {};
164
+ if (humeOptions.context) {
165
+ if ("generationId" in humeOptions.context) {
166
+ speechModelOptions.context = {
167
+ generation_id: humeOptions.context.generationId
168
+ };
169
+ } else {
170
+ speechModelOptions.context = {
171
+ utterances: humeOptions.context.utterances.map((utterance) => ({
172
+ text: utterance.text,
173
+ description: utterance.description,
174
+ speed: utterance.speed,
175
+ trailing_silence: utterance.trailingSilence,
176
+ voice: utterance.voice
177
+ }))
178
+ };
179
+ }
180
+ }
181
+ for (const key in speechModelOptions) {
182
+ const value = speechModelOptions[key];
183
+ if (value !== void 0) {
184
+ requestBody[key] = value;
185
+ }
186
+ }
187
+ }
188
+ return {
189
+ requestBody,
190
+ warnings
191
+ };
192
+ }
193
+ async doGenerate(options) {
194
+ var _a, _b, _c;
195
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
196
+ const { requestBody, warnings } = this.getArgs(options);
197
+ const {
198
+ value: audio,
199
+ responseHeaders,
200
+ rawValue: rawResponse
201
+ } = await (0, import_provider_utils2.postJsonToApi)({
202
+ url: this.config.url({
203
+ path: "/v0/tts/file",
204
+ modelId: this.modelId
205
+ }),
206
+ headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
207
+ body: requestBody,
208
+ failedResponseHandler: humeFailedResponseHandler,
209
+ successfulResponseHandler: (0, import_provider_utils2.createBinaryResponseHandler)(),
210
+ abortSignal: options.abortSignal,
211
+ fetch: this.config.fetch
212
+ });
213
+ return {
214
+ audio,
215
+ warnings,
216
+ request: {
217
+ body: JSON.stringify(requestBody)
218
+ },
219
+ response: {
220
+ timestamp: currentDate,
221
+ modelId: this.modelId,
222
+ headers: responseHeaders,
223
+ body: rawResponse
224
+ }
225
+ };
226
+ }
227
+ };
228
+
229
+ // src/hume-provider.ts
230
+ function createHume(options = {}) {
231
+ const getHeaders = () => ({
232
+ "X-Hume-Api-Key": (0, import_provider_utils3.loadApiKey)({
233
+ apiKey: options.apiKey,
234
+ environmentVariableName: "HUME_API_KEY",
235
+ description: "Hume"
236
+ }),
237
+ ...options.headers
238
+ });
239
+ const createSpeechModel = () => new HumeSpeechModel("", {
240
+ provider: `hume.speech`,
241
+ url: ({ path }) => `https://api.hume.ai${path}`,
242
+ headers: getHeaders,
243
+ fetch: options.fetch
244
+ });
245
+ const provider = function() {
246
+ return {
247
+ speech: createSpeechModel()
248
+ };
249
+ };
250
+ provider.speech = createSpeechModel;
251
+ provider.speechModel = createSpeechModel;
252
+ return provider;
253
+ }
254
+ var hume = createHume();
255
+ // Annotate the CommonJS export names for ESM import in node:
256
+ 0 && (module.exports = {
257
+ createHume,
258
+ hume
259
+ });
260
+ //# sourceMappingURL=index.js.map
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts","../src/hume-provider.ts","../src/hume-speech-model.ts","../src/hume-error.ts"],"sourcesContent":["export { createHume, hume } from './hume-provider';\nexport type { HumeProvider, HumeProviderSettings } from './hume-provider';\n","import { SpeechModelV1, ProviderV1 } from '@ai-sdk/provider';\nimport { FetchFunction, loadApiKey } from '@ai-sdk/provider-utils';\nimport { HumeSpeechModel } from './hume-speech-model';\n\nexport interface HumeProvider extends Pick<ProviderV1, 'speechModel'> {\n (settings?: {}): {\n speech: HumeSpeechModel;\n };\n\n /**\nCreates a model for speech synthesis.\n */\n speech(): SpeechModelV1;\n}\n\nexport interface HumeProviderSettings {\n /**\nAPI key for authenticating requests.\n */\n apiKey?: string;\n\n /**\nCustom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n}\n\n/**\nCreate an Hume provider instance.\n */\nexport function createHume(options: HumeProviderSettings = {}): HumeProvider {\n const getHeaders = () => ({\n 'X-Hume-Api-Key': loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'HUME_API_KEY',\n description: 'Hume',\n }),\n ...options.headers,\n });\n\n const createSpeechModel = () =>\n new HumeSpeechModel('', {\n provider: `hume.speech`,\n url: ({ path }) => `https://api.hume.ai${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const provider = function () {\n return {\n speech: createSpeechModel(),\n };\n };\n\n provider.speech = createSpeechModel;\n provider.speechModel = createSpeechModel;\n\n return provider as HumeProvider;\n}\n\n/**\nDefault Hume provider instance.\n */\nexport const hume = createHume();\n","import { SpeechModelV1, SpeechModelV1CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createBinaryResponseHandler,\n parseProviderOptions,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\nimport { HumeConfig } from './hume-config';\nimport { humeFailedResponseHandler } from './hume-error';\nimport { HumeSpeechAPITypes } from './hume-api-types';\n\n// https://dev.hume.ai/reference/text-to-speech-tts/synthesize-file\nconst humeSpeechCallOptionsSchema = z.object({\n /**\n * Context for the speech synthesis request.\n * Can be either a generationId for retrieving a previous generation,\n * or a list of utterances to synthesize.\n */\n context: z\n .object({\n /**\n * ID of a previously generated speech synthesis to retrieve.\n */\n generationId: z.string(),\n })\n .or(\n z.object({\n /**\n * List of utterances to synthesize into speech.\n */\n utterances: z.array(\n z.object({\n /**\n * The text content to convert to speech.\n */\n text: z.string(),\n /**\n * Optional description or instructions for how the text should be spoken.\n */\n description: z.string().optional(),\n /**\n * Optional speech rate multiplier.\n */\n speed: z.number().optional(),\n /**\n * Optional duration of silence to add after the utterance in seconds.\n */\n trailingSilence: z.number().optional(),\n /**\n * Voice configuration for the utterance.\n * Can be specified by ID or name.\n */\n voice: z\n .object({\n /**\n * ID of the voice to use.\n */\n id: z.string(),\n /**\n * Provider of the voice, either Hume's built-in voices or a custom voice.\n */\n provider: z.enum(['HUME_AI', 'CUSTOM_VOICE']).optional(),\n })\n .or(\n z.object({\n 
/**\n * Name of the voice to use.\n */\n name: z.string(),\n /**\n * Provider of the voice, either Hume's built-in voices or a custom voice.\n */\n provider: z.enum(['HUME_AI', 'CUSTOM_VOICE']).optional(),\n }),\n )\n .optional(),\n }),\n ),\n }),\n )\n .nullish(),\n});\n\nexport type HumeSpeechCallOptions = z.infer<typeof humeSpeechCallOptionsSchema>;\n\ninterface HumeSpeechModelConfig extends HumeConfig {\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class HumeSpeechModel implements SpeechModelV1 {\n readonly specificationVersion = 'v1';\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: '',\n private readonly config: HumeSpeechModelConfig,\n ) {}\n\n private getArgs({\n text,\n voice = 'd8ab67c6-953d-4bd8-9370-8fa53a0f1453',\n outputFormat = 'mp3',\n speed,\n instructions,\n providerOptions,\n }: Parameters<SpeechModelV1['doGenerate']>[0]) {\n const warnings: SpeechModelV1CallWarning[] = [];\n\n // Parse provider options\n const humeOptions = parseProviderOptions({\n provider: 'hume',\n providerOptions,\n schema: humeSpeechCallOptionsSchema,\n });\n\n // Create request body\n const requestBody: HumeSpeechAPITypes = {\n utterances: [\n {\n text,\n speed,\n description: instructions,\n voice: {\n id: voice,\n provider: 'HUME_AI',\n },\n },\n ],\n format: { type: 'mp3' },\n };\n\n if (outputFormat) {\n if (['mp3', 'pcm', 'wav'].includes(outputFormat)) {\n requestBody.format = { type: outputFormat as 'mp3' | 'pcm' | 'wav' };\n } else {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'outputFormat',\n details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`,\n });\n }\n }\n\n // Add provider-specific options\n if (humeOptions) {\n const speechModelOptions: Omit<\n HumeSpeechAPITypes,\n 'utterances' | 'format'\n > = {};\n\n if (humeOptions.context) {\n if ('generationId' in humeOptions.context) {\n speechModelOptions.context = {\n generation_id: humeOptions.context.generationId,\n };\n } else {\n speechModelOptions.context = {\n utterances: humeOptions.context.utterances.map(utterance => ({\n text: utterance.text,\n description: utterance.description,\n speed: utterance.speed,\n trailing_silence: utterance.trailingSilence,\n voice: utterance.voice,\n })),\n };\n }\n }\n\n for (const key in speechModelOptions) {\n const value =\n speechModelOptions[\n key as keyof Omit<HumeSpeechAPITypes, 'utterances' | 'format'>\n ];\n if (value !== undefined) {\n (requestBody as Record<string, unknown>)[key] = value;\n }\n }\n }\n\n return {\n requestBody,\n warnings,\n };\n }\n\n async doGenerate(\n options: Parameters<SpeechModelV1['doGenerate']>[0],\n ): Promise<Awaited<ReturnType<SpeechModelV1['doGenerate']>>> {\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n const { requestBody, warnings } = this.getArgs(options);\n\n const {\n value: audio,\n responseHeaders,\n rawValue: rawResponse,\n } = await postJsonToApi({\n url: this.config.url({\n path: '/v0/tts/file',\n modelId: this.modelId,\n }),\n headers: combineHeaders(this.config.headers(), options.headers),\n body: requestBody,\n failedResponseHandler: humeFailedResponseHandler,\n successfulResponseHandler: createBinaryResponseHandler(),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n audio,\n warnings,\n request: {\n body: JSON.stringify(requestBody),\n },\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n body: rawResponse,\n },\n };\n }\n}\n","import { z } from 'zod';\nimport { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';\n\nexport const humeErrorDataSchema = z.object({\n error: z.object({\n message: z.string(),\n code: z.number(),\n }),\n});\n\nexport type HumeErrorData = z.infer<typeof humeErrorDataSchema>;\n\nexport const humeFailedResponseHandler = createJsonErrorResponseHandler({\n errorSchema: humeErrorDataSchema,\n errorToMessage: data => data.error.message,\n});\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACCA,IAAAA,yBAA0C;;;ACA1C,IAAAC,yBAKO;AACP,IAAAC,cAAkB;;;ACPlB,iBAAkB;AAClB,4BAA+C;AAExC,IAAM,sBAAsB,aAAE,OAAO;AAAA,EAC1C,OAAO,aAAE,OAAO;AAAA,IACd,SAAS,aAAE,OAAO;AAAA,IAClB,MAAM,aAAE,OAAO;AAAA,EACjB,CAAC;AACH,CAAC;AAIM,IAAM,gCAA4B,sDAA+B;AAAA,EACtE,aAAa;AAAA,EACb,gBAAgB,UAAQ,KAAK,MAAM;AACrC,CAAC;;;ADFD,IAAM,8BAA8B,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM3C,SAAS,cACN,OAAO;AAAA;AAAA;AAAA;AAAA,IAIN,cAAc,cAAE,OAAO;AAAA,EACzB,CAAC,EACA;AAAA,IACC,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA,MAIP,YAAY,cAAE;AAAA,QACZ,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA,UAIP,MAAM,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA,UAIf,aAAa,cAAE,OAAO,EAAE,SAAS;AAAA;AAAA;AAAA;AAAA,UAIjC,OAAO,cAAE,OAAO,EAAE,SAAS;AAAA;AAAA;AAAA;AAAA,UAI3B,iBAAiB,cAAE,OAAO,EAAE,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA,UAKrC,OAAO,cACJ,OAAO;AAAA;AAAA;AAAA;AAAA,YAIN,IAAI,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA,YAIb,UAAU,cAAE,KAAK,CAAC,WAAW,cAAc,CAAC,EAAE,SAAS;AAAA,UACzD,CAAC,EACA;AAAA,YACC,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA,cAIP,MAAM,cAAE,OAAO;AAAA;AAAA;AAAA;AAAA,cAIf,UAAU,cAAE,KAAK,CAAC,WAAW,cAAc,CAAC,EAAE,SAAS;AAAA,YACzD,CAAC;AAAA,UACH,EACC,SAAS;AAAA,QACd,CAAC;AAAA,MACH;AAAA,IACF,CAAC;AAAA,EACH,EACC,QAAQ;AACb,CAAC;AAUM,IAAM,kBAAN,MAA+C;AAAA,EAOpD,YACW,SACQ,QACjB;AAFS;AACQ;AARnB,SAAS,uBAAuB;AAAA,EAS7B;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOQ,QAAQ;AAAA,IACd;AAAA,IACA,QAAQ;AAAA,IACR,eAAe;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAA+C;AAC7C,UAAM,WAAuC,CAAC;AAG9C,UAAM,kBAAc,6CAAqB;AAAA,MACvC,UAAU;AAAA,MACV;AAAA,MACA,QAAQ;AAAA,IACV,CAAC;AAGD,UAAM,cAAkC;AAAA,MACtC,YAAY;AAAA,QACV;AAAA,UACE;AAAA,UACA;AAAA,UACA,aAAa;AAAA,UACb,OAAO;AAAA,YACL,IAAI;AAAA,YACJ,UAAU;AAAA,UACZ;AAAA,QACF;AAAA,MACF;AAAA,MACA,QAAQ,EAAE,MAAM,MAAM;AAAA,IACxB;AAEA,QAAI,cAAc;AAChB,UAAI,CAAC,OAAO,OAAO,KAAK,EAAE,SAAS,YAAY,GAAG;AAChD,oBAAY,SAAS,EAAE,MAAM,aAAsC;AAAA,MACrE,OAAO;AACL,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,SAAS;AAAA,UACT,SAAS,8BAA8B,YAAY;AAAA,QACrD,CAAC;AAAA,MACH;AAAA,IACF;AAGA,QAAI,aAAa;AACf,YAAM,qBAGF,CAAC;AAEL,UAAI,YAAY,SAAS;AACvB,YAAI,kBAAkB,YAAY,SAAS;AACzC,6BAAmB,UAAU;AAAA,YAC3B,eAAe,YAAY,QAAQ;AAAA,UACrC;AAAA,QACF,OAAO;AACL,6BAAmB,UAAU;AAAA,YAC3B,YAAY,YAAY,QAAQ,WAAW,IAAI,gBAAc;AAAA,cAC3D,MAAM,UAAU;AAAA,cAChB,aAAa,UAAU;AAAA,cACvB,OAAO,UAAU;AAAA,cACjB,kBAAkB,UAAU;AAAA,cAC5B,OAAO,UAAU;AAAA,YACnB,EAAE;AAAA,UACJ;AAAA,QACF;AAAA,MACF;AAEA,iBAAW,OAAO,oBAAoB;AACpC,cAAM,QACJ,mB
ACE,GACF;AACF,YAAI,UAAU,QAAW;AACvB,UAAC,YAAwC,GAAG,IAAI;AAAA,QAClD;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,MACL;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,WACJ,SAC2D;AAjM/D;AAkMI,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,EAAE,aAAa,SAAS,IAAI,KAAK,QAAQ,OAAO;AAEtD,UAAM;AAAA,MACJ,OAAO;AAAA,MACP;AAAA,MACA,UAAU;AAAA,IACZ,IAAI,UAAM,sCAAc;AAAA,MACtB,KAAK,KAAK,OAAO,IAAI;AAAA,QACnB,MAAM;AAAA,QACN,SAAS,KAAK;AAAA,MAChB,CAAC;AAAA,MACD,aAAS,uCAAe,KAAK,OAAO,QAAQ,GAAG,QAAQ,OAAO;AAAA,MAC9D,MAAM;AAAA,MACN,uBAAuB;AAAA,MACvB,+BAA2B,oDAA4B;AAAA,MACvD,aAAa,QAAQ;AAAA,MACrB,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA,SAAS;AAAA,QACP,MAAM,KAAK,UAAU,WAAW;AAAA,MAClC;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,QACT,MAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACF;;;ADhMO,SAAS,WAAW,UAAgC,CAAC,GAAiB;AAC3E,QAAM,aAAa,OAAO;AAAA,IACxB,sBAAkB,mCAAW;AAAA,MAC3B,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC;AAAA,IACD,GAAG,QAAQ;AAAA,EACb;AAEA,QAAM,oBAAoB,MACxB,IAAI,gBAAgB,IAAI;AAAA,IACtB,UAAU;AAAA,IACV,KAAK,CAAC,EAAE,KAAK,MAAM,sBAAsB,IAAI;AAAA,IAC7C,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB,CAAC;AAEH,QAAM,WAAW,WAAY;AAC3B,WAAO;AAAA,MACL,QAAQ,kBAAkB;AAAA,IAC5B;AAAA,EACF;AAEA,WAAS,SAAS;AAClB,WAAS,cAAc;AAEvB,SAAO;AACT;AAKO,IAAM,OAAO,WAAW;","names":["import_provider_utils","import_provider_utils","import_zod"]}
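As `getArgs` in `dist/index.js` above shows, the standard speech-call options map onto the Hume request body: `voice` becomes the utterance's voice `id`, `instructions` becomes `description`, and `outputFormat` is only honored for `mp3`, `pcm`, or `wav` (anything else falls back to `mp3` with an `unsupported-setting` warning). A sketch of a call exercising those options; the voice ID is a placeholder:

```ts
import { hume } from '@ai-sdk/hume';
import { experimental_generateSpeech as generateSpeech } from 'ai';

const { audio, warnings } = await generateSpeech({
  model: hume.speech(),
  text: 'Welcome to the demo.',
  voice: 'a1b2c3d4-0000-0000-0000-000000000000', // placeholder Hume voice ID
  outputFormat: 'wav', // sent as format: { type: 'wav' }
  speed: 1.1, // passed through on the utterance
  instructions: 'Speak in a calm, friendly tone.', // sent as description
});
```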
package/dist/index.mjs ADDED
@@ -0,0 +1,237 @@
1
+ // src/hume-provider.ts
2
+ import { loadApiKey } from "@ai-sdk/provider-utils";
3
+
4
+ // src/hume-speech-model.ts
5
+ import {
6
+ combineHeaders,
7
+ createBinaryResponseHandler,
8
+ parseProviderOptions,
9
+ postJsonToApi
10
+ } from "@ai-sdk/provider-utils";
11
+ import { z as z2 } from "zod";
12
+
13
+ // src/hume-error.ts
14
+ import { z } from "zod";
15
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
16
+ var humeErrorDataSchema = z.object({
17
+ error: z.object({
18
+ message: z.string(),
19
+ code: z.number()
20
+ })
21
+ });
22
+ var humeFailedResponseHandler = createJsonErrorResponseHandler({
23
+ errorSchema: humeErrorDataSchema,
24
+ errorToMessage: (data) => data.error.message
25
+ });
26
+
27
+ // src/hume-speech-model.ts
28
+ var humeSpeechCallOptionsSchema = z2.object({
29
+ /**
30
+ * Context for the speech synthesis request.
31
+ * Can be either a generationId for retrieving a previous generation,
32
+ * or a list of utterances to synthesize.
33
+ */
34
+ context: z2.object({
35
+ /**
36
+ * ID of a previously generated speech synthesis to retrieve.
37
+ */
38
+ generationId: z2.string()
39
+ }).or(
40
+ z2.object({
41
+ /**
42
+ * List of utterances to synthesize into speech.
43
+ */
44
+ utterances: z2.array(
45
+ z2.object({
46
+ /**
47
+ * The text content to convert to speech.
48
+ */
49
+ text: z2.string(),
50
+ /**
51
+ * Optional description or instructions for how the text should be spoken.
52
+ */
53
+ description: z2.string().optional(),
54
+ /**
55
+ * Optional speech rate multiplier.
56
+ */
57
+ speed: z2.number().optional(),
58
+ /**
59
+ * Optional duration of silence to add after the utterance in seconds.
60
+ */
61
+ trailingSilence: z2.number().optional(),
62
+ /**
63
+ * Voice configuration for the utterance.
64
+ * Can be specified by ID or name.
65
+ */
66
+ voice: z2.object({
67
+ /**
68
+ * ID of the voice to use.
69
+ */
70
+ id: z2.string(),
71
+ /**
72
+ * Provider of the voice, either Hume's built-in voices or a custom voice.
73
+ */
74
+ provider: z2.enum(["HUME_AI", "CUSTOM_VOICE"]).optional()
75
+ }).or(
76
+ z2.object({
77
+ /**
78
+ * Name of the voice to use.
79
+ */
80
+ name: z2.string(),
81
+ /**
82
+ * Provider of the voice, either Hume's built-in voices or a custom voice.
83
+ */
84
+ provider: z2.enum(["HUME_AI", "CUSTOM_VOICE"]).optional()
85
+ })
86
+ ).optional()
87
+ })
88
+ )
89
+ })
90
+ ).nullish()
91
+ });
92
+ var HumeSpeechModel = class {
93
+ constructor(modelId, config) {
94
+ this.modelId = modelId;
95
+ this.config = config;
96
+ this.specificationVersion = "v1";
97
+ }
98
+ get provider() {
99
+ return this.config.provider;
100
+ }
101
+ getArgs({
102
+ text,
103
+ voice = "d8ab67c6-953d-4bd8-9370-8fa53a0f1453",
104
+ outputFormat = "mp3",
105
+ speed,
106
+ instructions,
107
+ providerOptions
108
+ }) {
109
+ const warnings = [];
110
+ const humeOptions = parseProviderOptions({
111
+ provider: "hume",
112
+ providerOptions,
113
+ schema: humeSpeechCallOptionsSchema
114
+ });
115
+ const requestBody = {
116
+ utterances: [
117
+ {
118
+ text,
119
+ speed,
120
+ description: instructions,
121
+ voice: {
122
+ id: voice,
123
+ provider: "HUME_AI"
124
+ }
125
+ }
126
+ ],
127
+ format: { type: "mp3" }
128
+ };
129
+ if (outputFormat) {
130
+ if (["mp3", "pcm", "wav"].includes(outputFormat)) {
131
+ requestBody.format = { type: outputFormat };
132
+ } else {
133
+ warnings.push({
134
+ type: "unsupported-setting",
135
+ setting: "outputFormat",
136
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
137
+ });
138
+ }
139
+ }
140
+ if (humeOptions) {
141
+ const speechModelOptions = {};
142
+ if (humeOptions.context) {
143
+ if ("generationId" in humeOptions.context) {
144
+ speechModelOptions.context = {
145
+ generation_id: humeOptions.context.generationId
146
+ };
147
+ } else {
148
+ speechModelOptions.context = {
149
+ utterances: humeOptions.context.utterances.map((utterance) => ({
150
+ text: utterance.text,
151
+ description: utterance.description,
152
+ speed: utterance.speed,
153
+ trailing_silence: utterance.trailingSilence,
154
+ voice: utterance.voice
155
+ }))
156
+ };
157
+ }
158
+ }
159
+ for (const key in speechModelOptions) {
160
+ const value = speechModelOptions[key];
161
+ if (value !== void 0) {
162
+ requestBody[key] = value;
163
+ }
164
+ }
165
+ }
166
+ return {
167
+ requestBody,
168
+ warnings
169
+ };
170
+ }
171
+ async doGenerate(options) {
172
+ var _a, _b, _c;
173
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
174
+ const { requestBody, warnings } = this.getArgs(options);
175
+ const {
176
+ value: audio,
177
+ responseHeaders,
178
+ rawValue: rawResponse
179
+ } = await postJsonToApi({
180
+ url: this.config.url({
181
+ path: "/v0/tts/file",
182
+ modelId: this.modelId
183
+ }),
184
+ headers: combineHeaders(this.config.headers(), options.headers),
185
+ body: requestBody,
186
+ failedResponseHandler: humeFailedResponseHandler,
187
+ successfulResponseHandler: createBinaryResponseHandler(),
188
+ abortSignal: options.abortSignal,
189
+ fetch: this.config.fetch
190
+ });
191
+ return {
192
+ audio,
193
+ warnings,
194
+ request: {
195
+ body: JSON.stringify(requestBody)
196
+ },
197
+ response: {
198
+ timestamp: currentDate,
199
+ modelId: this.modelId,
200
+ headers: responseHeaders,
201
+ body: rawResponse
202
+ }
203
+ };
204
+ }
205
+ };
206
+
207
+ // src/hume-provider.ts
208
+ function createHume(options = {}) {
209
+ const getHeaders = () => ({
210
+ "X-Hume-Api-Key": loadApiKey({
211
+ apiKey: options.apiKey,
212
+ environmentVariableName: "HUME_API_KEY",
213
+ description: "Hume"
214
+ }),
215
+ ...options.headers
216
+ });
217
+ const createSpeechModel = () => new HumeSpeechModel("", {
218
+ provider: `hume.speech`,
219
+ url: ({ path }) => `https://api.hume.ai${path}`,
220
+ headers: getHeaders,
221
+ fetch: options.fetch
222
+ });
223
+ const provider = function() {
224
+ return {
225
+ speech: createSpeechModel()
226
+ };
227
+ };
228
+ provider.speech = createSpeechModel;
229
+ provider.speechModel = createSpeechModel;
230
+ return provider;
231
+ }
232
+ var hume = createHume();
233
+ export {
234
+ createHume,
235
+ hume
236
+ };
237
+ //# sourceMappingURL=index.mjs.map
package/dist/index.mjs.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/hume-provider.ts","../src/hume-speech-model.ts","../src/hume-error.ts"],"sourcesContent":["import { SpeechModelV1, ProviderV1 } from '@ai-sdk/provider';\nimport { FetchFunction, loadApiKey } from '@ai-sdk/provider-utils';\nimport { HumeSpeechModel } from './hume-speech-model';\n\nexport interface HumeProvider extends Pick<ProviderV1, 'speechModel'> {\n (settings?: {}): {\n speech: HumeSpeechModel;\n };\n\n /**\nCreates a model for speech synthesis.\n */\n speech(): SpeechModelV1;\n}\n\nexport interface HumeProviderSettings {\n /**\nAPI key for authenticating requests.\n */\n apiKey?: string;\n\n /**\nCustom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n}\n\n/**\nCreate an Hume provider instance.\n */\nexport function createHume(options: HumeProviderSettings = {}): HumeProvider {\n const getHeaders = () => ({\n 'X-Hume-Api-Key': loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'HUME_API_KEY',\n description: 'Hume',\n }),\n ...options.headers,\n });\n\n const createSpeechModel = () =>\n new HumeSpeechModel('', {\n provider: `hume.speech`,\n url: ({ path }) => `https://api.hume.ai${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const provider = function () {\n return {\n speech: createSpeechModel(),\n };\n };\n\n provider.speech = createSpeechModel;\n provider.speechModel = createSpeechModel;\n\n return provider as HumeProvider;\n}\n\n/**\nDefault Hume provider instance.\n */\nexport const hume = createHume();\n","import { SpeechModelV1, SpeechModelV1CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createBinaryResponseHandler,\n parseProviderOptions,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod';\nimport { HumeConfig } from './hume-config';\nimport { humeFailedResponseHandler } from './hume-error';\nimport { HumeSpeechAPITypes } from './hume-api-types';\n\n// https://dev.hume.ai/reference/text-to-speech-tts/synthesize-file\nconst humeSpeechCallOptionsSchema = z.object({\n /**\n * Context for the speech synthesis request.\n * Can be either a generationId for retrieving a previous generation,\n * or a list of utterances to synthesize.\n */\n context: z\n .object({\n /**\n * ID of a previously generated speech synthesis to retrieve.\n */\n generationId: z.string(),\n })\n .or(\n z.object({\n /**\n * List of utterances to synthesize into speech.\n */\n utterances: z.array(\n z.object({\n /**\n * The text content to convert to speech.\n */\n text: z.string(),\n /**\n * Optional description or instructions for how the text should be spoken.\n */\n description: z.string().optional(),\n /**\n * Optional speech rate multiplier.\n */\n speed: z.number().optional(),\n /**\n * Optional duration of silence to add after the utterance in seconds.\n */\n trailingSilence: z.number().optional(),\n /**\n * Voice configuration for the utterance.\n * Can be specified by ID or name.\n */\n voice: z\n .object({\n /**\n * ID of the voice to use.\n */\n id: z.string(),\n /**\n * Provider of the voice, either Hume's built-in voices or a custom voice.\n */\n provider: z.enum(['HUME_AI', 'CUSTOM_VOICE']).optional(),\n })\n .or(\n z.object({\n /**\n * Name of the voice to use.\n */\n name: z.string(),\n /**\n * Provider of the voice, either Hume's built-in voices or a custom voice.\n */\n 
provider: z.enum(['HUME_AI', 'CUSTOM_VOICE']).optional(),\n }),\n )\n .optional(),\n }),\n ),\n }),\n )\n .nullish(),\n});\n\nexport type HumeSpeechCallOptions = z.infer<typeof humeSpeechCallOptionsSchema>;\n\ninterface HumeSpeechModelConfig extends HumeConfig {\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class HumeSpeechModel implements SpeechModelV1 {\n readonly specificationVersion = 'v1';\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: '',\n private readonly config: HumeSpeechModelConfig,\n ) {}\n\n private getArgs({\n text,\n voice = 'd8ab67c6-953d-4bd8-9370-8fa53a0f1453',\n outputFormat = 'mp3',\n speed,\n instructions,\n providerOptions,\n }: Parameters<SpeechModelV1['doGenerate']>[0]) {\n const warnings: SpeechModelV1CallWarning[] = [];\n\n // Parse provider options\n const humeOptions = parseProviderOptions({\n provider: 'hume',\n providerOptions,\n schema: humeSpeechCallOptionsSchema,\n });\n\n // Create request body\n const requestBody: HumeSpeechAPITypes = {\n utterances: [\n {\n text,\n speed,\n description: instructions,\n voice: {\n id: voice,\n provider: 'HUME_AI',\n },\n },\n ],\n format: { type: 'mp3' },\n };\n\n if (outputFormat) {\n if (['mp3', 'pcm', 'wav'].includes(outputFormat)) {\n requestBody.format = { type: outputFormat as 'mp3' | 'pcm' | 'wav' };\n } else {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'outputFormat',\n details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`,\n });\n }\n }\n\n // Add provider-specific options\n if (humeOptions) {\n const speechModelOptions: Omit<\n HumeSpeechAPITypes,\n 'utterances' | 'format'\n > = {};\n\n if (humeOptions.context) {\n if ('generationId' in humeOptions.context) {\n speechModelOptions.context = {\n generation_id: humeOptions.context.generationId,\n };\n } else {\n speechModelOptions.context = {\n utterances: humeOptions.context.utterances.map(utterance => ({\n text: utterance.text,\n description: utterance.description,\n speed: utterance.speed,\n trailing_silence: utterance.trailingSilence,\n voice: utterance.voice,\n })),\n };\n }\n }\n\n for (const key in speechModelOptions) {\n const value =\n speechModelOptions[\n key as keyof Omit<HumeSpeechAPITypes, 'utterances' | 'format'>\n ];\n if (value !== undefined) {\n (requestBody as Record<string, unknown>)[key] = value;\n }\n }\n }\n\n return {\n requestBody,\n warnings,\n };\n }\n\n async doGenerate(\n options: Parameters<SpeechModelV1['doGenerate']>[0],\n ): Promise<Awaited<ReturnType<SpeechModelV1['doGenerate']>>> {\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n const { requestBody, warnings } = this.getArgs(options);\n\n const {\n value: audio,\n responseHeaders,\n rawValue: rawResponse,\n } = await postJsonToApi({\n url: this.config.url({\n path: '/v0/tts/file',\n modelId: this.modelId,\n }),\n headers: combineHeaders(this.config.headers(), options.headers),\n body: requestBody,\n failedResponseHandler: humeFailedResponseHandler,\n successfulResponseHandler: createBinaryResponseHandler(),\n abortSignal: options.abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n audio,\n warnings,\n request: {\n body: JSON.stringify(requestBody),\n },\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n body: rawResponse,\n },\n };\n }\n}\n","import { z } from 'zod';\nimport { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';\n\nexport const humeErrorDataSchema = z.object({\n error: z.object({\n message: z.string(),\n code: z.number(),\n }),\n});\n\nexport type HumeErrorData = z.infer<typeof humeErrorDataSchema>;\n\nexport const humeFailedResponseHandler = createJsonErrorResponseHandler({\n errorSchema: humeErrorDataSchema,\n errorToMessage: data => data.error.message,\n});\n"],"mappings":";AACA,SAAwB,kBAAkB;;;ACA1C;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,KAAAA,UAAS;;;ACPlB,SAAS,SAAS;AAClB,SAAS,sCAAsC;AAExC,IAAM,sBAAsB,EAAE,OAAO;AAAA,EAC1C,OAAO,EAAE,OAAO;AAAA,IACd,SAAS,EAAE,OAAO;AAAA,IAClB,MAAM,EAAE,OAAO;AAAA,EACjB,CAAC;AACH,CAAC;AAIM,IAAM,4BAA4B,+BAA+B;AAAA,EACtE,aAAa;AAAA,EACb,gBAAgB,UAAQ,KAAK,MAAM;AACrC,CAAC;;;ADFD,IAAM,8BAA8BC,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM3C,SAASA,GACN,OAAO;AAAA;AAAA;AAAA;AAAA,IAIN,cAAcA,GAAE,OAAO;AAAA,EACzB,CAAC,EACA;AAAA,IACCA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA,MAIP,YAAYA,GAAE;AAAA,QACZA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA,UAIP,MAAMA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA,UAIf,aAAaA,GAAE,OAAO,EAAE,SAAS;AAAA;AAAA;AAAA;AAAA,UAIjC,OAAOA,GAAE,OAAO,EAAE,SAAS;AAAA;AAAA;AAAA;AAAA,UAI3B,iBAAiBA,GAAE,OAAO,EAAE,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA,UAKrC,OAAOA,GACJ,OAAO;AAAA;AAAA;AAAA;AAAA,YAIN,IAAIA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA,YAIb,UAAUA,GAAE,KAAK,CAAC,WAAW,cAAc,CAAC,EAAE,SAAS;AAAA,UACzD,CAAC,EACA;AAAA,YACCA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA,cAIP,MAAMA,GAAE,OAAO;AAAA;AAAA;AAAA;AAAA,cAIf,UAAUA,GAAE,KAAK,CAAC,WAAW,cAAc,CAAC,EAAE,SAAS;AAAA,YACzD,CAAC;AAAA,UACH,EACC,SAAS;AAAA,QACd,CAAC;AAAA,MACH;AAAA,IACF,CAAC;AAAA,EACH,EACC,QAAQ;AACb,CAAC;AAUM,IAAM,kBAAN,MAA+C;AAAA,EAOpD,YACW,SACQ,QACjB;AAFS;AACQ;AARnB,SAAS,uBAAuB;AAAA,EAS7B;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOQ,QAAQ;AAAA,IACd;AAAA,IACA,QAAQ;AAAA,IACR,eAAe;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAA+C;AAC7C,UAAM,WAAuC,CAAC;AAG9C,UAAM,cAAc,qBAAqB;AAAA,MACvC,UAAU;AAAA,MACV;AAAA,MACA,QAAQ;AAAA,IACV,CAAC;AAGD,UAAM,cAAkC;AAAA,MACtC,YAAY;AAAA,QACV;AAAA,UACE;AAAA,UACA;AAAA,UACA,aAAa;AAAA,UACb,OAAO;AAAA,YACL,IAAI;AAAA,YACJ,UAAU;AAAA,UACZ;AAAA,QACF;AAAA,MACF;AAAA,MACA,QAAQ,EAAE,MAAM,MAAM;AAAA,IACxB;AAEA,QAAI,cAAc;AAChB,UAAI,CAAC,OAAO,OAAO,KAAK,EAAE,SAAS,YAAY,GAAG;AAChD,oBAAY,SAAS,EAAE,MAAM,aAAsC;AAAA,MACrE,OAAO;AACL,iBAAS,KAAK;AAAA,UACZ,MAAM;AAAA,UACN,SAAS;AAAA,UACT,SAAS,8BAA8B,YAAY;AAAA,QACrD,CAAC;AAAA,MACH;AAAA,IACF;AAGA,QAAI,aAAa;AACf,YAAM,qBAGF,CAAC;AAEL,UAAI,YAAY,SAAS;AACvB,YAAI,kBAAkB,YAAY,SAAS;AACzC,6BAAmB,UAAU;AAAA,YAC3B,eAAe,YAAY,QAAQ;AAAA,UACrC;AAAA,QACF,OAAO;AACL,6BAAmB,UAAU;AAAA,YAC3B,YAAY,YAAY,QAAQ,WAAW,IAAI,gBAAc;AAAA,cAC3D,MAAM,UAAU;AAAA,cAChB,aAAa,UAAU;AAAA,cACvB,OAAO,UAAU;AAAA,cACjB,kBAAkB,UAAU;AAAA,cAC5B,OAAO,UAAU;AAAA,YACnB,EAAE;AAAA,UACJ;AAAA,QACF;AAAA,MACF;AAEA,iBAAW,OAAO,oBAAoB;AACp
C,cAAM,QACJ,mBACE,GACF;AACF,YAAI,UAAU,QAAW;AACvB,UAAC,YAAwC,GAAG,IAAI;AAAA,QAClD;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,MACL;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,WACJ,SAC2D;AAjM/D;AAkMI,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,EAAE,aAAa,SAAS,IAAI,KAAK,QAAQ,OAAO;AAEtD,UAAM;AAAA,MACJ,OAAO;AAAA,MACP;AAAA,MACA,UAAU;AAAA,IACZ,IAAI,MAAM,cAAc;AAAA,MACtB,KAAK,KAAK,OAAO,IAAI;AAAA,QACnB,MAAM;AAAA,QACN,SAAS,KAAK;AAAA,MAChB,CAAC;AAAA,MACD,SAAS,eAAe,KAAK,OAAO,QAAQ,GAAG,QAAQ,OAAO;AAAA,MAC9D,MAAM;AAAA,MACN,uBAAuB;AAAA,MACvB,2BAA2B,4BAA4B;AAAA,MACvD,aAAa,QAAQ;AAAA,MACrB,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA,SAAS;AAAA,QACP,MAAM,KAAK,UAAU,WAAW;AAAA,MAClC;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,QACT,MAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACF;;;ADhMO,SAAS,WAAW,UAAgC,CAAC,GAAiB;AAC3E,QAAM,aAAa,OAAO;AAAA,IACxB,kBAAkB,WAAW;AAAA,MAC3B,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC;AAAA,IACD,GAAG,QAAQ;AAAA,EACb;AAEA,QAAM,oBAAoB,MACxB,IAAI,gBAAgB,IAAI;AAAA,IACtB,UAAU;AAAA,IACV,KAAK,CAAC,EAAE,KAAK,MAAM,sBAAsB,IAAI;AAAA,IAC7C,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB,CAAC;AAEH,QAAM,WAAW,WAAY;AAC3B,WAAO;AAAA,MACL,QAAQ,kBAAkB;AAAA,IAC5B;AAAA,EACF;AAEA,WAAS,SAAS;AAClB,WAAS,cAAc;AAEvB,SAAO;AACT;AAKO,IAAM,OAAO,WAAW;","names":["z","z"]}
package/package.json ADDED
@@ -0,0 +1,64 @@
1
+ {
2
+ "name": "@ai-sdk/hume",
3
+ "version": "0.0.1",
4
+ "license": "Apache-2.0",
5
+ "sideEffects": false,
6
+ "main": "./dist/index.js",
7
+ "module": "./dist/index.mjs",
8
+ "types": "./dist/index.d.ts",
9
+ "files": [
10
+ "dist/**/*",
11
+ "CHANGELOG.md"
12
+ ],
13
+ "exports": {
14
+ "./package.json": "./package.json",
15
+ ".": {
16
+ "types": "./dist/index.d.ts",
17
+ "import": "./dist/index.mjs",
18
+ "require": "./dist/index.js"
19
+ }
20
+ },
21
+ "dependencies": {
22
+ "@ai-sdk/provider": "1.1.3",
23
+ "@ai-sdk/provider-utils": "2.2.7"
24
+ },
25
+ "devDependencies": {
26
+ "@types/node": "20.17.24",
27
+ "tsup": "^8",
28
+ "typescript": "5.6.3",
29
+ "zod": "3.23.8",
30
+ "@vercel/ai-tsconfig": "0.0.0"
31
+ },
32
+ "peerDependencies": {
33
+ "zod": "^3.0.0"
34
+ },
35
+ "engines": {
36
+ "node": ">=18"
37
+ },
38
+ "publishConfig": {
39
+ "access": "public"
40
+ },
41
+ "homepage": "https://sdk.vercel.ai/docs",
42
+ "repository": {
43
+ "type": "git",
44
+ "url": "git+https://github.com/vercel/ai.git"
45
+ },
46
+ "bugs": {
47
+ "url": "https://github.com/vercel/ai/issues"
48
+ },
49
+ "keywords": [
50
+ "ai"
51
+ ],
52
+ "scripts": {
53
+ "build": "tsup",
54
+ "build:watch": "tsup --watch",
55
+ "clean": "rm -rf dist",
56
+ "lint": "eslint \"./**/*.ts*\"",
57
+ "type-check": "tsc --noEmit",
58
+ "prettier-check": "prettier --check \"./**/*.ts*\"",
59
+ "test": "pnpm test:node && pnpm test:edge",
60
+ "test:edge": "vitest --config vitest.edge.config.js --run",
61
+ "test:node": "vitest --config vitest.node.config.js --run",
62
+ "test:node:watch": "vitest --config vitest.node.config.js --watch"
63
+ }
64
+ }