@ai-sdk/baseten 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,7 @@
+ # @ai-sdk/baseten
+
+ ## 0.0.1
+
+ ### Patch Changes
+
+ - 9694b94: initial stable release
package/LICENSE ADDED
@@ -0,0 +1,13 @@
+ Copyright 2023 Vercel, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
package/README.md ADDED
@@ -0,0 +1,35 @@
+ # AI SDK - Baseten Provider
+
+ The **[Baseten provider](https://ai-sdk.dev/providers/ai-sdk-providers/baseten)** for the [AI SDK](https://ai-sdk.dev/docs) contains language model and embedding model support for the [Baseten](https://baseten.co) platform.
+
+ ## Setup
+
+ The Baseten provider is available in the `@ai-sdk/baseten` module. You can install it with:
+
+ ```bash
+ npm i @ai-sdk/baseten
+ ```
+
+ ## Provider Instance
+
+ You can import the default provider instance `baseten` from `@ai-sdk/baseten`:
+
+ ```ts
+ import { baseten } from '@ai-sdk/baseten';
+ ```
+
+ ## Language Model Example (Model APIs)
+
+ ```ts
+ import { baseten } from '@ai-sdk/baseten';
+ import { generateText } from 'ai';
+
+ const { text } = await generateText({
+   model: baseten('deepseek-ai/DeepSeek-V3-0324'),
+   prompt: 'What is the meaning of life?',
+ });
+ ```
+
+ ## Documentation
+
+ Please check out the **[Baseten provider](https://ai-sdk.dev/providers/ai-sdk-providers/baseten)** for more information.
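The README only shows chat against the hosted Model APIs. Per the typings and runtime below, embeddings require a dedicated deployment URL passed via `modelURL`. A minimal sketch using the `embed` helper from the `ai` package — the deployment URL is a placeholder, not a real endpoint:

```ts
import { createBaseten } from '@ai-sdk/baseten';
import { embed } from 'ai';

// Placeholder deployment URL: embeddings need a /sync or /sync/v1 endpoint;
// the default Model APIs base URL is not used for embeddings.
const baseten = createBaseten({
  modelURL:
    'https://model-xxxxxxx.api.baseten.co/environments/production/sync',
});

const { embedding } = await embed({
  model: baseten.textEmbeddingModel(),
  value: 'sunny day at the beach',
});
```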
package/dist/index.d.mts ADDED
@@ -0,0 +1,61 @@
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2 } from '@ai-sdk/provider';
+ import { FetchFunction } from '@ai-sdk/provider-utils';
+ import { z } from 'zod/v4';
+
+ type BasetenChatModelId = 'deepseek-ai/DeepSeek-R1-0528' | 'deepseek-ai/DeepSeek-V3-0324' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct' | 'meta-llama/Llama-4-Scout-17B-16E-Instruct' | 'moonshotai/Kimi-K2-Instruct' | 'Qwen/Qwen3-235B-A22B-Instruct-2507' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct' | 'openai/gpt-oss-120b' | (string & {});
+
+ type BasetenEmbeddingModelId = string & {};
+
+ type BasetenErrorData = z.infer<typeof basetenErrorSchema>;
+ declare const basetenErrorSchema: z.ZodObject<{
+     error: z.ZodString;
+ }, z.core.$strip>;
+ interface BasetenProviderSettings {
+     /**
+      * Baseten API key. Default value is taken from the `BASETEN_API_KEY`
+      * environment variable.
+      */
+     apiKey?: string;
+     /**
+      * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'
+      */
+     baseURL?: string;
+     /**
+      * Model URL for custom models (chat or embeddings).
+      * If not supplied, the default Model APIs will be used.
+      */
+     modelURL?: string;
+     /**
+      * Custom headers to include in the requests.
+      */
+     headers?: Record<string, string>;
+     /**
+      * Custom fetch implementation. You can use it as a middleware to intercept requests,
+      * or to provide a custom fetch implementation for e.g. testing.
+      */
+     fetch?: FetchFunction;
+ }
+ interface BasetenProvider extends ProviderV2 {
+     /**
+     Creates a chat model for text generation.
+     */
+     (modelId?: BasetenChatModelId): LanguageModelV2;
+     /**
+     Creates a chat model for text generation.
+     */
+     chatModel(modelId?: BasetenChatModelId): LanguageModelV2;
+     /**
+     Creates a language model for text generation. Alias for chatModel.
+     */
+     languageModel(modelId?: BasetenChatModelId): LanguageModelV2;
+     /**
+     Creates a text embedding model.
+     */
+     textEmbeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV2<string>;
+ }
+ declare function createBaseten(options?: BasetenProviderSettings): BasetenProvider;
+ declare const baseten: BasetenProvider;
+
+ declare const VERSION: string;
+
+ export { type BasetenChatModelId, type BasetenErrorData, type BasetenProvider, type BasetenProviderSettings, VERSION, baseten, createBaseten };
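Putting the settings above together, a sketch of a configured provider instance — the environment variable and header below are made up for illustration; omitting `apiKey` falls back to `BASETEN_API_KEY`:

```ts
import { createBaseten } from '@ai-sdk/baseten';

const baseten = createBaseten({
  apiKey: process.env.MY_BASETEN_KEY, // hypothetical variable; defaults to BASETEN_API_KEY
  baseURL: 'https://inference.baseten.co/v1', // the documented default
  headers: { 'X-Team': 'research' }, // hypothetical custom header
});

const model = baseten('Qwen/Qwen3-Coder-480B-A35B-Instruct');
```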
package/dist/index.d.ts ADDED
@@ -0,0 +1,61 @@
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2 } from '@ai-sdk/provider';
+ import { FetchFunction } from '@ai-sdk/provider-utils';
+ import { z } from 'zod/v4';
+
+ type BasetenChatModelId = 'deepseek-ai/DeepSeek-R1-0528' | 'deepseek-ai/DeepSeek-V3-0324' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct' | 'meta-llama/Llama-4-Scout-17B-16E-Instruct' | 'moonshotai/Kimi-K2-Instruct' | 'Qwen/Qwen3-235B-A22B-Instruct-2507' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct' | 'openai/gpt-oss-120b' | (string & {});
+
+ type BasetenEmbeddingModelId = string & {};
+
+ type BasetenErrorData = z.infer<typeof basetenErrorSchema>;
+ declare const basetenErrorSchema: z.ZodObject<{
+     error: z.ZodString;
+ }, z.core.$strip>;
+ interface BasetenProviderSettings {
+     /**
+      * Baseten API key. Default value is taken from the `BASETEN_API_KEY`
+      * environment variable.
+      */
+     apiKey?: string;
+     /**
+      * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'
+      */
+     baseURL?: string;
+     /**
+      * Model URL for custom models (chat or embeddings).
+      * If not supplied, the default Model APIs will be used.
+      */
+     modelURL?: string;
+     /**
+      * Custom headers to include in the requests.
+      */
+     headers?: Record<string, string>;
+     /**
+      * Custom fetch implementation. You can use it as a middleware to intercept requests,
+      * or to provide a custom fetch implementation for e.g. testing.
+      */
+     fetch?: FetchFunction;
+ }
+ interface BasetenProvider extends ProviderV2 {
+     /**
+     Creates a chat model for text generation.
+     */
+     (modelId?: BasetenChatModelId): LanguageModelV2;
+     /**
+     Creates a chat model for text generation.
+     */
+     chatModel(modelId?: BasetenChatModelId): LanguageModelV2;
+     /**
+     Creates a language model for text generation. Alias for chatModel.
+     */
+     languageModel(modelId?: BasetenChatModelId): LanguageModelV2;
+     /**
+     Creates a text embedding model.
+     */
+     textEmbeddingModel(modelId?: BasetenEmbeddingModelId): EmbeddingModelV2<string>;
+ }
+ declare function createBaseten(options?: BasetenProviderSettings): BasetenProvider;
+ declare const baseten: BasetenProvider;
+
+ declare const VERSION: string;
+
+ export { type BasetenChatModelId, type BasetenErrorData, type BasetenProvider, type BasetenProviderSettings, VERSION, baseten, createBaseten };
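As the interface documents, the provider object is itself callable, and `chatModel`/`languageModel` are aliases for the same factory. A quick sketch of the three equivalent call forms:

```ts
import { baseten } from '@ai-sdk/baseten';

// All three return the same LanguageModelV2 per the BasetenProvider interface:
const a = baseten('openai/gpt-oss-120b');
const b = baseten.chatModel('openai/gpt-oss-120b');
const c = baseten.languageModel('openai/gpt-oss-120b');
```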
package/dist/index.js ADDED
@@ -0,0 +1,157 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var src_exports = {};
+ __export(src_exports, {
+   VERSION: () => VERSION,
+   baseten: () => baseten,
+   createBaseten: () => createBaseten
+ });
+ module.exports = __toCommonJS(src_exports);
+
+ // src/baseten-provider.ts
+ var import_openai_compatible = require("@ai-sdk/openai-compatible");
+ var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+ var import_v4 = require("zod/v4");
+ var import_performance_client = require("@basetenlabs/performance-client");
+
+ // src/version.ts
+ var VERSION = true ? "0.0.1" : "0.0.0-test";
+
+ // src/baseten-provider.ts
+ var basetenErrorSchema = import_v4.z.object({
+   error: import_v4.z.string()
+ });
+ var basetenErrorStructure = {
+   errorSchema: basetenErrorSchema,
+   errorToMessage: (data) => data.error
+ };
+ var defaultBaseURL = "https://inference.baseten.co/v1";
+ function createBaseten(options = {}) {
+   var _a;
+   const baseURL = (0, import_provider_utils.withoutTrailingSlash)((_a = options.baseURL) != null ? _a : defaultBaseURL);
+   const getHeaders = () => (0, import_provider_utils.withUserAgentSuffix)(
+     {
+       Authorization: `Bearer ${(0, import_provider_utils.loadApiKey)({
+         apiKey: options.apiKey,
+         environmentVariableName: "BASETEN_API_KEY",
+         description: "Baseten API key"
+       })}`,
+       ...options.headers
+     },
+     `ai-sdk/baseten/${VERSION}`
+   );
+   const getCommonModelConfig = (modelType, customURL) => ({
+     provider: `baseten.${modelType}`,
+     url: ({ path }) => {
+       if (modelType === "embedding" && (customURL == null ? void 0 : customURL.includes("/sync")) && !(customURL == null ? void 0 : customURL.includes("/sync/v1"))) {
+         return `${customURL}/v1${path}`;
+       }
+       return `${customURL || baseURL}${path}`;
+     },
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createChatModel = (modelId) => {
+     const customURL = options.modelURL;
+     if (customURL) {
+       const isOpenAICompatible = customURL.includes("/sync/v1");
+       if (isOpenAICompatible) {
+         return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "placeholder", {
+           ...getCommonModelConfig("chat", customURL),
+           errorStructure: basetenErrorStructure
+         });
+       } else if (customURL.includes("/predict")) {
+         throw new Error(
+           "Not supported. You must use a /sync/v1 endpoint for chat models."
+         );
+       }
+     }
+     return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "chat", {
+       ...getCommonModelConfig("chat"),
+       errorStructure: basetenErrorStructure
+     });
+   };
+   const createTextEmbeddingModel = (modelId) => {
+     const customURL = options.modelURL;
+     if (!customURL) {
+       throw new Error(
+         "No model URL provided for embeddings. Please set modelURL option for embeddings."
+       );
+     }
+     const isOpenAICompatible = customURL.includes("/sync");
+     if (isOpenAICompatible) {
+       const model = new import_openai_compatible.OpenAICompatibleEmbeddingModel(
+         modelId != null ? modelId : "embeddings",
+         {
+           ...getCommonModelConfig("embedding", customURL),
+           errorStructure: basetenErrorStructure
+         }
+       );
+       const performanceClientURL = customURL.replace("/sync/v1", "/sync");
+       const performanceClient = new import_performance_client.PerformanceClient(
+         performanceClientURL,
+         (0, import_provider_utils.loadApiKey)({
+           apiKey: options.apiKey,
+           environmentVariableName: "BASETEN_API_KEY",
+           description: "Baseten API key"
+         })
+       );
+       model.doEmbed = async (params) => {
+         if (!params.values || !Array.isArray(params.values)) {
+           throw new Error("params.values must be an array of strings");
+         }
+         const response = await performanceClient.embed(
+           params.values,
+           modelId != null ? modelId : "embeddings"
+           // model_id is for Model APIs, we don't use it here for dedicated
+         );
+         const embeddings = response.data.map((item) => item.embedding);
+         return {
+           embeddings,
+           usage: response.usage ? { tokens: response.usage.total_tokens } : void 0,
+           response: { headers: {}, body: response }
+         };
+       };
+       return model;
+     } else {
+       throw new Error(
+         "Not supported. You must use a /sync or /sync/v1 endpoint for embeddings."
+       );
+     }
+   };
+   const provider = (modelId) => createChatModel(modelId);
+   provider.chatModel = createChatModel;
+   provider.languageModel = createChatModel;
+   provider.imageModel = (modelId) => {
+     throw new import_provider.NoSuchModelError({ modelId, modelType: "imageModel" });
+   };
+   provider.textEmbeddingModel = createTextEmbeddingModel;
+   return provider;
+ }
+ var baseten = createBaseten();
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   VERSION,
+   baseten,
+   createBaseten
+ });
+ //# sourceMappingURL=index.js.map
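The `url` callback in `getCommonModelConfig` above carries the provider's routing rule: embedding calls against a bare `/sync` deployment get `/v1` inserted, and everything else resolves to the custom or default base URL plus the request path. A standalone mirror of that logic, for illustration only — the example URLs are placeholders:

```ts
// Mirrors the url() logic in getCommonModelConfig above.
const resolveUrl = (
  modelType: string,
  path: string,
  customURL?: string,
  baseURL = 'https://inference.baseten.co/v1',
): string => {
  if (
    modelType === 'embedding' &&
    customURL?.includes('/sync') &&
    !customURL.includes('/sync/v1')
  ) {
    return `${customURL}/v1${path}`; // bare /sync gets /v1 inserted for embeddings
  }
  return `${customURL || baseURL}${path}`;
};

// Default Model APIs, chat:
resolveUrl('chat', '/chat/completions');
// -> 'https://inference.baseten.co/v1/chat/completions'

// Dedicated bare /sync deployment, embeddings:
resolveUrl('embedding', '/embeddings', 'https://model-x.api.baseten.co/sync');
// -> 'https://model-x.api.baseten.co/sync/v1/embeddings'

// Dedicated /sync/v1 deployment, embeddings (no extra /v1 added):
resolveUrl('embedding', '/embeddings', 'https://model-x.api.baseten.co/sync/v1');
// -> 'https://model-x.api.baseten.co/sync/v1/embeddings'
```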
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/index.ts","../src/baseten-provider.ts","../src/version.ts"],"sourcesContent":["export type { BasetenChatModelId } from './baseten-chat-options';\nexport { baseten, createBaseten } from './baseten-provider';\nexport type {\n BasetenProvider,\n BasetenProviderSettings,\n BasetenErrorData,\n} from './baseten-provider';\nexport { VERSION } from './version';\n","import {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleEmbeddingModel,\n ProviderErrorStructure,\n} from '@ai-sdk/openai-compatible';\nimport {\n EmbeddingModelV2,\n LanguageModelV2,\n NoSuchModelError,\n ProviderV2,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { BasetenChatModelId } from './baseten-chat-options';\nimport { BasetenEmbeddingModelId } from './baseten-embedding-options';\nimport { PerformanceClient } from '@basetenlabs/performance-client';\nimport { VERSION } from './version';\n\nexport type BasetenErrorData = z.infer<typeof basetenErrorSchema>;\n\nconst basetenErrorSchema = z.object({\n error: z.string(),\n});\n\nconst basetenErrorStructure: ProviderErrorStructure<BasetenErrorData> = {\n errorSchema: basetenErrorSchema,\n errorToMessage: data => data.error,\n};\n\nexport interface BasetenProviderSettings {\n /**\n * Baseten API key. Default value is taken from the `BASETEN_API_KEY`\n * environment variable.\n */\n apiKey?: string;\n\n /**\n * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'\n */\n baseURL?: string;\n\n /**\n * Model URL for custom models (chat or embeddings).\n * If not supplied, the default Model APIs will be used.\n */\n modelURL?: string;\n /**\n * Custom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\n * Custom fetch implementation. You can use it as a middleware to intercept requests,\n * or to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n}\n\nexport interface BasetenProvider extends ProviderV2 {\n /**\nCreates a chat model for text generation. \n*/\n (modelId?: BasetenChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation. \n*/\n chatModel(modelId?: BasetenChatModelId): LanguageModelV2;\n\n /**\nCreates a language model for text generation. Alias for chatModel.\n*/\n languageModel(modelId?: BasetenChatModelId): LanguageModelV2;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId?: BasetenEmbeddingModelId,\n ): EmbeddingModelV2<string>;\n}\n\n// by default, we use the Model APIs\nconst defaultBaseURL = 'https://inference.baseten.co/v1';\n\nexport function createBaseten(\n options: BasetenProviderSettings = {},\n): BasetenProvider {\n const baseURL = withoutTrailingSlash(options.baseURL ?? 
defaultBaseURL);\n const getHeaders = () =>\n withUserAgentSuffix(\n {\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n })}`,\n ...options.headers,\n },\n `ai-sdk/baseten/${VERSION}`,\n );\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (\n modelType: string,\n customURL?: string,\n ): CommonModelConfig => ({\n provider: `baseten.${modelType}`,\n url: ({ path }) => {\n // For embeddings with /sync URLs (but not /sync/v1), we need to add /v1\n if (\n modelType === 'embedding' &&\n customURL?.includes('/sync') &&\n !customURL?.includes('/sync/v1')\n ) {\n return `${customURL}/v1${path}`;\n }\n return `${customURL || baseURL}${path}`;\n },\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId?: BasetenChatModelId) => {\n // Use modelURL if provided, otherwise use default Model APIs\n const customURL = options.modelURL;\n\n if (customURL) {\n // Check if this is a /sync/v1 endpoint (OpenAI-compatible) or /predict endpoint (custom)\n const isOpenAICompatible = customURL.includes('/sync/v1');\n\n if (isOpenAICompatible) {\n // For /sync/v1 endpoints, use standard OpenAI-compatible format\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'placeholder', {\n ...getCommonModelConfig('chat', customURL),\n errorStructure: basetenErrorStructure,\n });\n } else if (customURL.includes('/predict')) {\n throw new Error(\n 'Not supported. You must use a /sync/v1 endpoint for chat models.',\n );\n }\n }\n\n // Use default OpenAI-compatible format for Model APIs\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'chat', {\n ...getCommonModelConfig('chat'),\n errorStructure: basetenErrorStructure,\n });\n };\n\n const createTextEmbeddingModel = (modelId?: BasetenEmbeddingModelId) => {\n // Use modelURL if provided\n const customURL = options.modelURL;\n if (!customURL) {\n throw new Error(\n 'No model URL provided for embeddings. Please set modelURL option for embeddings.',\n );\n }\n\n // Check if this is a /sync or /sync/v1 endpoint (OpenAI-compatible)\n // We support both /sync and /sync/v1, stripping /v1 before passing to Performance Client, as Performance Client adds /v1 itself\n const isOpenAICompatible = customURL.includes('/sync');\n\n if (isOpenAICompatible) {\n // Create the model using OpenAICompatibleEmbeddingModel and override doEmbed\n const model = new OpenAICompatibleEmbeddingModel(\n modelId ?? 
'embeddings',\n {\n ...getCommonModelConfig('embedding', customURL),\n errorStructure: basetenErrorStructure,\n },\n );\n\n // Strip /v1 from URL if present before passing to Performance Client to avoid double /v1\n const performanceClientURL = customURL.replace('/sync/v1', '/sync');\n\n // Initialize the B10 Performance Client once for reuse\n const performanceClient = new PerformanceClient(\n performanceClientURL,\n loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n }),\n );\n\n // Override the doEmbed method to use the pre-created Performance Client\n model.doEmbed = async params => {\n if (!params.values || !Array.isArray(params.values)) {\n throw new Error('params.values must be an array of strings');\n }\n\n // Performance Client handles batching internally, so we don't need to limit in 128 here\n const response = await performanceClient.embed(\n params.values,\n modelId ?? 'embeddings', // model_id is for Model APIs, we don't use it here for dedicated\n );\n // Transform the response to match the expected format\n const embeddings = response.data.map((item: any) => item.embedding);\n\n return {\n embeddings: embeddings,\n usage: response.usage\n ? { tokens: response.usage.total_tokens }\n : undefined,\n response: { headers: {}, body: response },\n };\n };\n\n return model;\n } else {\n throw new Error(\n 'Not supported. You must use a /sync or /sync/v1 endpoint for embeddings.',\n );\n }\n };\n\n const provider = (modelId?: BasetenChatModelId) => createChatModel(modelId);\n provider.chatModel = createChatModel;\n provider.languageModel = createChatModel;\n provider.imageModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'imageModel' });\n };\n provider.textEmbeddingModel = createTextEmbeddingModel;\n return provider;\n}\n\nexport const baseten = createBaseten();\n","// Version string of this package injected at build time.\ndeclare const __PACKAGE_VERSION__: string | undefined;\nexport const VERSION: string =\n typeof __PACKAGE_VERSION__ !== 'undefined'\n ? 
__PACKAGE_VERSION__\n : '0.0.0-test';\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,+BAIO;AACP,sBAKO;AACP,4BAKO;AACP,gBAAkB;AAGlB,gCAAkC;;;AClB3B,IAAM,UACX,OACI,UACA;;;ADoBN,IAAM,qBAAqB,YAAE,OAAO;AAAA,EAClC,OAAO,YAAE,OAAO;AAClB,CAAC;AAED,IAAM,wBAAkE;AAAA,EACtE,aAAa;AAAA,EACb,gBAAgB,UAAQ,KAAK;AAC/B;AAwDA,IAAM,iBAAiB;AAEhB,SAAS,cACd,UAAmC,CAAC,GACnB;AA5FnB;AA6FE,QAAM,cAAU,6CAAqB,aAAQ,YAAR,YAAmB,cAAc;AACtE,QAAM,aAAa,UACjB;AAAA,IACE;AAAA,MACE,eAAe,cAAU,kCAAW;AAAA,QAClC,QAAQ,QAAQ;AAAA,QAChB,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,MACF,GAAG,QAAQ;AAAA,IACb;AAAA,IACA,kBAAkB,OAAO;AAAA,EAC3B;AASF,QAAM,uBAAuB,CAC3B,WACA,eACuB;AAAA,IACvB,UAAU,WAAW,SAAS;AAAA,IAC9B,KAAK,CAAC,EAAE,KAAK,MAAM;AAEjB,UACE,cAAc,gBACd,uCAAW,SAAS,aACpB,EAAC,uCAAW,SAAS,cACrB;AACA,eAAO,GAAG,SAAS,MAAM,IAAI;AAAA,MAC/B;AACA,aAAO,GAAG,aAAa,OAAO,GAAG,IAAI;AAAA,IACvC;AAAA,IACA,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAiC;AAExD,UAAM,YAAY,QAAQ;AAE1B,QAAI,WAAW;AAEb,YAAM,qBAAqB,UAAU,SAAS,UAAU;AAExD,UAAI,oBAAoB;AAEtB,eAAO,IAAI,2DAAkC,4BAAW,eAAe;AAAA,UACrE,GAAG,qBAAqB,QAAQ,SAAS;AAAA,UACzC,gBAAgB;AAAA,QAClB,CAAC;AAAA,MACH,WAAW,UAAU,SAAS,UAAU,GAAG;AACzC,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,WAAO,IAAI,2DAAkC,4BAAW,QAAQ;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,gBAAgB;AAAA,IAClB,CAAC;AAAA,EACH;AAEA,QAAM,2BAA2B,CAAC,YAAsC;AAEtE,UAAM,YAAY,QAAQ;AAC1B,QAAI,CAAC,WAAW;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAIA,UAAM,qBAAqB,UAAU,SAAS,OAAO;AAErD,QAAI,oBAAoB;AAEtB,YAAM,QAAQ,IAAI;AAAA,QAChB,4BAAW;AAAA,QACX;AAAA,UACE,GAAG,qBAAqB,aAAa,SAAS;AAAA,UAC9C,gBAAgB;AAAA,QAClB;AAAA,MACF;AAGA,YAAM,uBAAuB,UAAU,QAAQ,YAAY,OAAO;AAGlE,YAAM,oBAAoB,IAAI;AAAA,QAC5B;AAAA,YACA,kCAAW;AAAA,UACT,QAAQ,QAAQ;AAAA,UAChB,yBAAyB;AAAA,UACzB,aAAa;AAAA,QACf,CAAC;AAAA,MACH;AAGA,YAAM,UAAU,OAAM,WAAU;AAC9B,YAAI,CAAC,OAAO,UAAU,CAAC,MAAM,QAAQ,OAAO,MAAM,GAAG;AACnD,gBAAM,IAAI,MAAM,2CAA2C;AAAA,QAC7D;AAGA,cAAM,WAAW,MAAM,kBAAkB;AAAA,UACvC,OAAO;AAAA,UACP,4BAAW;AAAA;AAAA,QACb;AAEA,cAAM,aAAa,SAAS,KAAK,IAAI,CAAC,SAAc,KAAK,SAAS;AAElE,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS,QACZ,EAAE,QAAQ,SAAS,MAAM,aAAa,IACtC;AAAA,UACJ,UAAU,EAAE,SAAS,CAAC,GAAG,MAAM,SAAS;AAAA,QAC1C;AAAA,MACF;AAEA,aAAO;AAAA,IACT,OAAO;AACL,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,WAAW,CAAC,YAAiC,gBAAgB,OAAO;AAC1E,WAAS,YAAY;AACrB,WAAS,gBAAgB;AACzB,WAAS,aAAa,CAAC,YAAoB;AACzC,UAAM,IAAI,iCAAiB,EAAE,SAAS,WAAW,aAAa,CAAC;AAAA,EACjE;AACA,WAAS,qBAAqB;AAC9B,SAAO;AACT;AAEO,IAAM,UAAU,cAAc;","names":[]}
package/dist/index.mjs ADDED
@@ -0,0 +1,137 @@
+ // src/baseten-provider.ts
+ import {
+   OpenAICompatibleChatLanguageModel,
+   OpenAICompatibleEmbeddingModel
+ } from "@ai-sdk/openai-compatible";
+ import {
+   NoSuchModelError
+ } from "@ai-sdk/provider";
+ import {
+   loadApiKey,
+   withoutTrailingSlash,
+   withUserAgentSuffix
+ } from "@ai-sdk/provider-utils";
+ import { z } from "zod/v4";
+ import { PerformanceClient } from "@basetenlabs/performance-client";
+
+ // src/version.ts
+ var VERSION = true ? "0.0.1" : "0.0.0-test";
+
+ // src/baseten-provider.ts
+ var basetenErrorSchema = z.object({
+   error: z.string()
+ });
+ var basetenErrorStructure = {
+   errorSchema: basetenErrorSchema,
+   errorToMessage: (data) => data.error
+ };
+ var defaultBaseURL = "https://inference.baseten.co/v1";
+ function createBaseten(options = {}) {
+   var _a;
+   const baseURL = withoutTrailingSlash((_a = options.baseURL) != null ? _a : defaultBaseURL);
+   const getHeaders = () => withUserAgentSuffix(
+     {
+       Authorization: `Bearer ${loadApiKey({
+         apiKey: options.apiKey,
+         environmentVariableName: "BASETEN_API_KEY",
+         description: "Baseten API key"
+       })}`,
+       ...options.headers
+     },
+     `ai-sdk/baseten/${VERSION}`
+   );
+   const getCommonModelConfig = (modelType, customURL) => ({
+     provider: `baseten.${modelType}`,
+     url: ({ path }) => {
+       if (modelType === "embedding" && (customURL == null ? void 0 : customURL.includes("/sync")) && !(customURL == null ? void 0 : customURL.includes("/sync/v1"))) {
+         return `${customURL}/v1${path}`;
+       }
+       return `${customURL || baseURL}${path}`;
+     },
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createChatModel = (modelId) => {
+     const customURL = options.modelURL;
+     if (customURL) {
+       const isOpenAICompatible = customURL.includes("/sync/v1");
+       if (isOpenAICompatible) {
+         return new OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "placeholder", {
+           ...getCommonModelConfig("chat", customURL),
+           errorStructure: basetenErrorStructure
+         });
+       } else if (customURL.includes("/predict")) {
+         throw new Error(
+           "Not supported. You must use a /sync/v1 endpoint for chat models."
+         );
+       }
+     }
+     return new OpenAICompatibleChatLanguageModel(modelId != null ? modelId : "chat", {
+       ...getCommonModelConfig("chat"),
+       errorStructure: basetenErrorStructure
+     });
+   };
+   const createTextEmbeddingModel = (modelId) => {
+     const customURL = options.modelURL;
+     if (!customURL) {
+       throw new Error(
+         "No model URL provided for embeddings. Please set modelURL option for embeddings."
+       );
+     }
+     const isOpenAICompatible = customURL.includes("/sync");
+     if (isOpenAICompatible) {
+       const model = new OpenAICompatibleEmbeddingModel(
+         modelId != null ? modelId : "embeddings",
+         {
+           ...getCommonModelConfig("embedding", customURL),
+           errorStructure: basetenErrorStructure
+         }
+       );
+       const performanceClientURL = customURL.replace("/sync/v1", "/sync");
+       const performanceClient = new PerformanceClient(
+         performanceClientURL,
+         loadApiKey({
+           apiKey: options.apiKey,
+           environmentVariableName: "BASETEN_API_KEY",
+           description: "Baseten API key"
+         })
+       );
+       model.doEmbed = async (params) => {
+         if (!params.values || !Array.isArray(params.values)) {
+           throw new Error("params.values must be an array of strings");
+         }
+         const response = await performanceClient.embed(
+           params.values,
+           modelId != null ? modelId : "embeddings"
+           // model_id is for Model APIs, we don't use it here for dedicated
+         );
+         const embeddings = response.data.map((item) => item.embedding);
+         return {
+           embeddings,
+           usage: response.usage ? { tokens: response.usage.total_tokens } : void 0,
+           response: { headers: {}, body: response }
+         };
+       };
+       return model;
+     } else {
+       throw new Error(
+         "Not supported. You must use a /sync or /sync/v1 endpoint for embeddings."
+       );
+     }
+   };
+   const provider = (modelId) => createChatModel(modelId);
+   provider.chatModel = createChatModel;
+   provider.languageModel = createChatModel;
+   provider.imageModel = (modelId) => {
+     throw new NoSuchModelError({ modelId, modelType: "imageModel" });
+   };
+   provider.textEmbeddingModel = createTextEmbeddingModel;
+   return provider;
+ }
+ var baseten = createBaseten();
+ export {
+   VERSION,
+   baseten,
+   createBaseten
+ };
+ //# sourceMappingURL=index.mjs.map
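`createChatModel` above accepts a dedicated deployment only when its URL contains `/sync/v1`, and rejects `/predict` endpoints outright. A usage sketch under that rule — the deployment URL below is a placeholder:

```ts
import { createBaseten } from '@ai-sdk/baseten';
import { generateText } from 'ai';

// Chat against a dedicated deployment: the URL must contain /sync/v1,
// otherwise a /predict URL makes createChatModel throw.
const baseten = createBaseten({
  modelURL:
    'https://model-xxxxxxx.api.baseten.co/environments/production/sync/v1',
});

const { text } = await generateText({
  model: baseten(), // modelId optional here; the code falls back to 'placeholder'
  prompt: 'Summarize the Baseten provider in one sentence.',
});
```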
package/dist/index.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/baseten-provider.ts","../src/version.ts"],"sourcesContent":["import {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleEmbeddingModel,\n ProviderErrorStructure,\n} from '@ai-sdk/openai-compatible';\nimport {\n EmbeddingModelV2,\n LanguageModelV2,\n NoSuchModelError,\n ProviderV2,\n} from '@ai-sdk/provider';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n withUserAgentSuffix,\n} from '@ai-sdk/provider-utils';\nimport { z } from 'zod/v4';\nimport { BasetenChatModelId } from './baseten-chat-options';\nimport { BasetenEmbeddingModelId } from './baseten-embedding-options';\nimport { PerformanceClient } from '@basetenlabs/performance-client';\nimport { VERSION } from './version';\n\nexport type BasetenErrorData = z.infer<typeof basetenErrorSchema>;\n\nconst basetenErrorSchema = z.object({\n error: z.string(),\n});\n\nconst basetenErrorStructure: ProviderErrorStructure<BasetenErrorData> = {\n errorSchema: basetenErrorSchema,\n errorToMessage: data => data.error,\n};\n\nexport interface BasetenProviderSettings {\n /**\n * Baseten API key. Default value is taken from the `BASETEN_API_KEY`\n * environment variable.\n */\n apiKey?: string;\n\n /**\n * Base URL for the Model APIs. Default: 'https://inference.baseten.co/v1'\n */\n baseURL?: string;\n\n /**\n * Model URL for custom models (chat or embeddings).\n * If not supplied, the default Model APIs will be used.\n */\n modelURL?: string;\n /**\n * Custom headers to include in the requests.\n */\n headers?: Record<string, string>;\n\n /**\n * Custom fetch implementation. You can use it as a middleware to intercept requests,\n * or to provide a custom fetch implementation for e.g. testing.\n */\n fetch?: FetchFunction;\n}\n\nexport interface BasetenProvider extends ProviderV2 {\n /**\nCreates a chat model for text generation. \n*/\n (modelId?: BasetenChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation. \n*/\n chatModel(modelId?: BasetenChatModelId): LanguageModelV2;\n\n /**\nCreates a language model for text generation. Alias for chatModel.\n*/\n languageModel(modelId?: BasetenChatModelId): LanguageModelV2;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId?: BasetenEmbeddingModelId,\n ): EmbeddingModelV2<string>;\n}\n\n// by default, we use the Model APIs\nconst defaultBaseURL = 'https://inference.baseten.co/v1';\n\nexport function createBaseten(\n options: BasetenProviderSettings = {},\n): BasetenProvider {\n const baseURL = withoutTrailingSlash(options.baseURL ?? 
defaultBaseURL);\n const getHeaders = () =>\n withUserAgentSuffix(\n {\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n })}`,\n ...options.headers,\n },\n `ai-sdk/baseten/${VERSION}`,\n );\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (\n modelType: string,\n customURL?: string,\n ): CommonModelConfig => ({\n provider: `baseten.${modelType}`,\n url: ({ path }) => {\n // For embeddings with /sync URLs (but not /sync/v1), we need to add /v1\n if (\n modelType === 'embedding' &&\n customURL?.includes('/sync') &&\n !customURL?.includes('/sync/v1')\n ) {\n return `${customURL}/v1${path}`;\n }\n return `${customURL || baseURL}${path}`;\n },\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId?: BasetenChatModelId) => {\n // Use modelURL if provided, otherwise use default Model APIs\n const customURL = options.modelURL;\n\n if (customURL) {\n // Check if this is a /sync/v1 endpoint (OpenAI-compatible) or /predict endpoint (custom)\n const isOpenAICompatible = customURL.includes('/sync/v1');\n\n if (isOpenAICompatible) {\n // For /sync/v1 endpoints, use standard OpenAI-compatible format\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'placeholder', {\n ...getCommonModelConfig('chat', customURL),\n errorStructure: basetenErrorStructure,\n });\n } else if (customURL.includes('/predict')) {\n throw new Error(\n 'Not supported. You must use a /sync/v1 endpoint for chat models.',\n );\n }\n }\n\n // Use default OpenAI-compatible format for Model APIs\n return new OpenAICompatibleChatLanguageModel(modelId ?? 'chat', {\n ...getCommonModelConfig('chat'),\n errorStructure: basetenErrorStructure,\n });\n };\n\n const createTextEmbeddingModel = (modelId?: BasetenEmbeddingModelId) => {\n // Use modelURL if provided\n const customURL = options.modelURL;\n if (!customURL) {\n throw new Error(\n 'No model URL provided for embeddings. Please set modelURL option for embeddings.',\n );\n }\n\n // Check if this is a /sync or /sync/v1 endpoint (OpenAI-compatible)\n // We support both /sync and /sync/v1, stripping /v1 before passing to Performance Client, as Performance Client adds /v1 itself\n const isOpenAICompatible = customURL.includes('/sync');\n\n if (isOpenAICompatible) {\n // Create the model using OpenAICompatibleEmbeddingModel and override doEmbed\n const model = new OpenAICompatibleEmbeddingModel(\n modelId ?? 
'embeddings',\n {\n ...getCommonModelConfig('embedding', customURL),\n errorStructure: basetenErrorStructure,\n },\n );\n\n // Strip /v1 from URL if present before passing to Performance Client to avoid double /v1\n const performanceClientURL = customURL.replace('/sync/v1', '/sync');\n\n // Initialize the B10 Performance Client once for reuse\n const performanceClient = new PerformanceClient(\n performanceClientURL,\n loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'BASETEN_API_KEY',\n description: 'Baseten API key',\n }),\n );\n\n // Override the doEmbed method to use the pre-created Performance Client\n model.doEmbed = async params => {\n if (!params.values || !Array.isArray(params.values)) {\n throw new Error('params.values must be an array of strings');\n }\n\n // Performance Client handles batching internally, so we don't need to limit in 128 here\n const response = await performanceClient.embed(\n params.values,\n modelId ?? 'embeddings', // model_id is for Model APIs, we don't use it here for dedicated\n );\n // Transform the response to match the expected format\n const embeddings = response.data.map((item: any) => item.embedding);\n\n return {\n embeddings: embeddings,\n usage: response.usage\n ? { tokens: response.usage.total_tokens }\n : undefined,\n response: { headers: {}, body: response },\n };\n };\n\n return model;\n } else {\n throw new Error(\n 'Not supported. You must use a /sync or /sync/v1 endpoint for embeddings.',\n );\n }\n };\n\n const provider = (modelId?: BasetenChatModelId) => createChatModel(modelId);\n provider.chatModel = createChatModel;\n provider.languageModel = createChatModel;\n provider.imageModel = (modelId: string) => {\n throw new NoSuchModelError({ modelId, modelType: 'imageModel' });\n };\n provider.textEmbeddingModel = createTextEmbeddingModel;\n return provider;\n}\n\nexport const baseten = createBaseten();\n","// Version string of this package injected at build time.\ndeclare const __PACKAGE_VERSION__: string | undefined;\nexport const VERSION: string =\n typeof __PACKAGE_VERSION__ !== 'undefined'\n ? 
__PACKAGE_VERSION__\n : '0.0.0-test';\n"],"mappings":";AAAA;AAAA,EACE;AAAA,EACA;AAAA,OAEK;AACP;AAAA,EAGE;AAAA,OAEK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,SAAS;AAGlB,SAAS,yBAAyB;;;AClB3B,IAAM,UACX,OACI,UACA;;;ADoBN,IAAM,qBAAqB,EAAE,OAAO;AAAA,EAClC,OAAO,EAAE,OAAO;AAClB,CAAC;AAED,IAAM,wBAAkE;AAAA,EACtE,aAAa;AAAA,EACb,gBAAgB,UAAQ,KAAK;AAC/B;AAwDA,IAAM,iBAAiB;AAEhB,SAAS,cACd,UAAmC,CAAC,GACnB;AA5FnB;AA6FE,QAAM,UAAU,sBAAqB,aAAQ,YAAR,YAAmB,cAAc;AACtE,QAAM,aAAa,MACjB;AAAA,IACE;AAAA,MACE,eAAe,UAAU,WAAW;AAAA,QAClC,QAAQ,QAAQ;AAAA,QAChB,yBAAyB;AAAA,QACzB,aAAa;AAAA,MACf,CAAC,CAAC;AAAA,MACF,GAAG,QAAQ;AAAA,IACb;AAAA,IACA,kBAAkB,OAAO;AAAA,EAC3B;AASF,QAAM,uBAAuB,CAC3B,WACA,eACuB;AAAA,IACvB,UAAU,WAAW,SAAS;AAAA,IAC9B,KAAK,CAAC,EAAE,KAAK,MAAM;AAEjB,UACE,cAAc,gBACd,uCAAW,SAAS,aACpB,EAAC,uCAAW,SAAS,cACrB;AACA,eAAO,GAAG,SAAS,MAAM,IAAI;AAAA,MAC/B;AACA,aAAO,GAAG,aAAa,OAAO,GAAG,IAAI;AAAA,IACvC;AAAA,IACA,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAiC;AAExD,UAAM,YAAY,QAAQ;AAE1B,QAAI,WAAW;AAEb,YAAM,qBAAqB,UAAU,SAAS,UAAU;AAExD,UAAI,oBAAoB;AAEtB,eAAO,IAAI,kCAAkC,4BAAW,eAAe;AAAA,UACrE,GAAG,qBAAqB,QAAQ,SAAS;AAAA,UACzC,gBAAgB;AAAA,QAClB,CAAC;AAAA,MACH,WAAW,UAAU,SAAS,UAAU,GAAG;AACzC,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,WAAO,IAAI,kCAAkC,4BAAW,QAAQ;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,gBAAgB;AAAA,IAClB,CAAC;AAAA,EACH;AAEA,QAAM,2BAA2B,CAAC,YAAsC;AAEtE,UAAM,YAAY,QAAQ;AAC1B,QAAI,CAAC,WAAW;AACd,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAIA,UAAM,qBAAqB,UAAU,SAAS,OAAO;AAErD,QAAI,oBAAoB;AAEtB,YAAM,QAAQ,IAAI;AAAA,QAChB,4BAAW;AAAA,QACX;AAAA,UACE,GAAG,qBAAqB,aAAa,SAAS;AAAA,UAC9C,gBAAgB;AAAA,QAClB;AAAA,MACF;AAGA,YAAM,uBAAuB,UAAU,QAAQ,YAAY,OAAO;AAGlE,YAAM,oBAAoB,IAAI;AAAA,QAC5B;AAAA,QACA,WAAW;AAAA,UACT,QAAQ,QAAQ;AAAA,UAChB,yBAAyB;AAAA,UACzB,aAAa;AAAA,QACf,CAAC;AAAA,MACH;AAGA,YAAM,UAAU,OAAM,WAAU;AAC9B,YAAI,CAAC,OAAO,UAAU,CAAC,MAAM,QAAQ,OAAO,MAAM,GAAG;AACnD,gBAAM,IAAI,MAAM,2CAA2C;AAAA,QAC7D;AAGA,cAAM,WAAW,MAAM,kBAAkB;AAAA,UACvC,OAAO;AAAA,UACP,4BAAW;AAAA;AAAA,QACb;AAEA,cAAM,aAAa,SAAS,KAAK,IAAI,CAAC,SAAc,KAAK,SAAS;AAElE,eAAO;AAAA,UACL;AAAA,UACA,OAAO,SAAS,QACZ,EAAE,QAAQ,SAAS,MAAM,aAAa,IACtC;AAAA,UACJ,UAAU,EAAE,SAAS,CAAC,GAAG,MAAM,SAAS;AAAA,QAC1C;AAAA,MACF;AAEA,aAAO;AAAA,IACT,OAAO;AACL,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,QAAM,WAAW,CAAC,YAAiC,gBAAgB,OAAO;AAC1E,WAAS,YAAY;AACrB,WAAS,gBAAgB;AACzB,WAAS,aAAa,CAAC,YAAoB;AACzC,UAAM,IAAI,iBAAiB,EAAE,SAAS,WAAW,aAAa,CAAC;AAAA,EACjE;AACA,WAAS,qBAAqB;AAC9B,SAAO;AACT;AAEO,IAAM,UAAU,cAAc;","names":[]}
package/package.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "name": "@ai-sdk/baseten",
+   "version": "0.0.1",
+   "license": "Apache-2.0",
+   "sideEffects": false,
+   "main": "./dist/index.js",
+   "module": "./dist/index.mjs",
+   "types": "./dist/index.d.ts",
+   "files": [
+     "dist/**/*",
+     "CHANGELOG.md"
+   ],
+   "exports": {
+     "./package.json": "./package.json",
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.mjs",
+       "require": "./dist/index.js"
+     }
+   },
+   "dependencies": {
+     "@basetenlabs/performance-client": "^0.0.10",
+     "@ai-sdk/openai-compatible": "1.0.19",
+     "@ai-sdk/provider": "2.0.0",
+     "@ai-sdk/provider-utils": "3.0.10"
+   },
+   "devDependencies": {
+     "@types/node": "20.17.24",
+     "tsup": "^8",
+     "typescript": "5.8.3",
+     "zod": "3.25.76",
+     "@vercel/ai-tsconfig": "0.0.0"
+   },
+   "peerDependencies": {
+     "zod": "^3.25.76 || ^4.1.8"
+   },
+   "engines": {
+     "node": ">=18"
+   },
+   "publishConfig": {
+     "access": "public"
+   },
+   "homepage": "https://ai-sdk.dev/docs",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/vercel/ai.git"
+   },
+   "bugs": {
+     "url": "https://github.com/vercel/ai/issues"
+   },
+   "keywords": [
+     "ai"
+   ],
+   "scripts": {
+     "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
+     "build:watch": "pnpm clean && tsup --watch",
+     "clean": "rm -rf dist *.tsbuildinfo",
+     "lint": "eslint \"./**/*.ts*\"",
+     "type-check": "tsc --build",
+     "prettier-check": "prettier --check \"./**/*.ts*\"",
+     "test": "pnpm test:node && pnpm test:edge",
+     "test:update": "pnpm test:node -u",
+     "test:watch": "vitest --config vitest.node.config.js",
+     "test:edge": "vitest --config vitest.edge.config.js --run",
+     "test:node": "vitest --config vitest.node.config.js --run"
+   }
+ }
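The `exports` map resolves the `import` condition to `dist/index.mjs` and the `require` condition to `dist/index.js`, with shared typings at `dist/index.d.ts`. A quick consumption sketch of both formats:

```ts
// ESM — resolves via the "import" condition to dist/index.mjs
import { baseten, createBaseten, VERSION } from '@ai-sdk/baseten';

// CJS — resolves via the "require" condition to dist/index.js
// const { baseten, createBaseten, VERSION } = require('@ai-sdk/baseten');

console.log(VERSION); // "0.0.1"
```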