@ai-sdk/baseten 1.0.0-beta.4 → 1.0.0-beta.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/dist/index.d.mts +4 -4
- package/dist/index.d.ts +4 -4
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -5
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,22 @@
 # @ai-sdk/baseten
 
+## 1.0.0-beta.6
+
+### Patch Changes
+
+- 916bc46: bumped performance client to 0.0.10
+
+## 1.0.0-beta.5
+
+### Patch Changes
+
+- 8dac895: feat: `LanguageModelV3`
+- Updated dependencies [8dac895]
+- Updated dependencies [10c1322]
+  - @ai-sdk/openai-compatible@1.1.0-beta.5
+  - @ai-sdk/provider-utils@3.1.0-beta.5
+  - @ai-sdk/provider@2.1.0-beta.3
+
 ## 1.0.0-beta.4
 
 ### Patch Changes
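The beta.5 entry is the notable one: the provider's chat entry points move from `LanguageModelV2` to `LanguageModelV3`. Day-to-day usage through the AI SDK should be unchanged. A minimal sketch, assuming an `ai` beta release that accepts `LanguageModelV3` models and `BASETEN_API_KEY` set in the environment; the model slug is a placeholder:

```ts
import { generateText } from 'ai';
import { baseten } from '@ai-sdk/baseten';

async function main() {
  const { text } = await generateText({
    // With no modelURL configured, requests go to the default Model APIs
    // at https://inference.baseten.co/v1. 'some-model-slug' is a placeholder.
    model: baseten('some-model-slug'),
    prompt: 'Say hello from Baseten.',
  });
  console.log(text);
}

main().catch(console.error);
```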
package/dist/index.d.mts
CHANGED
@@ -1,4 +1,4 @@
-import { ProviderV3, LanguageModelV2, EmbeddingModelV3 } from '@ai-sdk/provider';
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 
@@ -39,15 +39,15 @@ interface BasetenProvider extends ProviderV3 {
   /**
 Creates a chat model for text generation.
 */
-  (modelId?: BasetenChatModelId): LanguageModelV2;
+  (modelId?: BasetenChatModelId): LanguageModelV3;
   /**
 Creates a chat model for text generation.
 */
-  chatModel(modelId?: BasetenChatModelId): LanguageModelV2;
+  chatModel(modelId?: BasetenChatModelId): LanguageModelV3;
   /**
 Creates a language model for text generation. Alias for chatModel.
 */
-  languageModel(modelId?: BasetenChatModelId): LanguageModelV2;
+  languageModel(modelId?: BasetenChatModelId): LanguageModelV3;
   /**
 Creates a text embedding model for text generation.
 */
package/dist/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { ProviderV3, LanguageModelV2, EmbeddingModelV3 } from '@ai-sdk/provider';
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 
@@ -39,15 +39,15 @@ interface BasetenProvider extends ProviderV3 {
   /**
 Creates a chat model for text generation.
 */
-  (modelId?: BasetenChatModelId): LanguageModelV2;
+  (modelId?: BasetenChatModelId): LanguageModelV3;
   /**
 Creates a chat model for text generation.
 */
-  chatModel(modelId?: BasetenChatModelId): LanguageModelV2;
+  chatModel(modelId?: BasetenChatModelId): LanguageModelV3;
   /**
 Creates a language model for text generation. Alias for chatModel.
 */
-  languageModel(modelId?: BasetenChatModelId): LanguageModelV2;
+  languageModel(modelId?: BasetenChatModelId): LanguageModelV3;
   /**
 Creates a text embedding model for text generation.
 */
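Both declaration files change identically; the visible effect for consumers is the return type of the three chat entry points. A small type-level sketch of the new signatures, with 'my-model' as a placeholder id:

```ts
import type { LanguageModelV3 } from '@ai-sdk/provider';
import { createBaseten } from '@ai-sdk/baseten';

const provider = createBaseten();

// All three entry points now type as LanguageModelV3 (previously LanguageModelV2):
const viaCall: LanguageModelV3 = provider('my-model');
const viaChatModel: LanguageModelV3 = provider.chatModel('my-model');
const viaAlias: LanguageModelV3 = provider.languageModel('my-model'); // alias for chatModel
```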
package/dist/index.js
CHANGED
@@ -34,7 +34,7 @@ var import_v4 = require("zod/v4");
 var import_performance_client = require("@basetenlabs/performance-client");
 
 // src/version.ts
-var VERSION = true ? "1.0.0-beta.4" : "0.0.0-test";
+var VERSION = true ? "1.0.0-beta.6" : "0.0.0-test";
 
 // src/baseten-provider.ts
 var basetenErrorSchema = import_v4.z.object({
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
The single-line source map (sources: src/index.ts, src/baseten-provider.ts, src/version.ts) is regenerated. The only substantive change is inside the embedded sourcesContent for src/baseten-provider.ts: the import from '@ai-sdk/provider' and the three chat entry points (the call signature, chatModel, and languageModel) change from LanguageModelV2 to LanguageModelV3, matching the index.d.ts diff above.
package/dist/index.mjs
CHANGED
@@ -15,7 +15,7 @@ import { z } from "zod/v4";
 import { PerformanceClient } from "@basetenlabs/performance-client";
 
 // src/version.ts
-var VERSION = true ? "1.0.0-beta.4" : "0.0.0-test";
+var VERSION = true ? "1.0.0-beta.6" : "0.0.0-test";
 
 // src/baseten-provider.ts
 var basetenErrorSchema = z.object({
package/dist/index.mjs.map
CHANGED
@@ -1 +1 @@
Same change as index.js.map: the single-line source map (sources: src/baseten-provider.ts, src/version.ts) is regenerated, and within the embedded sourcesContent for src/baseten-provider.ts the '@ai-sdk/provider' import and the three chat-model return types move from LanguageModelV2 to LanguageModelV3.
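The sourcesContent embedded in both maps also documents the provider's URL rules: a custom chat deployment must use a /sync/v1 endpoint (/predict URLs throw), and embeddings require a modelURL on a /sync or /sync/v1 endpoint, with any trailing /v1 stripped before the URL reaches PerformanceClient, which appends /v1 itself. A configuration sketch based on that embedded source; the deployment URLs are hypothetical placeholders:

```ts
import { createBaseten } from '@ai-sdk/baseten';

// Chat against a dedicated deployment: the URL must contain /sync/v1;
// a /predict URL makes chat model creation throw.
const chatProvider = createBaseten({
  modelURL: 'https://model-abc123.api.baseten.co/environments/production/sync/v1',
});
const chatModel = chatProvider.chatModel();

// Embeddings: modelURL is required; /sync and /sync/v1 are both accepted.
// textEmbeddingModel() reads BASETEN_API_KEY eagerly, since the
// PerformanceClient is constructed up front for reuse.
const embeddingProvider = createBaseten({
  modelURL: 'https://model-xyz789.api.baseten.co/environments/production/sync',
});
const embeddingModel = embeddingProvider.textEmbeddingModel();
```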
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ai-sdk/baseten",
-  "version": "1.0.0-beta.4",
+  "version": "1.0.0-beta.6",
   "license": "Apache-2.0",
   "sideEffects": false,
   "main": "./dist/index.js",
@@ -19,10 +19,10 @@
     }
   },
   "dependencies": {
-    "@basetenlabs/performance-client": "^0.0.
-    "@ai-sdk/openai-compatible": "1.1.0-beta.
-    "@ai-sdk/provider": "2.1.0-beta.
-    "@ai-sdk/provider-utils": "3.1.0-beta.
+    "@basetenlabs/performance-client": "^0.0.10",
+    "@ai-sdk/openai-compatible": "1.1.0-beta.5",
+    "@ai-sdk/provider": "2.1.0-beta.3",
+    "@ai-sdk/provider-utils": "3.1.0-beta.5"
   },
   "devDependencies": {
     "@types/node": "20.17.24",