ai-providers 0.2.0 → 0.3.0

@@ -0,0 +1,480 @@
+ /**
+  * Unified AI Provider Registry
+  *
+  * Centralizes access to multiple AI providers via simple string identifiers.
+  *
+  * Smart routing:
+  * - openai/* models → OpenAI SDK (via gateway)
+  * - anthropic/* models → Anthropic SDK (via gateway)
+  * - google/* models → Google AI SDK (via gateway)
+  * - All other models → OpenRouter (via gateway)
+  *
+  * Supports simple aliases: 'opus' → anthropic/claude-opus-4.5
+  *
+  * @packageDocumentation
+  */
+
+ import { createProviderRegistry, type Provider, type ProviderRegistryProvider, type LanguageModel, type EmbeddingModel } from 'ai'
+
+ /**
+  * Available provider IDs
+  */
+ export type ProviderId = 'openai' | 'anthropic' | 'google' | 'openrouter' | 'cloudflare' | 'bedrock'
+
+ /**
+  * Providers that get direct SDK access (not via OpenRouter).
+  * These support special capabilities like MCP, structured outputs, etc.
+  * Re-exported from language-models for consistency.
+  */
+ export { DIRECT_PROVIDERS, type DirectProvider } from 'language-models'
+
+ /**
+  * Provider configuration options
+  */
+ export interface ProviderConfig {
+   /** Cloudflare AI Gateway URL (e.g., https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_name}) */
+   gatewayUrl?: string
+   /** AI Gateway auth token */
+   gatewayToken?: string
+
+   /** Use llm.do WebSocket transport instead of HTTP (persistent connection) */
+   useWebSocket?: boolean
+   /** llm.do WebSocket URL (default: wss://llm.do/ws) */
+   llmUrl?: string
+
+   /** OpenAI API key (fallback if no gateway) */
+   openaiApiKey?: string
+   /** Anthropic API key (fallback if no gateway) */
+   anthropicApiKey?: string
+   /** Google AI API key (fallback if no gateway) */
+   googleApiKey?: string
+   /** OpenRouter API key (fallback if no gateway) */
+   openrouterApiKey?: string
+   /** Cloudflare Account ID */
+   cloudflareAccountId?: string
+   /** Cloudflare API Token (fallback if no gateway) */
+   cloudflareApiToken?: string
+
+   /** Custom base URLs (override the gateway) */
+   baseUrls?: Partial<Record<ProviderId, string>>
+ }
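
All fields are optional; when a gateway is configured, the per-provider keys serve only as fallbacks. A minimal sketch of how this config is consumed (the URL and token are placeholders, and `createRegistry` is defined later in this diff):

```ts
import { createRegistry } from 'ai-providers'

// Placeholder gateway values for illustration, not real credentials
const registry = await createRegistry({
  gatewayUrl: 'https://gateway.ai.cloudflare.com/v1/ACCOUNT_ID/my-gateway',
  gatewayToken: process.env.AI_GATEWAY_TOKEN,
  // Direct API keys are used only when no gateway is configured
  openaiApiKey: process.env.OPENAI_API_KEY,
})
```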
+
+ /**
+  * Cloudflare AI Gateway provider endpoint mapping
+  */
+ const GATEWAY_PROVIDER_PATHS: Record<ProviderId, string> = {
+   openai: 'openai',
+   anthropic: 'anthropic',
+   google: 'google-ai-studio',
+   openrouter: 'openrouter',
+   cloudflare: 'workers-ai',
+   bedrock: 'aws-bedrock'
+ }
+
+ /**
+  * Get provider configuration from environment variables
+  */
+ function getEnvConfig(): ProviderConfig {
+   if (typeof process === 'undefined') return {}
+
+   return {
+     // Cloudflare AI Gateway
+     gatewayUrl: process.env.AI_GATEWAY_URL,
+     gatewayToken: process.env.AI_GATEWAY_TOKEN || process.env.DO_TOKEN,
+
+     // llm.do WebSocket transport
+     useWebSocket: process.env.LLM_WEBSOCKET === 'true' || process.env.USE_LLM_WEBSOCKET === 'true',
+     llmUrl: process.env.LLM_URL,
+
+     // Individual provider keys (fallbacks)
+     openaiApiKey: process.env.OPENAI_API_KEY,
+     anthropicApiKey: process.env.ANTHROPIC_API_KEY,
+     googleApiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY || process.env.GOOGLE_AI_API_KEY,
+     openrouterApiKey: process.env.OPENROUTER_API_KEY,
+     cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID,
+     cloudflareApiToken: process.env.CLOUDFLARE_API_TOKEN
+   }
+ }
+
+ /**
+  * Get the base URL for a provider, using Cloudflare AI Gateway if configured
+  */
+ function getBaseUrl(
+   providerId: ProviderId,
+   config: ProviderConfig,
+   defaultUrl?: string
+ ): string | undefined {
+   // Custom URL takes priority
+   if (config.baseUrls?.[providerId]) {
+     return config.baseUrls[providerId]
+   }
+
+   // Use Cloudflare AI Gateway if configured
+   if (config.gatewayUrl) {
+     const gatewayPath = GATEWAY_PROVIDER_PATHS[providerId]
+     return `${config.gatewayUrl}/${gatewayPath}`
+   }
+
+   return defaultUrl
+ }
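
Putting the mapping and `getBaseUrl` together: with a gateway configured, each provider's traffic lands under its gateway path segment. A sketch of the resulting URLs, assuming a hypothetical gateway URL:

```ts
// Assuming config.gatewayUrl = 'https://gateway.ai.cloudflare.com/v1/acct/gw' (hypothetical):
// getBaseUrl('google', config)  → 'https://gateway.ai.cloudflare.com/v1/acct/gw/google-ai-studio'
// getBaseUrl('bedrock', config) → 'https://gateway.ai.cloudflare.com/v1/acct/gw/aws-bedrock'
// A baseUrls override wins over the gateway:
// getBaseUrl('google', { ...config, baseUrls: { google: 'http://localhost:8787' } })
//   → 'http://localhost:8787'
```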
+
+ // Lazy-loaded WebSocket fetch (to avoid circular imports)
+ let llmFetchInstance: typeof fetch | null = null
+
+ /**
+  * Create a custom fetch that handles gateway authentication
+  * Supports both HTTP (Cloudflare AI Gateway) and WebSocket (llm.do) transports
+  */
+ function createGatewayFetch(config: ProviderConfig): typeof fetch | undefined {
+   // Use llm.do WebSocket transport if enabled
+   if (config.useWebSocket && config.gatewayToken) {
+     // Return a lazy-initializing fetch that creates the WebSocket connection on first use
+     return async (url, init) => {
+       if (!llmFetchInstance) {
+         const { createLLMFetch } = await import('./llm.do.js')
+         llmFetchInstance = createLLMFetch({
+           url: config.llmUrl,
+           token: config.gatewayToken!
+         })
+       }
+       return llmFetchInstance(url, init)
+     }
+   }
+
+   // Use HTTP gateway
+   if (!config.gatewayUrl || !config.gatewayToken) {
+     return undefined
+   }
+
+   return async (url, init) => {
+     const headers = new Headers(init?.headers)
+     // Remove SDK's API key headers - gateway will inject from its secrets
+     headers.delete('x-api-key')
+     headers.delete('authorization')
+     headers.delete('x-goog-api-key')
+     // Add gateway authentication
+     headers.set('cf-aig-authorization', `Bearer ${config.gatewayToken}`)
+     return fetch(url, { ...init, headers })
+   }
+ }
+
+ /**
+  * Check if using gateway with secrets (token configured)
+  */
+ function useGatewaySecrets(config: ProviderConfig): boolean {
+   return !!(config.gatewayUrl && config.gatewayToken)
+ }
+
+ /**
+  * Get API key - when using gateway secrets, use a placeholder
+  */
+ function getApiKey(config: ProviderConfig, providerApiKey?: string): string | undefined {
+   if (useGatewaySecrets(config)) {
+     return 'gateway' // Placeholder - will be stripped by gatewayFetch
+   }
+   return providerApiKey
+ }
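
Together, `getApiKey` and `createGatewayFetch` ensure the provider SDK never holds a real key when gateway secrets are in use. A sketch of the request rewrite, with placeholder values:

```ts
// With config = { gatewayUrl: '...', gatewayToken: 'SECRET' } (placeholders):
//
// 1. getApiKey() hands the SDK the literal string 'gateway', so the SDK still
//    builds a syntactically valid request, e.g. `authorization: Bearer gateway`.
// 2. The wrapped fetch deletes x-api-key, authorization, and x-goog-api-key,
//    then sets `cf-aig-authorization: Bearer SECRET`.
// 3. The gateway authenticates that header and injects the real provider key
//    from its own secret store before forwarding upstream.
```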
+
+ /**
+  * Create OpenAI provider
+  */
+ async function createOpenAIProvider(config: ProviderConfig): Promise<unknown> {
+   const { createOpenAI } = await import('@ai-sdk/openai')
+   return createOpenAI({
+     apiKey: getApiKey(config, config.openaiApiKey),
+     baseURL: getBaseUrl('openai', config),
+     fetch: createGatewayFetch(config),
+   })
+ }
+
+ /**
+  * Create Anthropic provider
+  */
+ async function createAnthropicProvider(config: ProviderConfig): Promise<unknown> {
+   const { createAnthropic } = await import('@ai-sdk/anthropic')
+   return createAnthropic({
+     apiKey: getApiKey(config, config.anthropicApiKey),
+     baseURL: getBaseUrl('anthropic', config),
+     fetch: createGatewayFetch(config),
+   })
+ }
+
+ /**
+  * Create Google AI provider
+  */
+ async function createGoogleProvider(config: ProviderConfig): Promise<unknown> {
+   const { createGoogleGenerativeAI } = await import('@ai-sdk/google')
+   return createGoogleGenerativeAI({
+     apiKey: getApiKey(config, config.googleApiKey),
+     baseURL: getBaseUrl('google', config),
+     fetch: createGatewayFetch(config),
+   })
+ }
+
+ /**
+  * Create OpenRouter provider (OpenAI-compatible)
+  */
+ async function createOpenRouterProvider(config: ProviderConfig): Promise<unknown> {
+   const { createOpenAI } = await import('@ai-sdk/openai')
+   return createOpenAI({
+     apiKey: getApiKey(config, config.openrouterApiKey),
+     baseURL: getBaseUrl('openrouter', config, 'https://openrouter.ai/api/v1'),
+     fetch: createGatewayFetch(config),
+   })
+ }
+
+ /**
+  * Create Amazon Bedrock provider
+  * Supports two authentication modes:
+  * 1. Bearer token (AWS_BEARER_TOKEN_BEDROCK) - simpler, recommended, bypasses gateway
+  * 2. SigV4 signing (AWS_ACCESS_KEY_ID/SECRET) - standard AWS auth, can use gateway
+  */
+ async function createBedrockProvider(config: ProviderConfig): Promise<unknown> {
+   const { createAmazonBedrock } = await import('@ai-sdk/amazon-bedrock')
+
+   const bearerToken = process.env.AWS_BEARER_TOKEN_BEDROCK
+
+   // When using a bearer token, go directly to AWS (skip gateway)
+   // Gateway doesn't support bearer token auth for Bedrock
+   if (bearerToken) {
+     return createAmazonBedrock({
+       region: process.env.AWS_REGION || 'us-east-1',
+       apiKey: bearerToken,
+     })
+   }
+
+   // For SigV4 auth, can optionally route through gateway
+   const baseURL = getBaseUrl('bedrock', config)
+   return createAmazonBedrock({
+     ...(baseURL && { baseURL }),
+     region: process.env.AWS_REGION || 'us-east-1',
+   })
+ }
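
A sketch of the two Bedrock modes, using the `model()` helper defined later in this diff (the model ID is illustrative; use one your account can invoke):

```ts
import { generateText } from 'ai'
import { model } from 'ai-providers'

// Mode 1: set AWS_BEARER_TOKEN_BEDROCK (and optionally AWS_REGION) to go straight to AWS.
// Mode 2: leave it unset and rely on standard SigV4 credentials, optionally via the gateway.
const { text } = await generateText({
  // Illustrative Bedrock inference profile ID
  model: await model('bedrock:us.anthropic.claude-3-5-sonnet-20241022-v2:0'),
  prompt: 'Hello!',
})
```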
+
+ /**
+  * Create Cloudflare Workers AI provider
+  */
+ async function createCloudflareProvider(config: ProviderConfig): Promise<unknown> {
+   const { cloudflare } = await import('./providers/cloudflare.js')
+
+   return {
+     languageModel: (modelId: string) => {
+       throw new Error(`Cloudflare language models not yet supported via registry. Use embedding models like: cloudflare:@cf/baai/bge-m3`)
+     },
+     textEmbeddingModel: (modelId: string) => {
+       return cloudflare.embedding(modelId, {
+         accountId: config.cloudflareAccountId,
+         apiToken: getApiKey(config, config.cloudflareApiToken),
+         baseUrl: getBaseUrl('cloudflare', config)
+       })
+     }
+   } as unknown
+ }
+
+ /**
+  * Provider factories map
+  */
+ const providerFactories: Record<ProviderId, (config: ProviderConfig) => Promise<unknown>> = {
+   openai: createOpenAIProvider,
+   anthropic: createAnthropicProvider,
+   google: createGoogleProvider,
+   openrouter: createOpenRouterProvider,
+   cloudflare: createCloudflareProvider,
+   bedrock: createBedrockProvider
+ }
+
+ /**
+  * Create a unified provider registry with all configured providers
+  *
+  * @example
+  * ```ts
+  * import { createRegistry } from 'ai-providers'
+  * import { generateText, embed } from 'ai'
+  *
+  * // With Cloudflare AI Gateway (recommended)
+  * // Set AI_GATEWAY_URL and AI_GATEWAY_TOKEN env vars
+  *
+  * const registry = await createRegistry()
+  *
+  * // Use any provider with simple string IDs
+  * const { text } = await generateText({
+  *   model: registry.languageModel('openai:gpt-4o'),
+  *   prompt: 'Hello!'
+  * })
+  *
+  * const { text: claude } = await generateText({
+  *   model: registry.languageModel('anthropic:claude-3-5-sonnet-latest'),
+  *   prompt: 'Hello!'
+  * })
+  *
+  * const { embedding } = await embed({
+  *   model: registry.textEmbeddingModel('cloudflare:@cf/baai/bge-m3'),
+  *   value: 'Hello!'
+  * })
+  * ```
+  */
+ export async function createRegistry(
+   config: ProviderConfig = {},
+   options: { providers?: ProviderId[] } = {}
+ ): Promise<ProviderRegistryProvider> {
+   const mergedConfig = { ...getEnvConfig(), ...config }
+   const providerIds = options.providers || (['openai', 'anthropic', 'google', 'openrouter', 'cloudflare', 'bedrock'] as ProviderId[])
+
+   const providers: Record<string, unknown> = {}
+
+   // Load providers in parallel
+   await Promise.all(
+     providerIds.map(async (id) => {
+       try {
+         providers[id] = await providerFactories[id](mergedConfig)
+       } catch (error) {
+         // Provider SDK not installed - skip silently
+         // (guard process for non-Node environments, matching getEnvConfig)
+         if (typeof process !== 'undefined' && process.env.DEBUG) {
+           console.warn(`Provider ${id} not available:`, error)
+         }
+       }
+     })
+   )
+
+   return createProviderRegistry(providers as Record<string, any>)
+ }
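
The `providers` option restricts which factories are attempted; providers whose SDKs are not installed are skipped silently either way. A sketch:

```ts
import { createRegistry } from 'ai-providers'

// Only attempt the Anthropic and OpenRouter factories
const registry = await createRegistry({}, { providers: ['anthropic', 'openrouter'] })
const claude = registry.languageModel('anthropic:claude-3-5-sonnet-latest')
```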
+
+ // Default registry management
+ let defaultRegistry: ProviderRegistryProvider | null = null
+ let defaultRegistryPromise: Promise<ProviderRegistryProvider> | null = null
+
+ /**
+  * Get or create the default provider registry
+  */
+ export async function getRegistry(): Promise<ProviderRegistryProvider> {
+   if (defaultRegistry) return defaultRegistry
+
+   if (!defaultRegistryPromise) {
+     defaultRegistryPromise = createRegistry().then(registry => {
+       defaultRegistry = registry
+       return registry
+     })
+   }
+
+   return defaultRegistryPromise
+ }
+
+ /**
+  * Configure the default registry with custom settings
+  */
+ export async function configureRegistry(config: ProviderConfig): Promise<void> {
+   defaultRegistry = await createRegistry(config)
+   defaultRegistryPromise = null
+ }
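
`configureRegistry` swaps out the default registry that `model()` and `embeddingModel()` resolve against. A sketch enabling the llm.do WebSocket transport (the token comes from the same env vars `getEnvConfig` reads):

```ts
import { configureRegistry, model } from 'ai-providers'

await configureRegistry({ useWebSocket: true, gatewayToken: process.env.DO_TOKEN })
const opus = await model('opus') // resolved through the reconfigured default registry
```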
+
+ /**
+  * Parse a model ID into provider and model name
+  *
+  * @example
+  * parseModelId('openai/gpt-4o') // { provider: 'openai', model: 'gpt-4o' }
+  * parseModelId('meta-llama/llama-3.3-70b') // { provider: 'meta-llama', model: 'llama-3.3-70b' }
+  */
+ function parseModelId(id: string): { provider: string; model: string } {
+   const slashIndex = id.indexOf('/')
+   if (slashIndex === -1) {
+     return { provider: 'openrouter', model: id }
+   }
+   return {
+     provider: id.substring(0, slashIndex),
+     model: id.substring(slashIndex + 1)
+   }
+ }
+
+ /**
+  * Get a language model with smart routing
+  *
+  * Resolves aliases and routes to the appropriate provider:
+  * - openai/* → OpenAI SDK (via gateway) when provider_model_id is available
+  * - anthropic/* → Anthropic SDK (via gateway) when provider_model_id is available
+  * - google/* → Google AI SDK (via gateway) when provider_model_id is available
+  * - All others → OpenRouter (via gateway)
+  *
+  * Direct routing to native SDKs enables provider-specific features like:
+  * - Anthropic: MCP (Model Context Protocol), extended thinking
+  * - OpenAI: Function calling, JSON mode, vision
+  * - Google: Grounding, code execution
+  *
+  * @example
+  * ```ts
+  * import { model } from 'ai-providers'
+  *
+  * // Simple aliases
+  * const opus = await model('opus') // → anthropic:claude-opus-4-5-20251101
+  * const gpt = await model('gpt-4o') // → openai:gpt-4o
+  * const llama = await model('llama-70b') // → openrouter:meta-llama/llama-3.3-70b-instruct
+  *
+  * // Full IDs also work
+  * const claude = await model('anthropic/claude-sonnet-4.5')
+  * const mistral = await model('mistralai/mistral-large-2411')
+  * ```
+  */
+ export async function model(id: string): Promise<LanguageModel> {
+   const registry = await getRegistry()
+
+   // Check for direct provider:model format (e.g., bedrock:us.anthropic.claude-*)
+   // This bypasses language-models resolution and routes directly to the provider
+   const colonIndex = id.indexOf(':')
+   if (colonIndex > 0) {
+     const provider = id.substring(0, colonIndex)
+     // Known providers that support direct routing
+     if (['bedrock', 'openai', 'anthropic', 'google', 'openrouter'].includes(provider)) {
+       return registry.languageModel(id as `${string}:${string}`)
+     }
+   }
+
+   // Try to resolve with provider routing info
+   try {
+     const { resolveWithProvider, DIRECT_PROVIDERS } = await import('language-models')
+     const resolved = resolveWithProvider(id)
+
+     // Extract expected provider from the model ID (e.g., 'anthropic' from 'anthropic/claude-sonnet-4.5')
+     const slashIndex = resolved.id.indexOf('/')
+     const expectedProvider = slashIndex > 0 ? resolved.id.substring(0, slashIndex) : null
+
+     // Use direct routing if:
+     // 1. Provider supports direct SDK access (openai, anthropic, google)
+     // 2. We have the provider's native model ID
+     // 3. The data's provider matches the expected provider from the model ID
+     //    (OpenRouter may return different top providers like google-vertex for anthropic models)
+     const dataProvider = resolved.model?.provider
+     const providerMatches = expectedProvider && dataProvider === expectedProvider
+
+     if (
+       resolved.supportsDirectRouting &&
+       resolved.providerModelId &&
+       providerMatches &&
+       (DIRECT_PROVIDERS as readonly string[]).includes(expectedProvider)
+     ) {
+       // Route directly to provider SDK with native model ID
+       const modelSpec = `${expectedProvider}:${resolved.providerModelId}` as `${string}:${string}`
+       return registry.languageModel(modelSpec)
+     }
+
+     // Fall back to OpenRouter for all other models
+     return registry.languageModel(`openrouter:${resolved.id}`)
+   } catch {
+     // language-models not available, route through OpenRouter as-is
+     return registry.languageModel(`openrouter:${id}`)
+   }
+ }
+
+ /**
+  * Shorthand to get an embedding model from the default registry
+  *
+  * @example
+  * ```ts
+  * import { embeddingModel } from 'ai-providers'
+  *
+  * const openaiEmbed = await embeddingModel('openai:text-embedding-3-small')
+  * const cfEmbed = await embeddingModel('cloudflare:@cf/baai/bge-m3')
+  * ```
+  */
+ export async function embeddingModel(id: string): Promise<EmbeddingModel<string>> {
+   const registry = await getRegistry()
+   return registry.textEmbeddingModel(id as `${string}:${string}`)
+ }
package/tsconfig.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "extends": "../../tsconfig.base.json",
+   "compilerOptions": {
+     "rootDir": "src",
+     "outDir": "dist"
+   },
+   "include": ["src/**/*"],
+   "exclude": ["node_modules", "dist", "**/*.test.ts", "**/*.spec.ts"]
+ }
@@ -0,0 +1,36 @@
+ import { defineConfig } from 'vitest/config'
+ import { config } from 'dotenv'
+ import { resolve } from 'path'
+ import { existsSync } from 'fs'
+
+ // Load .env from current directory, parent directories, and root
+ // This supports primitives being used as a submodule
+ const envPaths = [
+   resolve(process.cwd(), '.env'),
+   resolve(process.cwd(), '..', '.env'),
+   resolve(process.cwd(), '..', '..', '.env'),
+   resolve(process.cwd(), '..', '..', '..', '.env'),
+ ]
+
+ for (const envPath of envPaths) {
+   if (existsSync(envPath)) {
+     config({ path: envPath })
+   }
+ }
+
+ export default defineConfig({
+   test: {
+     globals: false,
+     environment: 'node',
+     include: ['src/**/*.test.ts', 'test/**/*.test.ts'],
+     testTimeout: 60000, // AI calls can take time
+     hookTimeout: 30000,
+     // Run tests sequentially to avoid rate limiting
+     pool: 'forks',
+     poolOptions: {
+       forks: {
+         singleFork: true,
+       },
+     },
+   },
+ })
@@ -1,50 +0,0 @@
- import { LanguageModelV1 } from '@ai-sdk/provider';
- import { Model } from 'language-models';
- type ProviderOptions = {
-     /**
-      * If true, our provider will try to fix the schema of an output
-      * using gemini-2.0-lite, taking the output of the model and
-      * rewriting it to match the schema.
-      */
-     allowFixingSchema?: boolean;
-     /**
-      * Tools to be used by the model
-      */
-     tools?: Record<string, string | number | boolean | Record<string, unknown>>;
-     /**
-      * Priorities for model selection
-      */
-     priorities?: string[];
-     /**
-      * Enable reasoning capability
-      */
-     reasoning?: boolean;
-     /**
-      * Maximum price constraint
-      */
-     maxPrice?: number;
- };
- type LLMProviderConfig = {};
- export declare const createLLMProvider: (config: LLMProviderConfig) => (model: string, options?: ProviderOptions) => LLMProvider;
- export declare const model: (model: string, options?: ProviderOptions) => LLMProvider;
- /**
-  * Returns an array of LLMProvider instances for the given model identifiers
-  * @param modelIdentifiers Comma-separated string of model identifiers
-  * @param options Provider options
-  * @returns Array of LLMProvider instances
-  */
- export declare const models: (modelIdentifiers: string, options?: ProviderOptions) => LLMProvider[];
- declare class LLMProvider implements LanguageModelV1 {
-     modelId: string;
-     options: ProviderOptions;
-     private augments?;
-     readonly specificationVersion = "v1";
-     readonly resolvedModel: Model;
-     constructor(modelId: string, options: ProviderOptions, augments?: Record<string, any> | undefined);
-     get provider(): string;
-     get supportsImageUrls(): boolean;
-     get defaultObjectGenerationMode(): "json" | "tool";
-     doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
-     doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
- }
- export {};