@providerprotocol/ai 0.0.20 → 0.0.21
- package/dist/anthropic/index.d.ts +184 -14
- package/dist/anthropic/index.js +210 -82
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-U3FZWV4U.js → chunk-EDENPF3E.js} +5 -2
- package/dist/{chunk-U3FZWV4U.js.map → chunk-EDENPF3E.js.map} +1 -1
- package/dist/{chunk-UMKWXGO3.js → chunk-M4BMM5IB.js} +86 -2
- package/dist/chunk-M4BMM5IB.js.map +1 -0
- package/dist/{chunk-P5IRTEM5.js → chunk-Y3GBJNA2.js} +2 -2
- package/dist/{chunk-U4JJC2YX.js → chunk-Z4ILICF5.js} +2 -2
- package/dist/chunk-Z4ILICF5.js.map +1 -0
- package/dist/google/index.d.ts +16 -19
- package/dist/google/index.js +14 -36
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +2 -2
- package/dist/http/index.js +3 -3
- package/dist/index.d.ts +101 -38
- package/dist/index.js +69 -43
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +14 -16
- package/dist/ollama/index.js +5 -7
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +25 -133
- package/dist/openai/index.js +27 -81
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +28 -53
- package/dist/openrouter/index.js +20 -43
- package/dist/openrouter/index.js.map +1 -1
- package/dist/provider-DGQHYE6I.d.ts +1319 -0
- package/dist/proxy/index.d.ts +2 -3
- package/dist/proxy/index.js +5 -7
- package/dist/proxy/index.js.map +1 -1
- package/dist/{retry-DR7YRJDz.d.ts → retry-Pcs3hnbu.d.ts} +2 -2
- package/dist/{stream-DRHy6q1a.d.ts → stream-Di9acos2.d.ts} +1 -1
- package/dist/xai/index.d.ts +16 -88
- package/dist/xai/index.js +30 -58
- package/dist/xai/index.js.map +1 -1
- package/package.json +4 -1
- package/dist/chunk-MSR5P65T.js +0 -39
- package/dist/chunk-MSR5P65T.js.map +0 -1
- package/dist/chunk-U4JJC2YX.js.map +0 -1
- package/dist/chunk-UMKWXGO3.js.map +0 -1
- package/dist/content-DEl3z_W2.d.ts +0 -276
- package/dist/image-Dhq-Yuq4.d.ts +0 -456
- package/dist/provider-BBMBZuGn.d.ts +0 -570
- /package/dist/{chunk-P5IRTEM5.js.map → chunk-Y3GBJNA2.js.map} +0 -0
package/dist/chunk-M4BMM5IB.js
CHANGED
@@ -142,7 +142,90 @@ function isToolResultMessage(msg) {
   return msg.type === "tool_result";
 }

+// src/core/provider-handlers.ts
+function isHandlerResolver(value) {
+  return value !== void 0 && "handlers" in value && "getMode" in value;
+}
+var providerHandlers = /* @__PURE__ */ new WeakMap();
+function registerProviderHandlers(provider, handlers) {
+  providerHandlers.set(provider, handlers);
+}
+function getProviderHandlers(provider) {
+  return providerHandlers.get(provider);
+}
+function resolveLLMHandler(provider, options) {
+  const handlers = getProviderHandlers(provider);
+  const resolver = handlers?.llmResolver;
+  if (resolver) {
+    const mode = resolver.getMode(options);
+    return resolver.handlers[mode] ?? handlers?.llm;
+  }
+  return handlers?.llm;
+}
+function resolveEmbeddingHandler(provider) {
+  const handlers = getProviderHandlers(provider);
+  return handlers?.embedding;
+}
+function resolveImageHandler(provider) {
+  const handlers = getProviderHandlers(provider);
+  return handlers?.image;
+}
+
+// src/core/provider.ts
+function createProvider(options) {
+  const llmInput = options.handlers.llm;
+  const hasResolver = isHandlerResolver(llmInput);
+  const defaultLLMHandler = hasResolver ? llmInput.handlers[llmInput.defaultMode] : llmInput;
+  if (hasResolver && !defaultLLMHandler) {
+    throw new Error(
+      `Provider '${options.name}' LLM resolver defaultMode '${llmInput.defaultMode}' has no handler`
+    );
+  }
+  const fn = function(modelId, modelOptions) {
+    if (options.createModelReference) {
+      return options.createModelReference(modelId, modelOptions, provider);
+    }
+    return { modelId, provider, options: modelOptions };
+  };
+  Object.defineProperties(fn, {
+    name: {
+      value: options.name,
+      writable: false,
+      configurable: true
+    },
+    version: {
+      value: options.version,
+      writable: false,
+      configurable: true
+    }
+  });
+  const provider = fn;
+  if (hasResolver) {
+    for (const handler of Object.values(llmInput.handlers)) {
+      handler._setProvider?.(provider);
+    }
+  } else if (defaultLLMHandler?._setProvider) {
+    defaultLLMHandler._setProvider(provider);
+  }
+  if (options.handlers.embedding?._setProvider) {
+    options.handlers.embedding._setProvider(provider);
+  }
+  if (options.handlers.image?._setProvider) {
+    options.handlers.image._setProvider(provider);
+  }
+  registerProviderHandlers(provider, {
+    llm: defaultLLMHandler,
+    embedding: options.handlers.embedding,
+    image: options.handlers.image,
+    ...hasResolver ? { llmResolver: llmInput } : {}
+  });
+  return provider;
+}
+
 export {
+  resolveLLMHandler,
+  resolveEmbeddingHandler,
+  resolveImageHandler,
   generateId,
   Message,
   UserMessage,
@@ -150,6 +233,7 @@ export {
   ToolResultMessage,
   isUserMessage,
   isAssistantMessage,
-  isToolResultMessage
+  isToolResultMessage,
+  createProvider
 };
-//# sourceMappingURL=chunk-UMKWXGO3.js.map
+//# sourceMappingURL=chunk-M4BMM5IB.js.map
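Note: the JSDoc bundled in the source map below documents how the new createProvider API is meant to be called. A minimal sketch adapted from those examples; the root import path, the handler factory names, and the model id are assumptions, not verified package exports:

```typescript
import { createProvider } from '@providerprotocol/ai'; // assumed entry point

// Hypothetical stand-ins for the package's real LLM handler factories.
declare const responsesHandler: any;
declare const completionsHandler: any;

// Multi-mode provider: one handler per API mode, resolved per request.
const openai = createProvider<{ api?: 'responses' | 'completions' }>({
  name: 'openai',
  version: '1.0.0',
  handlers: {
    llm: {
      handlers: { responses: responsesHandler, completions: completionsHandler },
      defaultMode: 'responses',
      getMode: (opts) => opts?.api ?? 'responses',
    },
  },
});

// The provider is callable; the returned ModelReference carries the options
// that resolveLLMHandler later reads to pick the handler at request time.
const model = openai('gpt-4o', { api: 'completions' });
```

Per the provider-handlers JSDoc, the point of the resolver shape is that the mode rides on the ModelReference options instead of shared mutable state on the provider.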
package/dist/chunk-M4BMM5IB.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/utils/id.ts","../src/types/messages.ts","../src/core/provider-handlers.ts","../src/core/provider.ts"],"sourcesContent":["/**\n * @fileoverview ID generation utilities for the Universal Provider Protocol.\n *\n * Provides functions for generating unique identifiers used throughout UPP,\n * including message IDs, tool call IDs, and other internal references.\n *\n * @module utils/id\n */\n\n/**\n * Generates a unique UUID v4 identifier.\n *\n * Uses the native `crypto.randomUUID()` when available for cryptographically\n * secure randomness. Falls back to a Math.random-based implementation for\n * environments without Web Crypto API support.\n *\n * @returns A UUID v4 string in the format `xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx`\n *\n * @example\n * ```typescript\n * const messageId = generateId();\n * // => \"f47ac10b-58cc-4372-a567-0e02b2c3d479\"\n * ```\n */\nexport function generateId(): string {\n if (typeof crypto !== 'undefined' && crypto.randomUUID) {\n return crypto.randomUUID();\n }\n\n return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {\n const r = (Math.random() * 16) | 0;\n const v = c === 'x' ? r : (r & 0x3) | 0x8;\n return v.toString(16);\n });\n}\n\n/**\n * Generates a short alphanumeric identifier.\n *\n * Creates a 12-character random string using alphanumeric characters (a-z, A-Z, 0-9).\n * Useful for tool call IDs and other cases where a full UUID is not required.\n *\n * @param prefix - Optional prefix to prepend to the generated ID\n * @returns A string containing the prefix followed by 12 random alphanumeric characters\n *\n * @example\n * ```typescript\n * const toolCallId = generateShortId('call_');\n * // => \"call_aB3xY9mK2pQr\"\n *\n * const simpleId = generateShortId();\n * // => \"Tz4wN8vL1sHj\"\n * ```\n */\nexport function generateShortId(prefix = ''): string {\n const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';\n let result = prefix;\n for (let i = 0; i < 12; i++) {\n result += chars.charAt(Math.floor(Math.random() * chars.length));\n }\n return result;\n}\n","/**\n * @fileoverview Message types for conversation history.\n *\n * Defines the message classes used to represent conversation turns\n * between users and assistants, including support for multimodal\n * content and tool calls.\n *\n * @module types/messages\n */\n\nimport { generateId } from '../utils/id.ts';\nimport type {\n ContentBlock,\n TextBlock,\n ImageBlock,\n AudioBlock,\n VideoBlock,\n UserContent,\n AssistantContent,\n} from './content.ts';\nimport type { ToolCall, ToolResult } from './tool.ts';\n\n/**\n * Message serialized to JSON format.\n * Picks common fields from Message, converts timestamp to string.\n */\nexport type MessageJSON = Pick<Message, 'id' | 'type' | 'metadata'> & {\n timestamp: string;\n content: ContentBlock[];\n toolCalls?: ToolCall[];\n results?: ToolResult[];\n};\n\n/**\n * Message type discriminator.\n *\n * Used to distinguish between different message types in a conversation.\n */\nexport type MessageType = 'user' | 'assistant' | 'tool_result';\n\n/**\n * Provider-namespaced metadata for messages.\n *\n * Each provider can attach its own metadata under its namespace,\n * preventing conflicts between different providers.\n *\n * @example\n * ```typescript\n * const metadata: MessageMetadata = {\n * openai: { model: 'gpt-4', finishReason: 'stop' },\n * anthropic: { model: 'claude-3', stopReason: 'end_turn' }\n * };\n * ```\n */\nexport interface MessageMetadata {\n [provider: string]: 
Record<string, unknown> | undefined;\n}\n\n/**\n * Options for constructing messages.\n */\nexport interface MessageOptions {\n /** Custom message ID (auto-generated if not provided) */\n id?: string;\n\n /** Provider-specific metadata */\n metadata?: MessageMetadata;\n}\n\n/**\n * Abstract base class for all message types.\n *\n * Provides common functionality for user, assistant, and tool result\n * messages, including content accessors and metadata handling.\n *\n * @example\n * ```typescript\n * // Access text content from any message\n * const text = message.text;\n *\n * // Access images\n * const images = message.images;\n * ```\n */\nexport abstract class Message {\n /** Unique message identifier */\n readonly id: string;\n\n /** Timestamp when the message was created */\n readonly timestamp: Date;\n\n /** Provider-specific metadata, namespaced by provider name */\n readonly metadata?: MessageMetadata;\n\n /** Message type discriminator (implemented by subclasses) */\n abstract readonly type: MessageType;\n\n /**\n * Returns the content blocks for this message.\n * Implemented by subclasses to provide type-specific content.\n */\n protected abstract getContent(): ContentBlock[];\n\n /**\n * Creates a new message instance.\n *\n * @param options - Optional message ID and metadata\n */\n constructor(options?: MessageOptions) {\n this.id = options?.id ?? generateId();\n this.timestamp = new Date();\n this.metadata = options?.metadata;\n }\n\n /**\n * Concatenated text content from all text blocks.\n * Blocks are joined with double newlines.\n */\n get text(): string {\n return this.getContent()\n .filter((block): block is TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('\\n\\n');\n }\n\n /**\n * All image content blocks in this message.\n */\n get images(): ImageBlock[] {\n return this.getContent().filter((block): block is ImageBlock => block.type === 'image');\n }\n\n /**\n * All audio content blocks in this message.\n */\n get audio(): AudioBlock[] {\n return this.getContent().filter((block): block is AudioBlock => block.type === 'audio');\n }\n\n /**\n * All video content blocks in this message.\n */\n get video(): VideoBlock[] {\n return this.getContent().filter((block): block is VideoBlock => block.type === 'video');\n }\n}\n\n/**\n * User input message.\n *\n * Represents a message from the user, which can contain text and/or\n * multimodal content like images, audio, or video.\n *\n * @example\n * ```typescript\n * // Simple text message\n * const msg = new UserMessage('Hello, world!');\n *\n * // Multimodal message\n * const msg = new UserMessage([\n * { type: 'text', text: 'What is in this image?' },\n * { type: 'image', source: { type: 'url', url: '...' 
}, mimeType: 'image/png' }\n * ]);\n * ```\n */\nexport class UserMessage extends Message {\n /** Message type discriminator */\n readonly type = 'user' as const;\n\n /** Content blocks in this message */\n readonly content: UserContent[];\n\n /**\n * Creates a new user message.\n *\n * @param content - String (converted to TextBlock) or array of content blocks\n * @param options - Optional message ID and metadata\n */\n constructor(content: string | UserContent[], options?: MessageOptions) {\n super(options);\n if (typeof content === 'string') {\n this.content = [{ type: 'text', text: content }];\n } else {\n this.content = content;\n }\n }\n\n protected getContent(): ContentBlock[] {\n return this.content;\n }\n}\n\n/**\n * Assistant response message.\n *\n * Represents a response from the AI assistant, which may contain\n * text, media content, and/or tool call requests.\n *\n * @example\n * ```typescript\n * // Simple text response\n * const msg = new AssistantMessage('Hello! How can I help?');\n *\n * // Response with tool calls\n * const msg = new AssistantMessage(\n * 'Let me check the weather...',\n * [{ toolCallId: 'call_1', toolName: 'get_weather', arguments: { location: 'NYC' } }]\n * );\n * ```\n */\nexport class AssistantMessage extends Message {\n /** Message type discriminator */\n readonly type = 'assistant' as const;\n\n /** Content blocks in this message */\n readonly content: AssistantContent[];\n\n /** Tool calls requested by the model (if any) */\n readonly toolCalls?: ToolCall[];\n\n /**\n * Creates a new assistant message.\n *\n * @param content - String (converted to TextBlock) or array of content blocks\n * @param toolCalls - Tool calls requested by the model\n * @param options - Optional message ID and metadata\n */\n constructor(\n content: string | AssistantContent[],\n toolCalls?: ToolCall[],\n options?: MessageOptions\n ) {\n super(options);\n if (typeof content === 'string') {\n this.content = [{ type: 'text', text: content }];\n } else {\n this.content = content;\n }\n this.toolCalls = toolCalls;\n }\n\n protected getContent(): ContentBlock[] {\n return this.content;\n }\n\n /**\n * Whether this message contains tool call requests.\n */\n get hasToolCalls(): boolean {\n return this.toolCalls !== undefined && this.toolCalls.length > 0;\n }\n}\n\n/**\n * Tool execution result message.\n *\n * Contains the results of executing one or more tool calls,\n * sent back to the model for further processing.\n *\n * @example\n * ```typescript\n * const msg = new ToolResultMessage([\n * { toolCallId: 'call_1', result: { temperature: 72, conditions: 'sunny' } },\n * { toolCallId: 'call_2', result: 'File not found', isError: true }\n * ]);\n * ```\n */\nexport class ToolResultMessage extends Message {\n /** Message type discriminator */\n readonly type = 'tool_result' as const;\n\n /** Results from tool executions */\n readonly results: ToolResult[];\n\n /**\n * Creates a new tool result message.\n *\n * @param results - Array of tool execution results\n * @param options - Optional message ID and metadata\n */\n constructor(results: ToolResult[], options?: MessageOptions) {\n super(options);\n this.results = results;\n }\n\n protected getContent(): ContentBlock[] {\n return this.results.map((result) => ({\n type: 'text' as const,\n text:\n typeof result.result === 'string'\n ? 
result.result\n : JSON.stringify(result.result),\n }));\n }\n}\n\n/**\n * Type guard for UserMessage.\n *\n * @param msg - The message to check\n * @returns True if the message is a UserMessage\n *\n * @example\n * ```typescript\n * if (isUserMessage(msg)) {\n * console.log('User said:', msg.text);\n * }\n * ```\n */\nexport function isUserMessage(msg: Message): msg is UserMessage {\n return msg.type === 'user';\n}\n\n/**\n * Type guard for AssistantMessage.\n *\n * @param msg - The message to check\n * @returns True if the message is an AssistantMessage\n *\n * @example\n * ```typescript\n * if (isAssistantMessage(msg)) {\n * console.log('Assistant said:', msg.text);\n * if (msg.hasToolCalls) {\n * console.log('Tool calls:', msg.toolCalls);\n * }\n * }\n * ```\n */\nexport function isAssistantMessage(msg: Message): msg is AssistantMessage {\n return msg.type === 'assistant';\n}\n\n/**\n * Type guard for ToolResultMessage.\n *\n * @param msg - The message to check\n * @returns True if the message is a ToolResultMessage\n *\n * @example\n * ```typescript\n * if (isToolResultMessage(msg)) {\n * for (const result of msg.results) {\n * console.log(`Tool ${result.toolCallId}:`, result.result);\n * }\n * }\n * ```\n */\nexport function isToolResultMessage(msg: Message): msg is ToolResultMessage {\n return msg.type === 'tool_result';\n}\n","/**\n * @fileoverview Internal handler registry and resolver utilities.\n *\n * @module core/provider-handlers\n */\n\nimport type {\n ProviderIdentity,\n LLMHandler,\n EmbeddingHandler,\n ImageHandler,\n} from '../types/provider.ts';\n\n/**\n * Resolver for dynamically selecting LLM handlers based on model options.\n *\n * Used by providers that support multiple API modes (e.g., OpenAI with responses/completions).\n * The resolver eliminates shared mutable state by storing the mode on the ModelReference\n * and resolving the correct handler at request time.\n *\n * @typeParam TOptions - Provider-specific options type\n */\nexport interface LLMHandlerResolver<TOptions = unknown> {\n /** Map of mode identifiers to their corresponding LLM handlers */\n handlers: Record<string, LLMHandler>;\n /** The default mode when options don't specify one */\n defaultMode: string;\n /** Function to extract the mode from provider options */\n getMode: (options: TOptions | undefined) => string;\n}\n\n/**\n * Type guard to check if a value is an LLMHandlerResolver.\n */\nexport function isHandlerResolver<TOptions>(\n value: LLMHandler | LLMHandlerResolver<TOptions> | undefined\n): value is LLMHandlerResolver<TOptions> {\n return value !== undefined && 'handlers' in value && 'getMode' in value;\n}\n\ntype ProviderHandlers<TOptions = unknown> = {\n llm?: LLMHandler;\n embedding?: EmbeddingHandler;\n image?: ImageHandler;\n llmResolver?: LLMHandlerResolver<TOptions>;\n};\n\nconst providerHandlers = new WeakMap<object, ProviderHandlers<unknown>>();\n\n/**\n * Registers handler implementations for a provider.\n */\nexport function registerProviderHandlers<TOptions>(\n provider: ProviderIdentity,\n handlers: ProviderHandlers<TOptions>\n): void {\n providerHandlers.set(provider as object, handlers as ProviderHandlers<unknown>);\n}\n\nfunction getProviderHandlers<TOptions>(\n provider: ProviderIdentity\n): ProviderHandlers<TOptions> | undefined {\n return providerHandlers.get(provider as object) as ProviderHandlers<TOptions> | undefined;\n}\n\n/**\n * Resolves the correct LLM handler based on model reference options.\n *\n * For providers with multiple LLM handlers (e.g., OpenAI with 
responses/completions APIs),\n * this function determines which handler to use based on the options stored on the\n * ModelReference. This eliminates race conditions from shared mutable state.\n *\n * For providers with a single LLM handler, this simply returns that handler.\n *\n * @typeParam TOptions - Provider-specific options type\n * @param provider - The provider to resolve the handler from\n * @param options - The options from the ModelReference\n * @returns The resolved LLM handler, or undefined if LLM is not supported\n *\n * @internal\n */\nexport function resolveLLMHandler<TOptions = unknown>(\n provider: ProviderIdentity,\n options: TOptions | undefined\n): LLMHandler | undefined {\n const handlers = getProviderHandlers(provider);\n const resolver = handlers?.llmResolver;\n\n if (resolver) {\n const mode = resolver.getMode(options);\n return resolver.handlers[mode] ?? handlers?.llm;\n }\n\n return handlers?.llm;\n}\n\n/**\n * Resolves the embedding handler for a provider, if supported.\n *\n * @internal\n */\nexport function resolveEmbeddingHandler<TParams = unknown>(\n provider: ProviderIdentity\n): EmbeddingHandler<TParams> | undefined {\n const handlers = getProviderHandlers(provider);\n return handlers?.embedding as EmbeddingHandler<TParams> | undefined;\n}\n\n/**\n * Resolves the image handler for a provider, if supported.\n *\n * @internal\n */\nexport function resolveImageHandler<TParams = unknown>(\n provider: ProviderIdentity\n): ImageHandler<TParams> | undefined {\n const handlers = getProviderHandlers(provider);\n return handlers?.image as ImageHandler<TParams> | undefined;\n}\n","/**\n * @fileoverview Base provider interface and factory for the Universal Provider Protocol.\n *\n * This module provides the foundation for creating AI providers that conform to the\n * UPP specification. Providers are callable functions that create model references\n * and register internal handlers for LLM, embedding, and image modalities.\n *\n * @module core/provider\n */\n\nimport type {\n Provider,\n ModelReference,\n LLMHandler,\n EmbeddingHandler,\n ImageHandler,\n LLMProvider,\n EmbeddingProvider,\n ImageProvider,\n} from '../types/provider.ts';\nimport type { LLMHandlerResolver } from './provider-handlers.ts';\nimport { isHandlerResolver, registerProviderHandlers } from './provider-handlers.ts';\n\n\n/**\n * Configuration options for creating a new provider.\n *\n * @typeParam TOptions - Provider-specific options type\n *\n * @example\n * ```typescript\n * // Simple provider with single handler\n * const options: CreateProviderOptions = {\n * name: 'my-provider',\n * version: '1.0.0',\n * handlers: {\n * llm: createLLMHandler(),\n * embedding: createEmbeddingHandler(),\n * },\n * };\n *\n * // Provider with multiple LLM handlers (API modes)\n * const options: CreateProviderOptions<OpenAIOptions> = {\n * name: 'openai',\n * version: '1.0.0',\n * handlers: {\n * llm: {\n * handlers: { responses: handler1, completions: handler2 },\n * defaultMode: 'responses',\n * getMode: (opts) => opts?.api ?? 
'responses',\n * },\n * },\n * };\n * ```\n */\nexport interface CreateProviderOptions<TOptions = unknown> {\n /** Unique identifier for the provider */\n name: string;\n /** Semantic version string for the provider implementation */\n version: string;\n /** Handlers for supported modalities (LLM, embedding, image generation) */\n handlers: {\n /** Handler for language model completions, or resolver for multi-handler providers */\n llm?: LLMHandler | LLMHandlerResolver<TOptions>;\n /** Handler for text embeddings */\n embedding?: EmbeddingHandler;\n /** Handler for image generation */\n image?: ImageHandler;\n };\n /**\n * Custom function to create model references from options.\n * Use this to map provider options to providerConfig (e.g., betas to headers).\n */\n createModelReference?: (\n modelId: string,\n options: TOptions | undefined,\n provider: Provider<TOptions>\n ) => ModelReference<TOptions>;\n}\n\n\n/**\n * Creates a provider factory function with registered modality handlers.\n *\n * The returned provider is a callable function that creates model references\n * when invoked with a model ID. It exposes `name` and `version` metadata.\n *\n * @typeParam TOptions - Provider-specific options type (defaults to unknown)\n * @param options - Provider configuration including name, version, and handlers\n * @returns A callable Provider with handlers registered internally\n *\n * @example\n * ```typescript\n * // Create a basic provider\n * const anthropic = createProvider({\n * name: 'anthropic',\n * version: '1.0.0',\n * handlers: { llm: createLLMHandler() },\n * });\n *\n * // Use the provider to create a model reference\n * const model = anthropic('claude-sonnet-4-20250514');\n *\n * // Provider with custom options type\n * interface MyOptions { apiVersion?: 'v1' | 'v2' }\n * const myProvider = createProvider<MyOptions>({\n * name: 'my-provider',\n * version: '1.0.0',\n * handlers: { llm: handler },\n * });\n *\n * // Provider with multiple LLM handlers (API modes)\n * const openai = createProvider<OpenAIOptions>({\n * name: 'openai',\n * version: '1.0.0',\n * handlers: {\n * llm: {\n * handlers: { responses: responsesHandler, completions: completionsHandler },\n * defaultMode: 'responses',\n * getMode: (opts) => opts?.api ?? 'responses',\n * },\n * },\n * });\n * ```\n */\nexport function createProvider<TOptions = unknown>(\n options: CreateProviderOptions<TOptions>\n): Provider<TOptions> {\n // Resolve the default LLM handler for capabilities/bind\n const llmInput = options.handlers.llm;\n const hasResolver = isHandlerResolver<TOptions>(llmInput);\n const defaultLLMHandler = hasResolver ? 
llmInput.handlers[llmInput.defaultMode] : llmInput;\n\n if (hasResolver && !defaultLLMHandler) {\n throw new Error(\n `Provider '${options.name}' LLM resolver defaultMode '${llmInput.defaultMode}' has no handler`\n );\n }\n\n // Create the factory function\n const fn = function (modelId: string, modelOptions?: TOptions): ModelReference<TOptions> {\n if (options.createModelReference) {\n return options.createModelReference(modelId, modelOptions, provider);\n }\n // Default: store options on the reference for handler resolution\n return { modelId, provider, options: modelOptions };\n };\n\n Object.defineProperties(fn, {\n name: {\n value: options.name,\n writable: false,\n configurable: true,\n },\n version: {\n value: options.version,\n writable: false,\n configurable: true,\n },\n });\n\n const provider = fn as Provider<TOptions>;\n\n // If there's a resolver, set provider on all handlers\n if (hasResolver) {\n for (const handler of Object.values(llmInput.handlers)) {\n handler._setProvider?.(provider as unknown as LLMProvider);\n }\n } else if (defaultLLMHandler?._setProvider) {\n defaultLLMHandler._setProvider(provider as unknown as LLMProvider);\n }\n\n if (options.handlers.embedding?._setProvider) {\n options.handlers.embedding._setProvider(provider as unknown as EmbeddingProvider);\n }\n if (options.handlers.image?._setProvider) {\n options.handlers.image._setProvider(provider as unknown as ImageProvider);\n }\n\n registerProviderHandlers(provider, {\n llm: defaultLLMHandler,\n embedding: options.handlers.embedding,\n image: options.handlers.image,\n ...(hasResolver ? { llmResolver: llmInput } : {}),\n });\n\n return provider;\n}\n"],"mappings":";AAwBO,SAAS,aAAqB;AACnC,MAAI,OAAO,WAAW,eAAe,OAAO,YAAY;AACtD,WAAO,OAAO,WAAW;AAAA,EAC3B;AAEA,SAAO,uCAAuC,QAAQ,SAAS,CAAC,MAAM;AACpE,UAAM,IAAK,KAAK,OAAO,IAAI,KAAM;AACjC,UAAM,IAAI,MAAM,MAAM,IAAK,IAAI,IAAO;AACtC,WAAO,EAAE,SAAS,EAAE;AAAA,EACtB,CAAC;AACH;;;ACkDO,IAAe,UAAf,MAAuB;AAAA;AAAA,EAEnB;AAAA;AAAA,EAGA;AAAA;AAAA,EAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBT,YAAY,SAA0B;AACpC,SAAK,KAAK,SAAS,MAAM,WAAW;AACpC,SAAK,YAAY,oBAAI,KAAK;AAC1B,SAAK,WAAW,SAAS;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,IAAI,OAAe;AACjB,WAAO,KAAK,WAAW,EACpB,OAAO,CAAC,UAA8B,MAAM,SAAS,MAAM,EAC3D,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,MAAM;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,SAAuB;AACzB,WAAO,KAAK,WAAW,EAAE,OAAO,CAAC,UAA+B,MAAM,SAAS,OAAO;AAAA,EACxF;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,QAAsB;AACxB,WAAO,KAAK,WAAW,EAAE,OAAO,CAAC,UAA+B,MAAM,SAAS,OAAO;AAAA,EACxF;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,QAAsB;AACxB,WAAO,KAAK,WAAW,EAAE,OAAO,CAAC,UAA+B,MAAM,SAAS,OAAO;AAAA,EACxF;AACF;AAoBO,IAAM,cAAN,cAA0B,QAAQ;AAAA;AAAA,EAE9B,OAAO;AAAA;AAAA,EAGP;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQT,YAAY,SAAiC,SAA0B;AACrE,UAAM,OAAO;AACb,QAAI,OAAO,YAAY,UAAU;AAC/B,WAAK,UAAU,CAAC,EAAE,MAAM,QAAQ,MAAM,QAAQ,CAAC;AAAA,IACjD,OAAO;AACL,WAAK,UAAU;AAAA,IACjB;AAAA,EACF;AAAA,EAEU,aAA6B;AACrC,WAAO,KAAK;AAAA,EACd;AACF;AAoBO,IAAM,mBAAN,cAA+B,QAAQ;AAAA;AAAA,EAEnC,OAAO;AAAA;AAAA,EAGP;AAAA;AAAA,EAGA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAST,YACE,SACA,WACA,SACA;AACA,UAAM,OAAO;AACb,QAAI,OAAO,YAAY,UAAU;AAC/B,WAAK,UAAU,CAAC,EAAE,MAAM,QAAQ,MAAM,QAAQ,CAAC;AAAA,IACjD,OAAO;AACL,WAAK,UAAU;AAAA,IACjB;AACA,SAAK,YAAY;AAAA,EACnB;AAAA,EAEU,aAA6B;AACrC,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,IAAI,eAAwB;AAC1B,WAAO,KAAK,cAAc,UAAa,KAAK,UAAU,SAAS;AAAA,EACjE;AACF;AAgBO,IAAM,oBAAN,cAAgC,QAAQ;AAAA;AAAA,EAEpC,OAAO;AAAA;AAAA,EAGP;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQT,YAAY,SAAuB,SAA0B;AAC3D,UAAM,OAAO;AACb,SAAK,UAAU;AAAA,EACjB;AAAA,EAEU,aAA6B;AACrC,WAAO,KAAK,QAAQ,
IAAI,CAAC,YAAY;AAAA,MACnC,MAAM;AAAA,MACN,MACE,OAAO,OAAO,WAAW,WACrB,OAAO,SACP,KAAK,UAAU,OAAO,MAAM;AAAA,IACpC,EAAE;AAAA,EACJ;AACF;AAeO,SAAS,cAAc,KAAkC;AAC9D,SAAO,IAAI,SAAS;AACtB;AAkBO,SAAS,mBAAmB,KAAuC;AACxE,SAAO,IAAI,SAAS;AACtB;AAiBO,SAAS,oBAAoB,KAAwC;AAC1E,SAAO,IAAI,SAAS;AACtB;;;AC5TO,SAAS,kBACd,OACuC;AACvC,SAAO,UAAU,UAAa,cAAc,SAAS,aAAa;AACpE;AASA,IAAM,mBAAmB,oBAAI,QAA2C;AAKjE,SAAS,yBACd,UACA,UACM;AACN,mBAAiB,IAAI,UAAoB,QAAqC;AAChF;AAEA,SAAS,oBACP,UACwC;AACxC,SAAO,iBAAiB,IAAI,QAAkB;AAChD;AAkBO,SAAS,kBACd,UACA,SACwB;AACxB,QAAM,WAAW,oBAAoB,QAAQ;AAC7C,QAAM,WAAW,UAAU;AAE3B,MAAI,UAAU;AACZ,UAAM,OAAO,SAAS,QAAQ,OAAO;AACrC,WAAO,SAAS,SAAS,IAAI,KAAK,UAAU;AAAA,EAC9C;AAEA,SAAO,UAAU;AACnB;AAOO,SAAS,wBACd,UACuC;AACvC,QAAM,WAAW,oBAAoB,QAAQ;AAC7C,SAAO,UAAU;AACnB;AAOO,SAAS,oBACd,UACmC;AACnC,QAAM,WAAW,oBAAoB,QAAQ;AAC7C,SAAO,UAAU;AACnB;;;ACOO,SAAS,eACd,SACoB;AAEpB,QAAM,WAAW,QAAQ,SAAS;AAClC,QAAM,cAAc,kBAA4B,QAAQ;AACxD,QAAM,oBAAoB,cAAc,SAAS,SAAS,SAAS,WAAW,IAAI;AAElF,MAAI,eAAe,CAAC,mBAAmB;AACrC,UAAM,IAAI;AAAA,MACR,aAAa,QAAQ,IAAI,+BAA+B,SAAS,WAAW;AAAA,IAC9E;AAAA,EACF;AAGA,QAAM,KAAK,SAAU,SAAiB,cAAmD;AACvF,QAAI,QAAQ,sBAAsB;AAChC,aAAO,QAAQ,qBAAqB,SAAS,cAAc,QAAQ;AAAA,IACrE;AAEA,WAAO,EAAE,SAAS,UAAU,SAAS,aAAa;AAAA,EACpD;AAEA,SAAO,iBAAiB,IAAI;AAAA,IAC1B,MAAM;AAAA,MACJ,OAAO,QAAQ;AAAA,MACf,UAAU;AAAA,MACV,cAAc;AAAA,IAChB;AAAA,IACA,SAAS;AAAA,MACP,OAAO,QAAQ;AAAA,MACf,UAAU;AAAA,MACV,cAAc;AAAA,IAChB;AAAA,EACF,CAAC;AAED,QAAM,WAAW;AAGjB,MAAI,aAAa;AACf,eAAW,WAAW,OAAO,OAAO,SAAS,QAAQ,GAAG;AACtD,cAAQ,eAAe,QAAkC;AAAA,IAC3D;AAAA,EACF,WAAW,mBAAmB,cAAc;AAC1C,sBAAkB,aAAa,QAAkC;AAAA,EACnE;AAEA,MAAI,QAAQ,SAAS,WAAW,cAAc;AAC5C,YAAQ,SAAS,UAAU,aAAa,QAAwC;AAAA,EAClF;AACA,MAAI,QAAQ,SAAS,OAAO,cAAc;AACxC,YAAQ,SAAS,MAAM,aAAa,QAAoC;AAAA,EAC1E;AAEA,2BAAyB,UAAU;AAAA,IACjC,KAAK;AAAA,IACL,WAAW,QAAQ,SAAS;AAAA,IAC5B,OAAO,QAAQ,SAAS;AAAA,IACxB,GAAI,cAAc,EAAE,aAAa,SAAS,IAAI,CAAC;AAAA,EACjD,CAAC;AAED,SAAO;AACT;","names":[]}
package/dist/chunk-Y3GBJNA2.js
CHANGED
@@ -1,6 +1,6 @@
 import {
   UPPError
-} from "./chunk-U3FZWV4U.js";
+} from "./chunk-EDENPF3E.js";

 // src/http/keys.ts
 var RoundRobinKeys = class {
@@ -117,4 +117,4 @@ export {
   DynamicKey,
   resolveApiKey
 };
-//# sourceMappingURL=chunk-P5IRTEM5.js.map
+//# sourceMappingURL=chunk-Y3GBJNA2.js.map
package/dist/chunk-Z4ILICF5.js
CHANGED
@@ -96,7 +96,7 @@ var NoRetry = class {
    *
    * @returns Always returns null
    */
-  onRetry() {
+  onRetry(_error, _attempt) {
     return null;
   }
 };
@@ -231,4 +231,4 @@ export {
   TokenBucket,
   RetryAfterStrategy
 };
-//# sourceMappingURL=chunk-U4JJC2YX.js.map
+//# sourceMappingURL=chunk-Z4ILICF5.js.map
package/dist/chunk-Z4ILICF5.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/http/retry.ts"],"sourcesContent":["/**\n * Retry strategies for handling transient failures in HTTP requests.\n * @module http/retry\n */\n\nimport type { RetryStrategy } from '../types/provider.ts';\nimport type { UPPError } from '../types/errors.ts';\n\n/**\n * Implements exponential backoff with optional jitter for retry delays.\n *\n * The delay between retries doubles with each attempt, helping to:\n * - Avoid overwhelming servers during outages\n * - Reduce thundering herd effects when many clients retry simultaneously\n * - Give transient issues time to resolve\n *\n * Delay formula: min(baseDelay * 2^(attempt-1), maxDelay)\n * With jitter: delay * random(0.5, 1.0)\n *\n * Only retries on transient errors: RATE_LIMITED, NETWORK_ERROR, TIMEOUT, PROVIDER_ERROR\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Default configuration (3 retries, 1s base, 30s max, jitter enabled)\n * const retry = new ExponentialBackoff();\n *\n * // Custom configuration\n * const customRetry = new ExponentialBackoff({\n * maxAttempts: 5, // Up to 5 retry attempts\n * baseDelay: 500, // Start with 500ms delay\n * maxDelay: 60000, // Cap at 60 seconds\n * jitter: false // Disable random jitter\n * });\n *\n * // Use with provider\n * const provider = createOpenAI({\n * retryStrategy: customRetry\n * });\n * ```\n */\nexport class ExponentialBackoff implements RetryStrategy {\n private maxAttempts: number;\n private baseDelay: number;\n private maxDelay: number;\n private jitter: boolean;\n\n /**\n * Creates a new ExponentialBackoff instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.baseDelay - Initial delay in milliseconds (default: 1000)\n * @param options.maxDelay - Maximum delay cap in milliseconds (default: 30000)\n * @param options.jitter - Whether to add random jitter to delays (default: true)\n */\n constructor(options: {\n maxAttempts?: number;\n baseDelay?: number;\n maxDelay?: number;\n jitter?: boolean;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.baseDelay = options.baseDelay ?? 1000;\n this.maxDelay = options.maxDelay ?? 30000;\n this.jitter = options.jitter ?? 
true;\n }\n\n /**\n * Determines whether to retry and calculates the delay.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds before next retry, or null to stop retrying\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (!this.isRetryable(error)) {\n return null;\n }\n\n let delay = this.baseDelay * Math.pow(2, attempt - 1);\n delay = Math.min(delay, this.maxDelay);\n\n if (this.jitter) {\n delay = delay * (0.5 + Math.random());\n }\n\n return Math.floor(delay);\n }\n\n /**\n * Checks if an error is eligible for retry.\n *\n * @param error - The error to evaluate\n * @returns True if the error is transient and retryable\n */\n private isRetryable(error: UPPError): boolean {\n return (\n error.code === 'RATE_LIMITED' ||\n error.code === 'NETWORK_ERROR' ||\n error.code === 'TIMEOUT' ||\n error.code === 'PROVIDER_ERROR'\n );\n }\n}\n\n/**\n * Implements linear backoff where delays increase proportionally with each attempt.\n *\n * Unlike exponential backoff, linear backoff increases delays at a constant rate:\n * - Attempt 1: delay * 1 (e.g., 1000ms)\n * - Attempt 2: delay * 2 (e.g., 2000ms)\n * - Attempt 3: delay * 3 (e.g., 3000ms)\n *\n * This strategy is simpler and more predictable than exponential backoff,\n * suitable for scenarios where gradual delay increase is preferred over\n * aggressive backoff.\n *\n * Only retries on transient errors: RATE_LIMITED, NETWORK_ERROR, TIMEOUT, PROVIDER_ERROR\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Default configuration (3 retries, 1s delay increment)\n * const retry = new LinearBackoff();\n *\n * // Custom configuration\n * const customRetry = new LinearBackoff({\n * maxAttempts: 4, // Up to 4 retry attempts\n * delay: 2000 // 2s, 4s, 6s, 8s delays\n * });\n *\n * // Use with provider\n * const provider = createAnthropic({\n * retryStrategy: customRetry\n * });\n * ```\n */\nexport class LinearBackoff implements RetryStrategy {\n private maxAttempts: number;\n private delay: number;\n\n /**\n * Creates a new LinearBackoff instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.delay - Base delay multiplier in milliseconds (default: 1000)\n */\n constructor(options: {\n maxAttempts?: number;\n delay?: number;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.delay = options.delay ?? 
1000;\n }\n\n /**\n * Determines whether to retry and calculates the linear delay.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds (delay * attempt), or null to stop retrying\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (!this.isRetryable(error)) {\n return null;\n }\n\n return this.delay * attempt;\n }\n\n /**\n * Checks if an error is eligible for retry.\n *\n * @param error - The error to evaluate\n * @returns True if the error is transient and retryable\n */\n private isRetryable(error: UPPError): boolean {\n return (\n error.code === 'RATE_LIMITED' ||\n error.code === 'NETWORK_ERROR' ||\n error.code === 'TIMEOUT' ||\n error.code === 'PROVIDER_ERROR'\n );\n }\n}\n\n/**\n * Disables all retry behavior, failing immediately on any error.\n *\n * Use this strategy when:\n * - Retries are handled at a higher level in your application\n * - You want immediate failure feedback\n * - The operation is not idempotent\n * - Time sensitivity requires fast failure\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Disable retries for time-sensitive operations\n * const provider = createOpenAI({\n * retryStrategy: new NoRetry()\n * });\n * ```\n */\nexport class NoRetry implements RetryStrategy {\n /**\n * Always returns null to indicate no retry should be attempted.\n *\n * @returns Always returns null\n */\n onRetry(_error: UPPError, _attempt: number): null {\n return null;\n }\n}\n\n/**\n * Implements token bucket rate limiting with automatic refill.\n *\n * The token bucket algorithm provides smooth rate limiting by:\n * - Maintaining a bucket of tokens that replenish over time\n * - Consuming one token per request\n * - Delaying requests when the bucket is empty\n * - Allowing burst traffic up to the bucket capacity\n *\n * This is particularly useful for:\n * - Client-side rate limiting to avoid hitting API rate limits\n * - Smoothing request patterns to maintain consistent throughput\n * - Preventing accidental API abuse\n *\n * Unlike other retry strategies, TokenBucket implements {@link beforeRequest}\n * to proactively delay requests before they are made.\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Allow 10 requests burst, refill 1 token per second\n * const bucket = new TokenBucket({\n * maxTokens: 10, // Burst capacity\n * refillRate: 1, // Tokens per second\n * maxAttempts: 3 // Retry attempts on rate limit\n * });\n *\n * // Aggressive rate limiting: 5 req/s sustained\n * const strictBucket = new TokenBucket({\n * maxTokens: 5,\n * refillRate: 5\n * });\n *\n * // Use with provider\n * const provider = createOpenAI({\n * retryStrategy: bucket\n * });\n * ```\n */\nexport class TokenBucket implements RetryStrategy {\n private tokens: number;\n private maxTokens: number;\n private refillRate: number;\n private lastRefill: number;\n private maxAttempts: number;\n\n /**\n * Creates a new TokenBucket instance.\n *\n * @param options - Configuration options\n * @param options.maxTokens - Maximum bucket capacity (default: 10)\n * @param options.refillRate - Tokens added per second (default: 1)\n * @param options.maxAttempts - Maximum retry attempts on rate limit (default: 3)\n */\n constructor(options: {\n maxTokens?: number;\n refillRate?: number;\n maxAttempts?: number;\n } = {}) {\n this.maxTokens = options.maxTokens ?? 
10;\n this.refillRate = options.refillRate ?? 1;\n this.maxAttempts = options.maxAttempts ?? 3;\n this.tokens = this.maxTokens;\n this.lastRefill = Date.now();\n }\n\n /**\n * Called before each request to consume a token or calculate wait time.\n *\n * Refills the bucket based on elapsed time, then either:\n * - Returns 0 if a token is available (consumed immediately)\n * - Returns the wait time in milliseconds until the next token\n *\n * @returns Delay in milliseconds before the request can proceed\n */\n beforeRequest(): number {\n this.refill();\n\n if (this.tokens >= 1) {\n this.tokens -= 1;\n return 0;\n }\n\n const msPerToken = 1000 / this.refillRate;\n return Math.ceil(msPerToken);\n }\n\n /**\n * Handles retry logic for rate-limited requests.\n *\n * Only retries on RATE_LIMITED errors, waiting for bucket refill.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds (time for 2 tokens), or null to stop\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (error.code !== 'RATE_LIMITED') {\n return null;\n }\n\n const msPerToken = 1000 / this.refillRate;\n return Math.ceil(msPerToken * 2);\n }\n\n /**\n * Resets the bucket to full capacity.\n *\n * Called automatically on successful requests to restore available tokens.\n */\n reset(): void {\n this.tokens = this.maxTokens;\n this.lastRefill = Date.now();\n }\n\n /**\n * Refills the bucket based on elapsed time since last refill.\n */\n private refill(): void {\n const now = Date.now();\n const elapsed = (now - this.lastRefill) / 1000;\n const newTokens = elapsed * this.refillRate;\n\n this.tokens = Math.min(this.maxTokens, this.tokens + newTokens);\n this.lastRefill = now;\n }\n}\n\n/**\n * Respects server-provided Retry-After headers for optimal retry timing.\n *\n * When servers return a 429 (Too Many Requests) response, they often include\n * a Retry-After header indicating when the client should retry. This strategy\n * uses that information for precise retry timing.\n *\n * Benefits over fixed backoff strategies:\n * - Follows server recommendations for optimal retry timing\n * - Avoids retrying too early and wasting requests\n * - Adapts to dynamic rate limit windows\n *\n * If no Retry-After header is provided, falls back to a configurable delay.\n * Only retries on RATE_LIMITED errors.\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Use server-recommended retry timing\n * const retryAfter = new RetryAfterStrategy({\n * maxAttempts: 5, // Retry up to 5 times\n * fallbackDelay: 10000 // 10s fallback if no header\n * });\n *\n * // The doFetch function automatically calls setRetryAfter\n * // when a Retry-After header is present in the response\n *\n * const provider = createOpenAI({\n * retryStrategy: retryAfter\n * });\n * ```\n */\nexport class RetryAfterStrategy implements RetryStrategy {\n private maxAttempts: number;\n private fallbackDelay: number;\n private lastRetryAfter?: number;\n\n /**\n * Creates a new RetryAfterStrategy instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.fallbackDelay - Delay in ms when no Retry-After header (default: 5000)\n */\n constructor(options: {\n maxAttempts?: number;\n fallbackDelay?: number;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 
3;\n this.fallbackDelay = options.fallbackDelay ?? 5000;\n }\n\n /**\n * Sets the retry delay from a Retry-After header value.\n *\n * Called by doFetch when a Retry-After header is present in the response.\n * The value is used for the next onRetry call and then cleared.\n *\n * @param seconds - The Retry-After value in seconds\n */\n setRetryAfter(seconds: number): void {\n this.lastRetryAfter = seconds * 1000;\n }\n\n /**\n * Determines retry delay using Retry-After header or fallback.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay from Retry-After header or fallback, null to stop\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (error.code !== 'RATE_LIMITED') {\n return null;\n }\n\n const delay = this.lastRetryAfter ?? this.fallbackDelay;\n this.lastRetryAfter = undefined;\n return delay;\n }\n}\n"],"mappings":";AA0CO,IAAM,qBAAN,MAAkD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWR,YAAY,UAKR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,WAAW,QAAQ,YAAY;AACpC,SAAK,SAAS,QAAQ,UAAU;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,YAAY,KAAK,GAAG;AAC5B,aAAO;AAAA,IACT;AAEA,QAAI,QAAQ,KAAK,YAAY,KAAK,IAAI,GAAG,UAAU,CAAC;AACpD,YAAQ,KAAK,IAAI,OAAO,KAAK,QAAQ;AAErC,QAAI,KAAK,QAAQ;AACf,cAAQ,SAAS,MAAM,KAAK,OAAO;AAAA,IACrC;AAEA,WAAO,KAAK,MAAM,KAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,OAA0B;AAC5C,WACE,MAAM,SAAS,kBACf,MAAM,SAAS,mBACf,MAAM,SAAS,aACf,MAAM,SAAS;AAAA,EAEnB;AACF;AAmCO,IAAM,gBAAN,MAA6C;AAAA,EAC1C;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASR,YAAY,UAGR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,QAAQ,QAAQ,SAAS;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,YAAY,KAAK,GAAG;AAC5B,aAAO;AAAA,IACT;AAEA,WAAO,KAAK,QAAQ;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,OAA0B;AAC5C,WACE,MAAM,SAAS,kBACf,MAAM,SAAS,mBACf,MAAM,SAAS,aACf,MAAM,SAAS;AAAA,EAEnB;AACF;AAqBO,IAAM,UAAN,MAAuC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM5C,QAAQ,QAAkB,UAAwB;AAChD,WAAO;AAAA,EACT;AACF;AA0CO,IAAM,cAAN,MAA2C;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUR,YAAY,UAIR,CAAC,GAAG;AACN,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,SAAS,KAAK;AACnB,SAAK,aAAa,KAAK,IAAI;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,gBAAwB;AACtB,SAAK,OAAO;AAEZ,QAAI,KAAK,UAAU,GAAG;AACpB,WAAK,UAAU;AACf,aAAO;AAAA,IACT;AAEA,UAAM,aAAa,MAAO,KAAK;AAC/B,WAAO,KAAK,KAAK,UAAU;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO;AAAA,IACT;AAEA,UAAM,aAAa,MAAO,KAAK;AAC/B,WAAO,KAAK,KAAK,aAAa,CAAC;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QAAc;AACZ,SAAK,SAAS,KAAK;AACnB,SAAK,aAAa,KAAK,IAAI;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA,EAKQ,SAAe;AACrB,UAAM,MAAM,KAAK,IAAI;AACrB,UAAM,WAAW,MAAM,KAAK,cAAc;AAC1C,UAAM,YAAY,UAAU,KAAK;AAEjC,SAAK,SAAS,KAAK,IAAI,KAAK,WAAW,KAAK,SAAS,SAAS;AAC9D,SAAK,aAAa;AAAA,EACpB;AACF;AAmCO,IAAM,qBAAN,MAAkD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASR,YAAY,UAGR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,gBAAgB,QAAQ,iBAAiB;AAAA,EAChD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;
AAAA;AAAA;AAAA,EAUA,cAAc,SAAuB;AACnC,SAAK,iBAAiB,UAAU;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO;AAAA,IACT;AAEA,UAAM,QAAQ,KAAK,kBAAkB,KAAK;AAC1C,SAAK,iBAAiB;AACtB,WAAO;AAAA,EACT;AACF;","names":[]}
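The retry strategies documented in the source map above keep their existing options; only the NoRetry.onRetry signature changed. A minimal configuration sketch, assuming the strategies are re-exported from the package's http entry point (the subpath and the option values are illustrative):

```typescript
import { ExponentialBackoff, NoRetry } from '@providerprotocol/ai/http'; // assumed subpath

// Exponential backoff: min(baseDelay * 2^(attempt - 1), maxDelay), with jitter.
const retry = new ExponentialBackoff({
  maxAttempts: 5,  // retry up to 5 times
  baseDelay: 500,  // first retry after ~500 ms
  maxDelay: 60000, // never wait more than 60 s
  jitter: true,    // randomize delays to avoid thundering herds
});

// NoRetry.onRetry now accepts (and ignores) the error and attempt arguments,
// matching the RetryStrategy interface; its behavior is unchanged.
const failFast = new NoRetry();
```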
package/dist/google/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { g as Provider } from '../provider-DGQHYE6I.js';

 /**
  * Provider-specific parameters for Google Gemini API requests.
@@ -98,7 +98,7 @@ interface GoogleLLMParams {
  * const model = llm({
  *   model: google('gemini-2.5-flash'),
  *   params: {
- *
+ *     tools: [
  *       tools.googleSearch(),
  *       tools.codeExecution(),
  *     ],
@@ -106,14 +106,14 @@ interface GoogleLLMParams {
  *   });
  * ```
  */
-
+  tools?: GoogleBuiltInTool[];
 /**
  * Tool configuration for retrieval (e.g., user location for Maps).
  *
  * @example
  * ```typescript
  * const params: GoogleLLMParams = {
- *
+ *   tools: [tools.googleMaps()],
  *   toolConfig: {
  *     retrievalConfig: {
  *       latLng: { latitude: 40.758896, longitude: -73.985130 },
@@ -633,7 +633,7 @@ declare function fileSearchTool(options: {
  * const model = llm({
  *   model: google('gemini-2.5-flash'),
  *   params: {
- *
+ *     tools: [
  *       tools.googleSearch(),
  *       tools.codeExecution(),
  *     ],
@@ -878,25 +878,20 @@ interface GoogleEmbedParams {
  * @example
  * ```typescript
  * import { google } from './providers/google';
+ * import { llm } from './core/llm';
  *
- *
- *
- *
- * // Simple completion
- * const response = await gemini.complete({
- *   messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
+ * const gemini = llm({
+ *   model: google('gemini-1.5-pro'),
  *   config: { apiKey: process.env.GOOGLE_API_KEY },
  * });
  *
- *
- *
- *   messages: [{ role: 'user', content: [{ type: 'text', text: 'Tell me a story' }] }],
- *   config: { apiKey: process.env.GOOGLE_API_KEY },
- * });
+ * const turn = await gemini.generate('Hello!');
+ * console.log(turn.response.text);
  *
+ * const stream = gemini.stream('Tell me a story');
  * for await (const event of stream) {
  *   if (event.type === 'text_delta') {
- *     process.stdout.write(event.delta.text);
+ *     process.stdout.write(event.delta.text ?? '');
  *   }
  * }
  * ```
@@ -913,12 +908,14 @@ interface GoogleEmbedParams {
  * });
  *
  * // Use cache in requests
- * const
- *
+ * const cachedModel = llm({
+ *   model: google('gemini-3-flash-preview'),
  *   config: { apiKey: process.env.GOOGLE_API_KEY },
  *   params: { cachedContent: cacheEntry.name },
  * });
  *
+ * const response = await cachedModel.generate('Review this function');
+ *
  * // Manage caches
  * await google.cache.update(cacheEntry.name, { ttl: '7200s' }, apiKey);
  * await google.cache.delete(cacheEntry.name, apiKey);
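The d.ts changes above expose the built-in Google tools under a `tools` params key, and the google/index.js hunk below renames the destructured key to match. A minimal usage sketch assembled from the JSDoc examples in this section; the import paths are assumptions about the package's public entry points (the JSDoc itself uses relative source paths):

```typescript
import { llm } from '@providerprotocol/ai';               // assumed entry point
import { google, tools } from '@providerprotocol/ai/google'; // assumed subpath

const model = llm({
  model: google('gemini-2.5-flash'),
  config: { apiKey: process.env.GOOGLE_API_KEY },
  params: {
    // Built-in Google tools now go under `tools` (the transform maps them onto the request).
    tools: [tools.googleSearch(), tools.codeExecution()],
  },
});

// Generate and read the response, mirroring the JSDoc example above.
const turn = await model.generate('What changed in the latest Node.js release?');
console.log(turn.response.text);
```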
package/dist/google/index.js
CHANGED
@@ -1,32 +1,30 @@
-import {
-  createProvider
-} from "../chunk-MSR5P65T.js";
 import {
   Image
 } from "../chunk-WAKD3OO5.js";
 import {
   AssistantMessage,
+  createProvider,
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-UMKWXGO3.js";
+} from "../chunk-M4BMM5IB.js";
 import {
   parseSSEStream
 } from "../chunk-Z7RBRCRN.js";
 import {
   resolveApiKey
-} from "../chunk-P5IRTEM5.js";
+} from "../chunk-Y3GBJNA2.js";
 import {
   UPPError,
   doFetch,
   doStreamFetch,
   normalizeHttpError
-} from "../chunk-U3FZWV4U.js";
+} from "../chunk-EDENPF3E.js";

 // src/providers/google/transform.ts
 function transformRequest(request, modelId) {
   const params = request.params ?? {};
-  const { cachedContent, builtInTools, toolConfig, ...generationParams } = params;
+  const { cachedContent, tools: builtInTools, toolConfig, ...generationParams } = params;
   const googleRequest = {
     contents: transformMessages(request.messages)
   };
@@ -51,17 +49,17 @@ function transformRequest(request, modelId) {
   if (Object.keys(generationConfig).length > 0) {
     googleRequest.generationConfig = generationConfig;
   }
-  const
+  const requestTools = [];
   if (request.tools && request.tools.length > 0) {
-
+    requestTools.push({
       functionDeclarations: request.tools.map(transformTool)
     });
   }
   if (builtInTools && builtInTools.length > 0) {
-
+    requestTools.push(...builtInTools);
   }
-  if (
-  googleRequest.tools =
+  if (requestTools.length > 0) {
+    googleRequest.tools = requestTools;
   }
   if (toolConfig) {
     googleRequest.toolConfig = toolConfig;
@@ -490,7 +488,7 @@ function createLLMHandler() {
       for await (const data of parseSSEStream(response.body)) {
         if (typeof data === "object" && data !== null) {
           const chunk = data;
-          if (
+          if (chunk.error) {
             const error = new UPPError(
               chunk.error.message,
               "PROVIDER_ERROR",
@@ -570,21 +568,10 @@ function createEmbeddingHandler() {
       );
     }
     const embedRequest = {
+      ...request.params,
       model: `models/${modelId}`,
       content: { parts: [{ text }] }
     };
-    if (request.params?.taskType !== void 0) {
-      embedRequest.taskType = request.params.taskType;
-    }
-    if (request.params?.title !== void 0) {
-      embedRequest.title = request.params.title;
-    }
-    if (request.params?.outputDimensionality !== void 0) {
-      embedRequest.outputDimensionality = request.params.outputDimensionality;
-    }
-    if (request.params?.autoTruncate !== void 0) {
-      embedRequest.autoTruncate = request.params.autoTruncate;
-    }
     return embedRequest;
   });
   const url = `${baseUrl}/models/${modelId}:batchEmbedContents?key=${apiKey}`;
@@ -704,16 +691,7 @@ async function executeGenerate(modelId, request) {
   return transformResponse2(data);
 }
 function buildParameters(params) {
-
-  if (!params) return parameters;
-  if (params.sampleCount !== void 0) parameters.sampleCount = params.sampleCount;
-  if (params.imageSize !== void 0) parameters.imageSize = params.imageSize;
-  if (params.aspectRatio !== void 0) parameters.aspectRatio = params.aspectRatio;
-  if (params.personGeneration !== void 0) parameters.personGeneration = params.personGeneration;
-  if (params.safetyFilterLevel !== void 0) parameters.safetyFilterLevel = params.safetyFilterLevel;
-  if (params.addWatermark !== void 0) parameters.addWatermark = params.addWatermark;
-  if (params.negativePrompt !== void 0) parameters.negativePrompt = params.negativePrompt;
-  return parameters;
+  return params ? { ...params } : {};
 }
 function transformResponse2(data) {
   if (!data.predictions || data.predictions.length === 0) {
@@ -870,7 +848,7 @@ var tools = {
 var baseProvider = createProvider({
   name: "google",
   version: "1.0.0",
-
+  handlers: {
     llm: createLLMHandler(),
     embedding: createEmbeddingHandler(),
     image: createImageHandler()