@providerprotocol/ai 0.0.19 → 0.0.21
This diff compares the contents of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
- package/README.md +82 -9
- package/dist/anthropic/index.d.ts +184 -14
- package/dist/anthropic/index.js +214 -86
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-5FEAOEXV.js → chunk-EDENPF3E.js} +57 -103
- package/dist/chunk-EDENPF3E.js.map +1 -0
- package/dist/{chunk-UMKWXGO3.js → chunk-M4BMM5IB.js} +86 -2
- package/dist/chunk-M4BMM5IB.js.map +1 -0
- package/dist/chunk-Y3GBJNA2.js +120 -0
- package/dist/chunk-Y3GBJNA2.js.map +1 -0
- package/dist/{chunk-U4JJC2YX.js → chunk-Z4ILICF5.js} +2 -2
- package/dist/chunk-Z4ILICF5.js.map +1 -0
- package/dist/google/index.d.ts +16 -19
- package/dist/google/index.js +18 -40
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +2 -2
- package/dist/http/index.js +5 -4
- package/dist/index.d.ts +101 -38
- package/dist/index.js +69 -43
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +14 -16
- package/dist/ollama/index.js +9 -11
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +25 -133
- package/dist/openai/index.js +31 -85
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +28 -53
- package/dist/openrouter/index.js +24 -47
- package/dist/openrouter/index.js.map +1 -1
- package/dist/provider-DGQHYE6I.d.ts +1319 -0
- package/dist/proxy/index.d.ts +194 -12
- package/dist/proxy/index.js +37 -65
- package/dist/proxy/index.js.map +1 -1
- package/dist/{retry-DR7YRJDz.d.ts → retry-Pcs3hnbu.d.ts} +2 -2
- package/dist/{stream-DRHy6q1a.d.ts → stream-Di9acos2.d.ts} +1 -1
- package/dist/xai/index.d.ts +16 -88
- package/dist/xai/index.js +34 -62
- package/dist/xai/index.js.map +1 -1
- package/package.json +4 -1
- package/dist/chunk-5FEAOEXV.js.map +0 -1
- package/dist/chunk-DZQHVGNV.js +0 -71
- package/dist/chunk-DZQHVGNV.js.map +0 -1
- package/dist/chunk-MSR5P65T.js +0 -39
- package/dist/chunk-MSR5P65T.js.map +0 -1
- package/dist/chunk-U4JJC2YX.js.map +0 -1
- package/dist/chunk-UMKWXGO3.js.map +0 -1
- package/dist/content-DEl3z_W2.d.ts +0 -276
- package/dist/image-Dhq-Yuq4.d.ts +0 -456
- package/dist/provider-BBMBZuGn.d.ts +0 -570
package/dist/ollama/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../src/providers/ollama/transform.ts","../../src/providers/ollama/llm.ts","../../src/providers/ollama/embed.ts","../../src/providers/ollama/index.ts"],"sourcesContent":["/**\n * @fileoverview Transformation utilities for Ollama provider.\n *\n * This module handles bidirectional transformation between the Unified Provider\n * Protocol (UPP) format and Ollama's native API format. It includes:\n *\n * - Request transformation (UPP to Ollama)\n * - Response transformation (Ollama to UPP)\n * - Stream chunk processing\n * - Message format conversion\n *\n * @module providers/ollama/transform\n */\n\nimport type { LLMRequest, LLMResponse } from '../../types/llm.ts';\nimport type { Message } from '../../types/messages.ts';\nimport type { StreamEvent } from '../../types/stream.ts';\nimport type { Tool, ToolCall } from '../../types/tool.ts';\nimport type { TokenUsage } from '../../types/turn.ts';\nimport type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';\nimport {\n AssistantMessage,\n isUserMessage,\n isAssistantMessage,\n isToolResultMessage,\n} from '../../types/messages.ts';\nimport type {\n OllamaLLMParams,\n OllamaRequest,\n OllamaMessage,\n OllamaTool,\n OllamaResponse,\n OllamaStreamChunk,\n OllamaToolCall,\n OllamaOptions,\n} from './types.ts';\n\n/**\n * Transforms a UPP LLM request into Ollama's native API format.\n *\n * This function handles the mapping between UPP's unified request structure\n * and Ollama's specific requirements, including:\n *\n * - Converting messages to Ollama's message format\n * - Mapping model parameters to Ollama's nested `options` structure\n * - Handling top-level parameters like `keep_alive` and `think`\n * - Converting tools to Ollama's function format\n * - Setting up structured output via the `format` field\n *\n * Parameters are spread to allow pass-through of any Ollama API fields,\n * enabling developers to use new API features without library updates.\n *\n * @typeParam TParams - The parameter type extending OllamaLLMParams\n * @param request - The UPP-format LLM request\n * @param modelId - The Ollama model identifier (e.g., 'llama3.2', 'mistral')\n * @returns The transformed Ollama API request body\n *\n * @example\n * ```typescript\n * const ollamaRequest = transformRequest(\n * {\n * messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],\n * config: {},\n * params: { temperature: 0.7 }\n * },\n * 'llama3.2'\n * );\n * ```\n */\nexport function transformRequest<TParams extends OllamaLLMParams>(\n request: LLMRequest<TParams>,\n modelId: string\n): OllamaRequest {\n const params = (request.params ?? 
{}) as OllamaLLMParams;\n\n // Extract top-level params vs options params\n const {\n keep_alive,\n think,\n logprobs,\n top_logprobs,\n ...optionsParams\n } = params;\n\n // Spread params to pass through all fields, then set required fields\n const ollamaRequest: OllamaRequest = {\n model: modelId,\n messages: transformMessages(request.messages, request.system),\n };\n\n // Add top-level params if provided\n if (keep_alive !== undefined) ollamaRequest.keep_alive = keep_alive;\n if (think !== undefined) ollamaRequest.think = think;\n if (logprobs !== undefined) ollamaRequest.logprobs = logprobs;\n if (top_logprobs !== undefined) ollamaRequest.top_logprobs = top_logprobs;\n\n // Spread remaining params into options to pass through all model parameters\n if (Object.keys(optionsParams).length > 0) {\n ollamaRequest.options = optionsParams as OllamaOptions;\n }\n\n // Tools come from request, not params\n if (request.tools && request.tools.length > 0) {\n ollamaRequest.tools = request.tools.map(transformTool);\n }\n\n // Structured output via format field\n if (request.structure) {\n ollamaRequest.format = request.structure as unknown as Record<string, unknown>;\n }\n\n return ollamaRequest;\n}\n\n/**\n * Normalizes system prompt to string.\n * Converts array format to concatenated string for providers that only support strings.\n */\nfunction normalizeSystem(system: string | unknown[] | undefined): string | undefined {\n if (!system) return undefined;\n if (typeof system === 'string') return system;\n return (system as Array<{text?: string}>)\n .map(block => block.text ?? '')\n .filter(text => text.length > 0)\n .join('\\n\\n');\n}\n\n/**\n * Transforms UPP messages to Ollama's message format.\n *\n * Handles conversion of:\n * - User messages with text and image content\n * - Assistant messages with text and tool calls\n * - Tool result messages\n * - System prompts (prepended as first message)\n *\n * Image handling:\n * - Base64 images are passed directly\n * - Byte arrays are converted to base64\n * - URL images are converted to text placeholders (Ollama limitation)\n *\n * @param messages - Array of UPP messages to transform\n * @param system - Optional system prompt (string or array, normalized to string)\n * @returns Array of Ollama-formatted messages\n */\nfunction transformMessages(messages: Message[], system?: string | unknown[]): OllamaMessage[] {\n const ollamaMessages: OllamaMessage[] = [];\n const normalizedSystem = normalizeSystem(system);\n\n // System prompt as first message\n if (normalizedSystem) {\n ollamaMessages.push({\n role: 'system',\n content: normalizedSystem,\n });\n }\n\n for (const msg of messages) {\n if (isUserMessage(msg)) {\n const textContent: string[] = [];\n const images: string[] = [];\n\n for (const block of msg.content) {\n if (block.type === 'text') {\n textContent.push(block.text);\n } else if (block.type === 'image') {\n const imageBlock = block as ImageBlock;\n if (imageBlock.source.type === 'base64') {\n images.push(imageBlock.source.data);\n } else if (imageBlock.source.type === 'bytes') {\n // Convert bytes to base64\n const base64 = btoa(\n Array.from(imageBlock.source.data)\n .map((b) => String.fromCharCode(b))\n .join('')\n );\n images.push(base64);\n } else if (imageBlock.source.type === 'url') {\n // Ollama doesn't support URL images directly\n // Would need to fetch and convert, for now just add as text\n textContent.push(`[Image: ${imageBlock.source.url}]`);\n }\n }\n }\n\n const message: OllamaMessage = {\n role: 'user',\n content: 
textContent.join('\\n'),\n };\n\n if (images.length > 0) {\n message.images = images;\n }\n\n ollamaMessages.push(message);\n } else if (isAssistantMessage(msg)) {\n const textContent = msg.content\n .filter((block): block is TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('\\n');\n\n const message: OllamaMessage = {\n role: 'assistant',\n content: textContent,\n };\n\n // Add tool calls if present\n if (msg.toolCalls && msg.toolCalls.length > 0) {\n message.tool_calls = msg.toolCalls.map((call) => ({\n function: {\n name: call.toolName,\n arguments: call.arguments,\n },\n }));\n }\n\n ollamaMessages.push(message);\n } else if (isToolResultMessage(msg)) {\n // Tool results are sent as 'tool' role messages\n for (const result of msg.results) {\n ollamaMessages.push({\n role: 'tool',\n tool_name: result.toolCallId, // In our UPP, toolCallId maps to tool name for Ollama\n content:\n typeof result.result === 'string'\n ? result.result\n : JSON.stringify(result.result),\n });\n }\n }\n }\n\n return ollamaMessages;\n}\n\n/**\n * Transforms a UPP tool definition to Ollama's function format.\n *\n * Ollama uses the OpenAI-style function calling format with a\n * `type: 'function'` wrapper around the function definition.\n *\n * @param tool - The UPP tool definition\n * @returns The Ollama-formatted tool definition\n */\nfunction transformTool(tool: Tool): OllamaTool {\n return {\n type: 'function',\n function: {\n name: tool.name,\n description: tool.description,\n parameters: {\n type: 'object',\n properties: tool.parameters.properties,\n required: tool.parameters.required,\n },\n },\n };\n}\n\n/**\n * Transforms an Ollama API response to the UPP LLMResponse format.\n *\n * This function extracts and normalizes:\n * - Text content from the assistant message\n * - Tool calls with their arguments\n * - Token usage statistics (prompt + completion tokens)\n * - Stop reason mapping (stop -> end_turn, length -> max_tokens)\n * - Ollama-specific metadata (timings, model info, thinking content)\n *\n * For structured output requests, the response content is automatically\n * parsed as JSON and stored in the `data` field.\n *\n * @param data - The raw Ollama API response\n * @returns The normalized UPP LLM response\n */\nexport function transformResponse(data: OllamaResponse): LLMResponse {\n const textContent: TextBlock[] = [];\n const toolCalls: ToolCall[] = [];\n let structuredData: unknown;\n\n // Add main content\n if (data.message.content) {\n textContent.push({ type: 'text', text: data.message.content });\n\n // Try to parse as JSON for structured output\n try {\n structuredData = JSON.parse(data.message.content);\n } catch {\n // Not valid JSON - that's fine, might not be structured output\n }\n }\n\n // Extract tool calls\n if (data.message.tool_calls) {\n for (const call of data.message.tool_calls) {\n toolCalls.push({\n toolCallId: call.function.name, // Ollama doesn't have separate IDs, use name\n toolName: call.function.name,\n arguments: call.function.arguments,\n });\n }\n }\n\n const message = new AssistantMessage(\n textContent,\n toolCalls.length > 0 ? 
toolCalls : undefined,\n {\n metadata: {\n ollama: {\n model: data.model,\n created_at: data.created_at,\n done_reason: data.done_reason,\n thinking: data.message.thinking,\n total_duration: data.total_duration,\n load_duration: data.load_duration,\n prompt_eval_duration: data.prompt_eval_duration,\n eval_duration: data.eval_duration,\n logprobs: data.logprobs,\n },\n },\n }\n );\n\n // Calculate token usage (Ollama doesn't support API-level prompt caching)\n const usage: TokenUsage = {\n inputTokens: data.prompt_eval_count ?? 0,\n outputTokens: data.eval_count ?? 0,\n totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0),\n cacheReadTokens: 0,\n cacheWriteTokens: 0,\n };\n\n // Map done_reason to standard stop reason\n let stopReason = 'end_turn';\n if (data.done_reason === 'length') {\n stopReason = 'max_tokens';\n } else if (data.done_reason === 'stop') {\n stopReason = 'end_turn';\n } else if (toolCalls.length > 0) {\n stopReason = 'tool_use';\n }\n\n return {\n message,\n usage,\n stopReason,\n data: structuredData,\n };\n}\n\n/**\n * Mutable state object for accumulating data during stream processing.\n *\n * As streaming chunks arrive, this state object accumulates content,\n * tool calls, and metadata. Once the stream completes (indicated by\n * `done: true`), this state is used to build the final LLMResponse.\n */\nexport interface StreamState {\n /** The model name from the stream. */\n model: string;\n /** Accumulated text content from all chunks. */\n content: string;\n /** Accumulated thinking/reasoning content (for models with think mode). */\n thinking: string;\n /** Tool calls extracted from the stream. */\n toolCalls: Array<{ name: string; args: Record<string, unknown> }>;\n /** The reason the generation stopped (stop, length, etc.). */\n doneReason: string | null;\n /** Number of tokens in the prompt evaluation. */\n promptEvalCount: number;\n /** Number of tokens generated in the response. */\n evalCount: number;\n /** Total generation duration in nanoseconds. */\n totalDuration: number;\n /** Whether we're still waiting for the first chunk. */\n isFirstChunk: boolean;\n /** ISO timestamp when the response was created. 
*/\n createdAt: string;\n}\n\n/**\n * Creates an initial empty stream state for accumulating streaming responses.\n *\n * @returns A fresh StreamState object with default values\n */\nexport function createStreamState(): StreamState {\n return {\n model: '',\n content: '',\n thinking: '',\n toolCalls: [],\n doneReason: null,\n promptEvalCount: 0,\n evalCount: 0,\n totalDuration: 0,\n isFirstChunk: true,\n createdAt: '',\n };\n}\n\n/**\n * Transforms an Ollama stream chunk into UPP StreamEvents.\n *\n * Each Ollama chunk may produce zero or more UPP events:\n * - First chunk: `message_start` event\n * - Content chunks: `text_delta` events\n * - Thinking chunks: `reasoning_delta` events\n * - Tool call chunks: `tool_call_delta` events\n * - Final chunk (done=true): `message_stop` event\n *\n * The function also updates the provided state object with accumulated\n * content and metadata for building the final response.\n *\n * @param chunk - The raw Ollama stream chunk\n * @param state - Mutable state object to accumulate data\n * @returns Array of UPP stream events (may be empty)\n */\nexport function transformStreamChunk(\n chunk: OllamaStreamChunk,\n state: StreamState\n): StreamEvent[] {\n const events: StreamEvent[] = [];\n\n // First chunk - emit message start\n if (state.isFirstChunk) {\n state.model = chunk.model;\n state.createdAt = chunk.created_at;\n events.push({ type: 'message_start', index: 0, delta: {} });\n state.isFirstChunk = false;\n }\n\n // Process message content\n if (chunk.message) {\n // Text content delta\n if (chunk.message.content) {\n state.content += chunk.message.content;\n events.push({\n type: 'text_delta',\n index: 0,\n delta: { text: chunk.message.content },\n });\n }\n\n // Thinking content delta\n if (chunk.message.thinking) {\n state.thinking += chunk.message.thinking;\n events.push({\n type: 'reasoning_delta',\n index: 0,\n delta: { text: chunk.message.thinking },\n });\n }\n\n // Tool calls (typically come in final chunk)\n if (chunk.message.tool_calls) {\n for (const call of chunk.message.tool_calls) {\n state.toolCalls.push({\n name: call.function.name,\n args: call.function.arguments,\n });\n events.push({\n type: 'tool_call_delta',\n index: state.toolCalls.length - 1,\n delta: {\n toolCallId: call.function.name,\n toolName: call.function.name,\n argumentsJson: JSON.stringify(call.function.arguments),\n },\n });\n }\n }\n }\n\n // Final chunk with metrics\n if (chunk.done) {\n state.doneReason = chunk.done_reason ?? null;\n state.promptEvalCount = chunk.prompt_eval_count ?? 0;\n state.evalCount = chunk.eval_count ?? 0;\n state.totalDuration = chunk.total_duration ?? 
0;\n events.push({ type: 'message_stop', index: 0, delta: {} });\n }\n\n return events;\n}\n\n/**\n * Builds a complete LLMResponse from accumulated stream state.\n *\n * Called after the stream completes to construct the final response object\n * with all accumulated content, tool calls, usage statistics, and metadata.\n *\n * For structured output, attempts to parse the accumulated content as JSON\n * and stores it in the `data` field if successful.\n *\n * @param state - The accumulated stream state\n * @returns The complete UPP LLM response\n */\nexport function buildResponseFromState(state: StreamState): LLMResponse {\n const textContent: TextBlock[] = [];\n const toolCalls: ToolCall[] = [];\n let structuredData: unknown;\n\n if (state.content) {\n textContent.push({ type: 'text', text: state.content });\n\n // Try to parse as JSON for structured output\n try {\n structuredData = JSON.parse(state.content);\n } catch {\n // Not valid JSON - that's fine\n }\n }\n\n for (const tc of state.toolCalls) {\n toolCalls.push({\n toolCallId: tc.name,\n toolName: tc.name,\n arguments: tc.args,\n });\n }\n\n const message = new AssistantMessage(\n textContent,\n toolCalls.length > 0 ? toolCalls : undefined,\n {\n metadata: {\n ollama: {\n model: state.model,\n created_at: state.createdAt,\n done_reason: state.doneReason,\n thinking: state.thinking || undefined,\n total_duration: state.totalDuration,\n },\n },\n }\n );\n\n // Ollama doesn't support API-level prompt caching\n const usage: TokenUsage = {\n inputTokens: state.promptEvalCount,\n outputTokens: state.evalCount,\n totalTokens: state.promptEvalCount + state.evalCount,\n cacheReadTokens: 0,\n cacheWriteTokens: 0,\n };\n\n // Map done_reason to standard stop reason\n let stopReason = 'end_turn';\n if (state.doneReason === 'length') {\n stopReason = 'max_tokens';\n } else if (toolCalls.length > 0) {\n stopReason = 'tool_use';\n }\n\n return {\n message,\n usage,\n stopReason,\n data: structuredData,\n };\n}\n","/**\n * @fileoverview Ollama LLM handler implementation.\n *\n * This module provides the core LLM functionality for the Ollama provider,\n * including both synchronous completion and streaming capabilities. It\n * communicates with Ollama's native `/api/chat` endpoint.\n *\n * @module providers/ollama/llm\n */\n\nimport type {\n LLMHandler,\n BoundLLMModel,\n LLMRequest,\n LLMResponse,\n LLMStreamResult,\n LLMCapabilities,\n} from '../../types/llm.ts';\nimport type { StreamEvent } from '../../types/stream.ts';\nimport type { LLMProvider } from '../../types/provider.ts';\nimport { UPPError } from '../../types/errors.ts';\nimport { resolveApiKey } from '../../http/keys.ts';\nimport { doFetch, doStreamFetch } from '../../http/fetch.ts';\nimport { normalizeHttpError } from '../../http/errors.ts';\nimport type { OllamaLLMParams, OllamaResponse, OllamaStreamChunk } from './types.ts';\nimport {\n transformRequest,\n transformResponse,\n transformStreamChunk,\n createStreamState,\n buildResponseFromState,\n} from './transform.ts';\n\n/** Default Ollama server URL for local installations. */\nconst OLLAMA_DEFAULT_URL = 'http://localhost:11434';\n\n/**\n * Capability flags for the Ollama provider.\n *\n * **Important:** Tool calling is intentionally disabled. Ollama recommends\n * using their OpenAI-compatible API (`/v1/chat/completions`) for function\n * calling. 
To use tools with Ollama, configure the OpenAI provider with\n * `baseUrl` pointed to your Ollama instance.\n */\nconst OLLAMA_CAPABILITIES: LLMCapabilities = {\n streaming: true,\n tools: false,\n structuredOutput: true,\n imageInput: true,\n videoInput: false,\n audioInput: false,\n};\n\n/**\n * Parses Ollama's newline-delimited JSON (NDJSON) stream format.\n *\n * Ollama uses NDJSON where each line is a complete JSON object representing\n * a streaming chunk. This generator reads the stream incrementally, buffering\n * incomplete lines and yielding parsed chunks as they become available.\n *\n * @param body - The raw ReadableStream from the fetch response\n * @yields Parsed Ollama stream chunks\n */\nasync function* parseOllamaStream(\n body: ReadableStream<Uint8Array>\n): AsyncGenerator<OllamaStreamChunk, void, unknown> {\n const reader = body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n\n // Process complete lines (Ollama uses newline-delimited JSON)\n const lines = buffer.split('\\n');\n buffer = lines.pop() ?? ''; // Keep incomplete line in buffer\n\n for (const line of lines) {\n const trimmed = line.trim();\n if (!trimmed) continue;\n\n try {\n const chunk = JSON.parse(trimmed) as OllamaStreamChunk;\n yield chunk;\n } catch {\n // Skip invalid JSON lines\n }\n }\n }\n\n // Process any remaining buffer\n if (buffer.trim()) {\n try {\n const chunk = JSON.parse(buffer.trim()) as OllamaStreamChunk;\n yield chunk;\n } catch {\n // Skip invalid JSON\n }\n }\n } finally {\n reader.releaseLock();\n }\n}\n\n/**\n * Creates the Ollama LLM handler for chat completions.\n *\n * This factory function creates an LLM handler that communicates with\n * Ollama's `/api/chat` endpoint. The handler supports both synchronous\n * completions and streaming responses.\n *\n * The handler is designed to be used with `createProvider()` which injects\n * the provider reference after construction.\n *\n * @returns An LLM handler configured for Ollama\n *\n * @example\n * ```typescript\n * const handler = createLLMHandler();\n * const provider = createProvider({\n * name: 'ollama',\n * version: '1.0.0',\n * modalities: { llm: handler }\n * });\n * ```\n */\nexport function createLLMHandler(): LLMHandler<OllamaLLMParams> {\n let providerRef: LLMProvider<OllamaLLMParams> | null = null;\n\n return {\n _setProvider(provider: LLMProvider<OllamaLLMParams>) {\n providerRef = provider;\n },\n\n bind(modelId: string): BoundLLMModel<OllamaLLMParams> {\n // Use the injected provider reference (set by createProvider)\n if (!providerRef) {\n throw new UPPError(\n 'Provider reference not set. Handler must be used with createProvider().',\n 'INVALID_REQUEST',\n 'ollama',\n 'llm'\n );\n }\n\n const model: BoundLLMModel<OllamaLLMParams> = {\n modelId,\n capabilities: OLLAMA_CAPABILITIES,\n\n get provider(): LLMProvider<OllamaLLMParams> {\n return providerRef!;\n },\n\n async complete(request: LLMRequest<OllamaLLMParams>): Promise<LLMResponse> {\n // Ollama doesn't require an API key by default, but may use one for auth\n let apiKey: string | undefined;\n try {\n apiKey = await resolveApiKey(\n request.config,\n 'OLLAMA_API_KEY',\n 'ollama',\n 'llm'\n );\n } catch {\n // API key is optional for Ollama\n }\n\n const baseUrl = request.config.baseUrl ?? 
OLLAMA_DEFAULT_URL;\n const url = `${baseUrl}/api/chat`;\n const body = transformRequest(request, modelId);\n body.stream = false;\n\n const headers: Record<string, string> = {\n 'Content-Type': 'application/json',\n };\n\n if (apiKey) {\n headers['Authorization'] = `Bearer ${apiKey}`;\n }\n\n if (request.config.headers) {\n for (const [key, value] of Object.entries(request.config.headers)) {\n if (value !== undefined) {\n headers[key] = value;\n }\n }\n }\n\n const response = await doFetch(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: request.signal,\n },\n request.config,\n 'ollama',\n 'llm'\n );\n\n const data = (await response.json()) as OllamaResponse;\n return transformResponse(data);\n },\n\n stream(request: LLMRequest<OllamaLLMParams>): LLMStreamResult {\n const state = createStreamState();\n let responseResolve: (value: LLMResponse) => void;\n let responseReject: (error: Error) => void;\n\n const responsePromise = new Promise<LLMResponse>((resolve, reject) => {\n responseResolve = resolve;\n responseReject = reject;\n });\n\n async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {\n try {\n // Ollama doesn't require an API key by default\n let apiKey: string | undefined;\n try {\n apiKey = await resolveApiKey(\n request.config,\n 'OLLAMA_API_KEY',\n 'ollama',\n 'llm'\n );\n } catch {\n // API key is optional for Ollama\n }\n\n const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;\n const url = `${baseUrl}/api/chat`;\n const body = transformRequest(request, modelId);\n body.stream = true;\n\n const headers: Record<string, string> = {\n 'Content-Type': 'application/json',\n };\n\n if (apiKey) {\n headers['Authorization'] = `Bearer ${apiKey}`;\n }\n\n if (request.config.headers) {\n for (const [key, value] of Object.entries(request.config.headers)) {\n if (value !== undefined) {\n headers[key] = value;\n }\n }\n }\n\n const response = await doStreamFetch(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: request.signal,\n },\n request.config,\n 'ollama',\n 'llm'\n );\n\n if (!response.ok) {\n const error = await normalizeHttpError(response, 'ollama', 'llm');\n responseReject(error);\n throw error;\n }\n\n if (!response.body) {\n const error = new UPPError(\n 'No response body for streaming request',\n 'PROVIDER_ERROR',\n 'ollama',\n 'llm'\n );\n responseReject(error);\n throw error;\n }\n\n // Parse Ollama's newline-delimited JSON stream\n for await (const chunk of parseOllamaStream(response.body)) {\n // Check for error in chunk\n if ('error' in chunk && typeof (chunk as Record<string, unknown>).error === 'string') {\n const error = new UPPError(\n (chunk as Record<string, unknown>).error as string,\n 'PROVIDER_ERROR',\n 'ollama',\n 'llm'\n );\n responseReject(error);\n throw error;\n }\n\n const events = transformStreamChunk(chunk, state);\n for (const event of events) {\n yield event;\n }\n }\n\n // Build final response\n responseResolve(buildResponseFromState(state));\n } catch (error) {\n responseReject(error as Error);\n throw error;\n }\n }\n\n return {\n [Symbol.asyncIterator]() {\n return generateEvents();\n },\n response: responsePromise,\n };\n },\n };\n\n return model;\n },\n };\n}\n","/**\n * @fileoverview Ollama Embeddings API Handler\n *\n * This module implements the embedding handler for Ollama's local embeddings API.\n * Supports various embedding models including nomic-embed-text, mxbai-embed-large,\n * qwen3-embedding, and others.\n *\n * @see {@link 
https://github.com/ollama/ollama/blob/main/docs/api.md#embeddings Ollama Embeddings API Reference}\n * @module providers/ollama/embed\n */\n\nimport type {\n EmbeddingHandler,\n BoundEmbeddingModel,\n EmbeddingRequest,\n EmbeddingResponse,\n EmbeddingProvider,\n} from '../../types/provider.ts';\nimport { UPPError } from '../../types/errors.ts';\nimport { doFetch } from '../../http/fetch.ts';\n\n/** Default URL for Ollama's local API */\nconst OLLAMA_DEFAULT_URL = 'http://localhost:11434';\n\n/**\n * Ollama embedding parameters.\n * Passed through to the API.\n */\nexport interface OllamaEmbedParams {\n /** Truncates the end of each input to fit within context length (default: true) */\n truncate?: boolean;\n /** Controls how long the model stays loaded in memory (e.g., '5m', '1h') */\n keep_alive?: string;\n /** Additional model options */\n options?: Record<string, unknown>;\n}\n\n/**\n * Ollama embeddings API response structure.\n */\ninterface OllamaEmbeddingsResponse {\n model: string;\n embeddings: number[][];\n total_duration?: number;\n load_duration?: number;\n prompt_eval_count?: number;\n}\n\n/**\n * Creates an embedding handler for Ollama's local Embeddings API.\n *\n * @returns An embedding handler configured for Ollama\n *\n * @example\n * ```typescript\n * const handler = createEmbeddingHandler();\n * const model = handler.bind('nomic-embed-text');\n *\n * const response = await model.embed({\n * inputs: ['Hello world'],\n * config: { baseUrl: 'http://localhost:11434' }\n * });\n * ```\n */\nexport function createEmbeddingHandler(): EmbeddingHandler<OllamaEmbedParams> {\n let providerRef: EmbeddingProvider<OllamaEmbedParams> | null = null;\n\n return {\n supportedInputs: ['text'],\n\n _setProvider(provider: EmbeddingProvider<OllamaEmbedParams>) {\n providerRef = provider;\n },\n\n bind(modelId: string): BoundEmbeddingModel<OllamaEmbedParams> {\n if (!providerRef) {\n throw new UPPError(\n 'Provider reference not set. Handler must be used with createProvider().',\n 'INVALID_REQUEST',\n 'ollama',\n 'embedding'\n );\n }\n\n const model: BoundEmbeddingModel<OllamaEmbedParams> = {\n modelId,\n maxBatchSize: 512,\n maxInputLength: 8192,\n dimensions: 768, // Varies by model\n\n get provider(): EmbeddingProvider<OllamaEmbedParams> {\n return providerRef!;\n },\n\n async embed(request: EmbeddingRequest<OllamaEmbedParams>): Promise<EmbeddingResponse> {\n const baseUrl = request.config.baseUrl ?? 
OLLAMA_DEFAULT_URL;\n\n // Transform inputs to strings\n const inputTexts = request.inputs.map((input) => {\n if (typeof input === 'string') {\n return input;\n }\n if ('text' in input) {\n return input.text;\n }\n throw new UPPError(\n 'Ollama embeddings only support text input',\n 'INVALID_REQUEST',\n 'ollama',\n 'embedding'\n );\n });\n\n // Build request body\n const body: Record<string, unknown> = {\n model: modelId,\n input: inputTexts,\n };\n\n // Pass through Ollama-specific params\n if (request.params?.truncate !== undefined) {\n body.truncate = request.params.truncate;\n }\n if (request.params?.keep_alive !== undefined) {\n body.keep_alive = request.params.keep_alive;\n }\n if (request.params?.options !== undefined) {\n body.options = request.params.options;\n }\n\n const url = `${baseUrl}/api/embed`;\n\n const headers: Record<string, string> = {\n 'Content-Type': 'application/json',\n };\n\n // Merge custom headers\n if (request.config.headers) {\n for (const [key, value] of Object.entries(request.config.headers)) {\n if (value !== undefined) {\n headers[key] = value;\n }\n }\n }\n\n const response = await doFetch(url, {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: request.signal,\n }, request.config, 'ollama', 'embedding');\n\n const data = await response.json() as OllamaEmbeddingsResponse;\n\n // Return EmbeddingResponse\n return {\n embeddings: data.embeddings.map((vec, index) => ({\n vector: vec,\n index,\n })),\n usage: {\n totalTokens: data.prompt_eval_count ?? 0,\n },\n metadata: {\n totalDuration: data.total_duration,\n loadDuration: data.load_duration,\n },\n };\n },\n };\n\n return model;\n },\n };\n}\n","import { createProvider } from '../../core/provider.ts';\nimport { createLLMHandler } from './llm.ts';\nimport { createEmbeddingHandler } from './embed.ts';\n\n/**\n * Ollama provider for local LLM inference.\n *\n * Ollama runs models locally on your machine, eliminating the need for API keys\n * and external network calls. This makes it ideal for development, testing,\n * and privacy-sensitive applications.\n *\n * **Supported Models:**\n * - Llama 3.x (Meta's latest open-weight models)\n * - Mistral / Mixtral (Mistral AI's efficient models)\n * - Gemma (Google's lightweight models)\n * - Qwen (Alibaba's multilingual models)\n * - DeepSeek (DeepSeek's code and reasoning models)\n * - Phi (Microsoft's small language models)\n * - CodeLlama (Code-specialized Llama variants)\n * - And many more from the Ollama model library\n *\n * **Prerequisites:**\n * 1. Install Ollama from https://ollama.ai\n * 2. Pull a model: `ollama pull llama3.2`\n * 3. Ensure Ollama is running (default: http://localhost:11434)\n *\n * **Note on Tool Calling:**\n * For tool/function calling, Ollama recommends using their OpenAI-compatible\n * API endpoint. Use the OpenAI provider with `baseUrl` pointed to Ollama instead.\n *\n * @example Basic usage with local model\n * ```typescript\n * import { llm } from 'provider-protocol';\n * import { ollama } from 'provider-protocol/ollama';\n *\n * const model = llm(ollama('llama3.2'));\n * const result = await model.complete({\n * messages: [{ role: 'user', content: 'Hello!' 
}]\n * });\n * ```\n *\n * @example Custom Ollama server URL\n * ```typescript\n * import { llm } from 'provider-protocol';\n * import { ollama } from 'provider-protocol/ollama';\n *\n * const model = llm(ollama('llama3.2'), {\n * baseUrl: 'http://my-ollama-server:11434',\n * });\n * ```\n *\n * @example Streaming responses\n * ```typescript\n * const model = llm(ollama('llama3.2'));\n * const stream = model.stream({\n * messages: [{ role: 'user', content: 'Write a poem' }]\n * });\n *\n * for await (const event of stream) {\n * if (event.type === 'text_delta') {\n * process.stdout.write(event.delta.text);\n * }\n * }\n * ```\n *\n * @example Using model parameters\n * ```typescript\n * const model = llm(ollama('llama3.2'));\n * const result = await model.complete({\n * messages: [{ role: 'user', content: 'Be creative!' }],\n * params: {\n * temperature: 0.9,\n * top_p: 0.95,\n * num_predict: 500\n * }\n * });\n * ```\n *\n * @see {@link OllamaLLMParams} for available model parameters\n */\nexport const ollama = createProvider({\n name: 'ollama',\n version: '1.0.0',\n modalities: {\n llm: createLLMHandler(),\n embedding: createEmbeddingHandler(),\n },\n});\n\nexport type { OllamaLLMParams, OllamaHeaders } from './types.ts';\nexport type { OllamaEmbedParams } from './embed.ts';\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAqEO,SAAS,iBACd,SACA,SACe;AACf,QAAM,SAAU,QAAQ,UAAU,CAAC;AAGnC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,GAAG;AAAA,EACL,IAAI;AAGJ,QAAM,gBAA+B;AAAA,IACnC,OAAO;AAAA,IACP,UAAU,kBAAkB,QAAQ,UAAU,QAAQ,MAAM;AAAA,EAC9D;AAGA,MAAI,eAAe,OAAW,eAAc,aAAa;AACzD,MAAI,UAAU,OAAW,eAAc,QAAQ;AAC/C,MAAI,aAAa,OAAW,eAAc,WAAW;AACrD,MAAI,iBAAiB,OAAW,eAAc,eAAe;AAG7D,MAAI,OAAO,KAAK,aAAa,EAAE,SAAS,GAAG;AACzC,kBAAc,UAAU;AAAA,EAC1B;AAGA,MAAI,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AAC7C,kBAAc,QAAQ,QAAQ,MAAM,IAAI,aAAa;AAAA,EACvD;AAGA,MAAI,QAAQ,WAAW;AACrB,kBAAc,SAAS,QAAQ;AAAA,EACjC;AAEA,SAAO;AACT;AAMA,SAAS,gBAAgB,QAA4D;AACnF,MAAI,CAAC,OAAQ,QAAO;AACpB,MAAI,OAAO,WAAW,SAAU,QAAO;AACvC,SAAQ,OACL,IAAI,WAAS,MAAM,QAAQ,EAAE,EAC7B,OAAO,UAAQ,KAAK,SAAS,CAAC,EAC9B,KAAK,MAAM;AAChB;AAoBA,SAAS,kBAAkB,UAAqB,QAA8C;AAC5F,QAAM,iBAAkC,CAAC;AACzC,QAAM,mBAAmB,gBAAgB,MAAM;AAG/C,MAAI,kBAAkB;AACpB,mBAAe,KAAK;AAAA,MAClB,MAAM;AAAA,MACN,SAAS;AAAA,IACX,CAAC;AAAA,EACH;AAEA,aAAW,OAAO,UAAU;AAC1B,QAAI,cAAc,GAAG,GAAG;AACtB,YAAM,cAAwB,CAAC;AAC/B,YAAM,SAAmB,CAAC;AAE1B,iBAAW,SAAS,IAAI,SAAS;AAC/B,YAAI,MAAM,SAAS,QAAQ;AACzB,sBAAY,KAAK,MAAM,IAAI;AAAA,QAC7B,WAAW,MAAM,SAAS,SAAS;AACjC,gBAAM,aAAa;AACnB,cAAI,WAAW,OAAO,SAAS,UAAU;AACvC,mBAAO,KAAK,WAAW,OAAO,IAAI;AAAA,UACpC,WAAW,WAAW,OAAO,SAAS,SAAS;AAE7C,kBAAM,SAAS;AAAA,cACb,MAAM,KAAK,WAAW,OAAO,IAAI,EAC9B,IAAI,CAAC,MAAM,OAAO,aAAa,CAAC,CAAC,EACjC,KAAK,EAAE;AAAA,YACZ;AACA,mBAAO,KAAK,MAAM;AAAA,UACpB,WAAW,WAAW,OAAO,SAAS,OAAO;AAG3C,wBAAY,KAAK,WAAW,WAAW,OAAO,GAAG,GAAG;AAAA,UACtD;AAAA,QACF;AAAA,MACF;AAEA,YAAM,UAAyB;AAAA,QAC7B,MAAM;AAAA,QACN,SAAS,YAAY,KAAK,IAAI;AAAA,MAChC;AAEA,UAAI,OAAO,SAAS,GAAG;AACrB,gBAAQ,SAAS;AAAA,MACnB;AAEA,qBAAe,KAAK,OAAO;AAAA,IAC7B,WAAW,mBAAmB,GAAG,GAAG;AAClC,YAAM,cAAc,IAAI,QACrB,OAAO,CAAC,UAA8B,MAAM,SAAS,MAAM,EAC3D,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,IAAI;AAEZ,YAAM,UAAyB;AAAA,QAC7B,MAAM;AAAA,QACN,SAAS;AAAA,MACX;AAGA,UAAI,IAAI,aAAa,IAAI,UAAU,SAAS,GAAG;AAC7C,gBAAQ,aAAa,IAAI,UAAU,IAAI,CAAC,UAAU;AAAA,UAChD,UAAU;AAAA,YACR,MAAM,KAAK;AAAA,YACX,WAAW,KAAK;AAAA,UAClB;AAAA,QACF,EAAE;AAAA,MACJ;AAEA,qBAAe,KAAK,OAAO;AAAA,IAC7B,WAAW,oBAAoB,GAAG,GAAG;AAEnC,iBAAW,UAAU,IAAI,SAAS;AAChC,uBAAe,KAAK;AAAA,UAClB,MAAM;AAAA,UACN,WAAW,OAAO;AAAA;AAAA,UAClB,SACE,OAAO,OAAO,WAAW,WACrB,OAAO,SACP,KAAK,UAAU,OAAO,MAAM;AAAA,QACpC,
CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAWA,SAAS,cAAc,MAAwB;AAC7C,SAAO;AAAA,IACL,MAAM;AAAA,IACN,UAAU;AAAA,MACR,MAAM,KAAK;AAAA,MACX,aAAa,KAAK;AAAA,MAClB,YAAY;AAAA,QACV,MAAM;AAAA,QACN,YAAY,KAAK,WAAW;AAAA,QAC5B,UAAU,KAAK,WAAW;AAAA,MAC5B;AAAA,IACF;AAAA,EACF;AACF;AAkBO,SAAS,kBAAkB,MAAmC;AACnE,QAAM,cAA2B,CAAC;AAClC,QAAM,YAAwB,CAAC;AAC/B,MAAI;AAGJ,MAAI,KAAK,QAAQ,SAAS;AACxB,gBAAY,KAAK,EAAE,MAAM,QAAQ,MAAM,KAAK,QAAQ,QAAQ,CAAC;AAG7D,QAAI;AACF,uBAAiB,KAAK,MAAM,KAAK,QAAQ,OAAO;AAAA,IAClD,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,MAAI,KAAK,QAAQ,YAAY;AAC3B,eAAW,QAAQ,KAAK,QAAQ,YAAY;AAC1C,gBAAU,KAAK;AAAA,QACb,YAAY,KAAK,SAAS;AAAA;AAAA,QAC1B,UAAU,KAAK,SAAS;AAAA,QACxB,WAAW,KAAK,SAAS;AAAA,MAC3B,CAAC;AAAA,IACH;AAAA,EACF;AAEA,QAAM,UAAU,IAAI;AAAA,IAClB;AAAA,IACA,UAAU,SAAS,IAAI,YAAY;AAAA,IACnC;AAAA,MACE,UAAU;AAAA,QACR,QAAQ;AAAA,UACN,OAAO,KAAK;AAAA,UACZ,YAAY,KAAK;AAAA,UACjB,aAAa,KAAK;AAAA,UAClB,UAAU,KAAK,QAAQ;AAAA,UACvB,gBAAgB,KAAK;AAAA,UACrB,eAAe,KAAK;AAAA,UACpB,sBAAsB,KAAK;AAAA,UAC3B,eAAe,KAAK;AAAA,UACpB,UAAU,KAAK;AAAA,QACjB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,QAAM,QAAoB;AAAA,IACxB,aAAa,KAAK,qBAAqB;AAAA,IACvC,cAAc,KAAK,cAAc;AAAA,IACjC,cAAc,KAAK,qBAAqB,MAAM,KAAK,cAAc;AAAA,IACjE,iBAAiB;AAAA,IACjB,kBAAkB;AAAA,EACpB;AAGA,MAAI,aAAa;AACjB,MAAI,KAAK,gBAAgB,UAAU;AACjC,iBAAa;AAAA,EACf,WAAW,KAAK,gBAAgB,QAAQ;AACtC,iBAAa;AAAA,EACf,WAAW,UAAU,SAAS,GAAG;AAC/B,iBAAa;AAAA,EACf;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM;AAAA,EACR;AACF;AAqCO,SAAS,oBAAiC;AAC/C,SAAO;AAAA,IACL,OAAO;AAAA,IACP,SAAS;AAAA,IACT,UAAU;AAAA,IACV,WAAW,CAAC;AAAA,IACZ,YAAY;AAAA,IACZ,iBAAiB;AAAA,IACjB,WAAW;AAAA,IACX,eAAe;AAAA,IACf,cAAc;AAAA,IACd,WAAW;AAAA,EACb;AACF;AAmBO,SAAS,qBACd,OACA,OACe;AACf,QAAM,SAAwB,CAAC;AAG/B,MAAI,MAAM,cAAc;AACtB,UAAM,QAAQ,MAAM;AACpB,UAAM,YAAY,MAAM;AACxB,WAAO,KAAK,EAAE,MAAM,iBAAiB,OAAO,GAAG,OAAO,CAAC,EAAE,CAAC;AAC1D,UAAM,eAAe;AAAA,EACvB;AAGA,MAAI,MAAM,SAAS;AAEjB,QAAI,MAAM,QAAQ,SAAS;AACzB,YAAM,WAAW,MAAM,QAAQ;AAC/B,aAAO,KAAK;AAAA,QACV,MAAM;AAAA,QACN,OAAO;AAAA,QACP,OAAO,EAAE,MAAM,MAAM,QAAQ,QAAQ;AAAA,MACvC,CAAC;AAAA,IACH;AAGA,QAAI,MAAM,QAAQ,UAAU;AAC1B,YAAM,YAAY,MAAM,QAAQ;AAChC,aAAO,KAAK;AAAA,QACV,MAAM;AAAA,QACN,OAAO;AAAA,QACP,OAAO,EAAE,MAAM,MAAM,QAAQ,SAAS;AAAA,MACxC,CAAC;AAAA,IACH;AAGA,QAAI,MAAM,QAAQ,YAAY;AAC5B,iBAAW,QAAQ,MAAM,QAAQ,YAAY;AAC3C,cAAM,UAAU,KAAK;AAAA,UACnB,MAAM,KAAK,SAAS;AAAA,UACpB,MAAM,KAAK,SAAS;AAAA,QACtB,CAAC;AACD,eAAO,KAAK;AAAA,UACV,MAAM;AAAA,UACN,OAAO,MAAM,UAAU,SAAS;AAAA,UAChC,OAAO;AAAA,YACL,YAAY,KAAK,SAAS;AAAA,YAC1B,UAAU,KAAK,SAAS;AAAA,YACxB,eAAe,KAAK,UAAU,KAAK,SAAS,SAAS;AAAA,UACvD;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAGA,MAAI,MAAM,MAAM;AACd,UAAM,aAAa,MAAM,eAAe;AACxC,UAAM,kBAAkB,MAAM,qBAAqB;AACnD,UAAM,YAAY,MAAM,cAAc;AACtC,UAAM,gBAAgB,MAAM,kBAAkB;AAC9C,WAAO,KAAK,EAAE,MAAM,gBAAgB,OAAO,GAAG,OAAO,CAAC,EAAE,CAAC;AAAA,EAC3D;AAEA,SAAO;AACT;AAcO,SAAS,uBAAuB,OAAiC;AACtE,QAAM,cAA2B,CAAC;AAClC,QAAM,YAAwB,CAAC;AAC/B,MAAI;AAEJ,MAAI,MAAM,SAAS;AACjB,gBAAY,KAAK,EAAE,MAAM,QAAQ,MAAM,MAAM,QAAQ,CAAC;AAGtD,QAAI;AACF,uBAAiB,KAAK,MAAM,MAAM,OAAO;AAAA,IAC3C,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,aAAW,MAAM,MAAM,WAAW;AAChC,cAAU,KAAK;AAAA,MACb,YAAY,GAAG;AAAA,MACf,UAAU,GAAG;AAAA,MACb,WAAW,GAAG;AAAA,IAChB,CAAC;AAAA,EACH;AAEA,QAAM,UAAU,IAAI;AAAA,IAClB;AAAA,IACA,UAAU,SAAS,IAAI,YAAY;AAAA,IACnC;AAAA,MACE,UAAU;AAAA,QACR,QAAQ;AAAA,UACN,OAAO,MAAM;AAAA,UACb,YAAY,MAAM;AAAA,UAClB,aAAa,MAAM;AAAA,UACnB,UAAU,MAAM,YAAY;AAAA,UAC5B,gBAAgB,MAAM;AAAA,QACxB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,QAAM,QAAoB;AAAA,IACxB,aAAa,MAAM;AAAA,IACnB,cAAc,MAAM;AAAA,IACpB,aAAa,MAAM,kBAAkB,MAAM;AAAA,IAC3C,iBAAiB;AAAA,IACjB,kBAAkB;AAAA,EACpB;AAGA,MAAI,aAAa;AACjB,MAAI,MAAM,eAAe,UAAU;A
ACjC,iBAAa;AAAA,EACf,WAAW,UAAU,SAAS,GAAG;AAC/B,iBAAa;AAAA,EACf;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM;AAAA,EACR;AACF;;;AC9gBA,IAAM,qBAAqB;AAU3B,IAAM,sBAAuC;AAAA,EAC3C,WAAW;AAAA,EACX,OAAO;AAAA,EACP,kBAAkB;AAAA,EAClB,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,YAAY;AACd;AAYA,gBAAgB,kBACd,MACkD;AAClD,QAAM,SAAS,KAAK,UAAU;AAC9B,QAAM,UAAU,IAAI,YAAY;AAChC,MAAI,SAAS;AAEb,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,UAAI,KAAM;AAEV,gBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAGhD,YAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,eAAS,MAAM,IAAI,KAAK;AAExB,iBAAW,QAAQ,OAAO;AACxB,cAAM,UAAU,KAAK,KAAK;AAC1B,YAAI,CAAC,QAAS;AAEd,YAAI;AACF,gBAAM,QAAQ,KAAK,MAAM,OAAO;AAChC,gBAAM;AAAA,QACR,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AAGA,QAAI,OAAO,KAAK,GAAG;AACjB,UAAI;AACF,cAAM,QAAQ,KAAK,MAAM,OAAO,KAAK,CAAC;AACtC,cAAM;AAAA,MACR,QAAQ;AAAA,MAER;AAAA,IACF;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;AAwBO,SAAS,mBAAgD;AAC9D,MAAI,cAAmD;AAEvD,SAAO;AAAA,IACL,aAAa,UAAwC;AACnD,oBAAc;AAAA,IAChB;AAAA,IAEA,KAAK,SAAiD;AAEpD,UAAI,CAAC,aAAa;AAChB,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,QAAwC;AAAA,QAC5C;AAAA,QACA,cAAc;AAAA,QAEd,IAAI,WAAyC;AAC3C,iBAAO;AAAA,QACT;AAAA,QAEA,MAAM,SAAS,SAA4D;AAEzE,cAAI;AACJ,cAAI;AACF,qBAAS,MAAM;AAAA,cACb,QAAQ;AAAA,cACR;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF,QAAQ;AAAA,UAER;AAEA,gBAAM,UAAU,QAAQ,OAAO,WAAW;AAC1C,gBAAM,MAAM,GAAG,OAAO;AACtB,gBAAM,OAAO,iBAAiB,SAAS,OAAO;AAC9C,eAAK,SAAS;AAEd,gBAAM,UAAkC;AAAA,YACtC,gBAAgB;AAAA,UAClB;AAEA,cAAI,QAAQ;AACV,oBAAQ,eAAe,IAAI,UAAU,MAAM;AAAA,UAC7C;AAEA,cAAI,QAAQ,OAAO,SAAS;AAC1B,uBAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,QAAQ,OAAO,OAAO,GAAG;AACjE,kBAAI,UAAU,QAAW;AACvB,wBAAQ,GAAG,IAAI;AAAA,cACjB;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,WAAW,MAAM;AAAA,YACrB;AAAA,YACA;AAAA,cACE,QAAQ;AAAA,cACR;AAAA,cACA,MAAM,KAAK,UAAU,IAAI;AAAA,cACzB,QAAQ,QAAQ;AAAA,YAClB;AAAA,YACA,QAAQ;AAAA,YACR;AAAA,YACA;AAAA,UACF;AAEA,gBAAM,OAAQ,MAAM,SAAS,KAAK;AAClC,iBAAO,kBAAkB,IAAI;AAAA,QAC/B;AAAA,QAEA,OAAO,SAAuD;AAC5D,gBAAM,QAAQ,kBAAkB;AAChC,cAAI;AACJ,cAAI;AAEJ,gBAAM,kBAAkB,IAAI,QAAqB,CAAC,SAAS,WAAW;AACpE,8BAAkB;AAClB,6BAAiB;AAAA,UACnB,CAAC;AAED,0BAAgB,iBAA6D;AAC3E,gBAAI;AAEF,kBAAI;AACJ,kBAAI;AACF,yBAAS,MAAM;AAAA,kBACb,QAAQ;AAAA,kBACR;AAAA,kBACA;AAAA,kBACA;AAAA,gBACF;AAAA,cACF,QAAQ;AAAA,cAER;AAEA,oBAAM,UAAU,QAAQ,OAAO,WAAW;AAC1C,oBAAM,MAAM,GAAG,OAAO;AACtB,oBAAM,OAAO,iBAAiB,SAAS,OAAO;AAC9C,mBAAK,SAAS;AAEd,oBAAM,UAAkC;AAAA,gBACtC,gBAAgB;AAAA,cAClB;AAEA,kBAAI,QAAQ;AACV,wBAAQ,eAAe,IAAI,UAAU,MAAM;AAAA,cAC7C;AAEA,kBAAI,QAAQ,OAAO,SAAS;AAC1B,2BAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,QAAQ,OAAO,OAAO,GAAG;AACjE,sBAAI,UAAU,QAAW;AACvB,4BAAQ,GAAG,IAAI;AAAA,kBACjB;AAAA,gBACF;AAAA,cACF;AAEA,oBAAM,WAAW,MAAM;AAAA,gBACrB;AAAA,gBACA;AAAA,kBACE,QAAQ;AAAA,kBACR;AAAA,kBACA,MAAM,KAAK,UAAU,IAAI;AAAA,kBACzB,QAAQ,QAAQ;AAAA,gBAClB;AAAA,gBACA,QAAQ;AAAA,gBACR;AAAA,gBACA;AAAA,cACF;AAEA,kBAAI,CAAC,SAAS,IAAI;AAChB,sBAAM,QAAQ,MAAM,mBAAmB,UAAU,UAAU,KAAK;AAChE,+BAAe,KAAK;AACpB,sBAAM;AAAA,cACR;AAEA,kBAAI,CAAC,SAAS,MAAM;AAClB,sBAAM,QAAQ,IAAI;AAAA,kBAChB;AAAA,kBACA;AAAA,kBACA;AAAA,kBACA;AAAA,gBACF;AACA,+BAAe,KAAK;AACpB,sBAAM;AAAA,cACR;AAGA,+BAAiB,SAAS,kBAAkB,SAAS,IAAI,GAAG;AAE1D,oBAAI,WAAW,SAAS,OAAQ,MAAkC,UAAU,UAAU;AACpF,wBAAM,QAAQ,IAAI;AAAA,oBACf,MAAkC;AAAA,oBACnC;AAAA,oBACA;AAAA,oBACA;AAAA,kBACF;AACA,iCAAe,KAAK;AACpB,wBAAM;AAAA,gBACR;AAEA,sBAAM,SAAS,qBAAqB,OAAO,KAAK;AAChD,2BAAW,SAAS,QAAQ;AAC1B,wBAAM;AAAA,gBACR;AAAA,cACF;AAGA,8BAAgB,uBAAuB,KAAK,CAAC;AAAA,YAC/C,SAAS,OAAO;AACd,6BAAe,KAAc;AAC7B,oBAAM;AAAA,YACR;AAAA,UACF;AAEA,iBAAO;AAAA,YACL,CAAC,OAAO,aAAa,IAAI;AACvB,qBAAO,eAAe;AAAA,YACxB;AAAA,YACA,UAAU;AAAA,UACZ;AAAA,QACF;AAAA
,MACF;AAEA,aAAO;AAAA,IACT;AAAA,EACF;AACF;;;AC/SA,IAAMA,sBAAqB;AA0CpB,SAAS,yBAA8D;AAC5E,MAAI,cAA2D;AAE/D,SAAO;AAAA,IACL,iBAAiB,CAAC,MAAM;AAAA,IAExB,aAAa,UAAgD;AAC3D,oBAAc;AAAA,IAChB;AAAA,IAEA,KAAK,SAAyD;AAC5D,UAAI,CAAC,aAAa;AAChB,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,QAAgD;AAAA,QACpD;AAAA,QACA,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,YAAY;AAAA;AAAA,QAEZ,IAAI,WAAiD;AACnD,iBAAO;AAAA,QACT;AAAA,QAEA,MAAM,MAAM,SAA0E;AACpF,gBAAM,UAAU,QAAQ,OAAO,WAAWA;AAG1C,gBAAM,aAAa,QAAQ,OAAO,IAAI,CAAC,UAAU;AAC/C,gBAAI,OAAO,UAAU,UAAU;AAC7B,qBAAO;AAAA,YACT;AACA,gBAAI,UAAU,OAAO;AACnB,qBAAO,MAAM;AAAA,YACf;AACA,kBAAM,IAAI;AAAA,cACR;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF,CAAC;AAGD,gBAAM,OAAgC;AAAA,YACpC,OAAO;AAAA,YACP,OAAO;AAAA,UACT;AAGA,cAAI,QAAQ,QAAQ,aAAa,QAAW;AAC1C,iBAAK,WAAW,QAAQ,OAAO;AAAA,UACjC;AACA,cAAI,QAAQ,QAAQ,eAAe,QAAW;AAC5C,iBAAK,aAAa,QAAQ,OAAO;AAAA,UACnC;AACA,cAAI,QAAQ,QAAQ,YAAY,QAAW;AACzC,iBAAK,UAAU,QAAQ,OAAO;AAAA,UAChC;AAEA,gBAAM,MAAM,GAAG,OAAO;AAEtB,gBAAM,UAAkC;AAAA,YACtC,gBAAgB;AAAA,UAClB;AAGA,cAAI,QAAQ,OAAO,SAAS;AAC1B,uBAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,QAAQ,OAAO,OAAO,GAAG;AACjE,kBAAI,UAAU,QAAW;AACvB,wBAAQ,GAAG,IAAI;AAAA,cACjB;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,WAAW,MAAM,QAAQ,KAAK;AAAA,YAClC,QAAQ;AAAA,YACR;AAAA,YACA,MAAM,KAAK,UAAU,IAAI;AAAA,YACzB,QAAQ,QAAQ;AAAA,UAClB,GAAG,QAAQ,QAAQ,UAAU,WAAW;AAExC,gBAAM,OAAO,MAAM,SAAS,KAAK;AAGjC,iBAAO;AAAA,YACL,YAAY,KAAK,WAAW,IAAI,CAAC,KAAK,WAAW;AAAA,cAC/C,QAAQ;AAAA,cACR;AAAA,YACF,EAAE;AAAA,YACF,OAAO;AAAA,cACL,aAAa,KAAK,qBAAqB;AAAA,YACzC;AAAA,YACA,UAAU;AAAA,cACR,eAAe,KAAK;AAAA,cACpB,cAAc,KAAK;AAAA,YACrB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAEA,aAAO;AAAA,IACT;AAAA,EACF;AACF;;;AC9FO,IAAM,SAAS,eAAe;AAAA,EACnC,MAAM;AAAA,EACN,SAAS;AAAA,EACT,YAAY;AAAA,IACV,KAAK,iBAAiB;AAAA,IACtB,WAAW,uBAAuB;AAAA,EACpC;AACF,CAAC;","names":["OLLAMA_DEFAULT_URL"]}
+
{"version":3,"sources":["../../src/providers/ollama/transform.ts","../../src/providers/ollama/llm.ts","../../src/providers/ollama/embed.ts","../../src/providers/ollama/index.ts"],"sourcesContent":["/**\n * @fileoverview Transformation utilities for Ollama provider.\n *\n * This module handles bidirectional transformation between the Unified Provider\n * Protocol (UPP) format and Ollama's native API format. It includes:\n *\n * - Request transformation (UPP to Ollama)\n * - Response transformation (Ollama to UPP)\n * - Stream chunk processing\n * - Message format conversion\n *\n * @module providers/ollama/transform\n */\n\nimport type { LLMRequest, LLMResponse } from '../../types/llm.ts';\nimport type { Message } from '../../types/messages.ts';\nimport type { StreamEvent } from '../../types/stream.ts';\nimport type { Tool, ToolCall } from '../../types/tool.ts';\nimport type { TokenUsage } from '../../types/turn.ts';\nimport type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';\nimport {\n AssistantMessage,\n isUserMessage,\n isAssistantMessage,\n isToolResultMessage,\n} from '../../types/messages.ts';\nimport type {\n OllamaLLMParams,\n OllamaRequest,\n OllamaMessage,\n OllamaTool,\n OllamaResponse,\n OllamaStreamChunk,\n OllamaToolCall,\n OllamaOptions,\n} from './types.ts';\n\n/**\n * Transforms a UPP LLM request into Ollama's native API format.\n *\n * This function handles the mapping between UPP's unified request structure\n * and Ollama's specific requirements, including:\n *\n * - Converting messages to Ollama's message format\n * - Mapping model parameters to Ollama's nested `options` structure\n * - Handling top-level parameters like `keep_alive` and `think`\n * - Converting tools to Ollama's function format\n * - Setting up structured output via the `format` field\n *\n * Parameters are spread to allow pass-through of any Ollama API fields,\n * enabling developers to use new API features without library updates.\n *\n * @typeParam TParams - The parameter type extending OllamaLLMParams\n * @param request - The UPP-format LLM request\n * @param modelId - The Ollama model identifier (e.g., 'llama3.2', 'mistral')\n * @returns The transformed Ollama API request body\n *\n * @example\n * ```typescript\n * const ollamaRequest = transformRequest(\n * {\n * messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],\n * config: {},\n * params: { temperature: 0.7 }\n * },\n * 'llama3.2'\n * );\n * ```\n */\nexport function transformRequest<TParams extends OllamaLLMParams>(\n request: LLMRequest<TParams>,\n modelId: string\n): OllamaRequest {\n const params = (request.params ?? 
{}) as OllamaLLMParams;\n\n // Extract top-level params vs options params\n const {\n keep_alive,\n think,\n logprobs,\n top_logprobs,\n ...optionsParams\n } = params;\n\n // Spread params to pass through all fields, then set required fields\n const ollamaRequest: OllamaRequest = {\n model: modelId,\n messages: transformMessages(request.messages, request.system),\n };\n\n // Add top-level params if provided\n if (keep_alive !== undefined) ollamaRequest.keep_alive = keep_alive;\n if (think !== undefined) ollamaRequest.think = think;\n if (logprobs !== undefined) ollamaRequest.logprobs = logprobs;\n if (top_logprobs !== undefined) ollamaRequest.top_logprobs = top_logprobs;\n\n // Spread remaining params into options to pass through all model parameters\n if (Object.keys(optionsParams).length > 0) {\n ollamaRequest.options = optionsParams as OllamaOptions;\n }\n\n // Tools come from request, not params\n if (request.tools && request.tools.length > 0) {\n ollamaRequest.tools = request.tools.map(transformTool);\n }\n\n // Structured output via format field\n if (request.structure) {\n ollamaRequest.format = request.structure as unknown as Record<string, unknown>;\n }\n\n return ollamaRequest;\n}\n\n/**\n * Normalizes system prompt to string.\n * Converts array format to concatenated string for providers that only support strings.\n */\nfunction normalizeSystem(system: string | unknown[] | undefined): string | undefined {\n if (!system) return undefined;\n if (typeof system === 'string') return system;\n return (system as Array<{text?: string}>)\n .map(block => block.text ?? '')\n .filter(text => text.length > 0)\n .join('\\n\\n');\n}\n\n/**\n * Transforms UPP messages to Ollama's message format.\n *\n * Handles conversion of:\n * - User messages with text and image content\n * - Assistant messages with text and tool calls\n * - Tool result messages\n * - System prompts (prepended as first message)\n *\n * Image handling:\n * - Base64 images are passed directly\n * - Byte arrays are converted to base64\n * - URL images are converted to text placeholders (Ollama limitation)\n *\n * @param messages - Array of UPP messages to transform\n * @param system - Optional system prompt (string or array, normalized to string)\n * @returns Array of Ollama-formatted messages\n */\nfunction transformMessages(messages: Message[], system?: string | unknown[]): OllamaMessage[] {\n const ollamaMessages: OllamaMessage[] = [];\n const normalizedSystem = normalizeSystem(system);\n\n // System prompt as first message\n if (normalizedSystem) {\n ollamaMessages.push({\n role: 'system',\n content: normalizedSystem,\n });\n }\n\n for (const msg of messages) {\n if (isUserMessage(msg)) {\n const textContent: string[] = [];\n const images: string[] = [];\n\n for (const block of msg.content) {\n if (block.type === 'text') {\n textContent.push(block.text);\n } else if (block.type === 'image') {\n const imageBlock = block as ImageBlock;\n if (imageBlock.source.type === 'base64') {\n images.push(imageBlock.source.data);\n } else if (imageBlock.source.type === 'bytes') {\n // Convert bytes to base64\n const base64 = btoa(\n Array.from(imageBlock.source.data)\n .map((b) => String.fromCharCode(b))\n .join('')\n );\n images.push(base64);\n } else if (imageBlock.source.type === 'url') {\n // Ollama doesn't support URL images directly\n // Would need to fetch and convert, for now just add as text\n textContent.push(`[Image: ${imageBlock.source.url}]`);\n }\n }\n }\n\n const message: OllamaMessage = {\n role: 'user',\n content: 
textContent.join('\\n'),\n };\n\n if (images.length > 0) {\n message.images = images;\n }\n\n ollamaMessages.push(message);\n } else if (isAssistantMessage(msg)) {\n const textContent = msg.content\n .filter((block): block is TextBlock => block.type === 'text')\n .map((block) => block.text)\n .join('\\n');\n\n const message: OllamaMessage = {\n role: 'assistant',\n content: textContent,\n };\n\n // Add tool calls if present\n if (msg.toolCalls && msg.toolCalls.length > 0) {\n message.tool_calls = msg.toolCalls.map((call) => ({\n function: {\n name: call.toolName,\n arguments: call.arguments,\n },\n }));\n }\n\n ollamaMessages.push(message);\n } else if (isToolResultMessage(msg)) {\n // Tool results are sent as 'tool' role messages\n for (const result of msg.results) {\n ollamaMessages.push({\n role: 'tool',\n tool_name: result.toolCallId, // In our UPP, toolCallId maps to tool name for Ollama\n content:\n typeof result.result === 'string'\n ? result.result\n : JSON.stringify(result.result),\n });\n }\n }\n }\n\n return ollamaMessages;\n}\n\n/**\n * Transforms a UPP tool definition to Ollama's function format.\n *\n * Ollama uses the OpenAI-style function calling format with a\n * `type: 'function'` wrapper around the function definition.\n *\n * @param tool - The UPP tool definition\n * @returns The Ollama-formatted tool definition\n */\nfunction transformTool(tool: Tool): OllamaTool {\n return {\n type: 'function',\n function: {\n name: tool.name,\n description: tool.description,\n parameters: {\n type: 'object',\n properties: tool.parameters.properties,\n required: tool.parameters.required,\n },\n },\n };\n}\n\n/**\n * Transforms an Ollama API response to the UPP LLMResponse format.\n *\n * This function extracts and normalizes:\n * - Text content from the assistant message\n * - Tool calls with their arguments\n * - Token usage statistics (prompt + completion tokens)\n * - Stop reason mapping (stop -> end_turn, length -> max_tokens)\n * - Ollama-specific metadata (timings, model info, thinking content)\n *\n * For structured output requests, the response content is automatically\n * parsed as JSON and stored in the `data` field.\n *\n * @param data - The raw Ollama API response\n * @returns The normalized UPP LLM response\n */\nexport function transformResponse(data: OllamaResponse): LLMResponse {\n const textContent: TextBlock[] = [];\n const toolCalls: ToolCall[] = [];\n let structuredData: unknown;\n\n // Add main content\n if (data.message.content) {\n textContent.push({ type: 'text', text: data.message.content });\n\n // Try to parse as JSON for structured output\n try {\n structuredData = JSON.parse(data.message.content);\n } catch {\n // Not valid JSON - that's fine, might not be structured output\n }\n }\n\n // Extract tool calls\n if (data.message.tool_calls) {\n for (const call of data.message.tool_calls) {\n toolCalls.push({\n toolCallId: call.function.name, // Ollama doesn't have separate IDs, use name\n toolName: call.function.name,\n arguments: call.function.arguments,\n });\n }\n }\n\n const message = new AssistantMessage(\n textContent,\n toolCalls.length > 0 ? 
toolCalls : undefined,\n {\n metadata: {\n ollama: {\n model: data.model,\n created_at: data.created_at,\n done_reason: data.done_reason,\n thinking: data.message.thinking,\n total_duration: data.total_duration,\n load_duration: data.load_duration,\n prompt_eval_duration: data.prompt_eval_duration,\n eval_duration: data.eval_duration,\n logprobs: data.logprobs,\n },\n },\n }\n );\n\n // Calculate token usage (Ollama doesn't support API-level prompt caching)\n const usage: TokenUsage = {\n inputTokens: data.prompt_eval_count ?? 0,\n outputTokens: data.eval_count ?? 0,\n totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0),\n cacheReadTokens: 0,\n cacheWriteTokens: 0,\n };\n\n // Map done_reason to standard stop reason\n let stopReason = 'end_turn';\n if (data.done_reason === 'length') {\n stopReason = 'max_tokens';\n } else if (data.done_reason === 'stop') {\n stopReason = 'end_turn';\n } else if (toolCalls.length > 0) {\n stopReason = 'tool_use';\n }\n\n return {\n message,\n usage,\n stopReason,\n data: structuredData,\n };\n}\n\n/**\n * Mutable state object for accumulating data during stream processing.\n *\n * As streaming chunks arrive, this state object accumulates content,\n * tool calls, and metadata. Once the stream completes (indicated by\n * `done: true`), this state is used to build the final LLMResponse.\n */\nexport interface StreamState {\n /** The model name from the stream. */\n model: string;\n /** Accumulated text content from all chunks. */\n content: string;\n /** Accumulated thinking/reasoning content (for models with think mode). */\n thinking: string;\n /** Tool calls extracted from the stream. */\n toolCalls: Array<{ name: string; args: Record<string, unknown> }>;\n /** The reason the generation stopped (stop, length, etc.). */\n doneReason: string | null;\n /** Number of tokens in the prompt evaluation. */\n promptEvalCount: number;\n /** Number of tokens generated in the response. */\n evalCount: number;\n /** Total generation duration in nanoseconds. */\n totalDuration: number;\n /** Whether we're still waiting for the first chunk. */\n isFirstChunk: boolean;\n /** ISO timestamp when the response was created. 
*/\n createdAt: string;\n}\n\n/**\n * Creates an initial empty stream state for accumulating streaming responses.\n *\n * @returns A fresh StreamState object with default values\n */\nexport function createStreamState(): StreamState {\n return {\n model: '',\n content: '',\n thinking: '',\n toolCalls: [],\n doneReason: null,\n promptEvalCount: 0,\n evalCount: 0,\n totalDuration: 0,\n isFirstChunk: true,\n createdAt: '',\n };\n}\n\n/**\n * Transforms an Ollama stream chunk into UPP StreamEvents.\n *\n * Each Ollama chunk may produce zero or more UPP events:\n * - First chunk: `message_start` event\n * - Content chunks: `text_delta` events\n * - Thinking chunks: `reasoning_delta` events\n * - Tool call chunks: `tool_call_delta` events\n * - Final chunk (done=true): `message_stop` event\n *\n * The function also updates the provided state object with accumulated\n * content and metadata for building the final response.\n *\n * @param chunk - The raw Ollama stream chunk\n * @param state - Mutable state object to accumulate data\n * @returns Array of UPP stream events (may be empty)\n */\nexport function transformStreamChunk(\n chunk: OllamaStreamChunk,\n state: StreamState\n): StreamEvent[] {\n const events: StreamEvent[] = [];\n\n // First chunk - emit message start\n if (state.isFirstChunk) {\n state.model = chunk.model;\n state.createdAt = chunk.created_at;\n events.push({ type: 'message_start', index: 0, delta: {} });\n state.isFirstChunk = false;\n }\n\n // Process message content\n if (chunk.message) {\n // Text content delta\n if (chunk.message.content) {\n state.content += chunk.message.content;\n events.push({\n type: 'text_delta',\n index: 0,\n delta: { text: chunk.message.content },\n });\n }\n\n // Thinking content delta\n if (chunk.message.thinking) {\n state.thinking += chunk.message.thinking;\n events.push({\n type: 'reasoning_delta',\n index: 0,\n delta: { text: chunk.message.thinking },\n });\n }\n\n // Tool calls (typically come in final chunk)\n if (chunk.message.tool_calls) {\n for (const call of chunk.message.tool_calls) {\n state.toolCalls.push({\n name: call.function.name,\n args: call.function.arguments,\n });\n events.push({\n type: 'tool_call_delta',\n index: state.toolCalls.length - 1,\n delta: {\n toolCallId: call.function.name,\n toolName: call.function.name,\n argumentsJson: JSON.stringify(call.function.arguments),\n },\n });\n }\n }\n }\n\n // Final chunk with metrics\n if (chunk.done) {\n state.doneReason = chunk.done_reason ?? null;\n state.promptEvalCount = chunk.prompt_eval_count ?? 0;\n state.evalCount = chunk.eval_count ?? 0;\n state.totalDuration = chunk.total_duration ?? 
0;\n events.push({ type: 'message_stop', index: 0, delta: {} });\n }\n\n return events;\n}\n\n/**\n * Builds a complete LLMResponse from accumulated stream state.\n *\n * Called after the stream completes to construct the final response object\n * with all accumulated content, tool calls, usage statistics, and metadata.\n *\n * For structured output, attempts to parse the accumulated content as JSON\n * and stores it in the `data` field if successful.\n *\n * @param state - The accumulated stream state\n * @returns The complete UPP LLM response\n */\nexport function buildResponseFromState(state: StreamState): LLMResponse {\n const textContent: TextBlock[] = [];\n const toolCalls: ToolCall[] = [];\n let structuredData: unknown;\n\n if (state.content) {\n textContent.push({ type: 'text', text: state.content });\n\n // Try to parse as JSON for structured output\n try {\n structuredData = JSON.parse(state.content);\n } catch {\n // Not valid JSON - that's fine\n }\n }\n\n for (const tc of state.toolCalls) {\n toolCalls.push({\n toolCallId: tc.name,\n toolName: tc.name,\n arguments: tc.args,\n });\n }\n\n const message = new AssistantMessage(\n textContent,\n toolCalls.length > 0 ? toolCalls : undefined,\n {\n metadata: {\n ollama: {\n model: state.model,\n created_at: state.createdAt,\n done_reason: state.doneReason,\n thinking: state.thinking || undefined,\n total_duration: state.totalDuration,\n },\n },\n }\n );\n\n // Ollama doesn't support API-level prompt caching\n const usage: TokenUsage = {\n inputTokens: state.promptEvalCount,\n outputTokens: state.evalCount,\n totalTokens: state.promptEvalCount + state.evalCount,\n cacheReadTokens: 0,\n cacheWriteTokens: 0,\n };\n\n // Map done_reason to standard stop reason\n let stopReason = 'end_turn';\n if (state.doneReason === 'length') {\n stopReason = 'max_tokens';\n } else if (toolCalls.length > 0) {\n stopReason = 'tool_use';\n }\n\n return {\n message,\n usage,\n stopReason,\n data: structuredData,\n };\n}\n","/**\n * @fileoverview Ollama LLM handler implementation.\n *\n * This module provides the core LLM functionality for the Ollama provider,\n * including both synchronous completion and streaming capabilities. It\n * communicates with Ollama's native `/api/chat` endpoint.\n *\n * @module providers/ollama/llm\n */\n\nimport type {\n LLMHandler,\n BoundLLMModel,\n LLMRequest,\n LLMResponse,\n LLMStreamResult,\n LLMCapabilities,\n} from '../../types/llm.ts';\nimport type { StreamEvent } from '../../types/stream.ts';\nimport type { LLMProvider } from '../../types/provider.ts';\nimport { UPPError } from '../../types/errors.ts';\nimport { resolveApiKey } from '../../http/keys.ts';\nimport { doFetch, doStreamFetch } from '../../http/fetch.ts';\nimport { normalizeHttpError } from '../../http/errors.ts';\nimport type { OllamaLLMParams, OllamaResponse, OllamaStreamChunk } from './types.ts';\nimport {\n transformRequest,\n transformResponse,\n transformStreamChunk,\n createStreamState,\n buildResponseFromState,\n} from './transform.ts';\n\n/** Default Ollama server URL for local installations. */\nconst OLLAMA_DEFAULT_URL = 'http://localhost:11434';\n\n/**\n * Capability flags for the Ollama provider.\n *\n * **Important:** Tool calling is intentionally disabled. Ollama recommends\n * using their OpenAI-compatible API (`/v1/chat/completions`) for function\n * calling. 
To use tools with Ollama, configure the OpenAI provider with\n * `baseUrl` pointed to your Ollama instance.\n */\nconst OLLAMA_CAPABILITIES: LLMCapabilities = {\n streaming: true,\n tools: false,\n structuredOutput: true,\n imageInput: true,\n videoInput: false,\n audioInput: false,\n};\n\n/**\n * Parses Ollama's newline-delimited JSON (NDJSON) stream format.\n *\n * Ollama uses NDJSON where each line is a complete JSON object representing\n * a streaming chunk. This generator reads the stream incrementally, buffering\n * incomplete lines and yielding parsed chunks as they become available.\n *\n * @param body - The raw ReadableStream from the fetch response\n * @yields Parsed Ollama stream chunks\n */\nasync function* parseOllamaStream(\n body: ReadableStream<Uint8Array>\n): AsyncGenerator<OllamaStreamChunk, void, unknown> {\n const reader = body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n\n // Process complete lines (Ollama uses newline-delimited JSON)\n const lines = buffer.split('\\n');\n buffer = lines.pop() ?? ''; // Keep incomplete line in buffer\n\n for (const line of lines) {\n const trimmed = line.trim();\n if (!trimmed) continue;\n\n try {\n const chunk = JSON.parse(trimmed) as OllamaStreamChunk;\n yield chunk;\n } catch {\n // Skip invalid JSON lines\n }\n }\n }\n\n // Process any remaining buffer\n if (buffer.trim()) {\n try {\n const chunk = JSON.parse(buffer.trim()) as OllamaStreamChunk;\n yield chunk;\n } catch {\n // Skip invalid JSON\n }\n }\n } finally {\n reader.releaseLock();\n }\n}\n\n/**\n * Creates the Ollama LLM handler for chat completions.\n *\n * This factory function creates an LLM handler that communicates with\n * Ollama's `/api/chat` endpoint. The handler supports both synchronous\n * completions and streaming responses.\n *\n * The handler is designed to be used with `createProvider()` which injects\n * the provider reference after construction.\n *\n * @returns An LLM handler configured for Ollama\n *\n * @example\n * ```typescript\n * const handler = createLLMHandler();\n * const provider = createProvider({\n * name: 'ollama',\n * version: '1.0.0',\n * handlers: { llm: handler }\n * });\n * ```\n */\nexport function createLLMHandler(): LLMHandler<OllamaLLMParams> {\n let providerRef: LLMProvider<OllamaLLMParams> | null = null;\n\n return {\n _setProvider(provider: LLMProvider<OllamaLLMParams>) {\n providerRef = provider;\n },\n\n bind(modelId: string): BoundLLMModel<OllamaLLMParams> {\n // Use the injected provider reference (set by createProvider)\n if (!providerRef) {\n throw new UPPError(\n 'Provider reference not set. Handler must be used with createProvider().',\n 'INVALID_REQUEST',\n 'ollama',\n 'llm'\n );\n }\n\n const model: BoundLLMModel<OllamaLLMParams> = {\n modelId,\n capabilities: OLLAMA_CAPABILITIES,\n\n get provider(): LLMProvider<OllamaLLMParams> {\n return providerRef!;\n },\n\n async complete(request: LLMRequest<OllamaLLMParams>): Promise<LLMResponse> {\n // Ollama doesn't require an API key by default, but may use one for auth\n let apiKey: string | undefined;\n try {\n apiKey = await resolveApiKey(\n request.config,\n 'OLLAMA_API_KEY',\n 'ollama',\n 'llm'\n );\n } catch {\n // API key is optional for Ollama\n }\n\n const baseUrl = request.config.baseUrl ?? 
OLLAMA_DEFAULT_URL;\n const url = `${baseUrl}/api/chat`;\n const body = transformRequest(request, modelId);\n body.stream = false;\n\n const headers: Record<string, string> = {\n 'Content-Type': 'application/json',\n };\n\n if (apiKey) {\n headers['Authorization'] = `Bearer ${apiKey}`;\n }\n\n if (request.config.headers) {\n for (const [key, value] of Object.entries(request.config.headers)) {\n if (value !== undefined) {\n headers[key] = value;\n }\n }\n }\n\n const response = await doFetch(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: request.signal,\n },\n request.config,\n 'ollama',\n 'llm'\n );\n\n const data = (await response.json()) as OllamaResponse;\n return transformResponse(data);\n },\n\n stream(request: LLMRequest<OllamaLLMParams>): LLMStreamResult {\n const state = createStreamState();\n let responseResolve: (value: LLMResponse) => void;\n let responseReject: (error: Error) => void;\n\n const responsePromise = new Promise<LLMResponse>((resolve, reject) => {\n responseResolve = resolve;\n responseReject = reject;\n });\n\n async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {\n try {\n // Ollama doesn't require an API key by default\n let apiKey: string | undefined;\n try {\n apiKey = await resolveApiKey(\n request.config,\n 'OLLAMA_API_KEY',\n 'ollama',\n 'llm'\n );\n } catch {\n // API key is optional for Ollama\n }\n\n const baseUrl = request.config.baseUrl ?? OLLAMA_DEFAULT_URL;\n const url = `${baseUrl}/api/chat`;\n const body = transformRequest(request, modelId);\n body.stream = true;\n\n const headers: Record<string, string> = {\n 'Content-Type': 'application/json',\n };\n\n if (apiKey) {\n headers['Authorization'] = `Bearer ${apiKey}`;\n }\n\n if (request.config.headers) {\n for (const [key, value] of Object.entries(request.config.headers)) {\n if (value !== undefined) {\n headers[key] = value;\n }\n }\n }\n\n const response = await doStreamFetch(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: request.signal,\n },\n request.config,\n 'ollama',\n 'llm'\n );\n\n if (!response.ok) {\n const error = await normalizeHttpError(response, 'ollama', 'llm');\n responseReject(error);\n throw error;\n }\n\n if (!response.body) {\n const error = new UPPError(\n 'No response body for streaming request',\n 'PROVIDER_ERROR',\n 'ollama',\n 'llm'\n );\n responseReject(error);\n throw error;\n }\n\n // Parse Ollama's newline-delimited JSON stream\n for await (const chunk of parseOllamaStream(response.body)) {\n // Check for error in chunk\n if ('error' in chunk && typeof (chunk as Record<string, unknown>).error === 'string') {\n const error = new UPPError(\n (chunk as Record<string, unknown>).error as string,\n 'PROVIDER_ERROR',\n 'ollama',\n 'llm'\n );\n responseReject(error);\n throw error;\n }\n\n const events = transformStreamChunk(chunk, state);\n for (const event of events) {\n yield event;\n }\n }\n\n // Build final response\n responseResolve(buildResponseFromState(state));\n } catch (error) {\n responseReject(error as Error);\n throw error;\n }\n }\n\n return {\n [Symbol.asyncIterator]() {\n return generateEvents();\n },\n response: responsePromise,\n };\n },\n };\n\n return model;\n },\n };\n}\n","/**\n * @fileoverview Ollama Embeddings API Handler\n *\n * This module implements the embedding handler for Ollama's local embeddings API.\n * Supports various embedding models including nomic-embed-text, mxbai-embed-large,\n * qwen3-embedding, and others.\n *\n * @see {@link 
https://github.com/ollama/ollama/blob/main/docs/api.md#embeddings Ollama Embeddings API Reference}\n * @module providers/ollama/embed\n */\n\nimport type {\n EmbeddingHandler,\n BoundEmbeddingModel,\n EmbeddingRequest,\n EmbeddingResponse,\n EmbeddingProvider,\n} from '../../types/provider.ts';\nimport { UPPError } from '../../types/errors.ts';\nimport { doFetch } from '../../http/fetch.ts';\n\n/** Default URL for Ollama's local API */\nconst OLLAMA_DEFAULT_URL = 'http://localhost:11434';\n\n/**\n * Ollama embedding parameters.\n * Passed through to the API.\n */\nexport interface OllamaEmbedParams {\n /** Truncates the end of each input to fit within context length (default: true) */\n truncate?: boolean;\n /** Controls how long the model stays loaded in memory (e.g., '5m', '1h') */\n keep_alive?: string;\n /** Additional model options */\n options?: Record<string, unknown>;\n}\n\n/**\n * Ollama embeddings API response structure.\n */\ninterface OllamaEmbeddingsResponse {\n model: string;\n embeddings: number[][];\n total_duration?: number;\n load_duration?: number;\n prompt_eval_count?: number;\n}\n\n/**\n * Creates an embedding handler for Ollama's local Embeddings API.\n *\n * @returns An embedding handler configured for Ollama\n *\n * @example\n * ```typescript\n * const handler = createEmbeddingHandler();\n * const model = handler.bind('nomic-embed-text');\n *\n * const response = await model.embed({\n * inputs: ['Hello world'],\n * config: { baseUrl: 'http://localhost:11434' }\n * });\n * ```\n */\nexport function createEmbeddingHandler(): EmbeddingHandler<OllamaEmbedParams> {\n let providerRef: EmbeddingProvider<OllamaEmbedParams> | null = null;\n\n return {\n supportedInputs: ['text'],\n\n _setProvider(provider: EmbeddingProvider<OllamaEmbedParams>) {\n providerRef = provider;\n },\n\n bind(modelId: string): BoundEmbeddingModel<OllamaEmbedParams> {\n if (!providerRef) {\n throw new UPPError(\n 'Provider reference not set. Handler must be used with createProvider().',\n 'INVALID_REQUEST',\n 'ollama',\n 'embedding'\n );\n }\n\n const model: BoundEmbeddingModel<OllamaEmbedParams> = {\n modelId,\n maxBatchSize: 512,\n maxInputLength: 8192,\n dimensions: 768, // Varies by model\n\n get provider(): EmbeddingProvider<OllamaEmbedParams> {\n return providerRef!;\n },\n\n async embed(request: EmbeddingRequest<OllamaEmbedParams>): Promise<EmbeddingResponse> {\n const baseUrl = request.config.baseUrl ?? 
OLLAMA_DEFAULT_URL;\n\n // Transform inputs to strings\n const inputTexts = request.inputs.map((input) => {\n if (typeof input === 'string') {\n return input;\n }\n if ('text' in input) {\n return input.text;\n }\n throw new UPPError(\n 'Ollama embeddings only support text input',\n 'INVALID_REQUEST',\n 'ollama',\n 'embedding'\n );\n });\n\n // Build request body\n const body: Record<string, unknown> = {\n model: modelId,\n input: inputTexts,\n };\n\n // Pass through Ollama-specific params\n if (request.params?.truncate !== undefined) {\n body.truncate = request.params.truncate;\n }\n if (request.params?.keep_alive !== undefined) {\n body.keep_alive = request.params.keep_alive;\n }\n if (request.params?.options !== undefined) {\n body.options = request.params.options;\n }\n\n const url = `${baseUrl}/api/embed`;\n\n const headers: Record<string, string> = {\n 'Content-Type': 'application/json',\n };\n\n // Merge custom headers\n if (request.config.headers) {\n for (const [key, value] of Object.entries(request.config.headers)) {\n if (value !== undefined) {\n headers[key] = value;\n }\n }\n }\n\n const response = await doFetch(url, {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: request.signal,\n }, request.config, 'ollama', 'embedding');\n\n const data = await response.json() as OllamaEmbeddingsResponse;\n\n // Return EmbeddingResponse\n return {\n embeddings: data.embeddings.map((vec, index) => ({\n vector: vec,\n index,\n })),\n usage: {\n totalTokens: data.prompt_eval_count ?? 0,\n },\n metadata: {\n totalDuration: data.total_duration,\n loadDuration: data.load_duration,\n },\n };\n },\n };\n\n return model;\n },\n };\n}\n","import { createProvider } from '../../core/provider.ts';\nimport { createLLMHandler } from './llm.ts';\nimport { createEmbeddingHandler } from './embed.ts';\n\n/**\n * Ollama provider for local LLM inference.\n *\n * Ollama runs models locally on your machine, eliminating the need for API keys\n * and external network calls. This makes it ideal for development, testing,\n * and privacy-sensitive applications.\n *\n * **Supported Models:**\n * - Llama 3.x (Meta's latest open-weight models)\n * - Mistral / Mixtral (Mistral AI's efficient models)\n * - Gemma (Google's lightweight models)\n * - Qwen (Alibaba's multilingual models)\n * - DeepSeek (DeepSeek's code and reasoning models)\n * - Phi (Microsoft's small language models)\n * - CodeLlama (Code-specialized Llama variants)\n * - And many more from the Ollama model library\n *\n * **Prerequisites:**\n * 1. Install Ollama from https://ollama.ai\n * 2. Pull a model: `ollama pull llama3.2`\n * 3. Ensure Ollama is running (default: http://localhost:11434)\n *\n * **Note on Tool Calling:**\n * For tool/function calling, Ollama recommends using their OpenAI-compatible\n * API endpoint. 
Use the OpenAI provider with `baseUrl` pointed to Ollama instead.\n *\n * @example Basic usage with local model\n * ```typescript\n * import { llm } from 'provider-protocol';\n * import { ollama } from 'provider-protocol/ollama';\n *\n * const model = llm({ model: ollama('llama3.2') });\n * const turn = await model.generate('Hello!');\n * console.log(turn.response.text);\n * ```\n *\n * @example Custom Ollama server URL\n * ```typescript\n * import { llm } from 'provider-protocol';\n * import { ollama } from 'provider-protocol/ollama';\n *\n * const model = llm({\n * model: ollama('llama3.2'),\n * config: { baseUrl: 'http://my-ollama-server:11434' },\n * });\n * ```\n *\n * @example Streaming responses\n * ```typescript\n * const model = llm({ model: ollama('llama3.2') });\n * const stream = model.stream('Write a poem');\n *\n * for await (const event of stream) {\n * if (event.type === 'text_delta') {\n * process.stdout.write(event.delta.text);\n * }\n * }\n * ```\n *\n * @example Using model parameters\n * ```typescript\n * const model = llm({\n * model: ollama('llama3.2'),\n * params: {\n * temperature: 0.9,\n * top_p: 0.95,\n * num_predict: 500,\n * },\n * });\n * const result = await model.generate('Be creative!');\n * ```\n *\n * @see {@link OllamaLLMParams} for available model parameters\n */\nexport const ollama = createProvider({\n name: 'ollama',\n version: '1.0.0',\n handlers: {\n llm: createLLMHandler(),\n embedding: createEmbeddingHandler(),\n },\n});\n\nexport type { OllamaLLMParams, OllamaHeaders } from './types.ts';\nexport type { OllamaEmbedParams } from './embed.ts';\n"],"mappings":";;;;;;;;;;;;;;;;;;AAqEO,SAAS,iBACd,SACA,SACe;AACf,QAAM,SAAU,QAAQ,UAAU,CAAC;AAGnC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,GAAG;AAAA,EACL,IAAI;AAGJ,QAAM,gBAA+B;AAAA,IACnC,OAAO;AAAA,IACP,UAAU,kBAAkB,QAAQ,UAAU,QAAQ,MAAM;AAAA,EAC9D;AAGA,MAAI,eAAe,OAAW,eAAc,aAAa;AACzD,MAAI,UAAU,OAAW,eAAc,QAAQ;AAC/C,MAAI,aAAa,OAAW,eAAc,WAAW;AACrD,MAAI,iBAAiB,OAAW,eAAc,eAAe;AAG7D,MAAI,OAAO,KAAK,aAAa,EAAE,SAAS,GAAG;AACzC,kBAAc,UAAU;AAAA,EAC1B;AAGA,MAAI,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AAC7C,kBAAc,QAAQ,QAAQ,MAAM,IAAI,aAAa;AAAA,EACvD;AAGA,MAAI,QAAQ,WAAW;AACrB,kBAAc,SAAS,QAAQ;AAAA,EACjC;AAEA,SAAO;AACT;AAMA,SAAS,gBAAgB,QAA4D;AACnF,MAAI,CAAC,OAAQ,QAAO;AACpB,MAAI,OAAO,WAAW,SAAU,QAAO;AACvC,SAAQ,OACL,IAAI,WAAS,MAAM,QAAQ,EAAE,EAC7B,OAAO,UAAQ,KAAK,SAAS,CAAC,EAC9B,KAAK,MAAM;AAChB;AAoBA,SAAS,kBAAkB,UAAqB,QAA8C;AAC5F,QAAM,iBAAkC,CAAC;AACzC,QAAM,mBAAmB,gBAAgB,MAAM;AAG/C,MAAI,kBAAkB;AACpB,mBAAe,KAAK;AAAA,MAClB,MAAM;AAAA,MACN,SAAS;AAAA,IACX,CAAC;AAAA,EACH;AAEA,aAAW,OAAO,UAAU;AAC1B,QAAI,cAAc,GAAG,GAAG;AACtB,YAAM,cAAwB,CAAC;AAC/B,YAAM,SAAmB,CAAC;AAE1B,iBAAW,SAAS,IAAI,SAAS;AAC/B,YAAI,MAAM,SAAS,QAAQ;AACzB,sBAAY,KAAK,MAAM,IAAI;AAAA,QAC7B,WAAW,MAAM,SAAS,SAAS;AACjC,gBAAM,aAAa;AACnB,cAAI,WAAW,OAAO,SAAS,UAAU;AACvC,mBAAO,KAAK,WAAW,OAAO,IAAI;AAAA,UACpC,WAAW,WAAW,OAAO,SAAS,SAAS;AAE7C,kBAAM,SAAS;AAAA,cACb,MAAM,KAAK,WAAW,OAAO,IAAI,EAC9B,IAAI,CAAC,MAAM,OAAO,aAAa,CAAC,CAAC,EACjC,KAAK,EAAE;AAAA,YACZ;AACA,mBAAO,KAAK,MAAM;AAAA,UACpB,WAAW,WAAW,OAAO,SAAS,OAAO;AAG3C,wBAAY,KAAK,WAAW,WAAW,OAAO,GAAG,GAAG;AAAA,UACtD;AAAA,QACF;AAAA,MACF;AAEA,YAAM,UAAyB;AAAA,QAC7B,MAAM;AAAA,QACN,SAAS,YAAY,KAAK,IAAI;AAAA,MAChC;AAEA,UAAI,OAAO,SAAS,GAAG;AACrB,gBAAQ,SAAS;AAAA,MACnB;AAEA,qBAAe,KAAK,OAAO;AAAA,IAC7B,WAAW,mBAAmB,GAAG,GAAG;AAClC,YAAM,cAAc,IAAI,QACrB,OAAO,CAAC,UAA8B,MAAM,SAAS,MAAM,EAC3D,IAAI,CAAC,UAAU,MAAM,IAAI,EACzB,KAAK,IAAI;AAEZ,YAAM,UAAyB;AAAA,QAC7B,MAAM;AAAA,QACN,SAAS;AAAA,MACX;AAGA,UAAI,IAAI,aAAa,IAAI,UAAU,SAAS,GAAG;AAC7C,gBAAQ,aAAa,IAAI,UAAU,IAAI,CAAC,UA
AU;AAAA,UAChD,UAAU;AAAA,YACR,MAAM,KAAK;AAAA,YACX,WAAW,KAAK;AAAA,UAClB;AAAA,QACF,EAAE;AAAA,MACJ;AAEA,qBAAe,KAAK,OAAO;AAAA,IAC7B,WAAW,oBAAoB,GAAG,GAAG;AAEnC,iBAAW,UAAU,IAAI,SAAS;AAChC,uBAAe,KAAK;AAAA,UAClB,MAAM;AAAA,UACN,WAAW,OAAO;AAAA;AAAA,UAClB,SACE,OAAO,OAAO,WAAW,WACrB,OAAO,SACP,KAAK,UAAU,OAAO,MAAM;AAAA,QACpC,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAWA,SAAS,cAAc,MAAwB;AAC7C,SAAO;AAAA,IACL,MAAM;AAAA,IACN,UAAU;AAAA,MACR,MAAM,KAAK;AAAA,MACX,aAAa,KAAK;AAAA,MAClB,YAAY;AAAA,QACV,MAAM;AAAA,QACN,YAAY,KAAK,WAAW;AAAA,QAC5B,UAAU,KAAK,WAAW;AAAA,MAC5B;AAAA,IACF;AAAA,EACF;AACF;AAkBO,SAAS,kBAAkB,MAAmC;AACnE,QAAM,cAA2B,CAAC;AAClC,QAAM,YAAwB,CAAC;AAC/B,MAAI;AAGJ,MAAI,KAAK,QAAQ,SAAS;AACxB,gBAAY,KAAK,EAAE,MAAM,QAAQ,MAAM,KAAK,QAAQ,QAAQ,CAAC;AAG7D,QAAI;AACF,uBAAiB,KAAK,MAAM,KAAK,QAAQ,OAAO;AAAA,IAClD,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,MAAI,KAAK,QAAQ,YAAY;AAC3B,eAAW,QAAQ,KAAK,QAAQ,YAAY;AAC1C,gBAAU,KAAK;AAAA,QACb,YAAY,KAAK,SAAS;AAAA;AAAA,QAC1B,UAAU,KAAK,SAAS;AAAA,QACxB,WAAW,KAAK,SAAS;AAAA,MAC3B,CAAC;AAAA,IACH;AAAA,EACF;AAEA,QAAM,UAAU,IAAI;AAAA,IAClB;AAAA,IACA,UAAU,SAAS,IAAI,YAAY;AAAA,IACnC;AAAA,MACE,UAAU;AAAA,QACR,QAAQ;AAAA,UACN,OAAO,KAAK;AAAA,UACZ,YAAY,KAAK;AAAA,UACjB,aAAa,KAAK;AAAA,UAClB,UAAU,KAAK,QAAQ;AAAA,UACvB,gBAAgB,KAAK;AAAA,UACrB,eAAe,KAAK;AAAA,UACpB,sBAAsB,KAAK;AAAA,UAC3B,eAAe,KAAK;AAAA,UACpB,UAAU,KAAK;AAAA,QACjB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,QAAM,QAAoB;AAAA,IACxB,aAAa,KAAK,qBAAqB;AAAA,IACvC,cAAc,KAAK,cAAc;AAAA,IACjC,cAAc,KAAK,qBAAqB,MAAM,KAAK,cAAc;AAAA,IACjE,iBAAiB;AAAA,IACjB,kBAAkB;AAAA,EACpB;AAGA,MAAI,aAAa;AACjB,MAAI,KAAK,gBAAgB,UAAU;AACjC,iBAAa;AAAA,EACf,WAAW,KAAK,gBAAgB,QAAQ;AACtC,iBAAa;AAAA,EACf,WAAW,UAAU,SAAS,GAAG;AAC/B,iBAAa;AAAA,EACf;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM;AAAA,EACR;AACF;AAqCO,SAAS,oBAAiC;AAC/C,SAAO;AAAA,IACL,OAAO;AAAA,IACP,SAAS;AAAA,IACT,UAAU;AAAA,IACV,WAAW,CAAC;AAAA,IACZ,YAAY;AAAA,IACZ,iBAAiB;AAAA,IACjB,WAAW;AAAA,IACX,eAAe;AAAA,IACf,cAAc;AAAA,IACd,WAAW;AAAA,EACb;AACF;AAmBO,SAAS,qBACd,OACA,OACe;AACf,QAAM,SAAwB,CAAC;AAG/B,MAAI,MAAM,cAAc;AACtB,UAAM,QAAQ,MAAM;AACpB,UAAM,YAAY,MAAM;AACxB,WAAO,KAAK,EAAE,MAAM,iBAAiB,OAAO,GAAG,OAAO,CAAC,EAAE,CAAC;AAC1D,UAAM,eAAe;AAAA,EACvB;AAGA,MAAI,MAAM,SAAS;AAEjB,QAAI,MAAM,QAAQ,SAAS;AACzB,YAAM,WAAW,MAAM,QAAQ;AAC/B,aAAO,KAAK;AAAA,QACV,MAAM;AAAA,QACN,OAAO;AAAA,QACP,OAAO,EAAE,MAAM,MAAM,QAAQ,QAAQ;AAAA,MACvC,CAAC;AAAA,IACH;AAGA,QAAI,MAAM,QAAQ,UAAU;AAC1B,YAAM,YAAY,MAAM,QAAQ;AAChC,aAAO,KAAK;AAAA,QACV,MAAM;AAAA,QACN,OAAO;AAAA,QACP,OAAO,EAAE,MAAM,MAAM,QAAQ,SAAS;AAAA,MACxC,CAAC;AAAA,IACH;AAGA,QAAI,MAAM,QAAQ,YAAY;AAC5B,iBAAW,QAAQ,MAAM,QAAQ,YAAY;AAC3C,cAAM,UAAU,KAAK;AAAA,UACnB,MAAM,KAAK,SAAS;AAAA,UACpB,MAAM,KAAK,SAAS;AAAA,QACtB,CAAC;AACD,eAAO,KAAK;AAAA,UACV,MAAM;AAAA,UACN,OAAO,MAAM,UAAU,SAAS;AAAA,UAChC,OAAO;AAAA,YACL,YAAY,KAAK,SAAS;AAAA,YAC1B,UAAU,KAAK,SAAS;AAAA,YACxB,eAAe,KAAK,UAAU,KAAK,SAAS,SAAS;AAAA,UACvD;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAGA,MAAI,MAAM,MAAM;AACd,UAAM,aAAa,MAAM,eAAe;AACxC,UAAM,kBAAkB,MAAM,qBAAqB;AACnD,UAAM,YAAY,MAAM,cAAc;AACtC,UAAM,gBAAgB,MAAM,kBAAkB;AAC9C,WAAO,KAAK,EAAE,MAAM,gBAAgB,OAAO,GAAG,OAAO,CAAC,EAAE,CAAC;AAAA,EAC3D;AAEA,SAAO;AACT;AAcO,SAAS,uBAAuB,OAAiC;AACtE,QAAM,cAA2B,CAAC;AAClC,QAAM,YAAwB,CAAC;AAC/B,MAAI;AAEJ,MAAI,MAAM,SAAS;AACjB,gBAAY,KAAK,EAAE,MAAM,QAAQ,MAAM,MAAM,QAAQ,CAAC;AAGtD,QAAI;AACF,uBAAiB,KAAK,MAAM,MAAM,OAAO;AAAA,IAC3C,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,aAAW,MAAM,MAAM,WAAW;AAChC,cAAU,KAAK;AAAA,MACb,YAAY,GAAG;AAAA,MACf,UAAU,GAAG;AAAA,MACb,WAAW,GAAG;AAAA,IAChB,CAAC;AAAA,EACH;AAEA,QAAM,UAAU,IAAI;AAAA,IAClB;AAAA,IACA,UAAU,SAAS,IAAI,YAAY;AAAA,IACnC;AAAA,MACE,UAAU;AAAA,QACR,QAAQ;AAAA,UACN,OAAO,MAAM
;AAAA,UACb,YAAY,MAAM;AAAA,UAClB,aAAa,MAAM;AAAA,UACnB,UAAU,MAAM,YAAY;AAAA,UAC5B,gBAAgB,MAAM;AAAA,QACxB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,QAAM,QAAoB;AAAA,IACxB,aAAa,MAAM;AAAA,IACnB,cAAc,MAAM;AAAA,IACpB,aAAa,MAAM,kBAAkB,MAAM;AAAA,IAC3C,iBAAiB;AAAA,IACjB,kBAAkB;AAAA,EACpB;AAGA,MAAI,aAAa;AACjB,MAAI,MAAM,eAAe,UAAU;AACjC,iBAAa;AAAA,EACf,WAAW,UAAU,SAAS,GAAG;AAC/B,iBAAa;AAAA,EACf;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM;AAAA,EACR;AACF;;;AC9gBA,IAAM,qBAAqB;AAU3B,IAAM,sBAAuC;AAAA,EAC3C,WAAW;AAAA,EACX,OAAO;AAAA,EACP,kBAAkB;AAAA,EAClB,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,YAAY;AACd;AAYA,gBAAgB,kBACd,MACkD;AAClD,QAAM,SAAS,KAAK,UAAU;AAC9B,QAAM,UAAU,IAAI,YAAY;AAChC,MAAI,SAAS;AAEb,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,UAAI,KAAM;AAEV,gBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAGhD,YAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,eAAS,MAAM,IAAI,KAAK;AAExB,iBAAW,QAAQ,OAAO;AACxB,cAAM,UAAU,KAAK,KAAK;AAC1B,YAAI,CAAC,QAAS;AAEd,YAAI;AACF,gBAAM,QAAQ,KAAK,MAAM,OAAO;AAChC,gBAAM;AAAA,QACR,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AAGA,QAAI,OAAO,KAAK,GAAG;AACjB,UAAI;AACF,cAAM,QAAQ,KAAK,MAAM,OAAO,KAAK,CAAC;AACtC,cAAM;AAAA,MACR,QAAQ;AAAA,MAER;AAAA,IACF;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;AAwBO,SAAS,mBAAgD;AAC9D,MAAI,cAAmD;AAEvD,SAAO;AAAA,IACL,aAAa,UAAwC;AACnD,oBAAc;AAAA,IAChB;AAAA,IAEA,KAAK,SAAiD;AAEpD,UAAI,CAAC,aAAa;AAChB,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,QAAwC;AAAA,QAC5C;AAAA,QACA,cAAc;AAAA,QAEd,IAAI,WAAyC;AAC3C,iBAAO;AAAA,QACT;AAAA,QAEA,MAAM,SAAS,SAA4D;AAEzE,cAAI;AACJ,cAAI;AACF,qBAAS,MAAM;AAAA,cACb,QAAQ;AAAA,cACR;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF,QAAQ;AAAA,UAER;AAEA,gBAAM,UAAU,QAAQ,OAAO,WAAW;AAC1C,gBAAM,MAAM,GAAG,OAAO;AACtB,gBAAM,OAAO,iBAAiB,SAAS,OAAO;AAC9C,eAAK,SAAS;AAEd,gBAAM,UAAkC;AAAA,YACtC,gBAAgB;AAAA,UAClB;AAEA,cAAI,QAAQ;AACV,oBAAQ,eAAe,IAAI,UAAU,MAAM;AAAA,UAC7C;AAEA,cAAI,QAAQ,OAAO,SAAS;AAC1B,uBAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,QAAQ,OAAO,OAAO,GAAG;AACjE,kBAAI,UAAU,QAAW;AACvB,wBAAQ,GAAG,IAAI;AAAA,cACjB;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,WAAW,MAAM;AAAA,YACrB;AAAA,YACA;AAAA,cACE,QAAQ;AAAA,cACR;AAAA,cACA,MAAM,KAAK,UAAU,IAAI;AAAA,cACzB,QAAQ,QAAQ;AAAA,YAClB;AAAA,YACA,QAAQ;AAAA,YACR;AAAA,YACA;AAAA,UACF;AAEA,gBAAM,OAAQ,MAAM,SAAS,KAAK;AAClC,iBAAO,kBAAkB,IAAI;AAAA,QAC/B;AAAA,QAEA,OAAO,SAAuD;AAC5D,gBAAM,QAAQ,kBAAkB;AAChC,cAAI;AACJ,cAAI;AAEJ,gBAAM,kBAAkB,IAAI,QAAqB,CAAC,SAAS,WAAW;AACpE,8BAAkB;AAClB,6BAAiB;AAAA,UACnB,CAAC;AAED,0BAAgB,iBAA6D;AAC3E,gBAAI;AAEF,kBAAI;AACJ,kBAAI;AACF,yBAAS,MAAM;AAAA,kBACb,QAAQ;AAAA,kBACR;AAAA,kBACA;AAAA,kBACA;AAAA,gBACF;AAAA,cACF,QAAQ;AAAA,cAER;AAEA,oBAAM,UAAU,QAAQ,OAAO,WAAW;AAC1C,oBAAM,MAAM,GAAG,OAAO;AACtB,oBAAM,OAAO,iBAAiB,SAAS,OAAO;AAC9C,mBAAK,SAAS;AAEd,oBAAM,UAAkC;AAAA,gBACtC,gBAAgB;AAAA,cAClB;AAEA,kBAAI,QAAQ;AACV,wBAAQ,eAAe,IAAI,UAAU,MAAM;AAAA,cAC7C;AAEA,kBAAI,QAAQ,OAAO,SAAS;AAC1B,2BAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,QAAQ,OAAO,OAAO,GAAG;AACjE,sBAAI,UAAU,QAAW;AACvB,4BAAQ,GAAG,IAAI;AAAA,kBACjB;AAAA,gBACF;AAAA,cACF;AAEA,oBAAM,WAAW,MAAM;AAAA,gBACrB;AAAA,gBACA;AAAA,kBACE,QAAQ;AAAA,kBACR;AAAA,kBACA,MAAM,KAAK,UAAU,IAAI;AAAA,kBACzB,QAAQ,QAAQ;AAAA,gBAClB;AAAA,gBACA,QAAQ;AAAA,gBACR;AAAA,gBACA;AAAA,cACF;AAEA,kBAAI,CAAC,SAAS,IAAI;AAChB,sBAAM,QAAQ,MAAM,mBAAmB,UAAU,UAAU,KAAK;AAChE,+BAAe,KAAK;AACpB,sBAAM;AAAA,cACR;AAEA,kBAAI,CAAC,SAAS,MAAM;AAClB,sBAAM,QAAQ,IAAI;AAAA,kBAChB;AAAA,kBACA;AAAA,kBACA;AAAA,kBACA;AAAA,gBACF;AACA,+BAAe,KAAK;AACpB,sBAAM;AAAA,cACR;AAGA,+BAAiB,SAAS,kBAAkB,SAAS,IAAI,GAAG;AAE1D,oBAAI,WAAW,SAAS,OAAQ,MAAkC,UAAU,UAAU;AACpF,wBAAM,QAAQ,IAAI;AAAA,oBACf,MAAkC;AAAA,oBACnC;AAAA,oBACA;AAAA,oBACA;AAAA,kBACF;AACA,iCAAe,KAAK;AACpB
,wBAAM;AAAA,gBACR;AAEA,sBAAM,SAAS,qBAAqB,OAAO,KAAK;AAChD,2BAAW,SAAS,QAAQ;AAC1B,wBAAM;AAAA,gBACR;AAAA,cACF;AAGA,8BAAgB,uBAAuB,KAAK,CAAC;AAAA,YAC/C,SAAS,OAAO;AACd,6BAAe,KAAc;AAC7B,oBAAM;AAAA,YACR;AAAA,UACF;AAEA,iBAAO;AAAA,YACL,CAAC,OAAO,aAAa,IAAI;AACvB,qBAAO,eAAe;AAAA,YACxB;AAAA,YACA,UAAU;AAAA,UACZ;AAAA,QACF;AAAA,MACF;AAEA,aAAO;AAAA,IACT;AAAA,EACF;AACF;;;AC/SA,IAAMA,sBAAqB;AA0CpB,SAAS,yBAA8D;AAC5E,MAAI,cAA2D;AAE/D,SAAO;AAAA,IACL,iBAAiB,CAAC,MAAM;AAAA,IAExB,aAAa,UAAgD;AAC3D,oBAAc;AAAA,IAChB;AAAA,IAEA,KAAK,SAAyD;AAC5D,UAAI,CAAC,aAAa;AAChB,cAAM,IAAI;AAAA,UACR;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA,YAAM,QAAgD;AAAA,QACpD;AAAA,QACA,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB,YAAY;AAAA;AAAA,QAEZ,IAAI,WAAiD;AACnD,iBAAO;AAAA,QACT;AAAA,QAEA,MAAM,MAAM,SAA0E;AACpF,gBAAM,UAAU,QAAQ,OAAO,WAAWA;AAG1C,gBAAM,aAAa,QAAQ,OAAO,IAAI,CAAC,UAAU;AAC/C,gBAAI,OAAO,UAAU,UAAU;AAC7B,qBAAO;AAAA,YACT;AACA,gBAAI,UAAU,OAAO;AACnB,qBAAO,MAAM;AAAA,YACf;AACA,kBAAM,IAAI;AAAA,cACR;AAAA,cACA;AAAA,cACA;AAAA,cACA;AAAA,YACF;AAAA,UACF,CAAC;AAGD,gBAAM,OAAgC;AAAA,YACpC,OAAO;AAAA,YACP,OAAO;AAAA,UACT;AAGA,cAAI,QAAQ,QAAQ,aAAa,QAAW;AAC1C,iBAAK,WAAW,QAAQ,OAAO;AAAA,UACjC;AACA,cAAI,QAAQ,QAAQ,eAAe,QAAW;AAC5C,iBAAK,aAAa,QAAQ,OAAO;AAAA,UACnC;AACA,cAAI,QAAQ,QAAQ,YAAY,QAAW;AACzC,iBAAK,UAAU,QAAQ,OAAO;AAAA,UAChC;AAEA,gBAAM,MAAM,GAAG,OAAO;AAEtB,gBAAM,UAAkC;AAAA,YACtC,gBAAgB;AAAA,UAClB;AAGA,cAAI,QAAQ,OAAO,SAAS;AAC1B,uBAAW,CAAC,KAAK,KAAK,KAAK,OAAO,QAAQ,QAAQ,OAAO,OAAO,GAAG;AACjE,kBAAI,UAAU,QAAW;AACvB,wBAAQ,GAAG,IAAI;AAAA,cACjB;AAAA,YACF;AAAA,UACF;AAEA,gBAAM,WAAW,MAAM,QAAQ,KAAK;AAAA,YAClC,QAAQ;AAAA,YACR;AAAA,YACA,MAAM,KAAK,UAAU,IAAI;AAAA,YACzB,QAAQ,QAAQ;AAAA,UAClB,GAAG,QAAQ,QAAQ,UAAU,WAAW;AAExC,gBAAM,OAAO,MAAM,SAAS,KAAK;AAGjC,iBAAO;AAAA,YACL,YAAY,KAAK,WAAW,IAAI,CAAC,KAAK,WAAW;AAAA,cAC/C,QAAQ;AAAA,cACR;AAAA,YACF,EAAE;AAAA,YACF,OAAO;AAAA,cACL,aAAa,KAAK,qBAAqB;AAAA,YACzC;AAAA,YACA,UAAU;AAAA,cACR,eAAe,KAAK;AAAA,cACpB,cAAc,KAAK;AAAA,YACrB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAEA,aAAO;AAAA,IACT;AAAA,EACF;AACF;;;AChGO,IAAM,SAAS,eAAe;AAAA,EACnC,MAAM;AAAA,EACN,SAAS;AAAA,EACT,UAAU;AAAA,IACR,KAAK,iBAAiB;AAAA,IACtB,WAAW,uBAAuB;AAAA,EACpC;AACF,CAAC;","names":["OLLAMA_DEFAULT_URL"]}
package/dist/openai/index.d.ts
CHANGED
@@ -1,70 +1,4 @@
-import {
-import { n as ImageHandler } from '../image-Dhq-Yuq4.js';
-import '../content-DEl3z_W2.js';
-
-/**
- * @fileoverview OpenAI Embeddings API Handler
- *
- * This module implements the embedding handler for OpenAI's embeddings API.
- * Supports text-embedding-3-small, text-embedding-3-large, and text-embedding-ada-002 models.
- *
- * @see {@link https://platform.openai.com/docs/api-reference/embeddings OpenAI Embeddings API Reference}
- * @module providers/openai/embed
- */
-
-/**
- * OpenAI embedding parameters.
- * Passed through unchanged to the API.
- */
-interface OpenAIEmbedParams {
-    /** Output dimensions (text-embedding-3 models only) */
-    dimensions?: number;
-    /** Encoding format: 'float' or 'base64' */
-    encoding_format?: 'float' | 'base64';
-    /** A unique identifier representing your end-user */
-    user?: string;
-}
-
-/**
- * @fileoverview OpenAI Image Generation API Handler
- *
- * This module implements the image handler for OpenAI's Image Generation APIs.
- * Supports DALL-E 2, DALL-E 3, and GPT-Image models (gpt-image-1, gpt-image-1.5, etc).
- *
- * @see {@link https://platform.openai.com/docs/api-reference/images OpenAI Images API Reference}
- * @module providers/openai/image
- */
-
-/**
- * OpenAI image generation parameters.
- * Passed through unchanged to the API.
- */
-interface OpenAIImageParams {
-    /** Number of images to generate (1-10 for GPT Image, 1 for DALL-E 3) */
-    n?: number;
-    /** Output size (varies by model) */
-    size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | '1536x1024' | '1024x1536' | 'auto';
-    /** Image quality */
-    quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto';
-    /** Style (DALL-E 3 only) */
-    style?: 'vivid' | 'natural';
-    /** Background transparency (GPT Image only) */
-    background?: 'transparent' | 'opaque' | 'auto';
-    /** Output format (GPT Image only) */
-    output_format?: 'png' | 'jpeg' | 'webp';
-    /** Output compression (0-100 for webp/jpeg) */
-    output_compression?: number;
-    /** Response format */
-    response_format?: 'url' | 'b64_json';
-    /** Content moderation (GPT Image only) */
-    moderation?: 'auto' | 'low';
-    /** Enable streaming (GPT Image only) */
-    stream?: boolean;
-    /** Number of partial images during streaming (0-3) */
-    partial_images?: number;
-    /** User identifier */
-    user?: string;
-}
+import { g as Provider } from '../provider-DGQHYE6I.js';
 
 /**
  * @fileoverview OpenAI Provider Type Definitions
@@ -807,6 +741,29 @@ declare const tools: {
     mcp: typeof mcpTool;
 };
 
+/**
+ * @fileoverview OpenAI Embeddings API Handler
+ *
+ * This module implements the embedding handler for OpenAI's embeddings API.
+ * Supports text-embedding-3-small, text-embedding-3-large, and text-embedding-ada-002 models.
+ *
+ * @see {@link https://platform.openai.com/docs/api-reference/embeddings OpenAI Embeddings API Reference}
+ * @module providers/openai/embed
+ */
+
+/**
+ * OpenAI embedding parameters.
+ * Passed through unchanged to the API.
+ */
+interface OpenAIEmbedParams {
+    /** Output dimensions (text-embedding-3 models only) */
+    dimensions?: number;
+    /** Encoding format: 'float' or 'base64' */
+    encoding_format?: 'float' | 'base64';
+    /** A unique identifier representing your end-user */
+    user?: string;
+}
+
 /**
  * @fileoverview OpenAI Provider Factory
  *
@@ -815,12 +772,6 @@ declare const tools: {
  *
  * @module providers/openai
  */
-
-/**
- * Union type for the LLM handler that supports both API modes.
- * Used internally for the dynamic handler selection based on API mode.
- */
-type OpenAILLMParamsUnion = OpenAICompletionsParams | OpenAIResponsesParams;
 /**
  * Configuration options for the OpenAI provider.
  *
@@ -841,65 +792,6 @@ interface OpenAIProviderOptions {
      */
     api?: 'responses' | 'completions';
 }
-/**
- * OpenAI provider interface with configurable API mode.
- *
- * The provider is callable as a function to create model references, and also
- * exposes metadata about the provider and its supported modalities.
- *
- * @example Creating model references
- * ```typescript
- * // Using the modern Responses API (default, recommended)
- * const model = openai('gpt-4o');
- *
- * // Using the legacy Chat Completions API
- * const legacyModel = openai('gpt-4o', { api: 'completions' });
- *
- * // Explicit Responses API selection
- * const responsesModel = openai('gpt-4o', { api: 'responses' });
- * ```
- *
- * @see {@link OpenAIProviderOptions} for available configuration options
- * @see {@link OpenAIResponsesParams} for Responses API parameters
- * @see {@link OpenAICompletionsParams} for Chat Completions API parameters
- */
-interface OpenAIProvider extends Provider<OpenAIProviderOptions> {
-    /**
-     * Creates a model reference for the specified OpenAI model.
-     *
-     * @param modelId - The OpenAI model identifier (e.g., 'gpt-4o', 'gpt-4-turbo', 'o1-preview', 'gpt-4o-mini')
-     * @param options - Optional provider configuration including API mode selection
-     * @returns A model reference that can be used with the LLM core functions
-     *
-     * @example
-     * ```typescript
-     * const gpt4o = openai('gpt-4o');
-     * const gpt4turbo = openai('gpt-4-turbo', { api: 'completions' });
-     * ```
-     */
-    (modelId: string, options?: OpenAIProviderOptions): ModelReference<OpenAIProviderOptions>;
-    /**
-     * The provider identifier.
-     * Always returns `'openai'` for this provider.
-     */
-    readonly name: 'openai';
-    /**
-     * The provider version following semantic versioning.
-     */
-    readonly version: string;
-    /**
-     * Supported modalities and their handlers.
-     * Supports LLM, Embedding, and Image modalities.
-     */
-    readonly modalities: {
-        /** The LLM handler for text generation and chat completion */
-        llm: LLMHandler<OpenAILLMParamsUnion>;
-        /** The embedding handler for text embeddings */
-        embedding: EmbeddingHandler<OpenAIEmbedParams>;
-        /** The image handler for image generation */
-        image: ImageHandler<OpenAIImageParams>;
-    };
-}
 /**
  * The OpenAI provider instance.
  *
@@ -941,6 +833,6 @@ interface OpenAIProvider extends Provider<OpenAIProviderOptions> {
  * });
  * ```
  */
-declare const openai:
+declare const openai: Provider<OpenAIProviderOptions>;
 
 export { type OpenAIAPIMode, type OpenAIAudioConfig, type OpenAIBuiltInTool, type OpenAICodeInterpreterContainer, type OpenAICodeInterpreterTool, type OpenAICompletionsParams, type OpenAICompletionsWebSearchUserLocation, type OpenAIComputerEnvironment, type OpenAIComputerTool, type OpenAIConfig, type OpenAIConversation, type OpenAIEmbedParams, type OpenAIFileSearchTool, type OpenAIImageGenerationTool, type OpenAIMcpServerConfig, type OpenAIMcpTool, type OpenAIModelOptions, type OpenAIModelReference, type OpenAIPromptTemplate, type OpenAIResponsesParams, type OpenAIResponsesToolUnion, type OpenAIWebSearchOptions, type OpenAIWebSearchTool, type OpenAIWebSearchUserLocation, codeInterpreterTool, computerTool, fileSearchTool, imageGenerationTool, mcpTool, openai, tools, webSearchTool };
package/dist/openai/index.js
CHANGED
@@ -3,22 +3,23 @@ import {
 } from "../chunk-WAKD3OO5.js";
 import {
   AssistantMessage,
+  createProvider,
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-
+} from "../chunk-M4BMM5IB.js";
 import {
   parseSSEStream
 } from "../chunk-Z7RBRCRN.js";
 import {
-  doFetch,
-  doStreamFetch,
-  normalizeHttpError,
   resolveApiKey
-} from "../chunk-
+} from "../chunk-Y3GBJNA2.js";
 import {
-  UPPError
-
+  UPPError,
+  doFetch,
+  doStreamFetch,
+  normalizeHttpError
+} from "../chunk-EDENPF3E.js";
 
 // src/providers/openai/transform.completions.ts
 function transformRequest(request, modelId) {
@@ -588,15 +589,17 @@ function createCompletionsLLMHandler() {
 // src/providers/openai/transform.responses.ts
 function transformRequest2(request, modelId) {
   const params = request.params ?? {};
-  const builtInTools = params
-  const { tools: _paramsTools, ...restParams } = params;
+  const { tools: builtInTools, ...restParams } = params;
   const openaiRequest = {
     ...restParams,
     model: modelId,
     input: transformInputItems(request.messages, request.system)
   };
   const functionTools = request.tools?.map(transformTool2) ?? [];
-  const allTools = [
+  const allTools = [
+    ...functionTools,
+    ...builtInTools ?? []
+  ];
   if (allTools.length > 0) {
     openaiRequest.tools = allTools;
   }
@@ -1340,18 +1343,10 @@ function createEmbeddingHandler() {
             );
           });
           const body = {
+            ...request.params,
             model: modelId,
             input: inputTexts
           };
-          if (request.params?.dimensions !== void 0) {
-            body.dimensions = request.params.dimensions;
-          }
-          if (request.params?.encoding_format !== void 0) {
-            body.encoding_format = request.params.encoding_format;
-          }
-          if (request.params?.user !== void 0) {
-            body.user = request.params.user;
-          }
           const headers = {
             "Content-Type": "application/json",
             Authorization: `Bearer ${apiKey}`
@@ -1444,22 +1439,10 @@ async function executeGenerate(modelId, request) {
   );
   const baseUrl = request.config.baseUrl ? `${request.config.baseUrl.replace(/\/$/, "")}/v1/images/generations` : OPENAI_IMAGES_API_URL;
   const body = {
+    ...request.params,
     model: modelId,
    prompt: request.prompt
   };
-  if (request.params) {
-    const { n, size, quality, style, background, output_format, output_compression, response_format, moderation, user } = request.params;
-    if (n !== void 0) body.n = n;
-    if (size !== void 0) body.size = size;
-    if (quality !== void 0) body.quality = quality;
-    if (style !== void 0) body.style = style;
-    if (background !== void 0) body.background = background;
-    if (output_format !== void 0) body.output_format = output_format;
-    if (output_compression !== void 0) body.output_compression = output_compression;
-    if (response_format !== void 0) body.response_format = response_format;
-    if (moderation !== void 0) body.moderation = moderation;
-    if (user !== void 0) body.user = user;
-  }
   const headers = {
     "Content-Type": "application/json",
     Authorization: `Bearer ${apiKey}`
@@ -1543,21 +1526,11 @@ function executeStream(modelId, request) {
   );
   const baseUrl = request.config.baseUrl ? `${request.config.baseUrl.replace(/\/$/, "")}/v1/images/generations` : OPENAI_IMAGES_API_URL;
   const body = {
+    ...request.params,
     model: modelId,
     prompt: request.prompt,
     stream: true
   };
-  if (request.params) {
-    const { n, size, quality, background, output_format, partial_images, moderation, user } = request.params;
-    if (n !== void 0) body.n = n;
-    if (size !== void 0) body.size = size;
-    if (quality !== void 0) body.quality = quality;
-    if (background !== void 0) body.background = background;
-    if (output_format !== void 0) body.output_format = output_format;
-    if (partial_images !== void 0) body.partial_images = partial_images;
-    if (moderation !== void 0) body.moderation = moderation;
-    if (user !== void 0) body.user = user;
-  }
   const headers = {
     "Content-Type": "application/json",
     Authorization: `Bearer ${apiKey}`,
@@ -1749,49 +1722,22 @@ var tools = {
 };
 
 // src/providers/openai/index.ts
-
-
-
-
-
-
-
-
-
-
-
-  const modalities = {
-    get llm() {
-      return currentApiMode === "completions" ? completionsHandler : responsesHandler;
-    },
-    embedding: embeddingHandler,
-    image: imageHandler
-  };
-  Object.defineProperties(fn, {
-    name: {
-      value: "openai",
-      writable: false,
-      configurable: true
-    },
-    version: {
-      value: "1.0.0",
-      writable: false,
-      configurable: true
+var openai = createProvider({
+  name: "openai",
+  version: "1.0.0",
+  handlers: {
+    llm: {
+      handlers: {
+        responses: createResponsesLLMHandler(),
+        completions: createCompletionsLLMHandler()
+      },
+      defaultMode: "responses",
+      getMode: (options) => options?.api ?? "responses"
     },
-
-
-
-
-    }
-  });
-  const provider = fn;
-  responsesHandler._setProvider?.(provider);
-  completionsHandler._setProvider?.(provider);
-  embeddingHandler._setProvider?.(provider);
-  imageHandler._setProvider?.(provider);
-  return provider;
-}
-var openai = createOpenAIProvider();
+    embedding: createEmbeddingHandler(),
+    image: createImageHandler()
+  }
+});
 export {
   codeInterpreterTool,
   computerTool,