@providerprotocol/ai 0.0.20 → 0.0.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/dist/anthropic/index.d.ts +184 -14
  2. package/dist/anthropic/index.js +306 -107
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-P5IRTEM5.js → chunk-7WYBJPJJ.js} +2 -2
  5. package/dist/chunk-I2VHCGQE.js +49 -0
  6. package/dist/chunk-I2VHCGQE.js.map +1 -0
  7. package/dist/{chunk-UMKWXGO3.js → chunk-M4BMM5IB.js} +86 -2
  8. package/dist/chunk-M4BMM5IB.js.map +1 -0
  9. package/dist/{chunk-SKY2JLA7.js → chunk-MKDLXV4O.js} +1 -1
  10. package/dist/chunk-MKDLXV4O.js.map +1 -0
  11. package/dist/{chunk-Z7RBRCRN.js → chunk-NWS5IKNR.js} +37 -11
  12. package/dist/chunk-NWS5IKNR.js.map +1 -0
  13. package/dist/{chunk-U3FZWV4U.js → chunk-RFWLEFAB.js} +100 -43
  14. package/dist/chunk-RFWLEFAB.js.map +1 -0
  15. package/dist/{chunk-U4JJC2YX.js → chunk-RS7C25LS.js} +36 -11
  16. package/dist/chunk-RS7C25LS.js.map +1 -0
  17. package/dist/google/index.d.ts +35 -24
  18. package/dist/google/index.js +273 -99
  19. package/dist/google/index.js.map +1 -1
  20. package/dist/http/index.d.ts +3 -3
  21. package/dist/http/index.js +4 -4
  22. package/dist/index.d.ts +103 -38
  23. package/dist/index.js +346 -153
  24. package/dist/index.js.map +1 -1
  25. package/dist/ollama/index.d.ts +14 -16
  26. package/dist/ollama/index.js +68 -16
  27. package/dist/ollama/index.js.map +1 -1
  28. package/dist/openai/index.d.ts +25 -133
  29. package/dist/openai/index.js +208 -122
  30. package/dist/openai/index.js.map +1 -1
  31. package/dist/openrouter/index.d.ts +28 -53
  32. package/dist/openrouter/index.js +179 -72
  33. package/dist/openrouter/index.js.map +1 -1
  34. package/dist/provider-DWEAzeM5.d.ts +1329 -0
  35. package/dist/proxy/index.d.ts +2 -3
  36. package/dist/proxy/index.js +174 -17
  37. package/dist/proxy/index.js.map +1 -1
  38. package/dist/{retry-DR7YRJDz.d.ts → retry-DmPmqZL6.d.ts} +12 -3
  39. package/dist/{stream-DRHy6q1a.d.ts → stream-DbkLOIbJ.d.ts} +15 -5
  40. package/dist/xai/index.d.ts +16 -88
  41. package/dist/xai/index.js +167 -86
  42. package/dist/xai/index.js.map +1 -1
  43. package/package.json +4 -1
  44. package/dist/chunk-MSR5P65T.js +0 -39
  45. package/dist/chunk-MSR5P65T.js.map +0 -1
  46. package/dist/chunk-SKY2JLA7.js.map +0 -1
  47. package/dist/chunk-U3FZWV4U.js.map +0 -1
  48. package/dist/chunk-U4JJC2YX.js.map +0 -1
  49. package/dist/chunk-UMKWXGO3.js.map +0 -1
  50. package/dist/chunk-Z7RBRCRN.js.map +0 -1
  51. package/dist/content-DEl3z_W2.d.ts +0 -276
  52. package/dist/image-Dhq-Yuq4.d.ts +0 -456
  53. package/dist/provider-BBMBZuGn.d.ts +0 -570
  54. /package/dist/{chunk-P5IRTEM5.js.map → chunk-7WYBJPJJ.js.map} +0 -0
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/http/retry.ts"],"sourcesContent":["/**\n * Retry strategies for handling transient failures in HTTP requests.\n * @module http/retry\n */\n\nimport type { RetryStrategy } from '../types/provider.ts';\nimport type { UPPError } from '../types/errors.ts';\n\n/**\n * Implements exponential backoff with optional jitter for retry delays.\n *\n * The delay between retries doubles with each attempt, helping to:\n * - Avoid overwhelming servers during outages\n * - Reduce thundering herd effects when many clients retry simultaneously\n * - Give transient issues time to resolve\n *\n * Delay formula: min(baseDelay * 2^(attempt-1), maxDelay)\n * With jitter: delay * random(0.5, 1.0)\n *\n * Only retries on transient errors: RATE_LIMITED, NETWORK_ERROR, TIMEOUT, PROVIDER_ERROR\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Default configuration (3 retries, 1s base, 30s max, jitter enabled)\n * const retry = new ExponentialBackoff();\n *\n * // Custom configuration\n * const customRetry = new ExponentialBackoff({\n * maxAttempts: 5, // Up to 5 retry attempts\n * baseDelay: 500, // Start with 500ms delay\n * maxDelay: 60000, // Cap at 60 seconds\n * jitter: false // Disable random jitter\n * });\n *\n * // Use with provider\n * const provider = createOpenAI({\n * retryStrategy: customRetry\n * });\n * ```\n */\nexport class ExponentialBackoff implements RetryStrategy {\n private maxAttempts: number;\n private baseDelay: number;\n private maxDelay: number;\n private jitter: boolean;\n\n /**\n * Creates a new ExponentialBackoff instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.baseDelay - Initial delay in milliseconds (default: 1000)\n * @param options.maxDelay - Maximum delay cap in milliseconds (default: 30000)\n * @param options.jitter - Whether to add random jitter to delays (default: true)\n */\n 
constructor(options: {\n maxAttempts?: number;\n baseDelay?: number;\n maxDelay?: number;\n jitter?: boolean;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.baseDelay = options.baseDelay ?? 1000;\n this.maxDelay = options.maxDelay ?? 30000;\n this.jitter = options.jitter ?? true;\n }\n\n /**\n * Determines whether to retry and calculates the delay.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds before next retry, or null to stop retrying\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (!this.isRetryable(error)) {\n return null;\n }\n\n let delay = this.baseDelay * Math.pow(2, attempt - 1);\n delay = Math.min(delay, this.maxDelay);\n\n if (this.jitter) {\n delay = delay * (0.5 + Math.random());\n }\n\n return Math.floor(delay);\n }\n\n /**\n * Checks if an error is eligible for retry.\n *\n * @param error - The error to evaluate\n * @returns True if the error is transient and retryable\n */\n private isRetryable(error: UPPError): boolean {\n return (\n error.code === 'RATE_LIMITED' ||\n error.code === 'NETWORK_ERROR' ||\n error.code === 'TIMEOUT' ||\n error.code === 'PROVIDER_ERROR'\n );\n }\n}\n\n/**\n * Implements linear backoff where delays increase proportionally with each attempt.\n *\n * Unlike exponential backoff, linear backoff increases delays at a constant rate:\n * - Attempt 1: delay * 1 (e.g., 1000ms)\n * - Attempt 2: delay * 2 (e.g., 2000ms)\n * - Attempt 3: delay * 3 (e.g., 3000ms)\n *\n * This strategy is simpler and more predictable than exponential backoff,\n * suitable for scenarios where gradual delay increase is preferred over\n * aggressive backoff.\n *\n * Only retries on transient errors: RATE_LIMITED, NETWORK_ERROR, TIMEOUT, PROVIDER_ERROR\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Default configuration (3 
retries, 1s delay increment)\n * const retry = new LinearBackoff();\n *\n * // Custom configuration\n * const customRetry = new LinearBackoff({\n * maxAttempts: 4, // Up to 4 retry attempts\n * delay: 2000 // 2s, 4s, 6s, 8s delays\n * });\n *\n * // Use with provider\n * const provider = createAnthropic({\n * retryStrategy: customRetry\n * });\n * ```\n */\nexport class LinearBackoff implements RetryStrategy {\n private maxAttempts: number;\n private delay: number;\n\n /**\n * Creates a new LinearBackoff instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.delay - Base delay multiplier in milliseconds (default: 1000)\n */\n constructor(options: {\n maxAttempts?: number;\n delay?: number;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.delay = options.delay ?? 1000;\n }\n\n /**\n * Determines whether to retry and calculates the linear delay.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds (delay * attempt), or null to stop retrying\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (!this.isRetryable(error)) {\n return null;\n }\n\n return this.delay * attempt;\n }\n\n /**\n * Checks if an error is eligible for retry.\n *\n * @param error - The error to evaluate\n * @returns True if the error is transient and retryable\n */\n private isRetryable(error: UPPError): boolean {\n return (\n error.code === 'RATE_LIMITED' ||\n error.code === 'NETWORK_ERROR' ||\n error.code === 'TIMEOUT' ||\n error.code === 'PROVIDER_ERROR'\n );\n }\n}\n\n/**\n * Disables all retry behavior, failing immediately on any error.\n *\n * Use this strategy when:\n * - Retries are handled at a higher level in your application\n * - You want immediate failure feedback\n * - The operation is not 
idempotent\n * - Time sensitivity requires fast failure\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Disable retries for time-sensitive operations\n * const provider = createOpenAI({\n * retryStrategy: new NoRetry()\n * });\n * ```\n */\nexport class NoRetry implements RetryStrategy {\n /**\n * Always returns null to indicate no retry should be attempted.\n *\n * @returns Always returns null\n */\n onRetry(_error: UPPError, _attempt: number): null {\n return null;\n }\n}\n\n/**\n * Implements token bucket rate limiting with automatic refill.\n *\n * The token bucket algorithm provides smooth rate limiting by:\n * - Maintaining a bucket of tokens that replenish over time\n * - Consuming one token per request\n * - Delaying requests when the bucket is empty\n * - Allowing burst traffic up to the bucket capacity\n *\n * This is particularly useful for:\n * - Client-side rate limiting to avoid hitting API rate limits\n * - Smoothing request patterns to maintain consistent throughput\n * - Preventing accidental API abuse\n *\n * Unlike other retry strategies, TokenBucket implements {@link beforeRequest}\n * to proactively delay requests before they are made.\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Allow 10 requests burst, refill 1 token per second\n * const bucket = new TokenBucket({\n * maxTokens: 10, // Burst capacity\n * refillRate: 1, // Tokens per second\n * maxAttempts: 3 // Retry attempts on rate limit\n * });\n *\n * // Aggressive rate limiting: 5 req/s sustained\n * const strictBucket = new TokenBucket({\n * maxTokens: 5,\n * refillRate: 5\n * });\n *\n * // Use with provider\n * const provider = createOpenAI({\n * retryStrategy: bucket\n * });\n * ```\n */\nexport class TokenBucket implements RetryStrategy {\n private tokens: number;\n private maxTokens: number;\n private refillRate: number;\n private lastRefill: number;\n private maxAttempts: number;\n private lock: Promise<void>;\n\n 
/**\n * Creates a new TokenBucket instance.\n *\n * @param options - Configuration options\n * @param options.maxTokens - Maximum bucket capacity (default: 10)\n * @param options.refillRate - Tokens added per second (default: 1)\n * @param options.maxAttempts - Maximum retry attempts on rate limit (default: 3)\n */\n constructor(options: {\n maxTokens?: number;\n refillRate?: number;\n maxAttempts?: number;\n } = {}) {\n this.maxTokens = options.maxTokens ?? 10;\n this.refillRate = options.refillRate ?? 1;\n this.maxAttempts = options.maxAttempts ?? 3;\n this.tokens = this.maxTokens;\n this.lastRefill = Date.now();\n this.lock = Promise.resolve();\n }\n\n /**\n * Called before each request to consume a token or calculate wait time.\n *\n * Refills the bucket based on elapsed time, then either:\n * - Returns 0 if a token is available (consumed immediately)\n * - Returns the wait time in milliseconds until the next token\n *\n * This method may allow tokens to go negative to reserve future capacity\n * and avoid concurrent callers oversubscribing the same refill.\n *\n * @returns Delay in milliseconds before the request can proceed\n */\n beforeRequest(): Promise<number> {\n return this.withLock(() => {\n this.refill();\n\n if (this.tokens >= 1) {\n this.tokens -= 1;\n return 0;\n }\n\n const deficit = 1 - this.tokens;\n const msPerToken = 1000 / this.refillRate;\n this.tokens -= 1;\n return Math.ceil(deficit * msPerToken);\n });\n }\n\n /**\n * Handles retry logic for rate-limited requests.\n *\n * Only retries on RATE_LIMITED errors, waiting for bucket refill.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds (time for 2 tokens), or null to stop\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (error.code !== 'RATE_LIMITED') {\n return null;\n }\n\n const msPerToken = 1000 / 
this.refillRate;\n return Math.ceil(msPerToken * 2);\n }\n\n /**\n * Resets the bucket to full capacity.\n *\n * Called automatically on successful requests to restore available tokens.\n */\n reset(): void {\n void this.withLock(() => {\n this.tokens = this.maxTokens;\n this.lastRefill = Date.now();\n });\n }\n\n /**\n * Refills the bucket based on elapsed time since last refill.\n */\n private refill(): void {\n const now = Date.now();\n const elapsed = (now - this.lastRefill) / 1000;\n const newTokens = elapsed * this.refillRate;\n\n this.tokens = Math.min(this.maxTokens, this.tokens + newTokens);\n this.lastRefill = now;\n }\n\n private async withLock<T>(fn: () => T | Promise<T>): Promise<T> {\n const next = this.lock.then(fn, fn);\n this.lock = next.then(() => undefined, () => undefined);\n return next;\n }\n}\n\n/**\n * Respects server-provided Retry-After headers for optimal retry timing.\n *\n * When servers return a 429 (Too Many Requests) response, they often include\n * a Retry-After header indicating when the client should retry. 
This strategy\n * uses that information for precise retry timing.\n *\n * Benefits over fixed backoff strategies:\n * - Follows server recommendations for optimal retry timing\n * - Avoids retrying too early and wasting requests\n * - Adapts to dynamic rate limit windows\n *\n * If no Retry-After header is provided, falls back to a configurable delay.\n * Only retries on RATE_LIMITED errors.\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Use server-recommended retry timing\n * const retryAfter = new RetryAfterStrategy({\n * maxAttempts: 5, // Retry up to 5 times\n * fallbackDelay: 10000 // 10s fallback if no header\n * });\n *\n * // The doFetch function automatically calls setRetryAfter\n * // when a Retry-After header is present in the response\n *\n * const provider = createOpenAI({\n * retryStrategy: retryAfter\n * });\n * ```\n */\nexport class RetryAfterStrategy implements RetryStrategy {\n private maxAttempts: number;\n private fallbackDelay: number;\n private lastRetryAfter?: number;\n\n /**\n * Creates a new RetryAfterStrategy instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.fallbackDelay - Delay in ms when no Retry-After header (default: 5000)\n */\n constructor(options: {\n maxAttempts?: number;\n fallbackDelay?: number;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.fallbackDelay = options.fallbackDelay ?? 
5000;\n }\n\n /**\n * Creates a request-scoped copy of this strategy.\n */\n fork(): RetryAfterStrategy {\n return new RetryAfterStrategy({\n maxAttempts: this.maxAttempts,\n fallbackDelay: this.fallbackDelay,\n });\n }\n\n /**\n * Sets the retry delay from a Retry-After header value.\n *\n * Called by doFetch when a Retry-After header is present in the response.\n * The value is used for the next onRetry call and then cleared.\n *\n * @param seconds - The Retry-After value in seconds\n */\n setRetryAfter(seconds: number): void {\n this.lastRetryAfter = seconds * 1000;\n }\n\n /**\n * Determines retry delay using Retry-After header or fallback.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay from Retry-After header or fallback, null to stop\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (error.code !== 'RATE_LIMITED') {\n return null;\n }\n\n const delay = this.lastRetryAfter ?? 
this.fallbackDelay;\n this.lastRetryAfter = undefined;\n return delay;\n }\n}\n"],"mappings":";AA0CO,IAAM,qBAAN,MAAkD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWR,YAAY,UAKR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,WAAW,QAAQ,YAAY;AACpC,SAAK,SAAS,QAAQ,UAAU;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,YAAY,KAAK,GAAG;AAC5B,aAAO;AAAA,IACT;AAEA,QAAI,QAAQ,KAAK,YAAY,KAAK,IAAI,GAAG,UAAU,CAAC;AACpD,YAAQ,KAAK,IAAI,OAAO,KAAK,QAAQ;AAErC,QAAI,KAAK,QAAQ;AACf,cAAQ,SAAS,MAAM,KAAK,OAAO;AAAA,IACrC;AAEA,WAAO,KAAK,MAAM,KAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,OAA0B;AAC5C,WACE,MAAM,SAAS,kBACf,MAAM,SAAS,mBACf,MAAM,SAAS,aACf,MAAM,SAAS;AAAA,EAEnB;AACF;AAmCO,IAAM,gBAAN,MAA6C;AAAA,EAC1C;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASR,YAAY,UAGR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,QAAQ,QAAQ,SAAS;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,YAAY,KAAK,GAAG;AAC5B,aAAO;AAAA,IACT;AAEA,WAAO,KAAK,QAAQ;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,OAA0B;AAC5C,WACE,MAAM,SAAS,kBACf,MAAM,SAAS,mBACf,MAAM,SAAS,aACf,MAAM,SAAS;AAAA,EAEnB;AACF;AAqBO,IAAM,UAAN,MAAuC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM5C,QAAQ,QAAkB,UAAwB;AAChD,WAAO;AAAA,EACT;AACF;AA0CO,IAAM,cAAN,MAA2C;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUR,YAAY,UAIR,CAAC,GAAG;AACN,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,SAAS,KAAK;AACnB,SAAK,aAAa,KAAK,IAAI;AAC3B,SAAK,OAAO,QAAQ,QAAQ;AAAA,EAC9B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,gBAAiC;AAC/B,WAAO,KAAK,SAAS,MAAM;AACzB,WAAK,OAAO;AAEZ,UAAI,KAAK,UAAU,GAAG;AACpB,aAAK,UAAU;AACf,eAAO;AAAA,MACT;AAEA,YAAM,UAAU,IAAI,KAAK;AACzB,YAAM,aAAa,MAAO,KAAK;AAC/B,WAAK,UAAU;AACf
,aAAO,KAAK,KAAK,UAAU,UAAU;AAAA,IACvC,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO;AAAA,IACT;AAEA,UAAM,aAAa,MAAO,KAAK;AAC/B,WAAO,KAAK,KAAK,aAAa,CAAC;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QAAc;AACZ,SAAK,KAAK,SAAS,MAAM;AACvB,WAAK,SAAS,KAAK;AACnB,WAAK,aAAa,KAAK,IAAI;AAAA,IAC7B,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKQ,SAAe;AACrB,UAAM,MAAM,KAAK,IAAI;AACrB,UAAM,WAAW,MAAM,KAAK,cAAc;AAC1C,UAAM,YAAY,UAAU,KAAK;AAEjC,SAAK,SAAS,KAAK,IAAI,KAAK,WAAW,KAAK,SAAS,SAAS;AAC9D,SAAK,aAAa;AAAA,EACpB;AAAA,EAEA,MAAc,SAAY,IAAsC;AAC9D,UAAM,OAAO,KAAK,KAAK,KAAK,IAAI,EAAE;AAClC,SAAK,OAAO,KAAK,KAAK,MAAM,QAAW,MAAM,MAAS;AACtD,WAAO;AAAA,EACT;AACF;AAmCO,IAAM,qBAAN,MAAM,oBAA4C;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASR,YAAY,UAGR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,gBAAgB,QAAQ,iBAAiB;AAAA,EAChD;AAAA;AAAA;AAAA;AAAA,EAKA,OAA2B;AACzB,WAAO,IAAI,oBAAmB;AAAA,MAC5B,aAAa,KAAK;AAAA,MAClB,eAAe,KAAK;AAAA,IACtB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,cAAc,SAAuB;AACnC,SAAK,iBAAiB,UAAU;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO;AAAA,IACT;AAEA,UAAM,QAAQ,KAAK,kBAAkB,KAAK;AAC1C,SAAK,iBAAiB;AACtB,WAAO;AAAA,EACT;AACF;","names":[]}
@@ -1,4 +1,4 @@
1
- import { d as Provider } from '../provider-BBMBZuGn.js';
1
+ import { a as ProviderConfig, g as Provider } from '../provider-DWEAzeM5.js';
2
2
 
3
3
  /**
4
4
  * Provider-specific parameters for Google Gemini API requests.
@@ -98,7 +98,7 @@ interface GoogleLLMParams {
98
98
  * const model = llm({
99
99
  * model: google('gemini-2.5-flash'),
100
100
  * params: {
101
- * builtInTools: [
101
+ * tools: [
102
102
  * tools.googleSearch(),
103
103
  * tools.codeExecution(),
104
104
  * ],
@@ -106,14 +106,14 @@ interface GoogleLLMParams {
106
106
  * });
107
107
  * ```
108
108
  */
109
- builtInTools?: GoogleBuiltInTool[];
109
+ tools?: GoogleBuiltInTool[];
110
110
  /**
111
111
  * Tool configuration for retrieval (e.g., user location for Maps).
112
112
  *
113
113
  * @example
114
114
  * ```typescript
115
115
  * const params: GoogleLLMParams = {
116
- * builtInTools: [tools.googleMaps()],
116
+ * tools: [tools.googleMaps()],
117
117
  * toolConfig: {
118
118
  * retrievalConfig: {
119
119
  * latLng: { latitude: 40.758896, longitude: -73.985130 },
@@ -633,7 +633,7 @@ declare function fileSearchTool(options: {
633
633
  * const model = llm({
634
634
  * model: google('gemini-2.5-flash'),
635
635
  * params: {
636
- * builtInTools: [
636
+ * tools: [
637
637
  * tools.googleSearch(),
638
638
  * tools.codeExecution(),
639
639
  * ],
@@ -671,6 +671,10 @@ declare const tools: {
671
671
  interface CacheCreateOptions {
672
672
  /** API key for authentication */
673
673
  apiKey: string;
674
+ /** Provider configuration (timeout, retry strategy, custom fetch) */
675
+ config?: ProviderConfig;
676
+ /** Abort signal for cancellation */
677
+ signal?: AbortSignal;
674
678
  /** Model to associate with this cache (e.g., "gemini-3-flash-preview") */
675
679
  model: string;
676
680
  /** Optional display name for the cache (max 128 chars) */
@@ -692,6 +696,10 @@ interface CacheCreateOptions {
692
696
  interface CacheListOptions {
693
697
  /** API key for authentication */
694
698
  apiKey: string;
699
+ /** Provider configuration (timeout, retry strategy, custom fetch) */
700
+ config?: ProviderConfig;
701
+ /** Abort signal for cancellation */
702
+ signal?: AbortSignal;
695
703
  /** Maximum number of caches to return per page */
696
704
  pageSize?: number;
697
705
  /** Token for fetching the next page of results */
@@ -736,6 +744,8 @@ declare function create(options: CacheCreateOptions): Promise<GoogleCacheRespons
736
744
  *
737
745
  * @param name - The cache name (format: "cachedContents/{id}")
738
746
  * @param apiKey - API key for authentication
747
+ * @param config - Provider configuration (timeout, retry strategy, custom fetch)
748
+ * @param signal - Abort signal for cancellation
739
749
  * @returns The cache entry details
740
750
  *
741
751
  * @example
@@ -744,7 +754,7 @@ declare function create(options: CacheCreateOptions): Promise<GoogleCacheRespons
744
754
  * console.log(`Cache expires at: ${cache.expireTime}`);
745
755
  * ```
746
756
  */
747
- declare function get(name: string, apiKey: string): Promise<GoogleCacheResponse>;
757
+ declare function get(name: string, apiKey: string, config?: ProviderConfig, signal?: AbortSignal): Promise<GoogleCacheResponse>;
748
758
  /**
749
759
  * Lists all cached content entries.
750
760
  *
@@ -771,8 +781,10 @@ declare function list(options: CacheListOptions): Promise<GoogleCacheListRespons
771
781
  * (contents, systemInstruction, tools) are immutable after creation.
772
782
  *
773
783
  * @param name - The cache name (format: "cachedContents/{id}")
774
- * @param update - The update to apply (ttl or expireTime)
784
+ * @param update - The update to apply (exactly one of ttl or expireTime)
775
785
  * @param apiKey - API key for authentication
786
+ * @param config - Provider configuration (timeout, retry strategy, custom fetch)
787
+ * @param signal - Abort signal for cancellation
776
788
  * @returns The updated cache entry
777
789
  *
778
790
  * @example
@@ -785,19 +797,21 @@ declare function list(options: CacheListOptions): Promise<GoogleCacheListRespons
785
797
  * );
786
798
  * ```
787
799
  */
788
- declare function update(name: string, updateRequest: GoogleCacheUpdateRequest, apiKey: string): Promise<GoogleCacheResponse>;
800
+ declare function update(name: string, updateRequest: GoogleCacheUpdateRequest, apiKey: string, config?: ProviderConfig, signal?: AbortSignal): Promise<GoogleCacheResponse>;
789
801
  /**
790
802
  * Deletes a cached content entry.
791
803
  *
792
804
  * @param name - The cache name (format: "cachedContents/{id}")
793
805
  * @param apiKey - API key for authentication
806
+ * @param config - Provider configuration (timeout, retry strategy, custom fetch)
807
+ * @param signal - Abort signal for cancellation
794
808
  *
795
809
  * @example
796
810
  * ```typescript
797
811
  * await google.cache.delete('cachedContents/abc123', apiKey);
798
812
  * ```
799
813
  */
800
- declare function deleteCache(name: string, apiKey: string): Promise<void>;
814
+ declare function deleteCache(name: string, apiKey: string, config?: ProviderConfig, signal?: AbortSignal): Promise<void>;
801
815
  /**
802
816
  * Cache utilities namespace.
803
817
  *
@@ -878,25 +892,20 @@ interface GoogleEmbedParams {
878
892
  * @example
879
893
  * ```typescript
880
894
  * import { google } from './providers/google';
895
+ * import { llm } from './core/llm';
881
896
  *
882
- * // Create a model instance
883
- * const gemini = google.llm.bind('gemini-1.5-pro');
884
- *
885
- * // Simple completion
886
- * const response = await gemini.complete({
887
- * messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
897
+ * const gemini = llm({
898
+ * model: google('gemini-1.5-pro'),
888
899
  * config: { apiKey: process.env.GOOGLE_API_KEY },
889
900
  * });
890
901
  *
891
- * // Streaming completion
892
- * const stream = gemini.stream({
893
- * messages: [{ role: 'user', content: [{ type: 'text', text: 'Tell me a story' }] }],
894
- * config: { apiKey: process.env.GOOGLE_API_KEY },
895
- * });
902
+ * const turn = await gemini.generate('Hello!');
903
+ * console.log(turn.response.text);
896
904
  *
905
+ * const stream = gemini.stream('Tell me a story');
897
906
  * for await (const event of stream) {
898
907
  * if (event.type === 'text_delta') {
899
- * process.stdout.write(event.delta.text);
908
+ * process.stdout.write(event.delta.text ?? '');
900
909
  * }
901
910
  * }
902
911
  * ```
@@ -913,12 +922,14 @@ interface GoogleEmbedParams {
913
922
  * });
914
923
  *
915
924
  * // Use cache in requests
916
- * const response = await gemini.complete({
917
- * messages: [userMessage('Review this function')],
925
+ * const cachedModel = llm({
926
+ * model: google('gemini-3-flash-preview'),
918
927
  * config: { apiKey: process.env.GOOGLE_API_KEY },
919
928
  * params: { cachedContent: cacheEntry.name },
920
929
  * });
921
930
  *
931
+ * const response = await cachedModel.generate('Review this function');
932
+ *
922
933
  * // Manage caches
923
934
  * await google.cache.update(cacheEntry.name, { ttl: '7200s' }, apiKey);
924
935
  * await google.cache.delete(cacheEntry.name, apiKey);
@@ -933,7 +944,7 @@ declare const google: Provider<unknown> & {
933
944
  get: typeof get;
934
945
  list: typeof list;
935
946
  update: typeof update;
936
- delete: (name: string, apiKey: string) => Promise<void>;
947
+ delete: (name: string, apiKey: string, config?: ProviderConfig, signal?: AbortSignal) => Promise<void>;
937
948
  };
938
949
  };
939
950