@apertis/ai-sdk-provider 2.0.1 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
1
- // node_modules/.pnpm/@ai-sdk+provider@3.0.4/node_modules/@ai-sdk/provider/dist/index.mjs
1
+ // node_modules/.pnpm/@ai-sdk+provider@3.0.8/node_modules/@ai-sdk/provider/dist/index.mjs
2
2
  var marker = "vercel.ai.error";
3
3
  var symbol = Symbol.for(marker);
4
4
  var _a;
@@ -268,34 +268,61 @@ var symbol13 = Symbol.for(marker13);
268
268
  var _a13;
269
269
  var _b13;
270
270
  var TypeValidationError = class _TypeValidationError extends (_b13 = AISDKError, _a13 = symbol13, _b13) {
271
- constructor({ value, cause }) {
271
+ constructor({
272
+ value,
273
+ cause,
274
+ context
275
+ }) {
276
+ let contextPrefix = "Type validation failed";
277
+ if (context == null ? void 0 : context.field) {
278
+ contextPrefix += ` for ${context.field}`;
279
+ }
280
+ if ((context == null ? void 0 : context.entityName) || (context == null ? void 0 : context.entityId)) {
281
+ contextPrefix += " (";
282
+ const parts = [];
283
+ if (context.entityName) {
284
+ parts.push(context.entityName);
285
+ }
286
+ if (context.entityId) {
287
+ parts.push(`id: "${context.entityId}"`);
288
+ }
289
+ contextPrefix += parts.join(", ");
290
+ contextPrefix += ")";
291
+ }
272
292
  super({
273
293
  name: name12,
274
- message: `Type validation failed: Value: ${JSON.stringify(value)}.
294
+ message: `${contextPrefix}: Value: ${JSON.stringify(value)}.
275
295
  Error message: ${getErrorMessage(cause)}`,
276
296
  cause
277
297
  });
278
298
  this[_a13] = true;
279
299
  this.value = value;
300
+ this.context = context;
280
301
  }
281
302
  static isInstance(error) {
282
303
  return AISDKError.hasMarker(error, marker13);
283
304
  }
284
305
  /**
285
306
  * Wraps an error into a TypeValidationError.
286
- * If the cause is already a TypeValidationError with the same value, it returns the cause.
307
+ * If the cause is already a TypeValidationError with the same value and context, it returns the cause.
287
308
  * Otherwise, it creates a new TypeValidationError.
288
309
  *
289
310
  * @param {Object} params - The parameters for wrapping the error.
290
311
  * @param {unknown} params.value - The value that failed validation.
291
312
  * @param {unknown} params.cause - The original error or cause of the validation failure.
313
+ * @param {TypeValidationContext} params.context - Optional context about what is being validated.
292
314
  * @returns {TypeValidationError} A TypeValidationError instance.
293
315
  */
294
316
  static wrap({
295
317
  value,
296
- cause
318
+ cause,
319
+ context
297
320
  }) {
298
- return _TypeValidationError.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError({ value, cause });
321
+ var _a152, _b152, _c;
322
+ if (_TypeValidationError.isInstance(cause) && cause.value === value && ((_a152 = cause.context) == null ? void 0 : _a152.field) === (context == null ? void 0 : context.field) && ((_b152 = cause.context) == null ? void 0 : _b152.entityName) === (context == null ? void 0 : context.entityName) && ((_c = cause.context) == null ? void 0 : _c.entityId) === (context == null ? void 0 : context.entityId)) {
323
+ return cause;
324
+ }
325
+ return new _TypeValidationError({ value, cause, context });
299
326
  }
300
327
  };
301
328
  var name13 = "AI_UnsupportedFunctionalityError";
@@ -317,7 +344,7 @@ var UnsupportedFunctionalityError = class extends (_b14 = AISDKError, _a14 = sym
317
344
  }
318
345
  };
319
346
 
320
- // node_modules/.pnpm/@ai-sdk+provider-utils@4.0.8_zod@3.25.76/node_modules/@ai-sdk/provider-utils/dist/index.mjs
347
+ // node_modules/.pnpm/@ai-sdk+provider-utils@4.0.23_zod@3.25.76/node_modules/@ai-sdk/provider-utils/dist/index.mjs
321
348
  import * as z4 from "zod/v4";
322
349
  import { ZodFirstPartyTypeKind as ZodFirstPartyTypeKind3 } from "zod/v3";
323
350
  import { ZodFirstPartyTypeKind } from "zod/v3";
@@ -452,11 +479,57 @@ var EventSourceParserStream = class extends TransformStream {
452
479
  }
453
480
  };
454
481
 
455
- // node_modules/.pnpm/@ai-sdk+provider-utils@4.0.8_zod@3.25.76/node_modules/@ai-sdk/provider-utils/dist/index.mjs
482
+ // node_modules/.pnpm/@ai-sdk+provider-utils@4.0.23_zod@3.25.76/node_modules/@ai-sdk/provider-utils/dist/index.mjs
483
+ function combineHeaders(...headers) {
484
+ return headers.reduce(
485
+ (combinedHeaders, currentHeaders) => ({
486
+ ...combinedHeaders,
487
+ ...currentHeaders != null ? currentHeaders : {}
488
+ }),
489
+ {}
490
+ );
491
+ }
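
The combineHeaders helper above merges header groups with a plain object spread: later arguments win on key conflicts, and null or undefined groups are skipped. A small sketch, assuming it is imported from the inlined @ai-sdk/provider-utils:

    import { combineHeaders } from "@ai-sdk/provider-utils";

    const headers = combineHeaders(
      { Authorization: "Bearer default-key", "X-Title": "base" },
      undefined,                                   // skipped
      { Authorization: "Bearer per-request-key" }  // overrides the earlier value
    );
    // => { Authorization: "Bearer per-request-key", "X-Title": "base" }
    // Note: keys are spread as-is, so header-name casing is not normalized.
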
456
492
  function extractResponseHeaders(response) {
457
493
  return Object.fromEntries([...response.headers]);
458
494
  }
459
495
  var { btoa, atob } = globalThis;
496
+ function convertBase64ToUint8Array(base64String) {
497
+ const base64Url = base64String.replace(/-/g, "+").replace(/_/g, "/");
498
+ const latin1string = atob(base64Url);
499
+ return Uint8Array.from(latin1string, (byte) => byte.codePointAt(0));
500
+ }
501
+ function convertUint8ArrayToBase64(array) {
502
+ let latin1string = "";
503
+ for (let i = 0; i < array.length; i++) {
504
+ latin1string += String.fromCodePoint(array[i]);
505
+ }
506
+ return btoa(latin1string);
507
+ }
508
+ function convertToBase64(value) {
509
+ return value instanceof Uint8Array ? convertUint8ArrayToBase64(value) : value;
510
+ }
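
The base64 helpers above round-trip binary data and normalize URL-safe input ('-' and '_' are mapped to '+' and '/' before decoding). A minimal sketch, assuming they are imported from the inlined @ai-sdk/provider-utils:

    import {
      convertBase64ToUint8Array,
      convertUint8ArrayToBase64,
    } from "@ai-sdk/provider-utils";

    const bytes = new Uint8Array([72, 101, 108, 108, 111]);  // "Hello"
    const base64 = convertUint8ArrayToBase64(bytes);         // "SGVsbG8="
    convertBase64ToUint8Array(base64);                       // Uint8Array [72, 101, 108, 108, 111]
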
511
+ function convertToFormData(input, options = {}) {
512
+ const { useArrayBrackets = true } = options;
513
+ const formData = new FormData();
514
+ for (const [key, value] of Object.entries(input)) {
515
+ if (value == null) {
516
+ continue;
517
+ }
518
+ if (Array.isArray(value)) {
519
+ if (value.length === 1) {
520
+ formData.append(key, value[0]);
521
+ continue;
522
+ }
523
+ const arrayKey = useArrayBrackets ? `${key}[]` : key;
524
+ for (const item of value) {
525
+ formData.append(arrayKey, item);
526
+ }
527
+ continue;
528
+ }
529
+ formData.append(key, value);
530
+ }
531
+ return formData;
532
+ }
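
convertToFormData above skips null/undefined values, appends single-element arrays under the plain key, and appends longer arrays under a bracketed key unless useArrayBrackets is false. Written out by hand with illustrative field names, a call like convertToFormData({ model: "example-model", granularities: ["word", "segment"], prompt: null }) builds the same entries as:

    const formData = new FormData();
    formData.append("model", "example-model");
    formData.append("granularities[]", "word");
    formData.append("granularities[]", "segment");
    // "prompt" is omitted entirely because its value is null.
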
460
533
  var name14 = "AI_DownloadError";
461
534
  var marker15 = `vercel.ai.error.${name14}`;
462
535
  var symbol15 = Symbol.for(marker15);
@@ -480,6 +553,187 @@ var DownloadError = class extends (_b15 = AISDKError, _a15 = symbol15, _b15) {
480
553
  return AISDKError.hasMarker(error, marker15);
481
554
  }
482
555
  };
556
+ var DEFAULT_MAX_DOWNLOAD_SIZE = 2 * 1024 * 1024 * 1024;
557
+ async function readResponseWithSizeLimit({
558
+ response,
559
+ url,
560
+ maxBytes = DEFAULT_MAX_DOWNLOAD_SIZE
561
+ }) {
562
+ const contentLength = response.headers.get("content-length");
563
+ if (contentLength != null) {
564
+ const length = parseInt(contentLength, 10);
565
+ if (!isNaN(length) && length > maxBytes) {
566
+ throw new DownloadError({
567
+ url,
568
+ message: `Download of ${url} exceeded maximum size of ${maxBytes} bytes (Content-Length: ${length}).`
569
+ });
570
+ }
571
+ }
572
+ const body = response.body;
573
+ if (body == null) {
574
+ return new Uint8Array(0);
575
+ }
576
+ const reader = body.getReader();
577
+ const chunks = [];
578
+ let totalBytes = 0;
579
+ try {
580
+ while (true) {
581
+ const { done, value } = await reader.read();
582
+ if (done) {
583
+ break;
584
+ }
585
+ totalBytes += value.length;
586
+ if (totalBytes > maxBytes) {
587
+ throw new DownloadError({
588
+ url,
589
+ message: `Download of ${url} exceeded maximum size of ${maxBytes} bytes.`
590
+ });
591
+ }
592
+ chunks.push(value);
593
+ }
594
+ } finally {
595
+ try {
596
+ await reader.cancel();
597
+ } finally {
598
+ reader.releaseLock();
599
+ }
600
+ }
601
+ const result = new Uint8Array(totalBytes);
602
+ let offset = 0;
603
+ for (const chunk of chunks) {
604
+ result.set(chunk, offset);
605
+ offset += chunk.length;
606
+ }
607
+ return result;
608
+ }
609
+ function validateDownloadUrl(url) {
610
+ let parsed;
611
+ try {
612
+ parsed = new URL(url);
613
+ } catch (e) {
614
+ throw new DownloadError({
615
+ url,
616
+ message: `Invalid URL: ${url}`
617
+ });
618
+ }
619
+ if (parsed.protocol === "data:") {
620
+ return;
621
+ }
622
+ if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
623
+ throw new DownloadError({
624
+ url,
625
+ message: `URL scheme must be http, https, or data, got ${parsed.protocol}`
626
+ });
627
+ }
628
+ const hostname = parsed.hostname;
629
+ if (!hostname) {
630
+ throw new DownloadError({
631
+ url,
632
+ message: `URL must have a hostname`
633
+ });
634
+ }
635
+ if (hostname === "localhost" || hostname.endsWith(".local") || hostname.endsWith(".localhost")) {
636
+ throw new DownloadError({
637
+ url,
638
+ message: `URL with hostname ${hostname} is not allowed`
639
+ });
640
+ }
641
+ if (hostname.startsWith("[") && hostname.endsWith("]")) {
642
+ const ipv6 = hostname.slice(1, -1);
643
+ if (isPrivateIPv6(ipv6)) {
644
+ throw new DownloadError({
645
+ url,
646
+ message: `URL with IPv6 address ${hostname} is not allowed`
647
+ });
648
+ }
649
+ return;
650
+ }
651
+ if (isIPv4(hostname)) {
652
+ if (isPrivateIPv4(hostname)) {
653
+ throw new DownloadError({
654
+ url,
655
+ message: `URL with IP address ${hostname} is not allowed`
656
+ });
657
+ }
658
+ return;
659
+ }
660
+ }
661
+ function isIPv4(hostname) {
662
+ const parts = hostname.split(".");
663
+ if (parts.length !== 4) return false;
664
+ return parts.every((part) => {
665
+ const num = Number(part);
666
+ return Number.isInteger(num) && num >= 0 && num <= 255 && String(num) === part;
667
+ });
668
+ }
669
+ function isPrivateIPv4(ip) {
670
+ const parts = ip.split(".").map(Number);
671
+ const [a, b] = parts;
672
+ if (a === 0) return true;
673
+ if (a === 10) return true;
674
+ if (a === 127) return true;
675
+ if (a === 169 && b === 254) return true;
676
+ if (a === 172 && b >= 16 && b <= 31) return true;
677
+ if (a === 192 && b === 168) return true;
678
+ return false;
679
+ }
680
+ function isPrivateIPv6(ip) {
681
+ const normalized = ip.toLowerCase();
682
+ if (normalized === "::1") return true;
683
+ if (normalized === "::") return true;
684
+ if (normalized.startsWith("::ffff:")) {
685
+ const mappedPart = normalized.slice(7);
686
+ if (isIPv4(mappedPart)) {
687
+ return isPrivateIPv4(mappedPart);
688
+ }
689
+ const hexParts = mappedPart.split(":");
690
+ if (hexParts.length === 2) {
691
+ const high = parseInt(hexParts[0], 16);
692
+ const low = parseInt(hexParts[1], 16);
693
+ if (!isNaN(high) && !isNaN(low)) {
694
+ const a = high >> 8 & 255;
695
+ const b = high & 255;
696
+ const c = low >> 8 & 255;
697
+ const d = low & 255;
698
+ return isPrivateIPv4(`${a}.${b}.${c}.${d}`);
699
+ }
700
+ }
701
+ }
702
+ if (normalized.startsWith("fc") || normalized.startsWith("fd")) return true;
703
+ if (normalized.startsWith("fe80")) return true;
704
+ return false;
705
+ }
706
+ async function downloadBlob(url, options) {
707
+ var _a22, _b22;
708
+ validateDownloadUrl(url);
709
+ try {
710
+ const response = await fetch(url, {
711
+ signal: options == null ? void 0 : options.abortSignal
712
+ });
713
+ if (response.redirected) {
714
+ validateDownloadUrl(response.url);
715
+ }
716
+ if (!response.ok) {
717
+ throw new DownloadError({
718
+ url,
719
+ statusCode: response.status,
720
+ statusText: response.statusText
721
+ });
722
+ }
723
+ const data = await readResponseWithSizeLimit({
724
+ response,
725
+ url,
726
+ maxBytes: (_a22 = options == null ? void 0 : options.maxBytes) != null ? _a22 : DEFAULT_MAX_DOWNLOAD_SIZE
727
+ });
728
+ const contentType = (_b22 = response.headers.get("content-type")) != null ? _b22 : void 0;
729
+ return new Blob([data], contentType ? { type: contentType } : void 0);
730
+ } catch (error) {
731
+ if (DownloadError.isInstance(error)) {
732
+ throw error;
733
+ }
734
+ throw new DownloadError({ url, cause: error });
735
+ }
736
+ }
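
Taken together, the download helpers above screen URLs before any fetch (only http, https, and data schemes; localhost, *.local, *.localhost, and private or link-local IPv4/IPv6 hosts are rejected), re-validate the final URL after redirects, and cap the body at 2 GiB (2 * 1024 * 1024 * 1024 bytes) unless a different maxBytes is passed. A quick reference of the screening outcomes, with illustrative URLs:

    const rejected = [
      "ftp://example.com/file.bin",        // scheme must be http, https, or data
      "http://localhost:3000/avatar.png",  // localhost is blocked
      "http://10.0.0.5/internal.png",      // private IPv4 range (10/8)
      "http://169.254.169.254/latest",     // link-local range used by cloud metadata endpoints
      "http://[::1]/avatar.png",           // IPv6 loopback
    ];
    const allowed = [
      "https://example.com/image.png",                  // fetched, subject to the size cap
      "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==", // data: URLs skip the host checks
    ];
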
483
737
  var createIdGenerator = ({
484
738
  prefix,
485
739
  size = 16,
@@ -511,6 +765,25 @@ function isAbortError(error) {
511
765
  error.name === "TimeoutError");
512
766
  }
513
767
  var FETCH_FAILED_ERROR_MESSAGES = ["fetch failed", "failed to fetch"];
768
+ var BUN_ERROR_CODES = [
769
+ "ConnectionRefused",
770
+ "ConnectionClosed",
771
+ "FailedToOpenSocket",
772
+ "ECONNRESET",
773
+ "ECONNREFUSED",
774
+ "ETIMEDOUT",
775
+ "EPIPE"
776
+ ];
777
+ function isBunNetworkError(error) {
778
+ if (!(error instanceof Error)) {
779
+ return false;
780
+ }
781
+ const code = error.code;
782
+ if (typeof code === "string" && BUN_ERROR_CODES.includes(code)) {
783
+ return true;
784
+ }
785
+ return false;
786
+ }
514
787
  function handleFetchError({
515
788
  error,
516
789
  url,
@@ -532,6 +805,15 @@ function handleFetchError({
532
805
  });
533
806
  }
534
807
  }
808
+ if (isBunNetworkError(error)) {
809
+ return new APICallError({
810
+ message: `Cannot connect to API: ${error.message}`,
811
+ cause: error,
812
+ url,
813
+ requestBodyValues,
814
+ isRetryable: true
815
+ });
816
+ }
535
817
  return error;
536
818
  }
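
With the isBunNetworkError branch above, connection failures reported by the Bun runtime (plain Errors carrying a string code such as "ConnectionRefused" or "ECONNREFUSED") are wrapped as retryable APICallErrors instead of being rethrown unchanged. A standalone sketch of the kind of error it matches:

    // Bun reports a refused connection roughly like this:
    const bunError = Object.assign(new Error("Unable to connect"), {
      code: "ConnectionRefused",
    });
    // isBunNetworkError(bunError) === true, so handleFetchError returns
    // new APICallError({ message: "Cannot connect to API: Unable to connect", isRetryable: true, ... })
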
537
819
  function getRuntimeEnvironmentUserAgent(globalThisAny = globalThis) {
@@ -580,41 +862,9 @@ function withUserAgentSuffix(headers, ...userAgentSuffixParts) {
580
862
  );
581
863
  return Object.fromEntries(normalizedHeaders.entries());
582
864
  }
583
- var VERSION = true ? "4.0.8" : "0.0.0-test";
584
- function loadApiKey({
585
- apiKey,
586
- environmentVariableName,
587
- apiKeyParameterName = "apiKey",
588
- description
589
- }) {
590
- if (typeof apiKey === "string") {
591
- return apiKey;
592
- }
593
- if (apiKey != null) {
594
- throw new LoadAPIKeyError({
595
- message: `${description} API key must be a string.`
596
- });
597
- }
598
- if (typeof process === "undefined") {
599
- throw new LoadAPIKeyError({
600
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
601
- });
602
- }
603
- apiKey = process.env[environmentVariableName];
604
- if (apiKey == null) {
605
- throw new LoadAPIKeyError({
606
- message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
607
- });
608
- }
609
- if (typeof apiKey !== "string") {
610
- throw new LoadAPIKeyError({
611
- message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
612
- });
613
- }
614
- return apiKey;
615
- }
616
- var suspectProtoRx = /"__proto__"\s*:/;
617
- var suspectConstructorRx = /"constructor"\s*:/;
865
+ var VERSION = true ? "4.0.23" : "0.0.0-test";
866
+ var suspectProtoRx = /"(?:_|\\u005[Ff])(?:_|\\u005[Ff])(?:p|\\u0070)(?:r|\\u0072)(?:o|\\u006[Ff])(?:t|\\u0074)(?:o|\\u006[Ff])(?:_|\\u005[Ff])(?:_|\\u005[Ff])"\s*:/;
867
+ var suspectConstructorRx = /"(?:c|\\u0063)(?:o|\\u006[Ff])(?:n|\\u006[Ee])(?:s|\\u0073)(?:t|\\u0074)(?:r|\\u0072)(?:u|\\u0075)(?:c|\\u0063)(?:t|\\u0074)(?:o|\\u006[Ff])(?:r|\\u0072)"\s*:/;
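
The rewritten suspect patterns above match each character either literally or as its \uXXXX escape, so payloads that spell "__proto__" or "constructor" with unicode escapes no longer slip past the pre-scan. A quick check against the proto pattern, assuming suspectProtoRx above is in scope:

    const escapedPayload = '{"\\u005f\\u005fproto\\u005f\\u005f": {"polluted": true}}';
    /"__proto__"\s*:/.test(escapedPayload);    // false – the previous pattern misses the escaped key
    suspectProtoRx.test(escapedPayload);       // true  – the hardened pattern catches it
    suspectProtoRx.test('{"__proto__": {}}');  // true  – the literal spelling still matches
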
618
868
  function _parse(text) {
619
869
  const obj = JSON.parse(text);
620
870
  if (obj === null || typeof obj !== "object") {
@@ -634,7 +884,7 @@ function filter(obj) {
634
884
  if (Object.prototype.hasOwnProperty.call(node, "__proto__")) {
635
885
  throw new SyntaxError("Object contains forbidden prototype property");
636
886
  }
637
- if (Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
887
+ if (Object.prototype.hasOwnProperty.call(node, "constructor") && node.constructor !== null && typeof node.constructor === "object" && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
638
888
  throw new SyntaxError("Object contains forbidden prototype property");
639
889
  }
640
890
  for (const key in node) {
@@ -1885,17 +2135,19 @@ function zodSchema(zodSchema2, options) {
1885
2135
  }
1886
2136
  async function validateTypes({
1887
2137
  value,
1888
- schema
2138
+ schema,
2139
+ context
1889
2140
  }) {
1890
- const result = await safeValidateTypes({ value, schema });
2141
+ const result = await safeValidateTypes({ value, schema, context });
1891
2142
  if (!result.success) {
1892
- throw TypeValidationError.wrap({ value, cause: result.error });
2143
+ throw TypeValidationError.wrap({ value, cause: result.error, context });
1893
2144
  }
1894
2145
  return result.value;
1895
2146
  }
1896
2147
  async function safeValidateTypes({
1897
2148
  value,
1898
- schema
2149
+ schema,
2150
+ context
1899
2151
  }) {
1900
2152
  const actualSchema = asSchema(schema);
1901
2153
  try {
@@ -1908,13 +2160,13 @@ async function safeValidateTypes({
1908
2160
  }
1909
2161
  return {
1910
2162
  success: false,
1911
- error: TypeValidationError.wrap({ value, cause: result.error }),
2163
+ error: TypeValidationError.wrap({ value, cause: result.error, context }),
1912
2164
  rawValue: value
1913
2165
  };
1914
2166
  } catch (error) {
1915
2167
  return {
1916
2168
  success: false,
1917
- error: TypeValidationError.wrap({ value, cause: error }),
2169
+ error: TypeValidationError.wrap({ value, cause: error, context }),
1918
2170
  rawValue: value
1919
2171
  };
1920
2172
  }
@@ -1954,6 +2206,14 @@ async function safeParseJSON({
1954
2206
  };
1955
2207
  }
1956
2208
  }
2209
+ function isParsableJson(input) {
2210
+ try {
2211
+ secureJsonParse(input);
2212
+ return true;
2213
+ } catch (e) {
2214
+ return false;
2215
+ }
2216
+ }
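
isParsableJson above attempts a secure parse and reports whether it succeeded; the streaming code later in this file uses it to decide when accumulated tool-call argument text has become complete JSON and the tool-call event can be emitted. Assuming it is re-exported by the inlined @ai-sdk/provider-utils:

    import { isParsableJson } from "@ai-sdk/provider-utils";

    isParsableJson('{"city": "Ber');       // false – arguments are still streaming in
    isParsableJson('{"city": "Berlin"}');  // true  – safe to emit the tool-call
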
1957
2217
  function parseJsonEventStream({
1958
2218
  stream,
1959
2219
  schema
@@ -1969,6 +2229,27 @@ function parseJsonEventStream({
1969
2229
  })
1970
2230
  );
1971
2231
  }
2232
+ async function parseProviderOptions({
2233
+ provider,
2234
+ providerOptions,
2235
+ schema
2236
+ }) {
2237
+ if ((providerOptions == null ? void 0 : providerOptions[provider]) == null) {
2238
+ return void 0;
2239
+ }
2240
+ const parsedProviderOptions = await safeValidateTypes({
2241
+ value: providerOptions[provider],
2242
+ schema
2243
+ });
2244
+ if (!parsedProviderOptions.success) {
2245
+ throw new InvalidArgumentError({
2246
+ argument: "providerOptions",
2247
+ message: `invalid ${provider} provider options`,
2248
+ cause: parsedProviderOptions.error
2249
+ });
2250
+ }
2251
+ return parsedProviderOptions.value;
2252
+ }
1972
2253
  var getOriginalFetch2 = () => globalThis.fetch;
1973
2254
  var postJsonToApi = async ({
1974
2255
  url,
@@ -1993,6 +2274,26 @@ var postJsonToApi = async ({
1993
2274
  abortSignal,
1994
2275
  fetch: fetch2
1995
2276
  });
2277
+ var postFormDataToApi = async ({
2278
+ url,
2279
+ headers,
2280
+ formData,
2281
+ failedResponseHandler,
2282
+ successfulResponseHandler,
2283
+ abortSignal,
2284
+ fetch: fetch2
2285
+ }) => postToApi({
2286
+ url,
2287
+ headers,
2288
+ body: {
2289
+ content: formData,
2290
+ values: Object.fromEntries(formData.entries())
2291
+ },
2292
+ failedResponseHandler,
2293
+ successfulResponseHandler,
2294
+ abortSignal,
2295
+ fetch: fetch2
2296
+ });
1996
2297
  var postToApi = async ({
1997
2298
  url,
1998
2299
  headers = {},
@@ -2157,853 +2458,1676 @@ function withoutTrailingSlash(url) {
2157
2458
  return url == null ? void 0 : url.replace(/\/$/, "");
2158
2459
  }
2159
2460
 
2160
- // src/apertis-error.ts
2161
- import { z } from "zod";
2162
- var apertisErrorSchema = z.object({
2461
+ // node_modules/.pnpm/@ai-sdk+openai-compatible@2.0.41_zod@3.25.76/node_modules/@ai-sdk/openai-compatible/dist/index.mjs
2462
+ import { z as z3 } from "zod/v4";
2463
+ import { z } from "zod/v4";
2464
+ import { z as z2 } from "zod/v4";
2465
+ import { z as z5 } from "zod/v4";
2466
+ import { z as z42 } from "zod/v4";
2467
+ import { z as z7 } from "zod/v4";
2468
+ import { z as z6 } from "zod/v4";
2469
+ import { z as z8 } from "zod/v4";
2470
+ function toCamelCase(str) {
2471
+ return str.replace(/[_-]([a-z])/g, (g) => g[1].toUpperCase());
2472
+ }
2473
+ function resolveProviderOptionsKey(rawName, providerOptions) {
2474
+ const camelName = toCamelCase(rawName);
2475
+ if (camelName !== rawName && (providerOptions == null ? void 0 : providerOptions[camelName]) != null) {
2476
+ return camelName;
2477
+ }
2478
+ return rawName;
2479
+ }
2480
+ var openaiCompatibleErrorDataSchema = z.object({
2163
2481
  error: z.object({
2164
2482
  message: z.string(),
2165
- type: z.string().optional(),
2166
- code: z.string().nullable().optional(),
2167
- param: z.string().nullable().optional()
2483
+ // The additional information below is handled loosely to support
2484
+ // OpenAI-compatible providers that have slightly different error
2485
+ // responses:
2486
+ type: z.string().nullish(),
2487
+ param: z.any().nullish(),
2488
+ code: z.union([z.string(), z.number()]).nullish()
2168
2489
  })
2169
2490
  });
2170
- var apertisFailedResponseHandler = createJsonErrorResponseHandler({
2171
- errorSchema: apertisErrorSchema,
2172
- errorToMessage: (error) => error.error.message
2173
- });
2174
-
2175
- // src/schemas/chat-response.ts
2176
- import { z as z2 } from "zod";
2177
- var openAIChatResponseSchema = z2.object({
2178
- id: z2.string().optional(),
2179
- object: z2.literal("chat.completion").optional(),
2180
- created: z2.number().optional(),
2181
- model: z2.string().optional(),
2182
- choices: z2.array(
2183
- z2.object({
2184
- index: z2.number(),
2185
- message: z2.object({
2186
- role: z2.literal("assistant"),
2187
- content: z2.string().nullable(),
2188
- tool_calls: z2.array(
2189
- z2.object({
2190
- id: z2.string(),
2191
- type: z2.literal("function"),
2192
- function: z2.object({
2193
- name: z2.string(),
2194
- arguments: z2.string()
2195
- })
2196
- })
2197
- ).optional()
2198
- }),
2199
- finish_reason: z2.string().nullable(),
2200
- logprobs: z2.any().nullable().optional()
2201
- })
2202
- ),
2203
- usage: z2.object({
2204
- prompt_tokens: z2.number(),
2205
- completion_tokens: z2.number(),
2206
- total_tokens: z2.number().optional()
2207
- }).optional()
2208
- });
2209
- var openAIChatChunkSchema = z2.object({
2210
- id: z2.string().optional(),
2211
- object: z2.literal("chat.completion.chunk").optional(),
2212
- created: z2.number().optional(),
2213
- model: z2.string().optional(),
2214
- choices: z2.array(
2215
- z2.object({
2216
- index: z2.number(),
2217
- delta: z2.object({
2218
- role: z2.literal("assistant").optional(),
2219
- content: z2.string().nullable().optional(),
2220
- tool_calls: z2.array(
2221
- z2.object({
2222
- index: z2.number(),
2223
- id: z2.string().optional(),
2224
- type: z2.literal("function").optional(),
2225
- function: z2.object({
2226
- name: z2.string().optional(),
2227
- arguments: z2.string().optional()
2228
- }).optional()
2229
- })
2230
- ).optional()
2231
- }),
2232
- finish_reason: z2.string().nullable().optional()
2233
- })
2234
- ),
2235
- usage: z2.object({
2236
- prompt_tokens: z2.number(),
2237
- completion_tokens: z2.number()
2238
- }).nullish()
2239
- });
2240
-
2241
- // src/utils/map-finish-reason.ts
2242
- function normalizeFinishReason(finishReason) {
2243
- switch (finishReason) {
2244
- case "stop":
2245
- return "stop";
2246
- case "length":
2247
- return "length";
2248
- case "tool_calls":
2249
- return "tool-calls";
2250
- case "content_filter":
2251
- return "content-filter";
2252
- default:
2253
- return "other";
2491
+ var defaultOpenAICompatibleErrorStructure = {
2492
+ errorSchema: openaiCompatibleErrorDataSchema,
2493
+ errorToMessage: (data) => data.error.message
2494
+ };
2495
+ function convertOpenAICompatibleChatUsage(usage) {
2496
+ var _a16, _b16, _c, _d, _e, _f;
2497
+ if (usage == null) {
2498
+ return {
2499
+ inputTokens: {
2500
+ total: void 0,
2501
+ noCache: void 0,
2502
+ cacheRead: void 0,
2503
+ cacheWrite: void 0
2504
+ },
2505
+ outputTokens: {
2506
+ total: void 0,
2507
+ text: void 0,
2508
+ reasoning: void 0
2509
+ },
2510
+ raw: void 0
2511
+ };
2254
2512
  }
2255
- }
2256
- function mapApertisFinishReason(finishReason) {
2513
+ const promptTokens = (_a16 = usage.prompt_tokens) != null ? _a16 : 0;
2514
+ const completionTokens = (_b16 = usage.completion_tokens) != null ? _b16 : 0;
2515
+ const cacheReadTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
2516
+ const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
2257
2517
  return {
2258
- unified: normalizeFinishReason(finishReason),
2259
- raw: finishReason ?? void 0
2518
+ inputTokens: {
2519
+ total: promptTokens,
2520
+ noCache: promptTokens - cacheReadTokens,
2521
+ cacheRead: cacheReadTokens,
2522
+ cacheWrite: void 0
2523
+ },
2524
+ outputTokens: {
2525
+ total: completionTokens,
2526
+ text: completionTokens - reasoningTokens,
2527
+ reasoning: reasoningTokens
2528
+ },
2529
+ raw: usage
2260
2530
  };
2261
2531
  }
2262
- function mapApertisFinishReasonV2(finishReason) {
2263
- return normalizeFinishReason(finishReason);
2532
+ function getOpenAIMetadata(message) {
2533
+ var _a16, _b16;
2534
+ return (_b16 = (_a16 = message == null ? void 0 : message.providerOptions) == null ? void 0 : _a16.openaiCompatible) != null ? _b16 : {};
2264
2535
  }
2265
-
2266
- // src/utils/convert-to-openai-messages.ts
2267
- function convertToOpenAIMessages(prompt) {
2536
+ function getAudioFormat(mediaType) {
2537
+ switch (mediaType) {
2538
+ case "audio/wav":
2539
+ return "wav";
2540
+ case "audio/mp3":
2541
+ case "audio/mpeg":
2542
+ return "mp3";
2543
+ default:
2544
+ return null;
2545
+ }
2546
+ }
2547
+ function convertToOpenAICompatibleChatMessages(prompt) {
2548
+ var _a16, _b16, _c;
2268
2549
  const messages = [];
2269
- for (const message of prompt) {
2270
- switch (message.role) {
2271
- case "system":
2272
- messages.push({ role: "system", content: message.content });
2550
+ for (const { role, content, ...message } of prompt) {
2551
+ const metadata = getOpenAIMetadata({ ...message });
2552
+ switch (role) {
2553
+ case "system": {
2554
+ messages.push({ role: "system", content, ...metadata });
2273
2555
  break;
2274
- case "user":
2556
+ }
2557
+ case "user": {
2558
+ if (content.length === 1 && content[0].type === "text") {
2559
+ messages.push({
2560
+ role: "user",
2561
+ content: content[0].text,
2562
+ ...getOpenAIMetadata(content[0])
2563
+ });
2564
+ break;
2565
+ }
2275
2566
  messages.push({
2276
2567
  role: "user",
2277
- content: message.content.map((part) => {
2568
+ content: content.map((part) => {
2569
+ var _a22;
2570
+ const partMetadata = getOpenAIMetadata(part);
2278
2571
  switch (part.type) {
2279
- case "text":
2280
- return { type: "text", text: part.text };
2572
+ case "text": {
2573
+ return { type: "text", text: part.text, ...partMetadata };
2574
+ }
2281
2575
  case "file": {
2282
- if (part.mediaType?.startsWith("image/")) {
2283
- let url;
2576
+ if (part.mediaType.startsWith("image/")) {
2577
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
2578
+ return {
2579
+ type: "image_url",
2580
+ image_url: {
2581
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`
2582
+ },
2583
+ ...partMetadata
2584
+ };
2585
+ }
2586
+ if (part.mediaType.startsWith("audio/")) {
2284
2587
  if (part.data instanceof URL) {
2285
- url = part.data.toString();
2286
- } else if (typeof part.data === "string") {
2287
- if (part.data.startsWith("http://") || part.data.startsWith("https://")) {
2288
- url = part.data;
2289
- } else {
2290
- url = `data:${part.mediaType};base64,${part.data}`;
2291
- }
2292
- } else {
2293
- url = `data:${part.mediaType};base64,${Buffer.from(part.data).toString("base64")}`;
2588
+ throw new UnsupportedFunctionalityError({
2589
+ functionality: "audio file parts with URLs"
2590
+ });
2591
+ }
2592
+ const format = getAudioFormat(part.mediaType);
2593
+ if (format === null) {
2594
+ throw new UnsupportedFunctionalityError({
2595
+ functionality: `audio media type ${part.mediaType}`
2596
+ });
2294
2597
  }
2295
2598
  return {
2296
- type: "image_url",
2297
- image_url: { url }
2599
+ type: "input_audio",
2600
+ input_audio: {
2601
+ data: convertToBase64(part.data),
2602
+ format
2603
+ },
2604
+ ...partMetadata
2605
+ };
2606
+ }
2607
+ if (part.mediaType === "application/pdf") {
2608
+ if (part.data instanceof URL) {
2609
+ throw new UnsupportedFunctionalityError({
2610
+ functionality: "PDF file parts with URLs"
2611
+ });
2612
+ }
2613
+ return {
2614
+ type: "file",
2615
+ file: {
2616
+ filename: (_a22 = part.filename) != null ? _a22 : "document.pdf",
2617
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
2618
+ },
2619
+ ...partMetadata
2620
+ };
2621
+ }
2622
+ if (part.mediaType.startsWith("text/")) {
2623
+ const textContent = part.data instanceof URL ? part.data.toString() : typeof part.data === "string" ? new TextDecoder().decode(
2624
+ convertBase64ToUint8Array(part.data)
2625
+ ) : new TextDecoder().decode(part.data);
2626
+ return {
2627
+ type: "text",
2628
+ text: textContent,
2629
+ ...partMetadata
2298
2630
  };
2299
2631
  }
2300
- throw new Error(
2301
- `Unsupported file type: ${part.mediaType}. Only image/* is supported.`
2302
- );
2632
+ throw new UnsupportedFunctionalityError({
2633
+ functionality: `file part media type ${part.mediaType}`
2634
+ });
2303
2635
  }
2304
- default:
2305
- throw new Error(
2306
- `Unsupported user content part type: ${part.type}`
2307
- );
2308
2636
  }
2309
- })
2637
+ }),
2638
+ ...metadata
2310
2639
  });
2311
2640
  break;
2641
+ }
2312
2642
  case "assistant": {
2313
- const textContent = message.content.filter((p) => p.type === "text").map((p) => p.text).join("");
2314
- const toolCalls = message.content.filter((p) => p.type === "tool-call").map((tc) => {
2315
- let arguments_str = "{}";
2316
- try {
2317
- arguments_str = typeof tc.input === "string" ? tc.input : JSON.stringify(tc.input);
2318
- } catch {
2319
- arguments_str = "{}";
2643
+ let text = "";
2644
+ let reasoning = "";
2645
+ const toolCalls = [];
2646
+ for (const part of content) {
2647
+ const partMetadata = getOpenAIMetadata(part);
2648
+ switch (part.type) {
2649
+ case "text": {
2650
+ text += part.text;
2651
+ break;
2652
+ }
2653
+ case "reasoning": {
2654
+ reasoning += part.text;
2655
+ break;
2656
+ }
2657
+ case "tool-call": {
2658
+ const thoughtSignature = (_b16 = (_a16 = part.providerOptions) == null ? void 0 : _a16.google) == null ? void 0 : _b16.thoughtSignature;
2659
+ toolCalls.push({
2660
+ id: part.toolCallId,
2661
+ type: "function",
2662
+ function: {
2663
+ name: part.toolName,
2664
+ arguments: JSON.stringify(part.input)
2665
+ },
2666
+ ...partMetadata,
2667
+ // Include extra_content for Google Gemini thought signatures
2668
+ ...thoughtSignature ? {
2669
+ extra_content: {
2670
+ google: {
2671
+ thought_signature: String(thoughtSignature)
2672
+ }
2673
+ }
2674
+ } : {}
2675
+ });
2676
+ break;
2677
+ }
2320
2678
  }
2321
- return {
2322
- id: tc.toolCallId,
2323
- type: "function",
2324
- function: { name: tc.toolName, arguments: arguments_str }
2325
- };
2326
- });
2679
+ }
2327
2680
  messages.push({
2328
2681
  role: "assistant",
2329
- content: textContent || null,
2330
- ...toolCalls.length > 0 ? { tool_calls: toolCalls } : {}
2682
+ content: text,
2683
+ ...reasoning.length > 0 ? { reasoning_content: reasoning } : {},
2684
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
2685
+ ...metadata
2331
2686
  });
2332
2687
  break;
2333
2688
  }
2334
- case "tool":
2335
- for (const result of message.content) {
2336
- if (result.type !== "tool-result") continue;
2337
- let content = "{}";
2338
- const output = result.output;
2339
- if (typeof output === "string") {
2340
- content = output;
2341
- } else if (Array.isArray(output)) {
2342
- const textParts = output.filter((p) => p.type === "text").map((p) => p.text);
2343
- content = textParts.join("");
2344
- } else {
2345
- try {
2346
- content = JSON.stringify(output);
2347
- } catch {
2348
- content = "{}";
2349
- }
2689
+ case "tool": {
2690
+ for (const toolResponse of content) {
2691
+ if (toolResponse.type === "tool-approval-response") {
2692
+ continue;
2693
+ }
2694
+ const output = toolResponse.output;
2695
+ let contentValue;
2696
+ switch (output.type) {
2697
+ case "text":
2698
+ case "error-text":
2699
+ contentValue = output.value;
2700
+ break;
2701
+ case "execution-denied":
2702
+ contentValue = (_c = output.reason) != null ? _c : "Tool execution denied.";
2703
+ break;
2704
+ case "content":
2705
+ case "json":
2706
+ case "error-json":
2707
+ contentValue = JSON.stringify(output.value);
2708
+ break;
2350
2709
  }
2710
+ const toolResponseMetadata = getOpenAIMetadata(toolResponse);
2351
2711
  messages.push({
2352
2712
  role: "tool",
2353
- tool_call_id: result.toolCallId,
2354
- content
2713
+ tool_call_id: toolResponse.toolCallId,
2714
+ content: contentValue,
2715
+ ...toolResponseMetadata
2355
2716
  });
2356
2717
  }
2357
2718
  break;
2719
+ }
2720
+ default: {
2721
+ const _exhaustiveCheck = role;
2722
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2723
+ }
2358
2724
  }
2359
2725
  }
2360
2726
  return messages;
2361
2727
  }
2362
-
2363
- // src/utils/convert-to-openai-tools.ts
2364
- function convertToOpenAITools(tools) {
2365
- if (!tools || tools.length === 0) return void 0;
2366
- return tools.map((tool) => ({
2367
- type: "function",
2368
- function: {
2369
- name: tool.name,
2370
- description: tool.description,
2371
- parameters: tool.inputSchema
2372
- }
2373
- }));
2374
- }
2375
- function convertToOpenAIToolChoice(toolChoice) {
2376
- if (!toolChoice) return void 0;
2377
- switch (toolChoice.type) {
2378
- case "none":
2379
- return "none";
2380
- case "auto":
2381
- return "auto";
2382
- case "required":
2383
- return "required";
2384
- case "tool":
2385
- if (!toolChoice.toolName) return void 0;
2386
- return {
2387
- type: "function",
2388
- function: { name: toolChoice.toolName }
2389
- };
2728
+ function getResponseMetadata({
2729
+ id,
2730
+ model,
2731
+ created
2732
+ }) {
2733
+ return {
2734
+ id: id != null ? id : void 0,
2735
+ modelId: model != null ? model : void 0,
2736
+ timestamp: created != null ? new Date(created * 1e3) : void 0
2737
+ };
2738
+ }
2739
+ function mapOpenAICompatibleFinishReason(finishReason) {
2740
+ switch (finishReason) {
2741
+ case "stop":
2742
+ return "stop";
2743
+ case "length":
2744
+ return "length";
2745
+ case "content_filter":
2746
+ return "content-filter";
2747
+ case "function_call":
2748
+ case "tool_calls":
2749
+ return "tool-calls";
2390
2750
  default:
2391
- return void 0;
2751
+ return "other";
2392
2752
  }
2393
2753
  }
2394
-
2395
- // src/apertis-chat-language-model.ts
2396
- var ApertisChatLanguageModel = class {
2397
- constructor(modelId, settings, config) {
2398
- this.modelId = modelId;
2399
- this.settings = settings;
2400
- this.config = config;
2401
- }
2402
- specificationVersion = "v3";
2754
+ var openaiCompatibleLanguageModelChatOptions = z2.object({
2403
2755
  /**
2404
- * Supported URL patterns for different media types.
2405
- * Supports HTTP(S) image URLs for direct URL passing.
2756
+ * A unique identifier representing your end-user, which can help the provider to
2757
+ * monitor and detect abuse.
2406
2758
  */
2407
- supportedUrls = {
2408
- "image/*": [/^https?:\/\/.+$/]
2409
- };
2410
- get provider() {
2411
- return this.config.provider;
2759
+ user: z2.string().optional(),
2760
+ /**
2761
+ * Reasoning effort for reasoning models. Defaults to `medium`.
2762
+ */
2763
+ reasoningEffort: z2.string().optional(),
2764
+ /**
2765
+ * Controls the verbosity of the generated text. Defaults to `medium`.
2766
+ */
2767
+ textVerbosity: z2.string().optional(),
2768
+ /**
2769
+ * Whether to use strict JSON schema validation.
2770
+ * When true, the model uses constrained decoding to guarantee schema compliance.
2771
+ * Only used when the provider supports structured outputs and a schema is provided.
2772
+ *
2773
+ * @default true
2774
+ */
2775
+ strictJsonSchema: z2.boolean().optional()
2776
+ });
2777
+ function prepareTools({
2778
+ tools,
2779
+ toolChoice
2780
+ }) {
2781
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
2782
+ const toolWarnings = [];
2783
+ if (tools == null) {
2784
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
2785
+ }
2786
+ const openaiCompatTools = [];
2787
+ for (const tool of tools) {
2788
+ if (tool.type === "provider") {
2789
+ toolWarnings.push({
2790
+ type: "unsupported",
2791
+ feature: `provider-defined tool ${tool.id}`
2792
+ });
2793
+ } else {
2794
+ openaiCompatTools.push({
2795
+ type: "function",
2796
+ function: {
2797
+ name: tool.name,
2798
+ description: tool.description,
2799
+ parameters: tool.inputSchema,
2800
+ ...tool.strict != null ? { strict: tool.strict } : {}
2801
+ }
2802
+ });
2803
+ }
2804
+ }
2805
+ if (toolChoice == null) {
2806
+ return { tools: openaiCompatTools, toolChoice: void 0, toolWarnings };
2807
+ }
2808
+ const type = toolChoice.type;
2809
+ switch (type) {
2810
+ case "auto":
2811
+ case "none":
2812
+ case "required":
2813
+ return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
2814
+ case "tool":
2815
+ return {
2816
+ tools: openaiCompatTools,
2817
+ toolChoice: {
2818
+ type: "function",
2819
+ function: { name: toolChoice.toolName }
2820
+ },
2821
+ toolWarnings
2822
+ };
2823
+ default: {
2824
+ const _exhaustiveCheck = type;
2825
+ throw new UnsupportedFunctionalityError({
2826
+ functionality: `tool choice type: ${_exhaustiveCheck}`
2827
+ });
2828
+ }
2829
+ }
2830
+ }
2831
+ var OpenAICompatibleChatLanguageModel = class {
2832
+ // type inferred via constructor
2833
+ constructor(modelId, config) {
2834
+ this.specificationVersion = "v3";
2835
+ var _a16, _b16;
2836
+ this.modelId = modelId;
2837
+ this.config = config;
2838
+ const errorStructure = (_a16 = config.errorStructure) != null ? _a16 : defaultOpenAICompatibleErrorStructure;
2839
+ this.chunkSchema = createOpenAICompatibleChatChunkSchema(
2840
+ errorStructure.errorSchema
2841
+ );
2842
+ this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
2843
+ this.supportsStructuredOutputs = (_b16 = config.supportsStructuredOutputs) != null ? _b16 : false;
2844
+ }
2845
+ get provider() {
2846
+ return this.config.provider;
2847
+ }
2848
+ get providerOptionsName() {
2849
+ return this.config.provider.split(".")[0].trim();
2850
+ }
2851
+ get supportedUrls() {
2852
+ var _a16, _b16, _c;
2853
+ return (_c = (_b16 = (_a16 = this.config).supportedUrls) == null ? void 0 : _b16.call(_a16)) != null ? _c : {};
2854
+ }
2855
+ transformRequestBody(args) {
2856
+ var _a16, _b16, _c;
2857
+ return (_c = (_b16 = (_a16 = this.config).transformRequestBody) == null ? void 0 : _b16.call(_a16, args)) != null ? _c : args;
2858
+ }
2859
+ async getArgs({
2860
+ prompt,
2861
+ maxOutputTokens,
2862
+ temperature,
2863
+ topP,
2864
+ topK,
2865
+ frequencyPenalty,
2866
+ presencePenalty,
2867
+ providerOptions,
2868
+ stopSequences,
2869
+ responseFormat,
2870
+ seed,
2871
+ toolChoice,
2872
+ tools
2873
+ }) {
2874
+ var _a16, _b16, _c, _d, _e;
2875
+ const warnings = [];
2876
+ const deprecatedOptions = await parseProviderOptions({
2877
+ provider: "openai-compatible",
2878
+ providerOptions,
2879
+ schema: openaiCompatibleLanguageModelChatOptions
2880
+ });
2881
+ if (deprecatedOptions != null) {
2882
+ warnings.push({
2883
+ type: "other",
2884
+ message: `The 'openai-compatible' key in providerOptions is deprecated. Use 'openaiCompatible' instead.`
2885
+ });
2886
+ }
2887
+ const compatibleOptions = Object.assign(
2888
+ deprecatedOptions != null ? deprecatedOptions : {},
2889
+ (_a16 = await parseProviderOptions({
2890
+ provider: "openaiCompatible",
2891
+ providerOptions,
2892
+ schema: openaiCompatibleLanguageModelChatOptions
2893
+ })) != null ? _a16 : {},
2894
+ (_b16 = await parseProviderOptions({
2895
+ provider: this.providerOptionsName,
2896
+ providerOptions,
2897
+ schema: openaiCompatibleLanguageModelChatOptions
2898
+ })) != null ? _b16 : {},
2899
+ (_c = await parseProviderOptions({
2900
+ provider: toCamelCase(this.providerOptionsName),
2901
+ providerOptions,
2902
+ schema: openaiCompatibleLanguageModelChatOptions
2903
+ })) != null ? _c : {}
2904
+ );
2905
+ const strictJsonSchema = (_d = compatibleOptions == null ? void 0 : compatibleOptions.strictJsonSchema) != null ? _d : true;
2906
+ if (topK != null) {
2907
+ warnings.push({ type: "unsupported", feature: "topK" });
2908
+ }
2909
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
2910
+ warnings.push({
2911
+ type: "unsupported",
2912
+ feature: "responseFormat",
2913
+ details: "JSON response format schema is only supported with structuredOutputs"
2914
+ });
2915
+ }
2916
+ const {
2917
+ tools: openaiTools,
2918
+ toolChoice: openaiToolChoice,
2919
+ toolWarnings
2920
+ } = prepareTools({
2921
+ tools,
2922
+ toolChoice
2923
+ });
2924
+ const metadataKey = resolveProviderOptionsKey(
2925
+ this.providerOptionsName,
2926
+ providerOptions
2927
+ );
2928
+ return {
2929
+ metadataKey,
2930
+ args: {
2931
+ // model id:
2932
+ model: this.modelId,
2933
+ // model specific settings:
2934
+ user: compatibleOptions.user,
2935
+ // standardized settings:
2936
+ max_tokens: maxOutputTokens,
2937
+ temperature,
2938
+ top_p: topP,
2939
+ frequency_penalty: frequencyPenalty,
2940
+ presence_penalty: presencePenalty,
2941
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
2942
+ type: "json_schema",
2943
+ json_schema: {
2944
+ schema: responseFormat.schema,
2945
+ strict: strictJsonSchema,
2946
+ name: (_e = responseFormat.name) != null ? _e : "response",
2947
+ description: responseFormat.description
2948
+ }
2949
+ } : { type: "json_object" } : void 0,
2950
+ stop: stopSequences,
2951
+ seed,
2952
+ ...Object.fromEntries(
2953
+ Object.entries({
2954
+ ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
2955
+ ...providerOptions == null ? void 0 : providerOptions[toCamelCase(this.providerOptionsName)]
2956
+ }).filter(
2957
+ ([key]) => !Object.keys(
2958
+ openaiCompatibleLanguageModelChatOptions.shape
2959
+ ).includes(key)
2960
+ )
2961
+ ),
2962
+ reasoning_effort: compatibleOptions.reasoningEffort,
2963
+ verbosity: compatibleOptions.textVerbosity,
2964
+ // messages:
2965
+ messages: convertToOpenAICompatibleChatMessages(prompt),
2966
+ // tools:
2967
+ tools: openaiTools,
2968
+ tool_choice: openaiToolChoice
2969
+ },
2970
+ warnings: [...warnings, ...toolWarnings]
2971
+ };
2412
2972
  }
2413
2973
  async doGenerate(options) {
2414
- const body = this.buildRequestBody(options, false);
2415
- const { value: response } = await postJsonToApi({
2416
- url: `${this.config.baseURL}/chat/completions`,
2417
- headers: this.config.headers(),
2418
- body,
2419
- failedResponseHandler: apertisFailedResponseHandler,
2974
+ var _a16, _b16, _c, _d, _e, _f, _g, _h;
2975
+ const { args, warnings, metadataKey } = await this.getArgs({ ...options });
2976
+ const transformedBody = this.transformRequestBody(args);
2977
+ const body = JSON.stringify(transformedBody);
2978
+ const {
2979
+ responseHeaders,
2980
+ value: responseBody,
2981
+ rawValue: rawResponse
2982
+ } = await postJsonToApi({
2983
+ url: this.config.url({
2984
+ path: "/chat/completions",
2985
+ modelId: this.modelId
2986
+ }),
2987
+ headers: combineHeaders(this.config.headers(), options.headers),
2988
+ body: transformedBody,
2989
+ failedResponseHandler: this.failedResponseHandler,
2420
2990
  successfulResponseHandler: createJsonResponseHandler(
2421
- openAIChatResponseSchema
2991
+ OpenAICompatibleChatResponseSchema
2422
2992
  ),
2423
- fetch: this.config.fetch,
2424
- abortSignal: options.abortSignal
2993
+ abortSignal: options.abortSignal,
2994
+ fetch: this.config.fetch
2425
2995
  });
2426
- const choice = response.choices[0];
2996
+ const choice = responseBody.choices[0];
2427
2997
  const content = [];
2428
- if (choice.message.content) {
2998
+ const text = choice.message.content;
2999
+ if (text != null && text.length > 0) {
3000
+ content.push({ type: "text", text });
3001
+ }
3002
+ const reasoning = (_a16 = choice.message.reasoning_content) != null ? _a16 : choice.message.reasoning;
3003
+ if (reasoning != null && reasoning.length > 0) {
2429
3004
  content.push({
2430
- type: "text",
2431
- text: choice.message.content
3005
+ type: "reasoning",
3006
+ text: reasoning
2432
3007
  });
2433
3008
  }
2434
- if (choice.message.tool_calls) {
2435
- for (const tc of choice.message.tool_calls) {
3009
+ if (choice.message.tool_calls != null) {
3010
+ for (const toolCall of choice.message.tool_calls) {
3011
+ const thoughtSignature = (_c = (_b16 = toolCall.extra_content) == null ? void 0 : _b16.google) == null ? void 0 : _c.thought_signature;
2436
3012
  content.push({
2437
3013
  type: "tool-call",
2438
- toolCallId: tc.id,
2439
- toolName: tc.function.name,
2440
- input: tc.function.arguments
3014
+ toolCallId: (_d = toolCall.id) != null ? _d : generateId(),
3015
+ toolName: toolCall.function.name,
3016
+ input: toolCall.function.arguments,
3017
+ ...thoughtSignature ? {
3018
+ providerMetadata: {
3019
+ [metadataKey]: { thoughtSignature }
3020
+ }
3021
+ } : {}
2441
3022
  });
2442
3023
  }
2443
3024
  }
3025
+ const providerMetadata = {
3026
+ [metadataKey]: {},
3027
+ ...await ((_f = (_e = this.config.metadataExtractor) == null ? void 0 : _e.extractMetadata) == null ? void 0 : _f.call(_e, {
3028
+ parsedBody: rawResponse
3029
+ }))
3030
+ };
3031
+ const completionTokenDetails = (_g = responseBody.usage) == null ? void 0 : _g.completion_tokens_details;
3032
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
3033
+ providerMetadata[metadataKey].acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
3034
+ }
3035
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
3036
+ providerMetadata[metadataKey].rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
3037
+ }
2444
3038
  return {
2445
3039
  content,
2446
- finishReason: mapApertisFinishReason(choice.finish_reason),
2447
- usage: {
2448
- inputTokens: {
2449
- total: response.usage?.prompt_tokens ?? 0,
2450
- noCache: void 0,
2451
- cacheRead: void 0,
2452
- cacheWrite: void 0
2453
- },
2454
- outputTokens: {
2455
- total: response.usage?.completion_tokens ?? 0,
2456
- text: void 0,
2457
- reasoning: void 0
2458
- }
3040
+ finishReason: {
3041
+ unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
3042
+ raw: (_h = choice.finish_reason) != null ? _h : void 0
3043
+ },
3044
+ usage: convertOpenAICompatibleChatUsage(responseBody.usage),
3045
+ providerMetadata,
3046
+ request: { body },
3047
+ response: {
3048
+ ...getResponseMetadata(responseBody),
3049
+ headers: responseHeaders,
3050
+ body: rawResponse
2459
3051
  },
2460
- warnings: [],
2461
- request: { body }
3052
+ warnings
2462
3053
  };
2463
3054
  }
2464
3055
  async doStream(options) {
2465
- const body = this.buildRequestBody(options, true);
2466
- const { value: response } = await postJsonToApi({
2467
- url: `${this.config.baseURL}/chat/completions`,
2468
- headers: this.config.headers(),
3056
+ var _a16;
3057
+ const { args, warnings, metadataKey } = await this.getArgs({ ...options });
3058
+ const body = this.transformRequestBody({
3059
+ ...args,
3060
+ stream: true,
3061
+ // only include stream_options when in strict compatibility mode:
3062
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
3063
+ });
3064
+ const metadataExtractor = (_a16 = this.config.metadataExtractor) == null ? void 0 : _a16.createStreamExtractor();
3065
+ const { responseHeaders, value: response } = await postJsonToApi({
3066
+ url: this.config.url({
3067
+ path: "/chat/completions",
3068
+ modelId: this.modelId
3069
+ }),
3070
+ headers: combineHeaders(this.config.headers(), options.headers),
2469
3071
  body,
2470
- failedResponseHandler: apertisFailedResponseHandler,
3072
+ failedResponseHandler: this.failedResponseHandler,
2471
3073
  successfulResponseHandler: createEventSourceResponseHandler(
2472
- openAIChatChunkSchema
3074
+ this.chunkSchema
2473
3075
  ),
2474
- fetch: this.config.fetch,
2475
- abortSignal: options.abortSignal
3076
+ abortSignal: options.abortSignal,
3077
+ fetch: this.config.fetch
2476
3078
  });
2477
- const toolCallBuffers = /* @__PURE__ */ new Map();
2478
- let textId = null;
2479
- const transformStream = new TransformStream({
2480
- transform(parseResult, controller) {
2481
- if (!parseResult.success) {
2482
- return;
2483
- }
2484
- const chunk = parseResult.value;
2485
- const choice = chunk.choices[0];
2486
- if (!choice) return;
2487
- if (choice.delta.content) {
2488
- if (!textId) {
2489
- textId = generateId();
2490
- controller.enqueue({
2491
- type: "text-start",
2492
- id: textId
2493
- });
2494
- }
2495
- controller.enqueue({
2496
- type: "text-delta",
2497
- id: textId,
2498
- delta: choice.delta.content
2499
- });
2500
- }
2501
- if (choice.delta.tool_calls) {
2502
- for (const tc of choice.delta.tool_calls) {
2503
- let buffer = toolCallBuffers.get(tc.index);
2504
- if (!buffer) {
2505
- buffer = { id: tc.id ?? generateId(), name: "", arguments: "" };
2506
- toolCallBuffers.set(tc.index, buffer);
3079
+ const toolCalls = [];
3080
+ let finishReason = {
3081
+ unified: "other",
3082
+ raw: void 0
3083
+ };
3084
+ let usage = void 0;
3085
+ let isFirstChunk = true;
3086
+ const providerOptionsName = metadataKey;
3087
+ let isActiveReasoning = false;
3088
+ let isActiveText = false;
3089
+ return {
3090
+ stream: response.pipeThrough(
3091
+ new TransformStream({
3092
+ start(controller) {
3093
+ controller.enqueue({ type: "stream-start", warnings });
3094
+ },
3095
+ transform(chunk, controller) {
3096
+ var _a22, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
3097
+ if (options.includeRawChunks) {
3098
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2507
3099
  }
2508
- if (tc.id) buffer.id = tc.id;
2509
- if (tc.function?.name) buffer.name += tc.function.name;
2510
- if (tc.function?.arguments)
2511
- buffer.arguments += tc.function.arguments;
2512
- }
2513
- }
2514
- if (choice.finish_reason) {
2515
- if (textId) {
2516
- controller.enqueue({
2517
- type: "text-end",
2518
- id: textId
2519
- });
2520
- textId = null;
2521
- }
2522
- for (const [, buffer] of toolCallBuffers) {
2523
- if (buffer.name) {
3100
+ if (!chunk.success) {
3101
+ finishReason = { unified: "error", raw: void 0 };
3102
+ controller.enqueue({ type: "error", error: chunk.error });
3103
+ return;
3104
+ }
3105
+ metadataExtractor == null ? void 0 : metadataExtractor.processChunk(chunk.rawValue);
3106
+ if ("error" in chunk.value) {
3107
+ finishReason = { unified: "error", raw: void 0 };
2524
3108
  controller.enqueue({
2525
- type: "tool-call",
2526
- toolCallId: buffer.id,
2527
- toolName: buffer.name,
2528
- input: buffer.arguments
3109
+ type: "error",
3110
+ error: chunk.value.error.message
2529
3111
  });
3112
+ return;
2530
3113
  }
2531
- }
2532
- toolCallBuffers.clear();
2533
- controller.enqueue({
2534
- type: "finish",
2535
- finishReason: mapApertisFinishReason(choice.finish_reason),
2536
- usage: {
2537
- inputTokens: {
2538
- total: chunk.usage?.prompt_tokens ?? 0,
2539
- noCache: void 0,
2540
- cacheRead: void 0,
2541
- cacheWrite: void 0
2542
- },
2543
- outputTokens: {
2544
- total: chunk.usage?.completion_tokens ?? 0,
2545
- text: void 0,
2546
- reasoning: void 0
3114
+ const value = chunk.value;
3115
+ if (isFirstChunk) {
3116
+ isFirstChunk = false;
3117
+ controller.enqueue({
3118
+ type: "response-metadata",
3119
+ ...getResponseMetadata(value)
3120
+ });
3121
+ }
3122
+ if (value.usage != null) {
3123
+ usage = value.usage;
3124
+ }
3125
+ const choice = value.choices[0];
3126
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
3127
+ finishReason = {
3128
+ unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
3129
+ raw: (_a22 = choice.finish_reason) != null ? _a22 : void 0
3130
+ };
3131
+ }
3132
+ if ((choice == null ? void 0 : choice.delta) == null) {
3133
+ return;
3134
+ }
3135
+ const delta = choice.delta;
3136
+ const reasoningContent = (_b16 = delta.reasoning_content) != null ? _b16 : delta.reasoning;
3137
+ if (reasoningContent) {
3138
+ if (!isActiveReasoning) {
3139
+ controller.enqueue({
3140
+ type: "reasoning-start",
3141
+ id: "reasoning-0"
3142
+ });
3143
+ isActiveReasoning = true;
2547
3144
  }
3145
+ controller.enqueue({
3146
+ type: "reasoning-delta",
3147
+ id: "reasoning-0",
3148
+ delta: reasoningContent
3149
+ });
3150
+ }
3151
+ if (delta.content) {
3152
+ if (isActiveReasoning) {
3153
+ controller.enqueue({
3154
+ type: "reasoning-end",
3155
+ id: "reasoning-0"
3156
+ });
3157
+ isActiveReasoning = false;
3158
+ }
3159
+ if (!isActiveText) {
3160
+ controller.enqueue({ type: "text-start", id: "txt-0" });
3161
+ isActiveText = true;
3162
+ }
3163
+ controller.enqueue({
3164
+ type: "text-delta",
3165
+ id: "txt-0",
3166
+ delta: delta.content
3167
+ });
3168
+ }
3169
+ if (delta.tool_calls != null) {
3170
+ if (isActiveReasoning) {
3171
+ controller.enqueue({
3172
+ type: "reasoning-end",
3173
+ id: "reasoning-0"
3174
+ });
3175
+ isActiveReasoning = false;
3176
+ }
3177
+ for (const toolCallDelta of delta.tool_calls) {
3178
+ const index = (_c = toolCallDelta.index) != null ? _c : toolCalls.length;
3179
+ if (toolCalls[index] == null) {
3180
+ if (toolCallDelta.id == null) {
3181
+ throw new InvalidResponseDataError({
3182
+ data: toolCallDelta,
3183
+ message: `Expected 'id' to be a string.`
3184
+ });
3185
+ }
3186
+ if (((_d = toolCallDelta.function) == null ? void 0 : _d.name) == null) {
3187
+ throw new InvalidResponseDataError({
3188
+ data: toolCallDelta,
3189
+ message: `Expected 'function.name' to be a string.`
3190
+ });
3191
+ }
3192
+ controller.enqueue({
3193
+ type: "tool-input-start",
3194
+ id: toolCallDelta.id,
3195
+ toolName: toolCallDelta.function.name
3196
+ });
3197
+ toolCalls[index] = {
3198
+ id: toolCallDelta.id,
3199
+ type: "function",
3200
+ function: {
3201
+ name: toolCallDelta.function.name,
3202
+ arguments: (_e = toolCallDelta.function.arguments) != null ? _e : ""
3203
+ },
3204
+ hasFinished: false,
3205
+ thoughtSignature: (_h = (_g = (_f = toolCallDelta.extra_content) == null ? void 0 : _f.google) == null ? void 0 : _g.thought_signature) != null ? _h : void 0
3206
+ };
3207
+ const toolCall2 = toolCalls[index];
3208
+ if (((_i = toolCall2.function) == null ? void 0 : _i.name) != null && ((_j = toolCall2.function) == null ? void 0 : _j.arguments) != null) {
3209
+ if (toolCall2.function.arguments.length > 0) {
3210
+ controller.enqueue({
3211
+ type: "tool-input-delta",
3212
+ id: toolCall2.id,
3213
+ delta: toolCall2.function.arguments
3214
+ });
3215
+ }
3216
+ if (isParsableJson(toolCall2.function.arguments)) {
3217
+ controller.enqueue({
3218
+ type: "tool-input-end",
3219
+ id: toolCall2.id
3220
+ });
3221
+ controller.enqueue({
3222
+ type: "tool-call",
3223
+ toolCallId: (_k = toolCall2.id) != null ? _k : generateId(),
3224
+ toolName: toolCall2.function.name,
3225
+ input: toolCall2.function.arguments,
3226
+ ...toolCall2.thoughtSignature ? {
3227
+ providerMetadata: {
3228
+ [providerOptionsName]: {
3229
+ thoughtSignature: toolCall2.thoughtSignature
3230
+ }
3231
+ }
3232
+ } : {}
3233
+ });
3234
+ toolCall2.hasFinished = true;
3235
+ }
3236
+ }
3237
+ continue;
3238
+ }
3239
+ const toolCall = toolCalls[index];
3240
+ if (toolCall.hasFinished) {
3241
+ continue;
3242
+ }
3243
+ if (((_l = toolCallDelta.function) == null ? void 0 : _l.arguments) != null) {
3244
+ toolCall.function.arguments += (_n = (_m = toolCallDelta.function) == null ? void 0 : _m.arguments) != null ? _n : "";
3245
+ }
3246
+ controller.enqueue({
3247
+ type: "tool-input-delta",
3248
+ id: toolCall.id,
3249
+ delta: (_o = toolCallDelta.function.arguments) != null ? _o : ""
3250
+ });
3251
+ if (((_p = toolCall.function) == null ? void 0 : _p.name) != null && ((_q = toolCall.function) == null ? void 0 : _q.arguments) != null && isParsableJson(toolCall.function.arguments)) {
3252
+ controller.enqueue({
3253
+ type: "tool-input-end",
3254
+ id: toolCall.id
3255
+ });
3256
+ controller.enqueue({
3257
+ type: "tool-call",
3258
+ toolCallId: (_r = toolCall.id) != null ? _r : generateId(),
3259
+ toolName: toolCall.function.name,
3260
+ input: toolCall.function.arguments,
3261
+ ...toolCall.thoughtSignature ? {
3262
+ providerMetadata: {
3263
+ [providerOptionsName]: {
3264
+ thoughtSignature: toolCall.thoughtSignature
3265
+ }
3266
+ }
3267
+ } : {}
3268
+ });
3269
+ toolCall.hasFinished = true;
3270
+ }
3271
+ }
3272
+ }
3273
+ },
3274
+ flush(controller) {
3275
+ var _a22, _b16, _c, _d, _e;
3276
+ if (isActiveReasoning) {
3277
+ controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
3278
+ }
3279
+ if (isActiveText) {
3280
+ controller.enqueue({ type: "text-end", id: "txt-0" });
3281
+ }
3282
+ for (const toolCall of toolCalls.filter(
3283
+ (toolCall2) => !toolCall2.hasFinished
3284
+ )) {
3285
+ controller.enqueue({
3286
+ type: "tool-input-end",
3287
+ id: toolCall.id
3288
+ });
3289
+ controller.enqueue({
3290
+ type: "tool-call",
3291
+ toolCallId: (_a22 = toolCall.id) != null ? _a22 : generateId(),
3292
+ toolName: toolCall.function.name,
3293
+ input: toolCall.function.arguments,
3294
+ ...toolCall.thoughtSignature ? {
3295
+ providerMetadata: {
3296
+ [providerOptionsName]: {
3297
+ thoughtSignature: toolCall.thoughtSignature
3298
+ }
3299
+ }
3300
+ } : {}
3301
+ });
3302
+ }
3303
+ const providerMetadata = {
3304
+ [providerOptionsName]: {},
3305
+ ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
3306
+ };
3307
+ if (((_b16 = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _b16.accepted_prediction_tokens) != null) {
3308
+ providerMetadata[providerOptionsName].acceptedPredictionTokens = (_c = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _c.accepted_prediction_tokens;
3309
+ }
3310
+ if (((_d = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens) != null) {
3311
+ providerMetadata[providerOptionsName].rejectedPredictionTokens = (_e = usage == null ? void 0 : usage.completion_tokens_details) == null ? void 0 : _e.rejected_prediction_tokens;
2548
3312
  }
2549
- });
2550
- }
2551
- },
2552
- flush(controller) {
2553
- if (textId) {
2554
- controller.enqueue({
2555
- type: "text-end",
2556
- id: textId
2557
- });
2558
- }
2559
- for (const [, buffer] of toolCallBuffers) {
2560
- if (buffer.name) {
2561
3313
  controller.enqueue({
2562
- type: "tool-call",
2563
- toolCallId: buffer.id,
2564
- toolName: buffer.name,
2565
- input: buffer.arguments
3314
+ type: "finish",
3315
+ finishReason,
3316
+ usage: convertOpenAICompatibleChatUsage(usage),
3317
+ providerMetadata
2566
3318
  });
2567
3319
  }
2568
- }
2569
- }
2570
- });
2571
- return {
2572
- stream: response.pipeThrough(transformStream),
2573
- request: { body }
3320
+ })
3321
+ ),
3322
+ request: { body },
3323
+ response: { headers: responseHeaders }
2574
3324
  };
2575
3325
  }
2576
- buildRequestBody(options, stream) {
2577
- const tools = this.filterFunctionTools(options.tools);
2578
- const responseFormat = options.responseFormat?.type === "json" ? { type: "json_object" } : void 0;
2579
- const body = {
2580
- model: this.modelId,
2581
- messages: convertToOpenAIMessages(options.prompt),
2582
- stream
3326
+ };
3327
+ var openaiCompatibleTokenUsageSchema = z3.looseObject({
3328
+ prompt_tokens: z3.number().nullish(),
3329
+ completion_tokens: z3.number().nullish(),
3330
+ total_tokens: z3.number().nullish(),
3331
+ prompt_tokens_details: z3.object({
3332
+ cached_tokens: z3.number().nullish()
3333
+ }).nullish(),
3334
+ completion_tokens_details: z3.object({
3335
+ reasoning_tokens: z3.number().nullish(),
3336
+ accepted_prediction_tokens: z3.number().nullish(),
3337
+ rejected_prediction_tokens: z3.number().nullish()
3338
+ }).nullish()
3339
+ }).nullish();
3340
+ var OpenAICompatibleChatResponseSchema = z3.looseObject({
3341
+ id: z3.string().nullish(),
3342
+ created: z3.number().nullish(),
3343
+ model: z3.string().nullish(),
3344
+ choices: z3.array(
3345
+ z3.object({
3346
+ message: z3.object({
3347
+ role: z3.literal("assistant").nullish(),
3348
+ content: z3.string().nullish(),
3349
+ reasoning_content: z3.string().nullish(),
3350
+ reasoning: z3.string().nullish(),
3351
+ tool_calls: z3.array(
3352
+ z3.object({
3353
+ id: z3.string().nullish(),
3354
+ function: z3.object({
3355
+ name: z3.string(),
3356
+ arguments: z3.string()
3357
+ }),
3358
+ // Support for Google Gemini thought signatures via OpenAI compatibility
3359
+ extra_content: z3.object({
3360
+ google: z3.object({
3361
+ thought_signature: z3.string().nullish()
3362
+ }).nullish()
3363
+ }).nullish()
3364
+ })
3365
+ ).nullish()
3366
+ }),
3367
+ finish_reason: z3.string().nullish()
3368
+ })
3369
+ ),
3370
+ usage: openaiCompatibleTokenUsageSchema
3371
+ });
3372
+ var chunkBaseSchema = z3.looseObject({
3373
+ id: z3.string().nullish(),
3374
+ created: z3.number().nullish(),
3375
+ model: z3.string().nullish(),
3376
+ choices: z3.array(
3377
+ z3.object({
3378
+ delta: z3.object({
3379
+ role: z3.enum(["assistant"]).nullish(),
3380
+ content: z3.string().nullish(),
3381
+ // Most openai-compatible models set `reasoning_content`, but some
3382
+ // providers serving `gpt-oss` set `reasoning`. See #7866
3383
+ reasoning_content: z3.string().nullish(),
3384
+ reasoning: z3.string().nullish(),
3385
+ tool_calls: z3.array(
3386
+ z3.object({
3387
+ index: z3.number().nullish(),
3388
+ // Google does not send index
3389
+ id: z3.string().nullish(),
3390
+ function: z3.object({
3391
+ name: z3.string().nullish(),
3392
+ arguments: z3.string().nullish()
3393
+ }),
3394
+ // Support for Google Gemini thought signatures via OpenAI compatibility
3395
+ extra_content: z3.object({
3396
+ google: z3.object({
3397
+ thought_signature: z3.string().nullish()
3398
+ }).nullish()
3399
+ }).nullish()
3400
+ })
3401
+ ).nullish()
3402
+ }).nullish(),
3403
+ finish_reason: z3.string().nullish()
3404
+ })
3405
+ ),
3406
+ usage: openaiCompatibleTokenUsageSchema
3407
+ });
3408
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([chunkBaseSchema, errorSchema]);
3409
+ function convertOpenAICompatibleCompletionUsage(usage) {
3410
+ var _a16, _b16;
3411
+ if (usage == null) {
3412
+ return {
3413
+ inputTokens: {
3414
+ total: void 0,
3415
+ noCache: void 0,
3416
+ cacheRead: void 0,
3417
+ cacheWrite: void 0
3418
+ },
3419
+ outputTokens: {
3420
+ total: void 0,
3421
+ text: void 0,
3422
+ reasoning: void 0
3423
+ },
3424
+ raw: void 0
2583
3425
  };
2584
- if (stream) body.stream_options = { include_usage: true };
2585
- if (options.temperature !== void 0)
2586
- body.temperature = options.temperature;
2587
- if (options.maxOutputTokens !== void 0)
2588
- body.max_tokens = options.maxOutputTokens;
2589
- if (options.topP !== void 0) body.top_p = options.topP;
2590
- if (options.frequencyPenalty !== void 0)
2591
- body.frequency_penalty = options.frequencyPenalty;
2592
- if (options.presencePenalty !== void 0)
2593
- body.presence_penalty = options.presencePenalty;
2594
- if (options.stopSequences !== void 0) body.stop = options.stopSequences;
2595
- if (options.seed !== void 0) body.seed = options.seed;
2596
- const convertedTools = convertToOpenAITools(tools);
2597
- if (convertedTools !== void 0) body.tools = convertedTools;
2598
- const convertedToolChoice = convertToOpenAIToolChoice(options.toolChoice);
2599
- if (convertedToolChoice !== void 0)
2600
- body.tool_choice = convertedToolChoice;
2601
- if (responseFormat !== void 0) body.response_format = responseFormat;
2602
- if (this.settings.user !== void 0) body.user = this.settings.user;
2603
- if (this.settings.logprobs !== void 0)
2604
- body.logprobs = this.settings.logprobs;
2605
- if (this.settings.topLogprobs !== void 0)
2606
- body.top_logprobs = this.settings.topLogprobs;
2607
- return body;
2608
- }
2609
- filterFunctionTools(tools) {
2610
- if (!tools) return void 0;
2611
- return tools.filter(
2612
- (tool) => tool.type === "function"
2613
- );
2614
3426
  }
2615
- };
3427
+ const promptTokens = (_a16 = usage.prompt_tokens) != null ? _a16 : 0;
3428
+ const completionTokens = (_b16 = usage.completion_tokens) != null ? _b16 : 0;
3429
+ return {
3430
+ inputTokens: {
3431
+ total: promptTokens,
3432
+ noCache: promptTokens,
3433
+ cacheRead: void 0,
3434
+ cacheWrite: void 0
3435
+ },
3436
+ outputTokens: {
3437
+ total: completionTokens,
3438
+ text: completionTokens,
3439
+ reasoning: void 0
3440
+ },
3441
+ raw: usage
3442
+ };
3443
+ }
3444
+ function convertToOpenAICompatibleCompletionPrompt({
3445
+ prompt,
3446
+ user = "user",
3447
+ assistant = "assistant"
3448
+ }) {
3449
+ let text = "";
3450
+ if (prompt[0].role === "system") {
3451
+ text += `${prompt[0].content}
3452
+
3453
+ `;
3454
+ prompt = prompt.slice(1);
3455
+ }
3456
+ for (const { role, content } of prompt) {
3457
+ switch (role) {
3458
+ case "system": {
3459
+ throw new InvalidPromptError({
3460
+ message: "Unexpected system message in prompt: ${content}",
3461
+ prompt
3462
+ });
3463
+ }
3464
+ case "user": {
3465
+ const userMessage = content.map((part) => {
3466
+ switch (part.type) {
3467
+ case "text": {
3468
+ return part.text;
3469
+ }
3470
+ }
3471
+ }).filter(Boolean).join("");
3472
+ text += `${user}:
3473
+ ${userMessage}
3474
+
3475
+ `;
3476
+ break;
3477
+ }
3478
+ case "assistant": {
3479
+ const assistantMessage = content.map((part) => {
3480
+ switch (part.type) {
3481
+ case "text": {
3482
+ return part.text;
3483
+ }
3484
+ case "tool-call": {
3485
+ throw new UnsupportedFunctionalityError({
3486
+ functionality: "tool-call messages"
3487
+ });
3488
+ }
3489
+ }
3490
+ }).join("");
3491
+ text += `${assistant}:
3492
+ ${assistantMessage}
2616
3493
 
2617
- // src/apertis-chat-language-model-v2.ts
2618
- var ApertisChatLanguageModelV2 = class {
2619
- constructor(modelId, settings, config) {
3494
+ `;
3495
+ break;
3496
+ }
3497
+ case "tool": {
3498
+ throw new UnsupportedFunctionalityError({
3499
+ functionality: "tool messages"
3500
+ });
3501
+ }
3502
+ default: {
3503
+ const _exhaustiveCheck = role;
3504
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
3505
+ }
3506
+ }
3507
+ }
3508
+ text += `${assistant}:
3509
+ `;
3510
+ return {
3511
+ prompt: text,
3512
+ stopSequences: [`
3513
+ ${user}:`]
3514
+ };
3515
+ }
3516
+ function getResponseMetadata2({
3517
+ id,
3518
+ model,
3519
+ created
3520
+ }) {
3521
+ return {
3522
+ id: id != null ? id : void 0,
3523
+ modelId: model != null ? model : void 0,
3524
+ timestamp: created != null ? new Date(created * 1e3) : void 0
3525
+ };
3526
+ }
3527
+ function mapOpenAICompatibleFinishReason2(finishReason) {
3528
+ switch (finishReason) {
3529
+ case "stop":
3530
+ return "stop";
3531
+ case "length":
3532
+ return "length";
3533
+ case "content_filter":
3534
+ return "content-filter";
3535
+ case "function_call":
3536
+ case "tool_calls":
3537
+ return "tool-calls";
3538
+ default:
3539
+ return "other";
3540
+ }
3541
+ }
3542
+ var openaiCompatibleLanguageModelCompletionOptions = z42.object({
3543
+ /**
3544
+ * Echo back the prompt in addition to the completion.
3545
+ */
3546
+ echo: z42.boolean().optional(),
3547
+ /**
3548
+ * Modify the likelihood of specified tokens appearing in the completion.
3549
+ *
3550
+ * Accepts a JSON object that maps tokens (specified by their token ID in
3551
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
3552
+ */
3553
+ logitBias: z42.record(z42.string(), z42.number()).optional(),
3554
+ /**
3555
+ * The suffix that comes after a completion of inserted text.
3556
+ */
3557
+ suffix: z42.string().optional(),
3558
+ /**
3559
+ * A unique identifier representing your end-user, which can help providers to
3560
+ * monitor and detect abuse.
3561
+ */
3562
+ user: z42.string().optional()
3563
+ });
3564
+ var OpenAICompatibleCompletionLanguageModel = class {
3565
+ // type inferred via constructor
3566
+ constructor(modelId, config) {
3567
+ this.specificationVersion = "v3";
3568
+ var _a16;
2620
3569
  this.modelId = modelId;
2621
- this.settings = settings;
2622
3570
  this.config = config;
3571
+ const errorStructure = (_a16 = config.errorStructure) != null ? _a16 : defaultOpenAICompatibleErrorStructure;
3572
+ this.chunkSchema = createOpenAICompatibleCompletionChunkSchema(
3573
+ errorStructure.errorSchema
3574
+ );
3575
+ this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
2623
3576
  }
2624
- specificationVersion = "v2";
2625
- supportedUrls = {
2626
- "image/*": [/^https?:\/\/.+$/]
2627
- };
2628
3577
  get provider() {
2629
3578
  return this.config.provider;
2630
3579
  }
3580
+ get providerOptionsName() {
3581
+ return this.config.provider.split(".")[0].trim();
3582
+ }
3583
+ get supportedUrls() {
3584
+ var _a16, _b16, _c;
3585
+ return (_c = (_b16 = (_a16 = this.config).supportedUrls) == null ? void 0 : _b16.call(_a16)) != null ? _c : {};
3586
+ }
3587
+ async getArgs({
3588
+ prompt,
3589
+ maxOutputTokens,
3590
+ temperature,
3591
+ topP,
3592
+ topK,
3593
+ frequencyPenalty,
3594
+ presencePenalty,
3595
+ stopSequences: userStopSequences,
3596
+ responseFormat,
3597
+ seed,
3598
+ providerOptions,
3599
+ tools,
3600
+ toolChoice
3601
+ }) {
3602
+ var _a16, _b16;
3603
+ const warnings = [];
3604
+ const completionOptions = Object.assign(
3605
+ (_a16 = await parseProviderOptions({
3606
+ provider: this.providerOptionsName,
3607
+ providerOptions,
3608
+ schema: openaiCompatibleLanguageModelCompletionOptions
3609
+ })) != null ? _a16 : {},
3610
+ (_b16 = await parseProviderOptions({
3611
+ provider: toCamelCase(this.providerOptionsName),
3612
+ providerOptions,
3613
+ schema: openaiCompatibleLanguageModelCompletionOptions
3614
+ })) != null ? _b16 : {}
3615
+ );
3616
+ if (topK != null) {
3617
+ warnings.push({ type: "unsupported", feature: "topK" });
3618
+ }
3619
+ if (tools == null ? void 0 : tools.length) {
3620
+ warnings.push({ type: "unsupported", feature: "tools" });
3621
+ }
3622
+ if (toolChoice != null) {
3623
+ warnings.push({ type: "unsupported", feature: "toolChoice" });
3624
+ }
3625
+ if (responseFormat != null && responseFormat.type !== "text") {
3626
+ warnings.push({
3627
+ type: "unsupported",
3628
+ feature: "responseFormat",
3629
+ details: "JSON response format is not supported."
3630
+ });
3631
+ }
3632
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompatibleCompletionPrompt({ prompt });
3633
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
3634
+ return {
3635
+ args: {
3636
+ // model id:
3637
+ model: this.modelId,
3638
+ // model specific settings:
3639
+ echo: completionOptions.echo,
3640
+ logit_bias: completionOptions.logitBias,
3641
+ suffix: completionOptions.suffix,
3642
+ user: completionOptions.user,
3643
+ // standardized settings:
3644
+ max_tokens: maxOutputTokens,
3645
+ temperature,
3646
+ top_p: topP,
3647
+ frequency_penalty: frequencyPenalty,
3648
+ presence_penalty: presencePenalty,
3649
+ seed,
3650
+ ...providerOptions == null ? void 0 : providerOptions[this.providerOptionsName],
3651
+ ...providerOptions == null ? void 0 : providerOptions[toCamelCase(this.providerOptionsName)],
3652
+ // prompt:
3653
+ prompt: completionPrompt,
3654
+ // stop sequences:
3655
+ stop: stop.length > 0 ? stop : void 0
3656
+ },
3657
+ warnings
3658
+ };
3659
+ }
2631
3660
  async doGenerate(options) {
2632
- const body = this.buildRequestBody(options, false);
2633
- const { value: response } = await postJsonToApi({
2634
- url: `${this.config.baseURL}/chat/completions`,
2635
- headers: this.config.headers(),
2636
- body,
2637
- failedResponseHandler: apertisFailedResponseHandler,
3661
+ const { args, warnings } = await this.getArgs(options);
3662
+ const {
3663
+ responseHeaders,
3664
+ value: response,
3665
+ rawValue: rawResponse
3666
+ } = await postJsonToApi({
3667
+ url: this.config.url({
3668
+ path: "/completions",
3669
+ modelId: this.modelId
3670
+ }),
3671
+ headers: combineHeaders(this.config.headers(), options.headers),
3672
+ body: args,
3673
+ failedResponseHandler: this.failedResponseHandler,
2638
3674
  successfulResponseHandler: createJsonResponseHandler(
2639
- openAIChatResponseSchema
3675
+ openaiCompatibleCompletionResponseSchema
2640
3676
  ),
2641
- fetch: this.config.fetch,
2642
- abortSignal: options.abortSignal
3677
+ abortSignal: options.abortSignal,
3678
+ fetch: this.config.fetch
2643
3679
  });
2644
3680
  const choice = response.choices[0];
2645
3681
  const content = [];
2646
- if (choice.message.content) {
2647
- content.push({
2648
- type: "text",
2649
- text: choice.message.content
2650
- });
2651
- }
2652
- if (choice.message.tool_calls) {
2653
- for (const tc of choice.message.tool_calls) {
2654
- content.push({
2655
- type: "tool-call",
2656
- toolCallId: tc.id,
2657
- toolName: tc.function.name,
2658
- input: tc.function.arguments
2659
- });
2660
- }
3682
+ if (choice.text != null && choice.text.length > 0) {
3683
+ content.push({ type: "text", text: choice.text });
2661
3684
  }
2662
3685
  return {
2663
3686
  content,
2664
- finishReason: mapApertisFinishReasonV2(choice.finish_reason),
2665
- usage: {
2666
- inputTokens: response.usage?.prompt_tokens ?? 0,
2667
- outputTokens: response.usage?.completion_tokens ?? 0,
2668
- totalTokens: response.usage?.total_tokens ?? void 0
3687
+ usage: convertOpenAICompatibleCompletionUsage(response.usage),
3688
+ finishReason: {
3689
+ unified: mapOpenAICompatibleFinishReason2(choice.finish_reason),
3690
+ raw: choice.finish_reason
3691
+ },
3692
+ request: { body: args },
3693
+ response: {
3694
+ ...getResponseMetadata2(response),
3695
+ headers: responseHeaders,
3696
+ body: rawResponse
2669
3697
  },
2670
- warnings: [],
2671
- request: { body }
3698
+ warnings
2672
3699
  };
2673
3700
  }
2674
3701
  async doStream(options) {
2675
- const body = this.buildRequestBody(options, true);
2676
- const { value: response } = await postJsonToApi({
2677
- url: `${this.config.baseURL}/chat/completions`,
2678
- headers: this.config.headers(),
3702
+ const { args, warnings } = await this.getArgs(options);
3703
+ const body = {
3704
+ ...args,
3705
+ stream: true,
3706
+ // only include stream_options when in strict compatibility mode:
3707
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
3708
+ };
3709
+ const { responseHeaders, value: response } = await postJsonToApi({
3710
+ url: this.config.url({
3711
+ path: "/completions",
3712
+ modelId: this.modelId
3713
+ }),
3714
+ headers: combineHeaders(this.config.headers(), options.headers),
2679
3715
  body,
2680
- failedResponseHandler: apertisFailedResponseHandler,
3716
+ failedResponseHandler: this.failedResponseHandler,
2681
3717
  successfulResponseHandler: createEventSourceResponseHandler(
2682
- openAIChatChunkSchema
3718
+ this.chunkSchema
2683
3719
  ),
2684
- fetch: this.config.fetch,
2685
- abortSignal: options.abortSignal
3720
+ abortSignal: options.abortSignal,
3721
+ fetch: this.config.fetch
2686
3722
  });
2687
- const toolCallBuffers = /* @__PURE__ */ new Map();
2688
- let textId = null;
2689
- const transformStream = new TransformStream({
2690
- transform(parseResult, controller) {
2691
- if (!parseResult.success) {
2692
- return;
2693
- }
2694
- const chunk = parseResult.value;
2695
- const choice = chunk.choices[0];
2696
- if (!choice) return;
2697
- if (choice.delta.content) {
2698
- if (!textId) {
2699
- textId = generateId();
2700
- controller.enqueue({
2701
- type: "text-start",
2702
- id: textId
2703
- });
2704
- }
2705
- controller.enqueue({
2706
- type: "text-delta",
2707
- id: textId,
2708
- delta: choice.delta.content
2709
- });
2710
- }
2711
- if (choice.delta.tool_calls) {
2712
- for (const tc of choice.delta.tool_calls) {
2713
- let buffer = toolCallBuffers.get(tc.index);
2714
- if (!buffer) {
2715
- buffer = {
2716
- id: tc.id ?? generateId(),
2717
- name: "",
2718
- arguments: ""
3723
+ let finishReason = {
3724
+ unified: "other",
3725
+ raw: void 0
3726
+ };
3727
+ let usage = void 0;
3728
+ let isFirstChunk = true;
3729
+ return {
3730
+ stream: response.pipeThrough(
3731
+ new TransformStream({
3732
+ start(controller) {
3733
+ controller.enqueue({ type: "stream-start", warnings });
3734
+ },
3735
+ transform(chunk, controller) {
3736
+ var _a16;
3737
+ if (options.includeRawChunks) {
3738
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3739
+ }
3740
+ if (!chunk.success) {
3741
+ finishReason = { unified: "error", raw: void 0 };
3742
+ controller.enqueue({ type: "error", error: chunk.error });
3743
+ return;
3744
+ }
3745
+ const value = chunk.value;
3746
+ if ("error" in value) {
3747
+ finishReason = { unified: "error", raw: void 0 };
3748
+ controller.enqueue({ type: "error", error: value.error });
3749
+ return;
3750
+ }
3751
+ if (isFirstChunk) {
3752
+ isFirstChunk = false;
3753
+ controller.enqueue({
3754
+ type: "response-metadata",
3755
+ ...getResponseMetadata2(value)
3756
+ });
3757
+ controller.enqueue({
3758
+ type: "text-start",
3759
+ id: "0"
3760
+ });
3761
+ }
3762
+ if (value.usage != null) {
3763
+ usage = value.usage;
3764
+ }
3765
+ const choice = value.choices[0];
3766
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
3767
+ finishReason = {
3768
+ unified: mapOpenAICompatibleFinishReason2(choice.finish_reason),
3769
+ raw: (_a16 = choice.finish_reason) != null ? _a16 : void 0
2719
3770
  };
2720
- toolCallBuffers.set(tc.index, buffer);
2721
3771
  }
2722
- if (tc.id) buffer.id = tc.id;
2723
- if (tc.function?.name) buffer.name += tc.function.name;
2724
- if (tc.function?.arguments)
2725
- buffer.arguments += tc.function.arguments;
2726
- }
2727
- }
2728
- if (choice.finish_reason) {
2729
- if (textId) {
2730
- controller.enqueue({
2731
- type: "text-end",
2732
- id: textId
2733
- });
2734
- textId = null;
2735
- }
2736
- for (const [, buffer] of toolCallBuffers) {
2737
- if (buffer.name) {
3772
+ if ((choice == null ? void 0 : choice.text) != null) {
2738
3773
  controller.enqueue({
2739
- type: "tool-call",
2740
- toolCallId: buffer.id,
2741
- toolName: buffer.name,
2742
- input: buffer.arguments
3774
+ type: "text-delta",
3775
+ id: "0",
3776
+ delta: choice.text
2743
3777
  });
2744
3778
  }
2745
- }
2746
- toolCallBuffers.clear();
2747
- controller.enqueue({
2748
- type: "finish",
2749
- finishReason: mapApertisFinishReasonV2(choice.finish_reason),
2750
- usage: {
2751
- inputTokens: chunk.usage?.prompt_tokens ?? 0,
2752
- outputTokens: chunk.usage?.completion_tokens ?? 0,
2753
- totalTokens: void 0
3779
+ },
3780
+ flush(controller) {
3781
+ if (!isFirstChunk) {
3782
+ controller.enqueue({ type: "text-end", id: "0" });
2754
3783
  }
2755
- });
2756
- }
2757
- },
2758
- flush(controller) {
2759
- if (textId) {
2760
- controller.enqueue({
2761
- type: "text-end",
2762
- id: textId
2763
- });
2764
- }
2765
- for (const [, buffer] of toolCallBuffers) {
2766
- if (buffer.name) {
2767
3784
  controller.enqueue({
2768
- type: "tool-call",
2769
- toolCallId: buffer.id,
2770
- toolName: buffer.name,
2771
- input: buffer.arguments
3785
+ type: "finish",
3786
+ finishReason,
3787
+ usage: convertOpenAICompatibleCompletionUsage(usage)
2772
3788
  });
2773
3789
  }
2774
- }
2775
- }
2776
- });
2777
- return {
2778
- stream: response.pipeThrough(transformStream),
2779
- request: { body }
2780
- };
2781
- }
2782
- buildRequestBody(options, stream) {
2783
- const tools = this.filterFunctionTools(options.tools);
2784
- const responseFormat = options.responseFormat?.type === "json" ? { type: "json_object" } : void 0;
2785
- const body = {
2786
- model: this.modelId,
2787
- messages: convertToOpenAIMessages(options.prompt),
2788
- stream
3790
+ })
3791
+ ),
3792
+ request: { body },
3793
+ response: { headers: responseHeaders }
2789
3794
  };
2790
- if (stream) body.stream_options = { include_usage: true };
2791
- if (options.temperature !== void 0)
2792
- body.temperature = options.temperature;
2793
- if (options.maxOutputTokens !== void 0)
2794
- body.max_tokens = options.maxOutputTokens;
2795
- if (options.topP !== void 0) body.top_p = options.topP;
2796
- if (options.frequencyPenalty !== void 0)
2797
- body.frequency_penalty = options.frequencyPenalty;
2798
- if (options.presencePenalty !== void 0)
2799
- body.presence_penalty = options.presencePenalty;
2800
- if (options.stopSequences !== void 0) body.stop = options.stopSequences;
2801
- if (options.seed !== void 0) body.seed = options.seed;
2802
- const convertedTools = convertToOpenAITools(tools);
2803
- if (convertedTools !== void 0) body.tools = convertedTools;
2804
- const convertedToolChoice = convertToOpenAIToolChoice(options.toolChoice);
2805
- if (convertedToolChoice !== void 0)
2806
- body.tool_choice = convertedToolChoice;
2807
- if (responseFormat !== void 0) body.response_format = responseFormat;
2808
- if (this.settings.user !== void 0) body.user = this.settings.user;
2809
- if (this.settings.logprobs !== void 0)
2810
- body.logprobs = this.settings.logprobs;
2811
- if (this.settings.topLogprobs !== void 0)
2812
- body.top_logprobs = this.settings.topLogprobs;
2813
- return body;
2814
- }
2815
- filterFunctionTools(tools) {
2816
- if (!tools) return void 0;
2817
- return tools.filter(
2818
- (tool) => tool.type === "function"
2819
- );
2820
3795
  }
2821
3796
  };
2822
-
2823
- // src/schemas/embedding-response.ts
2824
- import { z as z3 } from "zod";
2825
- var openAIEmbeddingResponseSchema = z3.object({
2826
- object: z3.literal("list").optional(),
2827
- data: z3.array(
2828
- z3.object({
2829
- object: z3.literal("embedding").optional(),
2830
- embedding: z3.array(z3.number()),
2831
- index: z3.number()
3797
+ var usageSchema = z5.object({
3798
+ prompt_tokens: z5.number(),
3799
+ completion_tokens: z5.number(),
3800
+ total_tokens: z5.number()
3801
+ });
3802
+ var openaiCompatibleCompletionResponseSchema = z5.object({
3803
+ id: z5.string().nullish(),
3804
+ created: z5.number().nullish(),
3805
+ model: z5.string().nullish(),
3806
+ choices: z5.array(
3807
+ z5.object({
3808
+ text: z5.string(),
3809
+ finish_reason: z5.string()
2832
3810
  })
2833
3811
  ),
2834
- model: z3.string().optional(),
2835
- usage: z3.object({
2836
- prompt_tokens: z3.number(),
2837
- total_tokens: z3.number()
2838
- }).optional()
3812
+ usage: usageSchema.nullish()
2839
3813
  });
2840
-
2841
- // src/apertis-embedding-model.ts
2842
- var ApertisEmbeddingModel = class {
2843
- constructor(modelId, settings, config) {
3814
+ var createOpenAICompatibleCompletionChunkSchema = (errorSchema) => z5.union([
3815
+ z5.object({
3816
+ id: z5.string().nullish(),
3817
+ created: z5.number().nullish(),
3818
+ model: z5.string().nullish(),
3819
+ choices: z5.array(
3820
+ z5.object({
3821
+ text: z5.string(),
3822
+ finish_reason: z5.string().nullish(),
3823
+ index: z5.number()
3824
+ })
3825
+ ),
3826
+ usage: usageSchema.nullish()
3827
+ }),
3828
+ errorSchema
3829
+ ]);
3830
+ var openaiCompatibleEmbeddingModelOptions = z6.object({
3831
+ /**
3832
+ * The number of dimensions the resulting output embeddings should have.
3833
+ * Only supported in text-embedding-3 and later models.
3834
+ */
3835
+ dimensions: z6.number().optional(),
3836
+ /**
3837
+ * A unique identifier representing your end-user, which can help providers to
3838
+ * monitor and detect abuse.
3839
+ */
3840
+ user: z6.string().optional()
3841
+ });
3842
+ var OpenAICompatibleEmbeddingModel = class {
3843
+ constructor(modelId, config) {
3844
+ this.specificationVersion = "v3";
2844
3845
  this.modelId = modelId;
2845
- this.settings = settings;
2846
3846
  this.config = config;
2847
- this.maxEmbeddingsPerCall = settings.maxEmbeddingsPerCall ?? 2048;
2848
- this.supportsParallelCalls = settings.supportsParallelCalls ?? true;
2849
3847
  }
2850
- specificationVersion = "v3";
2851
- maxEmbeddingsPerCall;
2852
- supportsParallelCalls;
2853
3848
  get provider() {
2854
3849
  return this.config.provider;
2855
3850
  }
2856
- async doEmbed(options) {
2857
- const body = {
2858
- model: this.modelId,
2859
- input: options.values,
2860
- encoding_format: "float"
2861
- };
2862
- if (this.settings.dimensions !== void 0) {
2863
- body.dimensions = this.settings.dimensions;
3851
+ get maxEmbeddingsPerCall() {
3852
+ var _a16;
3853
+ return (_a16 = this.config.maxEmbeddingsPerCall) != null ? _a16 : 2048;
3854
+ }
3855
+ get supportsParallelCalls() {
3856
+ var _a16;
3857
+ return (_a16 = this.config.supportsParallelCalls) != null ? _a16 : true;
3858
+ }
3859
+ get providerOptionsName() {
3860
+ return this.config.provider.split(".")[0].trim();
3861
+ }
3862
+ async doEmbed({
3863
+ values,
3864
+ headers,
3865
+ abortSignal,
3866
+ providerOptions
3867
+ }) {
3868
+ var _a16, _b16, _c;
3869
+ const warnings = [];
3870
+ const deprecatedOptions = await parseProviderOptions({
3871
+ provider: "openai-compatible",
3872
+ providerOptions,
3873
+ schema: openaiCompatibleEmbeddingModelOptions
3874
+ });
3875
+ if (deprecatedOptions != null) {
3876
+ warnings.push({
3877
+ type: "other",
3878
+ message: `The 'openai-compatible' key in providerOptions is deprecated. Use 'openaiCompatible' instead.`
3879
+ });
2864
3880
  }
2865
- if (this.settings.user !== void 0) {
2866
- body.user = this.settings.user;
3881
+ const compatibleOptions = Object.assign(
3882
+ deprecatedOptions != null ? deprecatedOptions : {},
3883
+ (_a16 = await parseProviderOptions({
3884
+ provider: "openaiCompatible",
3885
+ providerOptions,
3886
+ schema: openaiCompatibleEmbeddingModelOptions
3887
+ })) != null ? _a16 : {},
3888
+ (_b16 = await parseProviderOptions({
3889
+ provider: this.providerOptionsName,
3890
+ providerOptions,
3891
+ schema: openaiCompatibleEmbeddingModelOptions
3892
+ })) != null ? _b16 : {}
3893
+ );
3894
+ if (values.length > this.maxEmbeddingsPerCall) {
3895
+ throw new TooManyEmbeddingValuesForCallError({
3896
+ provider: this.provider,
3897
+ modelId: this.modelId,
3898
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
3899
+ values
3900
+ });
2867
3901
  }
2868
- const { value: response } = await postJsonToApi({
2869
- url: `${this.config.baseURL}/embeddings`,
2870
- headers: this.config.headers(),
2871
- body,
2872
- failedResponseHandler: apertisFailedResponseHandler,
3902
+ const {
3903
+ responseHeaders,
3904
+ value: response,
3905
+ rawValue
3906
+ } = await postJsonToApi({
3907
+ url: this.config.url({
3908
+ path: "/embeddings",
3909
+ modelId: this.modelId
3910
+ }),
3911
+ headers: combineHeaders(this.config.headers(), headers),
3912
+ body: {
3913
+ model: this.modelId,
3914
+ input: values,
3915
+ encoding_format: "float",
3916
+ dimensions: compatibleOptions.dimensions,
3917
+ user: compatibleOptions.user
3918
+ },
3919
+ failedResponseHandler: createJsonErrorResponseHandler(
3920
+ (_c = this.config.errorStructure) != null ? _c : defaultOpenAICompatibleErrorStructure
3921
+ ),
2873
3922
  successfulResponseHandler: createJsonResponseHandler(
2874
- openAIEmbeddingResponseSchema
3923
+ openaiTextEmbeddingResponseSchema
2875
3924
  ),
2876
- fetch: this.config.fetch,
2877
- abortSignal: options.abortSignal
3925
+ abortSignal,
3926
+ fetch: this.config.fetch
2878
3927
  });
2879
3928
  return {
3929
+ warnings,
2880
3930
  embeddings: response.data.map((item) => item.embedding),
2881
3931
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
2882
- warnings: []
3932
+ providerMetadata: response.providerMetadata,
3933
+ response: { headers: responseHeaders, body: rawValue }
2883
3934
  };
2884
3935
  }
2885
3936
  };
2886
-
2887
- // src/apertis-embedding-model-v2.ts
2888
- var ApertisEmbeddingModelV2 = class {
2889
- constructor(modelId, settings, config) {
3937
+ var openaiTextEmbeddingResponseSchema = z7.object({
3938
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
3939
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish(),
3940
+ providerMetadata: z7.record(z7.string(), z7.record(z7.string(), z7.any())).optional()
3941
+ });
3942
+ var OpenAICompatibleImageModel = class {
3943
+ constructor(modelId, config) {
2890
3944
  this.modelId = modelId;
2891
- this.settings = settings;
2892
3945
  this.config = config;
2893
- this.maxEmbeddingsPerCall = settings.maxEmbeddingsPerCall ?? 2048;
2894
- this.supportsParallelCalls = settings.supportsParallelCalls ?? true;
3946
+ this.specificationVersion = "v3";
3947
+ this.maxImagesPerCall = 10;
2895
3948
  }
2896
- specificationVersion = "v2";
2897
- maxEmbeddingsPerCall;
2898
- supportsParallelCalls;
2899
3949
  get provider() {
2900
3950
  return this.config.provider;
2901
3951
  }
2902
- async doEmbed(options) {
2903
- const body = {
2904
- model: this.modelId,
2905
- input: options.values,
2906
- encoding_format: "float"
3952
+ /**
3953
+ * The provider options key used to extract provider-specific options.
3954
+ */
3955
+ get providerOptionsKey() {
3956
+ return this.config.provider.split(".")[0].trim();
3957
+ }
3958
+ // TODO: deprecate non-camelCase keys and remove in future major version
3959
+ getArgs(providerOptions) {
3960
+ return {
3961
+ ...providerOptions[this.providerOptionsKey],
3962
+ ...providerOptions[toCamelCase(this.providerOptionsKey)]
2907
3963
  };
2908
- if (this.settings.dimensions !== void 0) {
2909
- body.dimensions = this.settings.dimensions;
3964
+ }
3965
+ async doGenerate({
3966
+ prompt,
3967
+ n,
3968
+ size,
3969
+ aspectRatio,
3970
+ seed,
3971
+ providerOptions,
3972
+ headers,
3973
+ abortSignal,
3974
+ files,
3975
+ mask
3976
+ }) {
3977
+ var _a16, _b16, _c, _d, _e;
3978
+ const warnings = [];
3979
+ if (aspectRatio != null) {
3980
+ warnings.push({
3981
+ type: "unsupported",
3982
+ feature: "aspectRatio",
3983
+ details: "This model does not support aspect ratio. Use `size` instead."
3984
+ });
2910
3985
  }
2911
- if (this.settings.user !== void 0) {
2912
- body.user = this.settings.user;
3986
+ if (seed != null) {
3987
+ warnings.push({ type: "unsupported", feature: "seed" });
2913
3988
  }
2914
- const { value: response } = await postJsonToApi({
2915
- url: `${this.config.baseURL}/embeddings`,
2916
- headers: this.config.headers(),
2917
- body,
2918
- failedResponseHandler: apertisFailedResponseHandler,
3989
+ const currentDate = (_c = (_b16 = (_a16 = this.config._internal) == null ? void 0 : _a16.currentDate) == null ? void 0 : _b16.call(_a16)) != null ? _c : /* @__PURE__ */ new Date();
3990
+ const args = this.getArgs(providerOptions);
3991
+ if (files != null && files.length > 0) {
3992
+ const { value: response2, responseHeaders: responseHeaders2 } = await postFormDataToApi({
3993
+ url: this.config.url({
3994
+ path: "/images/edits",
3995
+ modelId: this.modelId
3996
+ }),
3997
+ headers: combineHeaders(this.config.headers(), headers),
3998
+ formData: convertToFormData({
3999
+ model: this.modelId,
4000
+ prompt,
4001
+ image: await Promise.all(files.map((file) => fileToBlob(file))),
4002
+ mask: mask != null ? await fileToBlob(mask) : void 0,
4003
+ n,
4004
+ size,
4005
+ ...args
4006
+ }),
4007
+ failedResponseHandler: createJsonErrorResponseHandler(
4008
+ (_d = this.config.errorStructure) != null ? _d : defaultOpenAICompatibleErrorStructure
4009
+ ),
4010
+ successfulResponseHandler: createJsonResponseHandler(
4011
+ openaiCompatibleImageResponseSchema
4012
+ ),
4013
+ abortSignal,
4014
+ fetch: this.config.fetch
4015
+ });
4016
+ return {
4017
+ images: response2.data.map((item) => item.b64_json),
4018
+ warnings,
4019
+ response: {
4020
+ timestamp: currentDate,
4021
+ modelId: this.modelId,
4022
+ headers: responseHeaders2
4023
+ }
4024
+ };
4025
+ }
4026
+ const { value: response, responseHeaders } = await postJsonToApi({
4027
+ url: this.config.url({
4028
+ path: "/images/generations",
4029
+ modelId: this.modelId
4030
+ }),
4031
+ headers: combineHeaders(this.config.headers(), headers),
4032
+ body: {
4033
+ model: this.modelId,
4034
+ prompt,
4035
+ n,
4036
+ size,
4037
+ ...args,
4038
+ response_format: "b64_json"
4039
+ },
4040
+ failedResponseHandler: createJsonErrorResponseHandler(
4041
+ (_e = this.config.errorStructure) != null ? _e : defaultOpenAICompatibleErrorStructure
4042
+ ),
2919
4043
  successfulResponseHandler: createJsonResponseHandler(
2920
- openAIEmbeddingResponseSchema
4044
+ openaiCompatibleImageResponseSchema
2921
4045
  ),
2922
- fetch: this.config.fetch,
2923
- abortSignal: options.abortSignal
4046
+ abortSignal,
4047
+ fetch: this.config.fetch
2924
4048
  });
2925
4049
  return {
2926
- embeddings: response.data.map((item) => item.embedding),
2927
- usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0
4050
+ images: response.data.map((item) => item.b64_json),
4051
+ warnings,
4052
+ response: {
4053
+ timestamp: currentDate,
4054
+ modelId: this.modelId,
4055
+ headers: responseHeaders
4056
+ }
2928
4057
  };
2929
4058
  }
2930
4059
  };
2931
-
2932
- // src/apertis-provider.ts
2933
- function initializeProvider(options = {}) {
2934
- const baseURL = withoutTrailingSlash(options.baseURL) ?? "https://api.apertis.ai/v1";
2935
- const getHeaders = () => ({
2936
- ...options.headers,
2937
- Authorization: `Bearer ${loadApiKey({
2938
- apiKey: options.apiKey,
2939
- environmentVariableName: "APERTIS_API_KEY",
2940
- description: "Apertis API key"
2941
- })}`,
2942
- "Content-Type": "application/json"
2943
- });
2944
- return { baseURL, getHeaders, fetch: options.fetch };
4060
+ var openaiCompatibleImageResponseSchema = z8.object({
4061
+ data: z8.array(z8.object({ b64_json: z8.string() }))
4062
+ });
4063
+ async function fileToBlob(file) {
4064
+ if (file.type === "url") {
4065
+ return downloadBlob(file.url);
4066
+ }
4067
+ const data = file.data instanceof Uint8Array ? file.data : convertBase64ToUint8Array(file.data);
4068
+ return new Blob([data], { type: file.mediaType });
2945
4069
  }
2946
- function createApertis(options = {}) {
2947
- const { baseURL, getHeaders, fetch: fetchImpl } = initializeProvider(options);
2948
- const createChatModel = (modelId, settings = {}) => new ApertisChatLanguageModelV2(modelId, settings, {
2949
- provider: "apertis.chat",
2950
- baseURL,
4070
+ var VERSION2 = true ? "2.0.41" : "0.0.0-test";
4071
+ function createOpenAICompatible(options) {
4072
+ const baseURL = withoutTrailingSlash(options.baseURL);
4073
+ const providerName = options.name;
4074
+ const headers = {
4075
+ ...options.apiKey && { Authorization: `Bearer ${options.apiKey}` },
4076
+ ...options.headers
4077
+ };
4078
+ const getHeaders = () => withUserAgentSuffix(headers, `ai-sdk/openai-compatible/${VERSION2}`);
4079
+ const getCommonModelConfig = (modelType) => ({
4080
+ provider: `${providerName}.${modelType}`,
4081
+ url: ({ path }) => {
4082
+ const url = new URL(`${baseURL}${path}`);
4083
+ if (options.queryParams) {
4084
+ url.search = new URLSearchParams(options.queryParams).toString();
4085
+ }
4086
+ return url.toString();
4087
+ },
2951
4088
  headers: getHeaders,
2952
- fetch: fetchImpl
4089
+ fetch: options.fetch
2953
4090
  });
2954
- const createEmbeddingModel = (modelId, settings = {}) => new ApertisEmbeddingModelV2(modelId, settings, {
2955
- provider: "apertis.embedding",
2956
- baseURL,
2957
- headers: getHeaders,
2958
- fetch: fetchImpl
4091
+ const createLanguageModel = (modelId) => createChatModel(modelId);
4092
+ const createChatModel = (modelId) => new OpenAICompatibleChatLanguageModel(modelId, {
4093
+ ...getCommonModelConfig("chat"),
4094
+ includeUsage: options.includeUsage,
4095
+ supportsStructuredOutputs: options.supportsStructuredOutputs,
4096
+ transformRequestBody: options.transformRequestBody,
4097
+ metadataExtractor: options.metadataExtractor
2959
4098
  });
2960
- const provider = Object.assign(
2961
- (modelId, settings) => createChatModel(modelId, settings),
2962
- {
2963
- specificationVersion: "v2",
2964
- chat: createChatModel,
2965
- languageModel: (modelId) => createChatModel(modelId),
2966
- textEmbeddingModel: createEmbeddingModel,
2967
- imageModel: () => {
2968
- throw new Error("Image models are not supported by Apertis");
2969
- }
2970
- }
2971
- );
2972
- return provider;
2973
- }
2974
- function createApertisV3(options = {}) {
2975
- const { baseURL, getHeaders, fetch: fetchImpl } = initializeProvider(options);
2976
- const createChatModel = (modelId, settings = {}) => new ApertisChatLanguageModel(modelId, settings, {
2977
- provider: "apertis.chat",
2978
- baseURL,
2979
- headers: getHeaders,
2980
- fetch: fetchImpl
4099
+ const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(modelId, {
4100
+ ...getCommonModelConfig("completion"),
4101
+ includeUsage: options.includeUsage
2981
4102
  });
2982
- const createEmbeddingModel = (modelId, settings = {}) => new ApertisEmbeddingModel(modelId, settings, {
2983
- provider: "apertis.embedding",
2984
- baseURL,
2985
- headers: getHeaders,
2986
- fetch: fetchImpl
4103
+ const createEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(modelId, {
4104
+ ...getCommonModelConfig("embedding")
2987
4105
  });
2988
- const provider = Object.assign(
2989
- (modelId, settings) => createChatModel(modelId, settings),
2990
- {
2991
- specificationVersion: "v3",
2992
- chat: createChatModel,
2993
- languageModel: (modelId) => createChatModel(modelId),
2994
- embeddingModel: (modelId) => createEmbeddingModel(modelId),
2995
- textEmbeddingModel: createEmbeddingModel,
2996
- imageModel: () => {
2997
- throw new Error("Image models are not supported by Apertis");
2998
- }
2999
- }
3000
- );
4106
+ const createImageModel = (modelId) => new OpenAICompatibleImageModel(modelId, getCommonModelConfig("image"));
4107
+ const provider = (modelId) => createLanguageModel(modelId);
4108
+ provider.specificationVersion = "v3";
4109
+ provider.languageModel = createLanguageModel;
4110
+ provider.chatModel = createChatModel;
4111
+ provider.completionModel = createCompletionModel;
4112
+ provider.embeddingModel = createEmbeddingModel;
4113
+ provider.textEmbeddingModel = createEmbeddingModel;
4114
+ provider.imageModel = createImageModel;
3001
4115
  return provider;
3002
4116
  }
4117
+
4118
+ // src/apertis-provider.ts
4119
+ function createApertis(options = {}) {
4120
+ return createOpenAICompatible({
4121
+ name: "apertis",
4122
+ baseURL: options.baseURL ?? "https://api.apertis.ai/v1",
4123
+ apiKey: options.apiKey ?? process.env.APERTIS_API_KEY,
4124
+ headers: options.headers,
4125
+ fetch: options.fetch
4126
+ });
4127
+ }
3003
4128
  var apertis = createApertis();
3004
4129
  export {
3005
4130
  apertis,
3006
- createApertis,
3007
- createApertisV3
4131
+ createApertis
3008
4132
  };
3009
4133
  //# sourceMappingURL=index.js.map
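
For reference, a minimal usage sketch of the consolidated provider API exported above. The model IDs below are illustrative placeholders, not identifiers shipped with the package:

import { apertis, createApertis } from "@apertis/ai-sdk-provider";

// Default instance: baseURL https://api.apertis.ai/v1, API key read from APERTIS_API_KEY.
const chatModel = apertis("example-chat-model"); // placeholder model id
const embeddingModel = apertis.textEmbeddingModel("example-embedding-model"); // placeholder model id

// Custom instance: createApertis accepts baseURL, apiKey, headers, and fetch overrides.
const custom = createApertis({
  baseURL: "https://api.apertis.ai/v1",
  apiKey: process.env.APERTIS_API_KEY,
});
const completionModel = custom.completionModel("example-completion-model"); // placeholder model id
const imageModel = custom.imageModel("example-image-model"); // placeholder model id

Note that createApertisV3 is removed in 3.0.0; the single createApertis factory now returns the v3-specification provider (provider.specificationVersion = "v3").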