@llmgateway/ai-sdk-provider 1.1.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,12 @@
  "use strict";
+ var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __defProps = Object.defineProperties;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __getOwnPropSymbols = Object.getOwnPropertySymbols;
+ var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __propIsEnum = Object.prototype.propertyIsEnumerable;
  var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
@@ -20,21 +22,9 @@ var __spreadValues = (a, b) => {
  return a;
  };
  var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
- var __objRest = (source, exclude) => {
- var target = {};
- for (var prop in source)
- if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
- target[prop] = source[prop];
- if (source != null && __getOwnPropSymbols)
- for (var prop of __getOwnPropSymbols(source)) {
- if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
- target[prop] = source[prop];
- }
- return target;
- };
  var __export = (target, all) => {
- for (var name in all)
- __defProp(target, name, { get: all[name], enumerable: true });
+ for (var name14 in all)
+ __defProp(target, name14, { get: all[name14], enumerable: true });
  };
  var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
@@ -44,6 +34,14 @@ var __copyProps = (to, from, except, desc) => {
  }
  return to;
  };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+ // If the importer is in node compatibility mode or this is not an ESM
+ // file that has been converted to a CommonJS file using a Babel-
+ // compatible transform (i.e. "__esModule" has not been set), then set
+ // "default" to the CommonJS "module.exports" for node compatibility.
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+ mod
+ ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
  // src/internal/index.ts
@@ -54,72 +52,960 @@ __export(index_exports, {
  });
  module.exports = __toCommonJS(index_exports);
 
- // src/schemas/reasoning-details.ts
+ // node_modules/.pnpm/@ai-sdk+provider@2.0.0-beta.1/node_modules/@ai-sdk/provider/dist/index.mjs
+ var marker = "vercel.ai.error";
+ var symbol = Symbol.for(marker);
+ var _a;
+ var _AISDKError = class _AISDKError2 extends Error {
+ /**
+ * Creates an AI SDK Error.
+ *
+ * @param {Object} params - The parameters for creating the error.
+ * @param {string} params.name - The name of the error.
+ * @param {string} params.message - The error message.
+ * @param {unknown} [params.cause] - The underlying cause of the error.
+ */
+ constructor({
+ name: name14,
+ message,
+ cause
+ }) {
+ super(message);
+ this[_a] = true;
+ this.name = name14;
+ this.cause = cause;
+ }
+ /**
+ * Checks if the given error is an AI SDK Error.
+ * @param {unknown} error - The error to check.
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
+ */
+ static isInstance(error) {
+ return _AISDKError2.hasMarker(error, marker);
+ }
+ static hasMarker(error, marker15) {
+ const markerSymbol = Symbol.for(marker15);
+ return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
+ }
+ };
+ _a = symbol;
+ var AISDKError = _AISDKError;
+ var name = "AI_APICallError";
+ var marker2 = `vercel.ai.error.${name}`;
+ var symbol2 = Symbol.for(marker2);
+ var _a2;
+ var APICallError = class extends AISDKError {
+ constructor({
+ message,
+ url,
+ requestBodyValues,
+ statusCode,
+ responseHeaders,
+ responseBody,
+ cause,
+ isRetryable = statusCode != null && (statusCode === 408 || // request timeout
+ statusCode === 409 || // conflict
+ statusCode === 429 || // too many requests
+ statusCode >= 500),
+ // server error
+ data
+ }) {
+ super({ name, message, cause });
+ this[_a2] = true;
+ this.url = url;
+ this.requestBodyValues = requestBodyValues;
+ this.statusCode = statusCode;
+ this.responseHeaders = responseHeaders;
+ this.responseBody = responseBody;
+ this.isRetryable = isRetryable;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker2);
+ }
+ };
+ _a2 = symbol2;
+ var name2 = "AI_EmptyResponseBodyError";
+ var marker3 = `vercel.ai.error.${name2}`;
+ var symbol3 = Symbol.for(marker3);
+ var _a3;
+ var EmptyResponseBodyError = class extends AISDKError {
+ // used in isInstance
+ constructor({ message = "Empty response body" } = {}) {
+ super({ name: name2, message });
+ this[_a3] = true;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker3);
+ }
+ };
+ _a3 = symbol3;
+ function getErrorMessage(error) {
+ if (error == null) {
+ return "unknown error";
+ }
+ if (typeof error === "string") {
+ return error;
+ }
+ if (error instanceof Error) {
+ return error.message;
+ }
+ return JSON.stringify(error);
+ }
+ var name3 = "AI_InvalidArgumentError";
+ var marker4 = `vercel.ai.error.${name3}`;
+ var symbol4 = Symbol.for(marker4);
+ var _a4;
+ var InvalidArgumentError = class extends AISDKError {
+ constructor({
+ message,
+ cause,
+ argument
+ }) {
+ super({ name: name3, message, cause });
+ this[_a4] = true;
+ this.argument = argument;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker4);
+ }
+ };
+ _a4 = symbol4;
+ var name4 = "AI_InvalidPromptError";
+ var marker5 = `vercel.ai.error.${name4}`;
+ var symbol5 = Symbol.for(marker5);
+ var _a5;
+ var InvalidPromptError = class extends AISDKError {
+ constructor({
+ prompt,
+ message,
+ cause
+ }) {
+ super({ name: name4, message: `Invalid prompt: ${message}`, cause });
+ this[_a5] = true;
+ this.prompt = prompt;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker5);
+ }
+ };
+ _a5 = symbol5;
+ var name5 = "AI_InvalidResponseDataError";
+ var marker6 = `vercel.ai.error.${name5}`;
+ var symbol6 = Symbol.for(marker6);
+ var _a6;
+ var InvalidResponseDataError = class extends AISDKError {
+ constructor({
+ data,
+ message = `Invalid response data: ${JSON.stringify(data)}.`
+ }) {
+ super({ name: name5, message });
+ this[_a6] = true;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker6);
+ }
+ };
+ _a6 = symbol6;
+ var name6 = "AI_JSONParseError";
+ var marker7 = `vercel.ai.error.${name6}`;
+ var symbol7 = Symbol.for(marker7);
+ var _a7;
+ var JSONParseError = class extends AISDKError {
+ constructor({ text, cause }) {
+ super({
+ name: name6,
+ message: `JSON parsing failed: Text: ${text}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a7] = true;
+ this.text = text;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker7);
+ }
+ };
+ _a7 = symbol7;
+ var name7 = "AI_LoadAPIKeyError";
+ var marker8 = `vercel.ai.error.${name7}`;
+ var symbol8 = Symbol.for(marker8);
+ var _a8;
+ _a8 = symbol8;
+ var name8 = "AI_LoadSettingError";
+ var marker9 = `vercel.ai.error.${name8}`;
+ var symbol9 = Symbol.for(marker9);
+ var _a9;
+ _a9 = symbol9;
+ var name9 = "AI_NoContentGeneratedError";
+ var marker10 = `vercel.ai.error.${name9}`;
+ var symbol10 = Symbol.for(marker10);
+ var _a10;
+ _a10 = symbol10;
+ var name10 = "AI_NoSuchModelError";
+ var marker11 = `vercel.ai.error.${name10}`;
+ var symbol11 = Symbol.for(marker11);
+ var _a11;
+ _a11 = symbol11;
+ var name11 = "AI_TooManyEmbeddingValuesForCallError";
+ var marker12 = `vercel.ai.error.${name11}`;
+ var symbol12 = Symbol.for(marker12);
+ var _a12;
+ _a12 = symbol12;
+ var name12 = "AI_TypeValidationError";
+ var marker13 = `vercel.ai.error.${name12}`;
+ var symbol13 = Symbol.for(marker13);
+ var _a13;
+ var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
+ constructor({ value, cause }) {
+ super({
+ name: name12,
+ message: `Type validation failed: Value: ${JSON.stringify(value)}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a13] = true;
+ this.value = value;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker13);
+ }
+ /**
+ * Wraps an error into a TypeValidationError.
+ * If the cause is already a TypeValidationError with the same value, it returns the cause.
+ * Otherwise, it creates a new TypeValidationError.
+ *
+ * @param {Object} params - The parameters for wrapping the error.
+ * @param {unknown} params.value - The value that failed validation.
+ * @param {unknown} params.cause - The original error or cause of the validation failure.
+ * @returns {TypeValidationError} A TypeValidationError instance.
+ */
+ static wrap({
+ value,
+ cause
+ }) {
+ return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
+ }
+ };
+ _a13 = symbol13;
+ var TypeValidationError = _TypeValidationError;
+ var name13 = "AI_UnsupportedFunctionalityError";
+ var marker14 = `vercel.ai.error.${name13}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var UnsupportedFunctionalityError = class extends AISDKError {
+ constructor({
+ functionality,
+ message = `'${functionality}' functionality not supported.`
+ }) {
+ super({ name: name13, message });
+ this[_a14] = true;
+ this.functionality = functionality;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;
+
+ // node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/index.js
+ var ParseError = class extends Error {
+ constructor(message, options) {
+ super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line;
+ }
+ };
+ function noop(_arg) {
+ }
+ function createParser(callbacks) {
+ if (typeof callbacks == "function")
+ throw new TypeError(
+ "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?"
+ );
+ const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks;
+ let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = "";
+ function feed(newChunk) {
+ const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`);
+ for (const line of complete)
+ parseLine(line);
+ incompleteLine = incomplete, isFirstChunk = false;
+ }
+ function parseLine(line) {
+ if (line === "") {
+ dispatchEvent();
+ return;
+ }
+ if (line.startsWith(":")) {
+ onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1));
+ return;
+ }
+ const fieldSeparatorIndex = line.indexOf(":");
+ if (fieldSeparatorIndex !== -1) {
+ const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset);
+ processField(field, value, line);
+ return;
+ }
+ processField(line, "", line);
+ }
+ function processField(field, value, line) {
+ switch (field) {
+ case "event":
+ eventType = value;
+ break;
+ case "data":
+ data = `${data}${value}
+ `;
+ break;
+ case "id":
+ id = value.includes("\0") ? void 0 : value;
+ break;
+ case "retry":
+ /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError(
+ new ParseError(`Invalid \`retry\` value: "${value}"`, {
+ type: "invalid-retry",
+ value,
+ line
+ })
+ );
+ break;
+ default:
+ onError(
+ new ParseError(
+ `Unknown field "${field.length > 20 ? `${field.slice(0, 20)}\u2026` : field}"`,
+ { type: "unknown-field", field, value, line }
+ )
+ );
+ break;
+ }
+ }
+ function dispatchEvent() {
+ data.length > 0 && onEvent({
+ id,
+ event: eventType || void 0,
+ // If the data buffer's last character is a U+000A LINE FEED (LF) character,
+ // then remove the last character from the data buffer.
+ data: data.endsWith(`
+ `) ? data.slice(0, -1) : data
+ }), id = void 0, data = "", eventType = "";
+ }
+ function reset(options = {}) {
+ incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = "";
+ }
+ return { feed, reset };
+ }
+ function splitLines(chunk) {
+ const lines = [];
+ let incompleteLine = "", searchIndex = 0;
+ for (; searchIndex < chunk.length; ) {
+ const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(`
+ `, searchIndex);
+ let lineEnd = -1;
+ if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) {
+ incompleteLine = chunk.slice(searchIndex);
+ break;
+ } else {
+ const line = chunk.slice(searchIndex, lineEnd);
+ lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === `
+ ` && searchIndex++;
+ }
+ }
+ return [lines, incompleteLine];
+ }
+
+ // node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/stream.js
+ var EventSourceParserStream = class extends TransformStream {
+ constructor({ onError, onRetry, onComment } = {}) {
+ let parser;
+ super({
+ start(controller) {
+ parser = createParser({
+ onEvent: (event) => {
+ controller.enqueue(event);
+ },
+ onError(error) {
+ onError === "terminate" ? controller.error(error) : typeof onError == "function" && onError(error);
+ },
+ onRetry,
+ onComment
+ });
+ },
+ transform(chunk) {
+ parser.feed(chunk);
+ }
+ });
+ }
+ };
+
+ // node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ var z4 = __toESM(require("zod/v4"), 1);
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/Options.js
+ var ignoreOverride = Symbol("Let zodToJsonSchema decide on which parser to use");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/selectParser.js
+ var import_zod4 = require("zod");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/array.js
  var import_zod = require("zod");
- var ReasoningDetailSummarySchema = import_zod.z.object({
- type: import_zod.z.literal("reasoning.summary" /* Summary */),
- summary: import_zod.z.string()
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/record.js
+ var import_zod2 = require("zod");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/string.js
+ var ALPHA_NUMERIC = new Set("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvxyz0123456789");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/object.js
+ var import_zod3 = require("zod");
+
+ // node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ function combineHeaders(...headers) {
+ return headers.reduce(
+ (combinedHeaders, currentHeaders) => __spreadValues(__spreadValues({}, combinedHeaders), currentHeaders != null ? currentHeaders : {}),
+ {}
+ );
+ }
+ function extractResponseHeaders(response) {
+ return Object.fromEntries([...response.headers]);
+ }
+ var createIdGenerator = ({
+ prefix,
+ size = 16,
+ alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ separator = "-"
+ } = {}) => {
+ const generator = () => {
+ const alphabetLength = alphabet.length;
+ const chars = new Array(size);
+ for (let i = 0; i < size; i++) {
+ chars[i] = alphabet[Math.random() * alphabetLength | 0];
+ }
+ return chars.join("");
+ };
+ if (prefix == null) {
+ return generator;
+ }
+ if (alphabet.includes(separator)) {
+ throw new InvalidArgumentError({
+ argument: "separator",
+ message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
+ });
+ }
+ return () => `${prefix}${separator}${generator()}`;
+ };
+ var generateId = createIdGenerator();
+ function isAbortError(error) {
+ return error instanceof Error && (error.name === "AbortError" || error.name === "ResponseAborted" || // Next.js
+ error.name === "TimeoutError");
+ }
+ var FETCH_FAILED_ERROR_MESSAGES = ["fetch failed", "failed to fetch"];
+ function handleFetchError({
+ error,
+ url,
+ requestBodyValues
+ }) {
+ if (isAbortError(error)) {
+ return error;
+ }
+ if (error instanceof TypeError && FETCH_FAILED_ERROR_MESSAGES.includes(error.message.toLowerCase())) {
+ const cause = error.cause;
+ if (cause != null) {
+ return new APICallError({
+ message: `Cannot connect to API: ${cause.message}`,
+ cause,
+ url,
+ requestBodyValues,
+ isRetryable: true
+ // retry when network error
+ });
+ }
+ }
+ return error;
+ }
+ function removeUndefinedEntries(record) {
+ return Object.fromEntries(
+ Object.entries(record).filter(([_key, value]) => value != null)
+ );
+ }
+ var suspectProtoRx = /"__proto__"\s*:/;
+ var suspectConstructorRx = /"constructor"\s*:/;
+ function _parse(text) {
+ const obj = JSON.parse(text);
+ if (obj === null || typeof obj !== "object") {
+ return obj;
+ }
+ if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
+ return obj;
+ }
+ return filter(obj);
+ }
+ function filter(obj) {
+ let next = [obj];
+ while (next.length) {
+ const nodes = next;
+ next = [];
+ for (const node of nodes) {
+ if (Object.prototype.hasOwnProperty.call(node, "__proto__")) {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ if (Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ for (const key in node) {
+ const value = node[key];
+ if (value && typeof value === "object") {
+ next.push(value);
+ }
+ }
+ }
+ }
+ return obj;
+ }
+ function secureJsonParse(text) {
+ const { stackTraceLimit } = Error;
+ Error.stackTraceLimit = 0;
+ try {
+ return _parse(text);
+ } finally {
+ Error.stackTraceLimit = stackTraceLimit;
+ }
+ }
+ var validatorSymbol = Symbol.for("vercel.ai.validator");
+ function validator(validate) {
+ return { [validatorSymbol]: true, validate };
+ }
+ function isValidator(value) {
+ return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
+ }
+ function asValidator(value) {
+ return isValidator(value) ? value : standardSchemaValidator(value);
+ }
+ function standardSchemaValidator(standardSchema) {
+ return validator(async (value) => {
+ const result = await standardSchema["~standard"].validate(value);
+ return result.issues == null ? { success: true, value: result.value } : {
+ success: false,
+ error: new TypeValidationError({
+ value,
+ cause: result.issues
+ })
+ };
+ });
+ }
+ async function validateTypes({
+ value,
+ schema
+ }) {
+ const result = await safeValidateTypes({ value, schema });
+ if (!result.success) {
+ throw TypeValidationError.wrap({ value, cause: result.error });
+ }
+ return result.value;
+ }
+ async function safeValidateTypes({
+ value,
+ schema
+ }) {
+ const validator2 = asValidator(schema);
+ try {
+ if (validator2.validate == null) {
+ return { success: true, value, rawValue: value };
+ }
+ const result = await validator2.validate(value);
+ if (result.success) {
+ return { success: true, value: result.value, rawValue: value };
+ }
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: result.error }),
+ rawValue: value
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: error }),
+ rawValue: value
+ };
+ }
+ }
+ async function parseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = secureJsonParse(text);
+ if (schema == null) {
+ return value;
+ }
+ return validateTypes({ value, schema });
+ } catch (error) {
+ if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
+ throw error;
+ }
+ throw new JSONParseError({ text, cause: error });
+ }
+ }
+ async function safeParseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = secureJsonParse(text);
+ if (schema == null) {
+ return { success: true, value, rawValue: value };
+ }
+ return await safeValidateTypes({ value, schema });
+ } catch (error) {
+ return {
+ success: false,
+ error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error }),
+ rawValue: void 0
+ };
+ }
+ }
+ function isParsableJson(input) {
+ try {
+ secureJsonParse(input);
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }
+ function parseJsonEventStream({
+ stream,
+ schema
+ }) {
+ return stream.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(
+ new TransformStream({
+ async transform({ data }, controller) {
+ if (data === "[DONE]") {
+ return;
+ }
+ controller.enqueue(await safeParseJSON({ text: data, schema }));
+ }
+ })
+ );
+ }
+ var getOriginalFetch2 = () => globalThis.fetch;
+ var postJsonToApi = async ({
+ url,
+ headers,
+ body,
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ }) => postToApi({
+ url,
+ headers: __spreadValues({
+ "Content-Type": "application/json"
+ }, headers),
+ body: {
+ content: JSON.stringify(body),
+ values: body
+ },
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ });
+ var postToApi = async ({
+ url,
+ headers = {},
+ body,
+ successfulResponseHandler,
+ failedResponseHandler,
+ abortSignal,
+ fetch = getOriginalFetch2()
+ }) => {
+ try {
+ const response = await fetch(url, {
+ method: "POST",
+ headers: removeUndefinedEntries(headers),
+ body: body.content,
+ signal: abortSignal
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!response.ok) {
+ let errorInformation;
+ try {
+ errorInformation = await failedResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ throw new APICallError({
+ message: "Failed to process error response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ throw errorInformation.value;
+ }
+ try {
+ return await successfulResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (error instanceof Error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ }
+ throw new APICallError({
+ message: "Failed to process successful response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ } catch (error) {
+ throw handleFetchError({ error, url, requestBodyValues: body.values });
+ }
+ };
+ var createJsonErrorResponseHandler = ({
+ errorSchema,
+ errorToMessage,
+ isRetryable
+ }) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const responseHeaders = extractResponseHeaders(response);
+ if (responseBody.trim() === "") {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ try {
+ const parsedError = await parseJSON({
+ text: responseBody,
+ schema: errorSchema
+ });
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: errorToMessage(parsedError),
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ data: parsedError,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
+ })
+ };
+ } catch (parseError) {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ };
+ var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
+ const responseHeaders = extractResponseHeaders(response);
+ if (response.body == null) {
+ throw new EmptyResponseBodyError({});
+ }
+ return {
+ responseHeaders,
+ value: parseJsonEventStream({
+ stream: response.body,
+ schema: chunkSchema
+ })
+ };
+ };
+ var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const parsedResult = await safeParseJSON({
+ text: responseBody,
+ schema: responseSchema
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!parsedResult.success) {
+ throw new APICallError({
+ message: "Invalid JSON response",
+ cause: parsedResult.error,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ url,
+ requestBodyValues
+ });
+ }
+ return {
+ responseHeaders,
+ value: parsedResult.value,
+ rawValue: parsedResult.rawValue
+ };
+ };
+ var schemaSymbol = Symbol.for("vercel.ai.schema");
+ var { btoa, atob } = globalThis;
+ function convertUint8ArrayToBase64(array) {
+ let latin1string = "";
+ for (let i = 0; i < array.length; i++) {
+ latin1string += String.fromCodePoint(array[i]);
+ }
+ return btoa(latin1string);
+ }
+
+ // src/schemas/reasoning-details.ts
+ var import_v4 = require("zod/v4");
+ var ReasoningDetailSummarySchema = import_v4.z.object({
+ type: import_v4.z.literal("reasoning.summary" /* Summary */),
+ summary: import_v4.z.string()
  });
- var ReasoningDetailEncryptedSchema = import_zod.z.object({
- type: import_zod.z.literal("reasoning.encrypted" /* Encrypted */),
- data: import_zod.z.string()
+ var ReasoningDetailEncryptedSchema = import_v4.z.object({
+ type: import_v4.z.literal("reasoning.encrypted" /* Encrypted */),
+ data: import_v4.z.string()
  });
- var ReasoningDetailTextSchema = import_zod.z.object({
- type: import_zod.z.literal("reasoning.text" /* Text */),
- text: import_zod.z.string().nullish(),
- signature: import_zod.z.string().nullish()
+ var ReasoningDetailTextSchema = import_v4.z.object({
+ type: import_v4.z.literal("reasoning.text" /* Text */),
+ text: import_v4.z.string().nullish(),
+ signature: import_v4.z.string().nullish()
  });
- var ReasoningDetailUnionSchema = import_zod.z.union([
+ var ReasoningDetailUnionSchema = import_v4.z.union([
  ReasoningDetailSummarySchema,
  ReasoningDetailEncryptedSchema,
  ReasoningDetailTextSchema
  ]);
- var ReasoningDetailsWithUnknownSchema = import_zod.z.union([
+ var ReasoningDetailsWithUnknownSchema = import_v4.z.union([
  ReasoningDetailUnionSchema,
- import_zod.z.unknown().transform(() => null)
+ import_v4.z.unknown().transform(() => null)
  ]);
- var ReasoningDetailArraySchema = import_zod.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
+ var ReasoningDetailArraySchema = import_v4.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
 
- // src/llmgateway-chat-language-model.ts
- var import_provider = require("@ai-sdk/provider");
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ // src/schemas/error-response.ts
+ var import_v42 = require("zod/v4");
+ var LLMGatewayErrorResponseSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullable().optional().default(null),
+ message: import_v42.z.string(),
+ type: import_v42.z.string().nullable().optional().default(null),
+ param: import_v42.z.any().nullable().optional().default(null)
+ })
+ });
+ var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
+ errorSchema: LLMGatewayErrorResponseSchema,
+ errorToMessage: (data) => data.error.message
+ });
+
+ // src/utils/map-finish-reason.ts
+ function mapLLMGatewayFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
 
- // src/convert-to-llmgateway-chat-messages.ts
- var import_provider_utils = require("@ai-sdk/provider-utils");
+ // src/chat/is-url.ts
+ function isUrl({
+ url,
+ protocols
+ }) {
+ try {
+ const urlObj = new URL(url);
+ return protocols.has(urlObj.protocol);
+ } catch (_) {
+ return false;
+ }
+ }
+
+ // src/chat/file-url-utils.ts
+ function getFileUrl({
+ part,
+ defaultMediaType
+ }) {
+ var _a15, _b;
+ if (part.data instanceof Uint8Array) {
+ const base64 = convertUint8ArrayToBase64(part.data);
+ return `data:${(_a15 = part.mediaType) != null ? _a15 : defaultMediaType};base64,${base64}`;
+ }
+ const stringUrl = part.data.toString();
+ if (isUrl({
+ url: stringUrl,
+ protocols: /* @__PURE__ */ new Set(["http:", "https:"])
+ })) {
+ return stringUrl;
+ }
+ return stringUrl.startsWith("data:") ? stringUrl : `data:${(_b = part.mediaType) != null ? _b : defaultMediaType};base64,${stringUrl}`;
+ }
+
+ // src/chat/convert-to-llmgateway-chat-messages.ts
  function getCacheControl(providerMetadata) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
  const llmgateway = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
- return (_c = (_b = (_a = llmgateway == null ? void 0 : llmgateway.cacheControl) != null ? _a : llmgateway == null ? void 0 : llmgateway.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
+ return (_c = (_b = (_a15 = llmgateway == null ? void 0 : llmgateway.cacheControl) != null ? _a15 : llmgateway == null ? void 0 : llmgateway.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
  }
  function convertToLLMGatewayChatMessages(prompt) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const messages = [];
- for (const { role, content, providerMetadata } of prompt) {
+ for (const { role, content, providerOptions } of prompt) {
  switch (role) {
  case "system": {
  messages.push({
  role: "system",
  content,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  });
  break;
  }
  case "user": {
- if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
+ if (content.length === 1 && ((_a15 = content[0]) == null ? void 0 : _a15.type) === "text") {
+ const cacheControl = (_b = getCacheControl(providerOptions)) != null ? _b : getCacheControl(content[0].providerOptions);
+ const contentWithCacheControl = cacheControl ? [
+ {
+ type: "text",
+ text: content[0].text,
+ cache_control: cacheControl
+ }
+ ] : content[0].text;
  messages.push({
  role: "user",
- content: content[0].text,
- cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
+ content: contentWithCacheControl
  });
  break;
  }
- const messageCacheControl = getCacheControl(providerMetadata);
+ const messageCacheControl = getCacheControl(providerOptions);
  const contentParts = content.map(
  (part) => {
- var _a2, _b2, _c2, _d;
- const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
+ var _a16, _b2, _c2, _d, _e, _f;
+ const cacheControl = (_a16 = getCacheControl(part.providerOptions)) != null ? _a16 : messageCacheControl;
  switch (part.type) {
  case "text":
  return {
@@ -128,33 +1014,55 @@ function convertToLLMGatewayChatMessages(prompt) {
  // For text parts, only use part-specific cache control
  cache_control: cacheControl
  };
- case "image":
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(
- part.image
- )}`
- },
- // For image parts, use part-specific or message-level cache control
- cache_control: cacheControl
- };
- case "file":
+ case "file": {
+ if ((_b2 = part.mediaType) == null ? void 0 : _b2.startsWith("image/")) {
+ const url = getFileUrl({
+ part,
+ defaultMediaType: "image/jpeg"
+ });
+ return {
+ type: "image_url",
+ image_url: {
+ url
+ },
+ // For image parts, use part-specific or message-level cache control
+ cache_control: cacheControl
+ };
+ }
+ const fileName = String(
+ (_f = (_e = (_d = (_c2 = part.providerOptions) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename) != null ? _e : part.filename) != null ? _f : ""
+ );
+ const fileData = getFileUrl({
+ part,
+ defaultMediaType: "application/pdf"
+ });
+ if (isUrl({
+ url: fileData,
+ protocols: /* @__PURE__ */ new Set(["http:", "https:"])
+ })) {
+ return {
+ type: "file",
+ file: {
+ filename: fileName,
+ file_data: fileData
+ }
+ };
+ }
  return {
  type: "file",
  file: {
- filename: String(
- (_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
- ),
- file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.data)}` : `data:${part.mimeType};base64,${part.data}`
+ filename: fileName,
+ file_data: fileData
  },
  cache_control: cacheControl
  };
+ }
  default: {
- const _exhaustiveCheck = part;
- throw new Error(
- `Unsupported content part type: ${_exhaustiveCheck}`
- );
+ return {
+ type: "text",
+ text: "",
+ cache_control: cacheControl
+ };
  }
  }
  }
@@ -182,7 +1090,7 @@ function convertToLLMGatewayChatMessages(prompt) {
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -191,23 +1099,14 @@ function convertToLLMGatewayChatMessages(prompt) {
  reasoning += part.text;
  reasoningDetails.push({
  type: "reasoning.text" /* Text */,
- text: part.text,
- signature: part.signature
- });
- break;
- }
- case "redacted-reasoning": {
- reasoningDetails.push({
- type: "reasoning.encrypted" /* Encrypted */,
- data: part.data
+ text: part.text
  });
  break;
  }
  case "file":
  break;
  default: {
- const _exhaustiveCheck = part;
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
@@ -217,95 +1116,188 @@ function convertToLLMGatewayChatMessages(prompt) {
  tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
  reasoning: reasoning || void 0,
  reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
+ const content2 = getToolResultContent(toolResponse);
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result),
- cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
+ content: content2,
+ cache_control: (_c = getCacheControl(providerOptions)) != null ? _c : getCacheControl(toolResponse.providerOptions)
  });
  }
  break;
  }
  default: {
- const _exhaustiveCheck = role;
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
  return messages;
  }
-
- // src/map-llmgateway-chat-logprobs.ts
- function mapLLMGatewayChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ function getToolResultContent(input) {
+ return input.output.type === "text" ? input.output.value : JSON.stringify(input.output.value);
  }
 
- // src/map-llmgateway-finish-reason.ts
- function mapLLMGatewayFinishReason(finishReason) {
- switch (finishReason) {
- case "stop":
- return "stop";
- case "length":
- return "length";
- case "content_filter":
- return "content-filter";
- case "function_call":
- case "tool_calls":
- return "tool-calls";
- default:
- return "unknown";
+ // src/chat/get-tool-choice.ts
+ var import_v43 = require("zod/v4");
+ var ChatCompletionToolChoiceSchema = import_v43.z.union([
+ import_v43.z.literal("auto"),
+ import_v43.z.literal("none"),
+ import_v43.z.literal("required"),
+ import_v43.z.object({
+ type: import_v43.z.literal("function"),
+ function: import_v43.z.object({
+ name: import_v43.z.string()
+ })
+ })
+ ]);
+ function getChatCompletionToolChoice(toolChoice) {
+ switch (toolChoice.type) {
+ case "auto":
+ case "none":
+ case "required":
+ return toolChoice.type;
+ case "tool": {
+ return {
+ type: "function",
+ function: { name: toolChoice.toolName }
+ };
+ }
+ default: {
+ toolChoice;
+ throw new Error(`Invalid tool choice type: ${toolChoice}`);
+ }
  }
  }
 
- // src/llmgateway-error.ts
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
- var LLMGatewayErrorResponseSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
- type: import_zod2.z.string(),
- param: import_zod2.z.any().nullable(),
- code: import_zod2.z.string().nullable()
- })
+ // src/chat/schemas.ts
+ var import_v44 = require("zod/v4");
+ var LLMGatewayChatCompletionBaseResponseSchema = import_v44.z.object({
+ id: import_v44.z.string().optional(),
+ model: import_v44.z.string().optional(),
+ usage: import_v44.z.object({
+ prompt_tokens: import_v44.z.number(),
+ prompt_tokens_details: import_v44.z.object({
+ cached_tokens: import_v44.z.number()
+ }).nullish(),
+ completion_tokens: import_v44.z.number(),
+ completion_tokens_details: import_v44.z.object({
+ reasoning_tokens: import_v44.z.number()
+ }).nullish(),
+ total_tokens: import_v44.z.number(),
+ cost: import_v44.z.number().optional(),
+ cost_details: import_v44.z.object({
+ upstream_inference_cost: import_v44.z.number().nullish()
+ }).nullish()
+ }).nullish()
  });
- var llmgatewayFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
- errorSchema: LLMGatewayErrorResponseSchema,
- errorToMessage: (data) => data.error.message
+ var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
+ choices: import_v44.z.array(
+ import_v44.z.object({
+ message: import_v44.z.object({
+ role: import_v44.z.literal("assistant"),
+ content: import_v44.z.string().nullable().optional(),
+ reasoning: import_v44.z.string().nullable().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: import_v44.z.array(
+ import_v44.z.object({
+ id: import_v44.z.string().optional().nullable(),
+ type: import_v44.z.literal("function"),
+ function: import_v44.z.object({
+ name: import_v44.z.string(),
+ arguments: import_v44.z.string()
+ })
+ })
+ ).optional()
+ }),
+ index: import_v44.z.number().nullish(),
+ logprobs: import_v44.z.object({
+ content: import_v44.z.array(
+ import_v44.z.object({
+ token: import_v44.z.string(),
+ logprob: import_v44.z.number(),
+ top_logprobs: import_v44.z.array(
+ import_v44.z.object({
+ token: import_v44.z.string(),
+ logprob: import_v44.z.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullable().optional(),
+ finish_reason: import_v44.z.string().optional().nullable()
+ })
+ )
  });
+ var LLMGatewayStreamChatCompletionChunkSchema = import_v44.z.union([
+ LLMGatewayChatCompletionBaseResponseSchema.extend({
+ choices: import_v44.z.array(
+ import_v44.z.object({
+ delta: import_v44.z.object({
+ role: import_v44.z.enum(["assistant"]).optional(),
+ content: import_v44.z.string().nullish(),
+ reasoning: import_v44.z.string().nullish().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: import_v44.z.array(
+ import_v44.z.object({
+ index: import_v44.z.number().nullish(),
+ id: import_v44.z.string().nullish(),
+ type: import_v44.z.literal("function").optional(),
+ function: import_v44.z.object({
+ name: import_v44.z.string().nullish(),
+ arguments: import_v44.z.string().nullish()
+ })
+ })
+ ).nullish()
+ }).nullish(),
+ logprobs: import_v44.z.object({
+ content: import_v44.z.array(
+ import_v44.z.object({
+ token: import_v44.z.string(),
+ logprob: import_v44.z.number(),
+ top_logprobs: import_v44.z.array(
+ import_v44.z.object({
+ token: import_v44.z.string(),
+ logprob: import_v44.z.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullish(),
+ finish_reason: import_v44.z.string().nullable().optional(),
+ index: import_v44.z.number().nullish()
+ })
+ )
+ }),
+ LLMGatewayErrorResponseSchema
+ ]);
 
- // src/llmgateway-chat-language-model.ts
- function isFunctionTool(tool) {
- return "parameters" in tool;
- }
+ // src/chat/index.ts
  var LLMGatewayChatLanguageModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.provider = "llmgateway";
  this.defaultObjectGenerationMode = "tool";
+ this.supportedUrls = {
+ "image/*": [
+ /^data:image\/[a-zA-Z]+;base64,/,
+ /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
+ ],
+ // 'text/*': [/^data:text\//, /^https?:\/\/.+$/],
+ "application/*": [/^data:application\//, /^https?:\/\/.+$/]
+ };
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
- get provider() {
- return this.config.provider;
- }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  frequencyPenalty,
@@ -314,12 +1306,10 @@ var LLMGatewayChatLanguageModel = class {
  stopSequences,
  responseFormat,
  topK,
- providerMetadata
+ tools,
+ toolChoice
  }) {
- var _a;
- const type = mode.type;
- const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
- const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+ const baseArgs = __spreadValues(__spreadValues({
  // model id:
  model: this.modelId,
  models: this.settings.models,
@@ -330,7 +1320,7 @@ var LLMGatewayChatLanguageModel = class {
  user: this.settings.user,
  parallel_tool_calls: this.settings.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -345,97 +1335,72 @@ var LLMGatewayChatLanguageModel = class {
  include_reasoning: this.settings.includeReasoning,
  reasoning: this.settings.reasoning,
  usage: this.settings.usage
- }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
- switch (type) {
- case "regular": {
- return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
- }
- case "object-json": {
- return __spreadProps(__spreadValues({}, baseArgs), {
- response_format: { type: "json_object" }
- });
- }
- case "object-tool": {
- return __spreadProps(__spreadValues({}, baseArgs), {
- tool_choice: { type: "function", function: { name: mode.tool.name } },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- });
- }
- // Handle all non-text types with a single default case
- default: {
- const _exhaustiveCheck = type;
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: `${_exhaustiveCheck} mode`
- });
- }
+ }, this.config.extraBody), this.settings.extraBody);
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
+ return __spreadProps(__spreadValues({}, baseArgs), {
+ response_format: { type: "json_object" }
+ });
+ }
+ if (tools && tools.length > 0) {
+ const mappedTools = tools.filter((tool) => tool.type === "function").map((tool) => ({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.type,
+ parameters: tool.inputSchema
+ }
+ }));
+ return __spreadProps(__spreadValues({}, baseArgs), {
+ tools: mappedTools,
+ tool_choice: toolChoice ? getChatCompletionToolChoice(toolChoice) : void 0
+ });
  }
+ return baseArgs;
  }
  async doGenerate(options) {
- var _b, _c, _d, _e, _f, _g, _h, _i, _j;
- const args = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
+ const providerOptions = options.providerOptions || {};
+ const llmgatewayOptions = providerOptions.llmgateway || {};
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+ const { value: response, responseHeaders } = await postJsonToApi({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ headers: combineHeaders(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: llmgatewayFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ successfulResponseHandler: createJsonResponseHandler(
  LLMGatewayNonStreamChatCompletionResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
  const choice = response.choices[0];
  if (!choice) {
  throw new Error("No choice in response");
  }
  const usageInfo = response.usage ? {
- promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
- completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
+ inputTokens: (_a15 = response.usage.prompt_tokens) != null ? _a15 : 0,
+ outputTokens: (_b = response.usage.completion_tokens) != null ? _b : 0,
+ totalTokens: ((_c = response.usage.prompt_tokens) != null ? _c : 0) + ((_d = response.usage.completion_tokens) != null ? _d : 0),
+ reasoningTokens: (_f = (_e = response.usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0,
+ cachedInputTokens: (_h = (_g = response.usage.prompt_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : 0
  } : {
- promptTokens: 0,
- completionTokens: 0
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ reasoningTokens: 0,
+ cachedInputTokens: 0
  };
- const providerMetadata = {};
- if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
- providerMetadata.llmgateway = {
- usage: {
- promptTokens: response.usage.prompt_tokens,
- promptTokensDetails: response.usage.prompt_tokens_details ? {
- cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
- } : void 0,
- completionTokens: response.usage.completion_tokens,
- completionTokensDetails: response.usage.completion_tokens_details ? {
- reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
- } : void 0,
- cost: response.usage.cost,
- totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
- }
- };
- }
- const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
- const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
+ const reasoningDetails = (_i = choice.message.reasoning_details) != null ? _i : [];
  const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
- var _a2;
  switch (detail.type) {
  case "reasoning.text" /* Text */: {
  if (detail.text) {
  return {
- type: "text",
- text: detail.text,
- signature: (_a2 = detail.signature) != null ? _a2 : void 0
+ type: "reasoning",
+ text: detail.text
  };
  }
  break;
@@ -443,7 +1408,7 @@ var LLMGatewayChatLanguageModel = class {
  case "reasoning.summary" /* Summary */: {
  if (detail.summary) {
  return {
- type: "text",
+ type: "reasoning",
  text: detail.summary
  };
  }
@@ -452,85 +1417,119 @@ var LLMGatewayChatLanguageModel = class {
452
1417
  case "reasoning.encrypted" /* Encrypted */: {
453
1418
  if (detail.data) {
454
1419
  return {
455
- type: "redacted",
456
- data: detail.data
1420
+ type: "reasoning",
1421
+ text: "[REDACTED]"
457
1422
  };
458
1423
  }
459
1424
  break;
460
1425
  }
461
1426
  default: {
462
- detail;
1427
+ const _exhaustiveCheck = detail;
1428
+ return _exhaustiveCheck;
463
1429
  }
464
1430
  }
465
1431
  return null;
466
1432
  }).filter((p) => p !== null) : choice.message.reasoning ? [
467
1433
  {
468
- type: "text",
1434
+ type: "reasoning",
469
1435
  text: choice.message.reasoning
470
1436
  }
471
1437
  ] : [];
472
- return __spreadValues({
473
- response: {
474
- id: response.id,
475
- modelId: response.model
476
- },
477
- text: (_i = choice.message.content) != null ? _i : void 0,
478
- reasoning,
479
- toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
480
- var _a2;
481
- return {
482
- toolCallType: "function",
483
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
1438
+ const content = [];
1439
+ content.push(...reasoning);
1440
+ if (choice.message.content) {
1441
+ content.push({
1442
+ type: "text",
1443
+ text: choice.message.content
1444
+ });
1445
+ }
1446
+ if (choice.message.tool_calls) {
1447
+ for (const toolCall of choice.message.tool_calls) {
1448
+ content.push({
1449
+ type: "tool-call",
1450
+ toolCallId: (_j = toolCall.id) != null ? _j : generateId(),
484
1451
  toolName: toolCall.function.name,
485
- args: toolCall.function.arguments
486
- };
487
- }),
1452
+ input: toolCall.function.arguments
1453
+ });
1454
+ }
1455
+ }
1456
+ return {
1457
+ content,
488
1458
  finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
489
1459
  usage: usageInfo,
490
- rawCall: { rawPrompt, rawSettings },
491
- rawResponse: { headers: responseHeaders },
492
1460
  warnings: [],
493
- logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
494
- }, hasProviderMetadata ? { providerMetadata } : {});
1461
+ providerMetadata: {
1462
+ llmgateway: {
1463
+ usage: {
1464
+ promptTokens: (_k = usageInfo.inputTokens) != null ? _k : 0,
1465
+ completionTokens: (_l = usageInfo.outputTokens) != null ? _l : 0,
1466
+ totalTokens: (_m = usageInfo.totalTokens) != null ? _m : 0,
1467
+ cost: (_n = response.usage) == null ? void 0 : _n.cost,
1468
+ promptTokensDetails: {
1469
+ cachedTokens: (_q = (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : 0
1470
+ },
1471
+ completionTokensDetails: {
1472
+ reasoningTokens: (_t = (_s = (_r = response.usage) == null ? void 0 : _r.completion_tokens_details) == null ? void 0 : _s.reasoning_tokens) != null ? _t : 0
1473
+ },
1474
+ costDetails: {
1475
+ upstreamInferenceCost: (_w = (_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? _w : 0
1476
+ }
1477
+ }
1478
+ }
1479
+ },
1480
+ request: { body: args },
1481
+ response: {
1482
+ id: response.id,
1483
+ modelId: response.model,
1484
+ headers: responseHeaders
1485
+ }
1486
+ };
495
1487
  }
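
The hunk above is the core v1 → v2 shape change for doGenerate: instead of separate text, reasoning, and toolCalls fields plus rawCall/rawResponse, the method now returns one ordered content array of typed parts alongside request/response metadata. A condensed sketch of the assembly, collapsing the reasoning_details handling to the plain-string fallback (the helper name is illustrative; generateId stands in for the imported utility):

// Sketch: build the v2 content array for one chat choice.
function toV2Content(message, generateId) {
  const content = [];
  if (message.reasoning) {
    content.push({ type: "reasoning", text: message.reasoning });
  }
  if (message.content) {
    content.push({ type: "text", text: message.content });
  }
  for (const call of message.tool_calls ?? []) {
    content.push({
      type: "tool-call",
      toolCallId: call.id ?? generateId(),
      toolName: call.function.name,
      input: call.function.arguments, // v1 named this field "args"
    });
  }
  return content;
}
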
496
1488
  async doStream(options) {
497
- var _a, _c;
498
- const args = this.getArgs(options);
499
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
1489
+ var _a15;
1490
+ const providerOptions = options.providerOptions || {};
1491
+ const llmgatewayOptions = providerOptions.llmgateway || {};
1492
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
1493
+ const { value: response, responseHeaders } = await postJsonToApi({
500
1494
  url: this.config.url({
501
1495
  path: "/chat/completions",
502
1496
  modelId: this.modelId
503
1497
  }),
504
- headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
1498
+ headers: combineHeaders(this.config.headers(), options.headers),
505
1499
  body: __spreadProps(__spreadValues({}, args), {
506
1500
  stream: true,
507
1501
  // only include stream_options when in strict compatibility mode:
508
1502
  stream_options: this.config.compatibility === "strict" ? __spreadValues({
509
1503
  include_usage: true
510
- }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
1504
+ }, ((_a15 = this.settings.usage) == null ? void 0 : _a15.include) ? { include_usage: true } : {}) : void 0
511
1505
  }),
512
1506
  failedResponseHandler: llmgatewayFailedResponseHandler,
513
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
1507
+ successfulResponseHandler: createEventSourceResponseHandler(
514
1508
  LLMGatewayStreamChatCompletionChunkSchema
515
1509
  ),
516
1510
  abortSignal: options.abortSignal,
517
1511
  fetch: this.config.fetch
518
1512
  });
519
- const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
520
1513
  const toolCalls = [];
521
1514
  let finishReason = "other";
522
- let usage = {
523
- promptTokens: Number.NaN,
524
- completionTokens: Number.NaN
1515
+ const usage = {
1516
+ inputTokens: Number.NaN,
1517
+ outputTokens: Number.NaN,
1518
+ totalTokens: Number.NaN,
1519
+ reasoningTokens: Number.NaN,
1520
+ cachedInputTokens: Number.NaN
525
1521
  };
526
- let logprobs;
527
1522
  const llmgatewayUsage = {};
528
- const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
1523
+ let textStarted = false;
1524
+ let reasoningStarted = false;
1525
+ let textId;
1526
+ let reasoningId;
1527
+ let llmgatewayResponseId;
529
1528
  return {
530
1529
  stream: response.pipeThrough(
531
1530
  new TransformStream({
532
1531
  transform(chunk, controller) {
533
- var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
1532
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
534
1533
  if (!chunk.success) {
535
1534
  finishReason = "error";
536
1535
  controller.enqueue({ type: "error", error: chunk.error });
@@ -543,6 +1542,7 @@ var LLMGatewayChatLanguageModel = class {
543
1542
  return;
544
1543
  }
545
1544
  if (value.id) {
1545
+ llmgatewayResponseId = value.id;
546
1546
  controller.enqueue({
547
1547
  type: "response-metadata",
548
1548
  id: value.id
@@ -555,20 +1555,23 @@ var LLMGatewayChatLanguageModel = class {
555
1555
  });
556
1556
  }
557
1557
  if (value.usage != null) {
558
- usage = {
559
- promptTokens: value.usage.prompt_tokens,
560
- completionTokens: value.usage.completion_tokens
561
- };
1558
+ usage.inputTokens = value.usage.prompt_tokens;
1559
+ usage.outputTokens = value.usage.completion_tokens;
1560
+ usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
562
1561
  llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
563
1562
  if (value.usage.prompt_tokens_details) {
1563
+ const cachedInputTokens = (_a16 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a16 : 0;
1564
+ usage.cachedInputTokens = cachedInputTokens;
564
1565
  llmgatewayUsage.promptTokensDetails = {
565
- cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
1566
+ cachedTokens: cachedInputTokens
566
1567
  };
567
1568
  }
568
1569
  llmgatewayUsage.completionTokens = value.usage.completion_tokens;
569
1570
  if (value.usage.completion_tokens_details) {
1571
+ const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
1572
+ usage.reasoningTokens = reasoningTokens;
570
1573
  llmgatewayUsage.completionTokensDetails = {
571
- reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
1574
+ reasoningTokens
572
1575
  };
573
1576
  }
574
1577
  llmgatewayUsage.cost = value.usage.cost;
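
When a usage chunk arrives (typically the final chunk once stream_options.include_usage is set), the NaN placeholders are overwritten and a provider-specific mirror is accumulated for the finish event. De-minified, the assignments above amount to roughly:

// Sketch: fold one streamed usage payload into the v2 usage object and
// the llmgateway-specific mirror attached to the finish part.
function applyUsageChunk(usage, llmgatewayUsage, raw) {
  usage.inputTokens = raw.prompt_tokens;
  usage.outputTokens = raw.completion_tokens;
  usage.totalTokens = raw.prompt_tokens + raw.completion_tokens;
  llmgatewayUsage.promptTokens = raw.prompt_tokens;
  llmgatewayUsage.completionTokens = raw.completion_tokens;
  if (raw.prompt_tokens_details) {
    const cached = raw.prompt_tokens_details.cached_tokens ?? 0;
    usage.cachedInputTokens = cached;
    llmgatewayUsage.promptTokensDetails = { cachedTokens: cached };
  }
  if (raw.completion_tokens_details) {
    const reasoning = raw.completion_tokens_details.reasoning_tokens ?? 0;
    usage.reasoningTokens = reasoning;
    llmgatewayUsage.completionTokensDetails = { reasoningTokens: reasoning };
  }
  llmgatewayUsage.cost = raw.cost;
}
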
@@ -582,51 +1585,39 @@ var LLMGatewayChatLanguageModel = class {
582
1585
  return;
583
1586
  }
584
1587
  const delta = choice.delta;
585
- if (delta.content != null) {
586
- controller.enqueue({
587
- type: "text-delta",
588
- textDelta: delta.content
589
- });
590
- }
591
- if (delta.reasoning != null) {
1588
+ const emitReasoningChunk = (chunkText) => {
1589
+ if (!reasoningStarted) {
1590
+ reasoningId = llmgatewayResponseId || generateId();
1591
+ controller.enqueue({
1592
+ type: "reasoning-start",
1593
+ id: reasoningId
1594
+ });
1595
+ reasoningStarted = true;
1596
+ }
592
1597
  controller.enqueue({
593
- type: "reasoning",
594
- textDelta: delta.reasoning
1598
+ type: "reasoning-delta",
1599
+ delta: chunkText,
1600
+ id: reasoningId || generateId()
595
1601
  });
596
- }
1602
+ };
597
1603
  if (delta.reasoning_details && delta.reasoning_details.length > 0) {
598
1604
  for (const detail of delta.reasoning_details) {
599
1605
  switch (detail.type) {
600
1606
  case "reasoning.text" /* Text */: {
601
1607
  if (detail.text) {
602
- controller.enqueue({
603
- type: "reasoning",
604
- textDelta: detail.text
605
- });
606
- }
607
- if (detail.signature) {
608
- controller.enqueue({
609
- type: "reasoning-signature",
610
- signature: detail.signature
611
- });
1608
+ emitReasoningChunk(detail.text);
612
1609
  }
613
1610
  break;
614
1611
  }
615
1612
  case "reasoning.encrypted" /* Encrypted */: {
616
1613
  if (detail.data) {
617
- controller.enqueue({
618
- type: "redacted-reasoning",
619
- data: detail.data
620
- });
1614
+ emitReasoningChunk("[REDACTED]");
621
1615
  }
622
1616
  break;
623
1617
  }
624
1618
  case "reasoning.summary" /* Summary */: {
625
1619
  if (detail.summary) {
626
- controller.enqueue({
627
- type: "reasoning",
628
- textDelta: detail.summary
629
- });
1620
+ emitReasoningChunk(detail.summary);
630
1621
  }
631
1622
  break;
632
1623
  }
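
v1 streamed bare reasoning and redacted-reasoning parts; v2 frames reasoning in a start/delta/end lifecycle keyed by a shared id, and encrypted payloads now surface as a literal "[REDACTED]" delta instead of being forwarded. The emitter above reduces to this sketch:

// Sketch: lazily open a reasoning block on the first delta, reusing the
// gateway's response id when one was seen.
let reasoningStarted = false;
let reasoningId;

function emitReasoningChunk(controller, text, responseId, generateId) {
  if (!reasoningStarted) {
    reasoningId = responseId || generateId();
    controller.enqueue({ type: "reasoning-start", id: reasoningId });
    reasoningStarted = true;
  }
  controller.enqueue({ type: "reasoning-delta", id: reasoningId, delta: text });
}
// The matching { type: "reasoning-end", id } part is enqueued in flush().
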
@@ -636,34 +1627,42 @@ var LLMGatewayChatLanguageModel = class {
636
1627
  }
637
1628
  }
638
1629
  }
1630
+ } else if (delta.reasoning != null) {
1631
+ emitReasoningChunk(delta.reasoning);
639
1632
  }
640
- const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
641
- choice == null ? void 0 : choice.logprobs
642
- );
643
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
644
- if (logprobs === void 0) {
645
- logprobs = [];
1633
+ if (delta.content != null) {
1634
+ if (!textStarted) {
1635
+ textId = llmgatewayResponseId || generateId();
1636
+ controller.enqueue({
1637
+ type: "text-start",
1638
+ id: textId
1639
+ });
1640
+ textStarted = true;
646
1641
  }
647
- logprobs.push(...mappedLogprobs);
1642
+ controller.enqueue({
1643
+ type: "text-delta",
1644
+ delta: delta.content,
1645
+ id: textId || generateId()
1646
+ });
648
1647
  }
649
1648
  if (delta.tool_calls != null) {
650
1649
  for (const toolCallDelta of delta.tool_calls) {
651
- const index = toolCallDelta.index;
1650
+ const index = (_c = toolCallDelta.index) != null ? _c : toolCalls.length - 1;
652
1651
  if (toolCalls[index] == null) {
653
1652
  if (toolCallDelta.type !== "function") {
654
- throw new import_provider.InvalidResponseDataError({
1653
+ throw new InvalidResponseDataError({
655
1654
  data: toolCallDelta,
656
1655
  message: `Expected 'function' type.`
657
1656
  });
658
1657
  }
659
1658
  if (toolCallDelta.id == null) {
660
- throw new import_provider.InvalidResponseDataError({
1659
+ throw new InvalidResponseDataError({
661
1660
  data: toolCallDelta,
662
1661
  message: `Expected 'id' to be a string.`
663
1662
  });
664
1663
  }
665
- if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
666
- throw new import_provider.InvalidResponseDataError({
1664
+ if (((_d = toolCallDelta.function) == null ? void 0 : _d.name) == null) {
1665
+ throw new InvalidResponseDataError({
667
1666
  data: toolCallDelta,
668
1667
  message: `Expected 'function.name' to be a string.`
669
1668
  });
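
Text output gets the same framing: a text-start part is opened lazily on the first content delta, again reusing the response id when available, and text-end arrives from flush(). For a plain streamed answer a consumer now sees a sequence like the following sketch (the id, text, and token counts are illustrative, not from this file):

// Illustrative v2 part sequence for one streamed text response; the finish
// part also carries providerMetadata.llmgateway.usage, omitted here.
const exampleParts = [
  { type: "response-metadata", id: "gen-abc123" },
  { type: "text-start", id: "gen-abc123" },
  { type: "text-delta", id: "gen-abc123", delta: "Hello" },
  { type: "text-delta", id: "gen-abc123", delta: " world" },
  { type: "text-end", id: "gen-abc123" },
  {
    type: "finish",
    finishReason: "stop",
    usage: { inputTokens: 5, outputTokens: 2, totalTokens: 7, reasoningTokens: 0, cachedInputTokens: 0 },
  },
];
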
@@ -673,28 +1672,36 @@ var LLMGatewayChatLanguageModel = class {
673
1672
  type: "function",
674
1673
  function: {
675
1674
  name: toolCallDelta.function.name,
676
- arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
1675
+ arguments: (_e = toolCallDelta.function.arguments) != null ? _e : ""
677
1676
  },
1677
+ inputStarted: false,
678
1678
  sent: false
679
1679
  };
680
1680
  const toolCall2 = toolCalls[index];
681
1681
  if (toolCall2 == null) {
682
1682
  throw new Error("Tool call is missing");
683
1683
  }
684
- if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
1684
+ if (((_f = toolCall2.function) == null ? void 0 : _f.name) != null && ((_g = toolCall2.function) == null ? void 0 : _g.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
1685
+ toolCall2.inputStarted = true;
685
1686
  controller.enqueue({
686
- type: "tool-call-delta",
687
- toolCallType: "function",
688
- toolCallId: toolCall2.id,
689
- toolName: toolCall2.function.name,
690
- argsTextDelta: toolCall2.function.arguments
1687
+ type: "tool-input-start",
1688
+ id: toolCall2.id,
1689
+ toolName: toolCall2.function.name
1690
+ });
1691
+ controller.enqueue({
1692
+ type: "tool-input-delta",
1693
+ id: toolCall2.id,
1694
+ delta: toolCall2.function.arguments
1695
+ });
1696
+ controller.enqueue({
1697
+ type: "tool-input-end",
1698
+ id: toolCall2.id
691
1699
  });
692
1700
  controller.enqueue({
693
1701
  type: "tool-call",
694
- toolCallType: "function",
695
- toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
1702
+ toolCallId: toolCall2.id,
696
1703
  toolName: toolCall2.function.name,
697
- args: toolCall2.function.arguments
1704
+ input: toolCall2.function.arguments
698
1705
  });
699
1706
  toolCall2.sent = true;
700
1707
  }
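
Tool-call streaming is reframed the same way: v1's tool-call-delta parts become a tool-input-start / tool-input-delta / tool-input-end frame, and the final part's args field is renamed to input. When the first chunk already carries a complete, parsable argument string, the branch above emits the whole frame at once; illustratively (the id, tool name, and payload are made up):

// Sketch: parts emitted for a tool call that arrives complete in one chunk.
const exampleToolParts = [
  { type: "tool-input-start", id: "call_1", toolName: "getWeather" },
  { type: "tool-input-delta", id: "call_1", delta: '{"city":"Berlin"}' },
  { type: "tool-input-end", id: "call_1" },
  {
    type: "tool-call",
    toolCallId: "call_1",
    toolName: "getWeather",
    input: '{"city":"Berlin"}', // v1 named this field "args"
  },
];
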
@@ -704,23 +1711,28 @@ var LLMGatewayChatLanguageModel = class {
704
1711
  if (toolCall == null) {
705
1712
  throw new Error("Tool call is missing");
706
1713
  }
1714
+ if (!toolCall.inputStarted) {
1715
+ toolCall.inputStarted = true;
1716
+ controller.enqueue({
1717
+ type: "tool-input-start",
1718
+ id: toolCall.id,
1719
+ toolName: toolCall.function.name
1720
+ });
1721
+ }
707
1722
  if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
708
1723
  toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
709
1724
  }
710
1725
  controller.enqueue({
711
- type: "tool-call-delta",
712
- toolCallType: "function",
713
- toolCallId: toolCall.id,
714
- toolName: toolCall.function.name,
715
- argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
1726
+ type: "tool-input-delta",
1727
+ id: toolCall.id,
1728
+ delta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
716
1729
  });
717
- if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
1730
+ if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
718
1731
  controller.enqueue({
719
1732
  type: "tool-call",
720
- toolCallType: "function",
721
- toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
1733
+ toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
722
1734
  toolName: toolCall.function.name,
723
- args: toolCall.function.arguments
1735
+ input: toolCall.function.arguments
724
1736
  });
725
1737
  toolCall.sent = true;
726
1738
  }
@@ -728,198 +1740,54 @@ var LLMGatewayChatLanguageModel = class {
728
1740
  }
729
1741
  },
730
1742
  flush(controller) {
731
- var _a2;
1743
+ var _a16;
732
1744
  if (finishReason === "tool-calls") {
733
1745
  for (const toolCall of toolCalls) {
734
- if (!toolCall.sent) {
1746
+ if (toolCall && !toolCall.sent) {
735
1747
  controller.enqueue({
736
1748
  type: "tool-call",
737
- toolCallType: "function",
738
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
1749
+ toolCallId: (_a16 = toolCall.id) != null ? _a16 : generateId(),
739
1750
  toolName: toolCall.function.name,
740
1751
  // Coerce invalid arguments to an empty JSON object
741
- args: (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
1752
+ input: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
742
1753
  });
743
1754
  toolCall.sent = true;
744
1755
  }
745
1756
  }
746
1757
  }
747
- const providerMetadata = {};
748
- if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
749
- providerMetadata.llmgateway = {
750
- usage: llmgatewayUsage
751
- };
1758
+ if (textStarted) {
1759
+ controller.enqueue({
1760
+ type: "text-end",
1761
+ id: textId || generateId()
1762
+ });
1763
+ }
1764
+ if (reasoningStarted) {
1765
+ controller.enqueue({
1766
+ type: "reasoning-end",
1767
+ id: reasoningId || generateId()
1768
+ });
752
1769
  }
753
- const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
754
- controller.enqueue(__spreadValues({
1770
+ controller.enqueue({
755
1771
  type: "finish",
756
1772
  finishReason,
757
- logprobs,
758
- usage
759
- }, hasProviderMetadata ? { providerMetadata } : {}));
1773
+ usage,
1774
+ providerMetadata: {
1775
+ llmgateway: {
1776
+ usage: llmgatewayUsage
1777
+ }
1778
+ }
1779
+ });
760
1780
  }
761
1781
  })
762
1782
  ),
763
- rawCall: { rawPrompt, rawSettings },
764
- rawResponse: { headers: responseHeaders },
765
- warnings: []
1783
+ warnings: [],
1784
+ request: { body: args },
1785
+ response: { headers: responseHeaders }
766
1786
  };
767
1787
  }
768
1788
  };
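
From the consumer side, the practical upshot of the class above is that gateway accounting now always rides along under the llmgateway provider-metadata key instead of being gated on settings.usage.include. A usage sketch, assuming the AI SDK v5 call surface and a createLLMGateway factory (the factory name, option names, and model id are assumptions, not taken from this file):

import { generateText } from "ai";
import { createLLMGateway } from "@llmgateway/ai-sdk-provider"; // factory name assumed

const llmgateway = createLLMGateway({ apiKey: process.env.LLM_GATEWAY_API_KEY });

const result = await generateText({
  model: llmgateway("openai/gpt-4o"), // model id illustrative
  prompt: "Say hello.",
});

console.log(result.usage); // { inputTokens, outputTokens, totalTokens, ... }
console.log(result.providerMetadata?.llmgateway?.usage); // cost, cachedTokens, reasoningTokens
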
769
- var LLMGatewayChatCompletionBaseResponseSchema = import_zod3.z.object({
770
- id: import_zod3.z.string().optional(),
771
- model: import_zod3.z.string().optional(),
772
- usage: import_zod3.z.object({
773
- prompt_tokens: import_zod3.z.number(),
774
- prompt_tokens_details: import_zod3.z.object({
775
- cached_tokens: import_zod3.z.number()
776
- }).nullish(),
777
- completion_tokens: import_zod3.z.number(),
778
- completion_tokens_details: import_zod3.z.object({
779
- reasoning_tokens: import_zod3.z.number()
780
- }).nullish(),
781
- total_tokens: import_zod3.z.number(),
782
- cost: import_zod3.z.number().optional()
783
- }).nullish()
784
- });
785
- var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
786
- choices: import_zod3.z.array(
787
- import_zod3.z.object({
788
- message: import_zod3.z.object({
789
- role: import_zod3.z.literal("assistant"),
790
- content: import_zod3.z.string().nullable().optional(),
791
- reasoning: import_zod3.z.string().nullable().optional(),
792
- reasoning_details: ReasoningDetailArraySchema.nullish(),
793
- tool_calls: import_zod3.z.array(
794
- import_zod3.z.object({
795
- id: import_zod3.z.string().optional().nullable(),
796
- type: import_zod3.z.literal("function"),
797
- function: import_zod3.z.object({
798
- name: import_zod3.z.string(),
799
- arguments: import_zod3.z.string()
800
- })
801
- })
802
- ).optional()
803
- }),
804
- index: import_zod3.z.number(),
805
- logprobs: import_zod3.z.object({
806
- content: import_zod3.z.array(
807
- import_zod3.z.object({
808
- token: import_zod3.z.string(),
809
- logprob: import_zod3.z.number(),
810
- top_logprobs: import_zod3.z.array(
811
- import_zod3.z.object({
812
- token: import_zod3.z.string(),
813
- logprob: import_zod3.z.number()
814
- })
815
- )
816
- })
817
- ).nullable()
818
- }).nullable().optional(),
819
- finish_reason: import_zod3.z.string().optional().nullable()
820
- })
821
- )
822
- });
823
- var LLMGatewayStreamChatCompletionChunkSchema = import_zod3.z.union([
824
- LLMGatewayChatCompletionBaseResponseSchema.extend({
825
- choices: import_zod3.z.array(
826
- import_zod3.z.object({
827
- delta: import_zod3.z.object({
828
- role: import_zod3.z.enum(["assistant"]).optional(),
829
- content: import_zod3.z.string().nullish(),
830
- reasoning: import_zod3.z.string().nullish().optional(),
831
- reasoning_details: ReasoningDetailArraySchema.nullish(),
832
- tool_calls: import_zod3.z.array(
833
- import_zod3.z.object({
834
- index: import_zod3.z.number(),
835
- id: import_zod3.z.string().nullish(),
836
- type: import_zod3.z.literal("function").optional(),
837
- function: import_zod3.z.object({
838
- name: import_zod3.z.string().nullish(),
839
- arguments: import_zod3.z.string().nullish()
840
- })
841
- })
842
- ).nullish()
843
- }).nullish(),
844
- logprobs: import_zod3.z.object({
845
- content: import_zod3.z.array(
846
- import_zod3.z.object({
847
- token: import_zod3.z.string(),
848
- logprob: import_zod3.z.number(),
849
- top_logprobs: import_zod3.z.array(
850
- import_zod3.z.object({
851
- token: import_zod3.z.string(),
852
- logprob: import_zod3.z.number()
853
- })
854
- )
855
- })
856
- ).nullable()
857
- }).nullish(),
858
- finish_reason: import_zod3.z.string().nullable().optional(),
859
- index: import_zod3.z.number()
860
- })
861
- )
862
- }),
863
- LLMGatewayErrorResponseSchema
864
- ]);
865
- function prepareToolsAndToolChoice(mode) {
866
- var _a;
867
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
868
- if (tools == null) {
869
- return { tools: void 0, tool_choice: void 0 };
870
- }
871
- const mappedTools = tools.map((tool) => {
872
- if (isFunctionTool(tool)) {
873
- return {
874
- type: "function",
875
- function: {
876
- name: tool.name,
877
- description: tool.description,
878
- parameters: tool.parameters
879
- }
880
- };
881
- }
882
- return {
883
- type: "function",
884
- function: {
885
- name: tool.name
886
- }
887
- };
888
- });
889
- const toolChoice = mode.toolChoice;
890
- if (toolChoice == null) {
891
- return { tools: mappedTools, tool_choice: void 0 };
892
- }
893
- const type = toolChoice.type;
894
- switch (type) {
895
- case "auto":
896
- case "none":
897
- case "required":
898
- return { tools: mappedTools, tool_choice: type };
899
- case "tool":
900
- return {
901
- tools: mappedTools,
902
- tool_choice: {
903
- type: "function",
904
- function: {
905
- name: toolChoice.toolName
906
- }
907
- }
908
- };
909
- default: {
910
- const _exhaustiveCheck = type;
911
- throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
912
- }
913
- }
914
- }
915
-
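
prepareToolsAndToolChoice disappears because v2 no longer nests tools under mode: tools and toolChoice arrive as top-level call options (the completion model later in this file now rejects them outright). A reconstruction of the v2-shaped mapping, assuming the v2 function-tool field inputSchema (v1: parameters) and ignoring provider-defined tools; this is a sketch, not the package's actual v2 source:

// Reconstruction (assumed): map v2 call options to OpenAI-style fields.
function prepareToolsAndToolChoice(tools, toolChoice) {
  if (!tools?.length) return { tools: undefined, tool_choice: undefined };
  const mapped = tools.map((tool) => ({
    type: "function",
    function: {
      name: tool.name,
      description: tool.description,
      parameters: tool.inputSchema, // v2 renames "parameters" to "inputSchema"
    },
  }));
  if (toolChoice == null) return { tools: mapped, tool_choice: undefined };
  if (toolChoice.type === "tool") {
    return {
      tools: mapped,
      tool_choice: { type: "function", function: { name: toolChoice.toolName } },
    };
  }
  return { tools: mapped, tool_choice: toolChoice.type }; // "auto" | "none" | "required"
}
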
916
- // src/llmgateway-completion-language-model.ts
917
- var import_provider3 = require("@ai-sdk/provider");
918
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
919
- var import_zod4 = require("zod");
920
1789
 
921
- // src/convert-to-llmgateway-completion-prompt.ts
922
- var import_provider2 = require("@ai-sdk/provider");
1790
+ // src/completion/convert-to-llmgateway-completion-prompt.ts
923
1791
  function convertToLLMGatewayCompletionPrompt({
924
1792
  prompt,
925
1793
  inputFormat,
@@ -939,8 +1807,8 @@ function convertToLLMGatewayCompletionPrompt({
939
1807
  for (const { role, content } of prompt) {
940
1808
  switch (role) {
941
1809
  case "system": {
942
- throw new import_provider2.InvalidPromptError({
943
- message: "Unexpected system message in prompt: ${content}",
1810
+ throw new InvalidPromptError({
1811
+ message: `Unexpected system message in prompt: ${content}`,
944
1812
  prompt
945
1813
  });
946
1814
  }
@@ -950,21 +1818,13 @@ function convertToLLMGatewayCompletionPrompt({
950
1818
  case "text": {
951
1819
  return part.text;
952
1820
  }
953
- case "image": {
954
- throw new import_provider2.UnsupportedFunctionalityError({
955
- functionality: "images"
956
- });
957
- }
958
1821
  case "file": {
959
- throw new import_provider2.UnsupportedFunctionalityError({
1822
+ throw new UnsupportedFunctionalityError({
960
1823
  functionality: "file attachments"
961
1824
  });
962
1825
  }
963
1826
  default: {
964
- const _exhaustiveCheck = part;
965
- throw new Error(
966
- `Unsupported content type: ${_exhaustiveCheck}`
967
- );
1827
+ return "";
968
1828
  }
969
1829
  }
970
1830
  }).join("");
@@ -975,39 +1835,38 @@ ${userMessage}
975
1835
  break;
976
1836
  }
977
1837
  case "assistant": {
978
- const assistantMessage = content.map((part) => {
979
- switch (part.type) {
980
- case "text": {
981
- return part.text;
982
- }
983
- case "tool-call": {
984
- throw new import_provider2.UnsupportedFunctionalityError({
985
- functionality: "tool-call messages"
986
- });
987
- }
988
- case "reasoning": {
989
- throw new import_provider2.UnsupportedFunctionalityError({
990
- functionality: "reasoning messages"
991
- });
992
- }
993
- case "redacted-reasoning": {
994
- throw new import_provider2.UnsupportedFunctionalityError({
995
- functionality: "redacted reasoning messages"
996
- });
997
- }
998
- case "file": {
999
- throw new import_provider2.UnsupportedFunctionalityError({
1000
- functionality: "file attachments"
1001
- });
1002
- }
1003
- default: {
1004
- const _exhaustiveCheck = part;
1005
- throw new Error(
1006
- `Unsupported content type: ${_exhaustiveCheck}`
1007
- );
1838
+ const assistantMessage = content.map(
1839
+ (part) => {
1840
+ switch (part.type) {
1841
+ case "text": {
1842
+ return part.text;
1843
+ }
1844
+ case "tool-call": {
1845
+ throw new UnsupportedFunctionalityError({
1846
+ functionality: "tool-call messages"
1847
+ });
1848
+ }
1849
+ case "tool-result": {
1850
+ throw new UnsupportedFunctionalityError({
1851
+ functionality: "tool-result messages"
1852
+ });
1853
+ }
1854
+ case "reasoning": {
1855
+ throw new UnsupportedFunctionalityError({
1856
+ functionality: "reasoning messages"
1857
+ });
1858
+ }
1859
+ case "file": {
1860
+ throw new UnsupportedFunctionalityError({
1861
+ functionality: "file attachments"
1862
+ });
1863
+ }
1864
+ default: {
1865
+ return "";
1866
+ }
1008
1867
  }
1009
1868
  }
1010
- }).join("");
1869
+ ).join("");
1011
1870
  text += `${assistant}:
1012
1871
  ${assistantMessage}
1013
1872
 
@@ -1015,13 +1874,12 @@ ${assistantMessage}
1015
1874
  break;
1016
1875
  }
1017
1876
  case "tool": {
1018
- throw new import_provider2.UnsupportedFunctionalityError({
1877
+ throw new UnsupportedFunctionalityError({
1019
1878
  functionality: "tool messages"
1020
1879
  });
1021
1880
  }
1022
1881
  default: {
1023
- const _exhaustiveCheck = role;
1024
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1882
+ break;
1025
1883
  }
1026
1884
  }
1027
1885
  }
@@ -1032,40 +1890,63 @@ ${assistantMessage}
1032
1890
  };
1033
1891
  }
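
The converter flattens a chat-shaped prompt into a single labeled transcript for the /completions endpoint; unsupported part types now fall through to an empty string instead of throwing. Assuming the default "user"/"assistant" labels, a multi-message prompt flattens roughly like this:

// Illustration only: input messages and the transcript the loop produces.
const messages = [
  { role: "user", content: [{ type: "text", text: "What is 2 + 2?" }] },
  { role: "assistant", content: [{ type: "text", text: "4" }] },
];
// Flattened prompt text, roughly:
//
//   user:
//   What is 2 + 2?
//
//   assistant:
//   4
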
1034
1892
 
1035
- // src/map-llmgateway-completion-logprobs.ts
1036
- function mapLLMGatewayCompletionLogprobs(logprobs) {
1037
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => {
1038
- var _a, _b;
1039
- return {
1040
- token,
1041
- logprob: (_a = logprobs.token_logprobs[index]) != null ? _a : 0,
1042
- topLogprobs: logprobs.top_logprobs ? Object.entries((_b = logprobs.top_logprobs[index]) != null ? _b : {}).map(
1043
- ([token2, logprob]) => ({
1044
- token: token2,
1045
- logprob
1046
- })
1047
- ) : []
1048
- };
1049
- });
1050
- }
1893
+ // src/completion/schemas.ts
1894
+ var import_v45 = require("zod/v4");
1895
+ var LLMGatewayCompletionChunkSchema = import_v45.z.union([
1896
+ import_v45.z.object({
1897
+ id: import_v45.z.string().optional(),
1898
+ model: import_v45.z.string().optional(),
1899
+ choices: import_v45.z.array(
1900
+ import_v45.z.object({
1901
+ text: import_v45.z.string(),
1902
+ reasoning: import_v45.z.string().nullish().optional(),
1903
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
1904
+ finish_reason: import_v45.z.string().nullish(),
1905
+ index: import_v45.z.number().nullish(),
1906
+ logprobs: import_v45.z.object({
1907
+ tokens: import_v45.z.array(import_v45.z.string()),
1908
+ token_logprobs: import_v45.z.array(import_v45.z.number()),
1909
+ top_logprobs: import_v45.z.array(import_v45.z.record(import_v45.z.string(), import_v45.z.number())).nullable()
1910
+ }).nullable().optional()
1911
+ })
1912
+ ),
1913
+ usage: import_v45.z.object({
1914
+ prompt_tokens: import_v45.z.number(),
1915
+ prompt_tokens_details: import_v45.z.object({
1916
+ cached_tokens: import_v45.z.number()
1917
+ }).nullish(),
1918
+ completion_tokens: import_v45.z.number(),
1919
+ completion_tokens_details: import_v45.z.object({
1920
+ reasoning_tokens: import_v45.z.number()
1921
+ }).nullish(),
1922
+ total_tokens: import_v45.z.number(),
1923
+ cost: import_v45.z.number().optional()
1924
+ }).nullish()
1925
+ }),
1926
+ LLMGatewayErrorResponseSchema
1927
+ ]);
1051
1928
 
1052
- // src/llmgateway-completion-language-model.ts
1929
+ // src/completion/index.ts
1053
1930
  var LLMGatewayCompletionLanguageModel = class {
1054
1931
  constructor(modelId, settings, config) {
1055
- this.specificationVersion = "v1";
1932
+ this.specificationVersion = "v2";
1933
+ this.provider = "llmgateway";
1934
+ this.supportedUrls = {
1935
+ "image/*": [
1936
+ /^data:image\/[a-zA-Z]+;base64,/,
1937
+ /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
1938
+ ],
1939
+ "text/*": [/^data:text\//, /^https?:\/\/.+$/],
1940
+ "application/*": [/^data:application\//, /^https?:\/\/.+$/]
1941
+ };
1056
1942
  this.defaultObjectGenerationMode = void 0;
1057
1943
  this.modelId = modelId;
1058
1944
  this.settings = settings;
1059
1945
  this.config = config;
1060
1946
  }
1061
- get provider() {
1062
- return this.config.provider;
1063
- }
1064
1947
  getArgs({
1065
- mode,
1066
- inputFormat,
1067
1948
  prompt,
1068
- maxTokens,
1949
+ maxOutputTokens,
1069
1950
  temperature,
1070
1951
  topP,
1071
1952
  frequencyPenalty,
@@ -1074,16 +1955,24 @@ var LLMGatewayCompletionLanguageModel = class {
1074
1955
  responseFormat,
1075
1956
  topK,
1076
1957
  stopSequences,
1077
- providerMetadata
1958
+ tools,
1959
+ toolChoice
1078
1960
  }) {
1079
- var _a, _b;
1080
- const type = mode.type;
1081
- const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
1082
1961
  const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
1083
1962
  prompt,
1084
- inputFormat
1963
+ inputFormat: "prompt"
1085
1964
  });
1086
- const baseArgs = __spreadValues(__spreadValues(__spreadValues({
1965
+ if (tools == null ? void 0 : tools.length) {
1966
+ throw new UnsupportedFunctionalityError({
1967
+ functionality: "tools"
1968
+ });
1969
+ }
1970
+ if (toolChoice) {
1971
+ throw new UnsupportedFunctionalityError({
1972
+ functionality: "toolChoice"
1973
+ });
1974
+ }
1975
+ return __spreadValues(__spreadValues({
1087
1976
  // model id:
1088
1977
  model: this.modelId,
1089
1978
  models: this.settings.models,
@@ -1093,7 +1982,7 @@ var LLMGatewayCompletionLanguageModel = class {
1093
1982
  suffix: this.settings.suffix,
1094
1983
  user: this.settings.user,
1095
1984
  // standardized settings:
1096
- max_tokens: maxTokens,
1985
+ max_tokens: maxOutputTokens,
1097
1986
  temperature,
1098
1987
  top_p: topP,
1099
1988
  frequency_penalty: frequencyPenalty,
@@ -1103,59 +1992,31 @@ var LLMGatewayCompletionLanguageModel = class {
1103
1992
  response_format: responseFormat,
1104
1993
  top_k: topK,
1105
1994
  // prompt:
1106
- prompt: completionPrompt
1107
- }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
1108
- switch (type) {
1109
- case "regular": {
1110
- if ((_b = mode.tools) == null ? void 0 : _b.length) {
1111
- throw new import_provider3.UnsupportedFunctionalityError({
1112
- functionality: "tools"
1113
- });
1114
- }
1115
- if (mode.toolChoice) {
1116
- throw new import_provider3.UnsupportedFunctionalityError({
1117
- functionality: "toolChoice"
1118
- });
1119
- }
1120
- return baseArgs;
1121
- }
1122
- case "object-json": {
1123
- throw new import_provider3.UnsupportedFunctionalityError({
1124
- functionality: "object-json mode"
1125
- });
1126
- }
1127
- case "object-tool": {
1128
- throw new import_provider3.UnsupportedFunctionalityError({
1129
- functionality: "object-tool mode"
1130
- });
1131
- }
1132
- // Handle all non-text types with a single default case
1133
- default: {
1134
- const _exhaustiveCheck = type;
1135
- throw new import_provider3.UnsupportedFunctionalityError({
1136
- functionality: `${_exhaustiveCheck} mode`
1137
- });
1138
- }
1139
- }
1995
+ prompt: completionPrompt,
1996
+ // LLMGateway specific settings:
1997
+ include_reasoning: this.settings.includeReasoning,
1998
+ reasoning: this.settings.reasoning
1999
+ }, this.config.extraBody), this.settings.extraBody);
1140
2000
  }
1141
2001
  async doGenerate(options) {
1142
- var _b, _c, _d, _e, _f;
1143
- const args = this.getArgs(options);
1144
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
2002
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
2003
+ const providerOptions = options.providerOptions || {};
2004
+ const llmgatewayOptions = providerOptions.llmgateway || {};
2005
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
2006
+ const { value: response, responseHeaders } = await postJsonToApi({
1145
2007
  url: this.config.url({
1146
2008
  path: "/completions",
1147
2009
  modelId: this.modelId
1148
2010
  }),
1149
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
2011
+ headers: combineHeaders(this.config.headers(), options.headers),
1150
2012
  body: args,
1151
2013
  failedResponseHandler: llmgatewayFailedResponseHandler,
1152
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
2014
+ successfulResponseHandler: createJsonResponseHandler(
1153
2015
  LLMGatewayCompletionChunkSchema
1154
2016
  ),
1155
2017
  abortSignal: options.abortSignal,
1156
2018
  fetch: this.config.fetch
1157
2019
  });
1158
- const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
1159
2020
  if ("error" in response) {
1160
2021
  throw new Error(`${response.error.message}`);
1161
2022
  }
@@ -1164,54 +2025,62 @@ var LLMGatewayCompletionLanguageModel = class {
1164
2025
  throw new Error("No choice in LLMGateway completion response");
1165
2026
  }
1166
2027
  return {
1167
- response: {
1168
- id: response.id,
1169
- modelId: response.model
1170
- },
1171
- text: (_b = choice.text) != null ? _b : "",
1172
- reasoning: choice.reasoning || void 0,
2028
+ content: [
2029
+ {
2030
+ type: "text",
2031
+ text: (_a15 = choice.text) != null ? _a15 : ""
2032
+ }
2033
+ ],
2034
+ finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
1173
2035
  usage: {
1174
- promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
1175
- completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
2036
+ inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : 0,
2037
+ outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : 0,
2038
+ totalTokens: ((_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : 0) + ((_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : 0),
2039
+ reasoningTokens: (_l = (_k = (_j = response.usage) == null ? void 0 : _j.completion_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : 0,
2040
+ cachedInputTokens: (_o = (_n = (_m = response.usage) == null ? void 0 : _m.prompt_tokens_details) == null ? void 0 : _n.cached_tokens) != null ? _o : 0
1176
2041
  },
1177
- finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
1178
- logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
1179
- rawCall: { rawPrompt, rawSettings },
1180
- rawResponse: { headers: responseHeaders },
1181
- warnings: []
2042
+ warnings: [],
2043
+ response: {
2044
+ headers: responseHeaders
2045
+ }
1182
2046
  };
1183
2047
  }
1184
2048
  async doStream(options) {
1185
- const args = this.getArgs(options);
1186
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
2049
+ const providerOptions = options.providerOptions || {};
2050
+ const llmgatewayOptions = providerOptions.llmgateway || {};
2051
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
2052
+ const { value: response, responseHeaders } = await postJsonToApi({
1187
2053
  url: this.config.url({
1188
2054
  path: "/completions",
1189
2055
  modelId: this.modelId
1190
2056
  }),
1191
- headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1192
- body: __spreadProps(__spreadValues({}, this.getArgs(options)), {
2057
+ headers: combineHeaders(this.config.headers(), options.headers),
2058
+ body: __spreadProps(__spreadValues({}, args), {
1193
2059
  stream: true,
1194
2060
  // only include stream_options when in strict compatibility mode:
1195
2061
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1196
2062
  }),
1197
2063
  failedResponseHandler: llmgatewayFailedResponseHandler,
1198
- successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
2064
+ successfulResponseHandler: createEventSourceResponseHandler(
1199
2065
  LLMGatewayCompletionChunkSchema
1200
2066
  ),
1201
2067
  abortSignal: options.abortSignal,
1202
2068
  fetch: this.config.fetch
1203
2069
  });
1204
- const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
1205
2070
  let finishReason = "other";
1206
- let usage = {
1207
- promptTokens: Number.NaN,
1208
- completionTokens: Number.NaN
2071
+ const usage = {
2072
+ inputTokens: Number.NaN,
2073
+ outputTokens: Number.NaN,
2074
+ totalTokens: Number.NaN,
2075
+ reasoningTokens: Number.NaN,
2076
+ cachedInputTokens: Number.NaN
1209
2077
  };
1210
- let logprobs;
2078
+ const llmgatewayUsage = {};
1211
2079
  return {
1212
2080
  stream: response.pipeThrough(
1213
2081
  new TransformStream({
1214
2082
  transform(chunk, controller) {
2083
+ var _a15, _b;
1215
2084
  if (!chunk.success) {
1216
2085
  finishReason = "error";
1217
2086
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1224,10 +2093,27 @@ var LLMGatewayCompletionLanguageModel = class {
1224
2093
  return;
1225
2094
  }
1226
2095
  if (value.usage != null) {
1227
- usage = {
1228
- promptTokens: value.usage.prompt_tokens,
1229
- completionTokens: value.usage.completion_tokens
1230
- };
2096
+ usage.inputTokens = value.usage.prompt_tokens;
2097
+ usage.outputTokens = value.usage.completion_tokens;
2098
+ usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
2099
+ llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
2100
+ if (value.usage.prompt_tokens_details) {
2101
+ const cachedInputTokens = (_a15 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a15 : 0;
2102
+ usage.cachedInputTokens = cachedInputTokens;
2103
+ llmgatewayUsage.promptTokensDetails = {
2104
+ cachedTokens: cachedInputTokens
2105
+ };
2106
+ }
2107
+ llmgatewayUsage.completionTokens = value.usage.completion_tokens;
2108
+ if (value.usage.completion_tokens_details) {
2109
+ const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
2110
+ usage.reasoningTokens = reasoningTokens;
2111
+ llmgatewayUsage.completionTokensDetails = {
2112
+ reasoningTokens
2113
+ };
2114
+ }
2115
+ llmgatewayUsage.cost = value.usage.cost;
2116
+ llmgatewayUsage.totalTokens = value.usage.total_tokens;
1231
2117
  }
1232
2118
  const choice = value.choices[0];
1233
2119
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1236,60 +2122,31 @@ var LLMGatewayCompletionLanguageModel = class {
1236
2122
  if ((choice == null ? void 0 : choice.text) != null) {
1237
2123
  controller.enqueue({
1238
2124
  type: "text-delta",
1239
- textDelta: choice.text
2125
+ delta: choice.text,
2126
+ id: generateId()
1240
2127
  });
1241
2128
  }
1242
- const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
1243
- choice == null ? void 0 : choice.logprobs
1244
- );
1245
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1246
- if (logprobs === void 0) {
1247
- logprobs = [];
1248
- }
1249
- logprobs.push(...mappedLogprobs);
1250
- }
1251
2129
  },
1252
2130
  flush(controller) {
1253
2131
  controller.enqueue({
1254
2132
  type: "finish",
1255
2133
  finishReason,
1256
- logprobs,
1257
- usage
2134
+ usage,
2135
+ providerMetadata: {
2136
+ llmgateway: {
2137
+ usage: llmgatewayUsage
2138
+ }
2139
+ }
1258
2140
  });
1259
2141
  }
1260
2142
  })
1261
2143
  ),
1262
- rawCall: { rawPrompt, rawSettings },
1263
- rawResponse: { headers: responseHeaders },
1264
- warnings: []
2144
+ response: {
2145
+ headers: responseHeaders
2146
+ }
1265
2147
  };
1266
2148
  }
1267
2149
  };
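
One behavioral quirk worth noting in the completion model's stream: each text-delta is emitted with a fresh generateId() and no enclosing text-start/text-end frame, unlike the chat model above, which keys every delta to one stable block id. Consumers that group deltas by id will therefore see one single-delta block per chunk; roughly (values illustrative):

// Illustrative completion-stream output; note the differing ids per delta.
const completionParts = [
  { type: "text-delta", id: "id-1", delta: "Hello" },
  { type: "text-delta", id: "id-2", delta: " world" },
  {
    type: "finish",
    finishReason: "stop",
    usage: { inputTokens: 3, outputTokens: 2, totalTokens: 5, reasoningTokens: 0, cachedInputTokens: 0 },
  },
];
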
1268
- var LLMGatewayCompletionChunkSchema = import_zod4.z.union([
1269
- import_zod4.z.object({
1270
- id: import_zod4.z.string().optional(),
1271
- model: import_zod4.z.string().optional(),
1272
- choices: import_zod4.z.array(
1273
- import_zod4.z.object({
1274
- text: import_zod4.z.string(),
1275
- reasoning: import_zod4.z.string().nullish().optional(),
1276
- reasoning_details: ReasoningDetailArraySchema.nullish(),
1277
- finish_reason: import_zod4.z.string().nullish(),
1278
- index: import_zod4.z.number(),
1279
- logprobs: import_zod4.z.object({
1280
- tokens: import_zod4.z.array(import_zod4.z.string()),
1281
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
1282
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
1283
- }).nullable().optional()
1284
- })
1285
- ),
1286
- usage: import_zod4.z.object({
1287
- prompt_tokens: import_zod4.z.number(),
1288
- completion_tokens: import_zod4.z.number()
1289
- }).optional().nullable()
1290
- }),
1291
- LLMGatewayErrorResponseSchema
1292
- ]);
1293
2150
  // Annotate the CommonJS export names for ESM import in node:
1294
2151
  0 && (module.exports = {
1295
2152
  LLMGatewayChatLanguageModel,