@llmgateway/ai-sdk-provider 1.0.3 → 2.0.0

This diff shows the changes between two publicly released versions of this package, as they appear in the public registry they were published to. It is provided for informational purposes only.
@@ -17,21 +17,831 @@ var __spreadValues = (a, b) => {
  return a;
  };
  var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
- var __objRest = (source, exclude) => {
- var target = {};
- for (var prop in source)
- if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
- target[prop] = source[prop];
- if (source != null && __getOwnPropSymbols)
- for (var prop of __getOwnPropSymbols(source)) {
- if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
- target[prop] = source[prop];
+
+ // node_modules/.pnpm/@ai-sdk+provider@2.0.0-beta.1/node_modules/@ai-sdk/provider/dist/index.mjs
+ var marker = "vercel.ai.error";
+ var symbol = Symbol.for(marker);
+ var _a;
+ var _AISDKError = class _AISDKError2 extends Error {
+ /**
+ * Creates an AI SDK Error.
+ *
+ * @param {Object} params - The parameters for creating the error.
+ * @param {string} params.name - The name of the error.
+ * @param {string} params.message - The error message.
+ * @param {unknown} [params.cause] - The underlying cause of the error.
+ */
+ constructor({
+ name: name14,
+ message,
+ cause
+ }) {
+ super(message);
+ this[_a] = true;
+ this.name = name14;
+ this.cause = cause;
+ }
+ /**
+ * Checks if the given error is an AI SDK Error.
+ * @param {unknown} error - The error to check.
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
+ */
+ static isInstance(error) {
+ return _AISDKError2.hasMarker(error, marker);
+ }
+ static hasMarker(error, marker15) {
+ const markerSymbol = Symbol.for(marker15);
+ return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
+ }
+ };
+ _a = symbol;
+ var AISDKError = _AISDKError;
+ var name = "AI_APICallError";
+ var marker2 = `vercel.ai.error.${name}`;
+ var symbol2 = Symbol.for(marker2);
+ var _a2;
+ var APICallError = class extends AISDKError {
+ constructor({
+ message,
+ url,
+ requestBodyValues,
+ statusCode,
+ responseHeaders,
+ responseBody,
+ cause,
+ isRetryable = statusCode != null && (statusCode === 408 || // request timeout
+ statusCode === 409 || // conflict
+ statusCode === 429 || // too many requests
+ statusCode >= 500),
+ // server error
+ data
+ }) {
+ super({ name, message, cause });
+ this[_a2] = true;
+ this.url = url;
+ this.requestBodyValues = requestBodyValues;
+ this.statusCode = statusCode;
+ this.responseHeaders = responseHeaders;
+ this.responseBody = responseBody;
+ this.isRetryable = isRetryable;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker2);
+ }
+ };
+ _a2 = symbol2;
+ var name2 = "AI_EmptyResponseBodyError";
+ var marker3 = `vercel.ai.error.${name2}`;
+ var symbol3 = Symbol.for(marker3);
+ var _a3;
+ var EmptyResponseBodyError = class extends AISDKError {
+ // used in isInstance
+ constructor({ message = "Empty response body" } = {}) {
+ super({ name: name2, message });
+ this[_a3] = true;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker3);
+ }
+ };
+ _a3 = symbol3;
+ function getErrorMessage(error) {
+ if (error == null) {
+ return "unknown error";
+ }
+ if (typeof error === "string") {
+ return error;
+ }
+ if (error instanceof Error) {
+ return error.message;
+ }
+ return JSON.stringify(error);
+ }
+ var name3 = "AI_InvalidArgumentError";
+ var marker4 = `vercel.ai.error.${name3}`;
+ var symbol4 = Symbol.for(marker4);
+ var _a4;
+ var InvalidArgumentError = class extends AISDKError {
+ constructor({
+ message,
+ cause,
+ argument
+ }) {
+ super({ name: name3, message, cause });
+ this[_a4] = true;
+ this.argument = argument;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker4);
+ }
+ };
+ _a4 = symbol4;
+ var name4 = "AI_InvalidPromptError";
+ var marker5 = `vercel.ai.error.${name4}`;
+ var symbol5 = Symbol.for(marker5);
+ var _a5;
+ var InvalidPromptError = class extends AISDKError {
+ constructor({
+ prompt,
+ message,
+ cause
+ }) {
+ super({ name: name4, message: `Invalid prompt: ${message}`, cause });
+ this[_a5] = true;
+ this.prompt = prompt;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker5);
+ }
+ };
+ _a5 = symbol5;
+ var name5 = "AI_InvalidResponseDataError";
+ var marker6 = `vercel.ai.error.${name5}`;
+ var symbol6 = Symbol.for(marker6);
+ var _a6;
+ var InvalidResponseDataError = class extends AISDKError {
+ constructor({
+ data,
+ message = `Invalid response data: ${JSON.stringify(data)}.`
+ }) {
+ super({ name: name5, message });
+ this[_a6] = true;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker6);
+ }
+ };
+ _a6 = symbol6;
+ var name6 = "AI_JSONParseError";
+ var marker7 = `vercel.ai.error.${name6}`;
+ var symbol7 = Symbol.for(marker7);
+ var _a7;
+ var JSONParseError = class extends AISDKError {
+ constructor({ text, cause }) {
+ super({
+ name: name6,
+ message: `JSON parsing failed: Text: ${text}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a7] = true;
+ this.text = text;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker7);
+ }
+ };
+ _a7 = symbol7;
+ var name7 = "AI_LoadAPIKeyError";
+ var marker8 = `vercel.ai.error.${name7}`;
+ var symbol8 = Symbol.for(marker8);
+ var _a8;
+ _a8 = symbol8;
+ var name8 = "AI_LoadSettingError";
+ var marker9 = `vercel.ai.error.${name8}`;
+ var symbol9 = Symbol.for(marker9);
+ var _a9;
+ _a9 = symbol9;
+ var name9 = "AI_NoContentGeneratedError";
+ var marker10 = `vercel.ai.error.${name9}`;
+ var symbol10 = Symbol.for(marker10);
+ var _a10;
+ _a10 = symbol10;
+ var name10 = "AI_NoSuchModelError";
+ var marker11 = `vercel.ai.error.${name10}`;
+ var symbol11 = Symbol.for(marker11);
+ var _a11;
+ _a11 = symbol11;
+ var name11 = "AI_TooManyEmbeddingValuesForCallError";
+ var marker12 = `vercel.ai.error.${name11}`;
+ var symbol12 = Symbol.for(marker12);
+ var _a12;
+ _a12 = symbol12;
+ var name12 = "AI_TypeValidationError";
+ var marker13 = `vercel.ai.error.${name12}`;
+ var symbol13 = Symbol.for(marker13);
+ var _a13;
+ var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
+ constructor({ value, cause }) {
+ super({
+ name: name12,
+ message: `Type validation failed: Value: ${JSON.stringify(value)}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a13] = true;
+ this.value = value;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker13);
+ }
+ /**
+ * Wraps an error into a TypeValidationError.
+ * If the cause is already a TypeValidationError with the same value, it returns the cause.
+ * Otherwise, it creates a new TypeValidationError.
+ *
+ * @param {Object} params - The parameters for wrapping the error.
+ * @param {unknown} params.value - The value that failed validation.
+ * @param {unknown} params.cause - The original error or cause of the validation failure.
+ * @returns {TypeValidationError} A TypeValidationError instance.
+ */
+ static wrap({
+ value,
+ cause
+ }) {
+ return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
+ }
+ };
+ _a13 = symbol13;
+ var TypeValidationError = _TypeValidationError;
+ var name13 = "AI_UnsupportedFunctionalityError";
+ var marker14 = `vercel.ai.error.${name13}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var UnsupportedFunctionalityError = class extends AISDKError {
+ constructor({
+ functionality,
+ message = `'${functionality}' functionality not supported.`
+ }) {
+ super({ name: name13, message });
+ this[_a14] = true;
+ this.functionality = functionality;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;
+
+ // node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/index.js
+ var ParseError = class extends Error {
+ constructor(message, options) {
+ super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line;
+ }
+ };
+ function noop(_arg) {
+ }
+ function createParser(callbacks) {
+ if (typeof callbacks == "function")
+ throw new TypeError(
+ "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?"
+ );
+ const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks;
+ let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = "";
+ function feed(newChunk) {
+ const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`);
+ for (const line of complete)
+ parseLine(line);
+ incompleteLine = incomplete, isFirstChunk = false;
+ }
+ function parseLine(line) {
+ if (line === "") {
+ dispatchEvent();
+ return;
+ }
+ if (line.startsWith(":")) {
+ onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1));
+ return;
+ }
+ const fieldSeparatorIndex = line.indexOf(":");
+ if (fieldSeparatorIndex !== -1) {
+ const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset);
+ processField(field, value, line);
+ return;
+ }
+ processField(line, "", line);
+ }
+ function processField(field, value, line) {
+ switch (field) {
+ case "event":
+ eventType = value;
+ break;
+ case "data":
+ data = `${data}${value}
+ `;
+ break;
+ case "id":
+ id = value.includes("\0") ? void 0 : value;
+ break;
+ case "retry":
+ /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError(
+ new ParseError(`Invalid \`retry\` value: "${value}"`, {
+ type: "invalid-retry",
+ value,
+ line
+ })
+ );
+ break;
+ default:
+ onError(
+ new ParseError(
+ `Unknown field "${field.length > 20 ? `${field.slice(0, 20)}\u2026` : field}"`,
+ { type: "unknown-field", field, value, line }
+ )
+ );
+ break;
+ }
+ }
+ function dispatchEvent() {
+ data.length > 0 && onEvent({
+ id,
+ event: eventType || void 0,
+ // If the data buffer's last character is a U+000A LINE FEED (LF) character,
+ // then remove the last character from the data buffer.
+ data: data.endsWith(`
+ `) ? data.slice(0, -1) : data
+ }), id = void 0, data = "", eventType = "";
+ }
+ function reset(options = {}) {
+ incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = "";
+ }
+ return { feed, reset };
+ }
+ function splitLines(chunk) {
+ const lines = [];
+ let incompleteLine = "", searchIndex = 0;
+ for (; searchIndex < chunk.length; ) {
+ const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(`
+ `, searchIndex);
+ let lineEnd = -1;
+ if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) {
+ incompleteLine = chunk.slice(searchIndex);
+ break;
+ } else {
+ const line = chunk.slice(searchIndex, lineEnd);
+ lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === `
+ ` && searchIndex++;
  }
- return target;
+ }
+ return [lines, incompleteLine];
+ }
+
+ // node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/stream.js
+ var EventSourceParserStream = class extends TransformStream {
+ constructor({ onError, onRetry, onComment } = {}) {
+ let parser;
+ super({
+ start(controller) {
+ parser = createParser({
+ onEvent: (event) => {
+ controller.enqueue(event);
+ },
+ onError(error) {
+ onError === "terminate" ? controller.error(error) : typeof onError == "function" && onError(error);
+ },
+ onRetry,
+ onComment
+ });
+ },
+ transform(chunk) {
+ parser.feed(chunk);
+ }
+ });
+ }
  };
 
+ // node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ import * as z4 from "zod/v4";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/Options.js
+ var ignoreOverride = Symbol("Let zodToJsonSchema decide on which parser to use");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/selectParser.js
+ import { ZodFirstPartyTypeKind as ZodFirstPartyTypeKind3 } from "zod";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/array.js
+ import { ZodFirstPartyTypeKind } from "zod";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/record.js
+ import { ZodFirstPartyTypeKind as ZodFirstPartyTypeKind2 } from "zod";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/string.js
+ var ALPHA_NUMERIC = new Set("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvxyz0123456789");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/object.js
+ import { ZodOptional } from "zod";
+
+ // node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ function combineHeaders(...headers) {
+ return headers.reduce(
+ (combinedHeaders, currentHeaders) => __spreadValues(__spreadValues({}, combinedHeaders), currentHeaders != null ? currentHeaders : {}),
+ {}
+ );
+ }
+ function extractResponseHeaders(response) {
+ return Object.fromEntries([...response.headers]);
+ }
+ var createIdGenerator = ({
+ prefix,
+ size = 16,
+ alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ separator = "-"
+ } = {}) => {
+ const generator = () => {
+ const alphabetLength = alphabet.length;
+ const chars = new Array(size);
+ for (let i = 0; i < size; i++) {
+ chars[i] = alphabet[Math.random() * alphabetLength | 0];
+ }
+ return chars.join("");
+ };
+ if (prefix == null) {
+ return generator;
+ }
+ if (alphabet.includes(separator)) {
+ throw new InvalidArgumentError({
+ argument: "separator",
+ message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
+ });
+ }
+ return () => `${prefix}${separator}${generator()}`;
+ };
+ var generateId = createIdGenerator();
+ function isAbortError(error) {
+ return error instanceof Error && (error.name === "AbortError" || error.name === "ResponseAborted" || // Next.js
+ error.name === "TimeoutError");
+ }
+ var FETCH_FAILED_ERROR_MESSAGES = ["fetch failed", "failed to fetch"];
+ function handleFetchError({
+ error,
+ url,
+ requestBodyValues
+ }) {
+ if (isAbortError(error)) {
+ return error;
+ }
+ if (error instanceof TypeError && FETCH_FAILED_ERROR_MESSAGES.includes(error.message.toLowerCase())) {
+ const cause = error.cause;
+ if (cause != null) {
+ return new APICallError({
+ message: `Cannot connect to API: ${cause.message}`,
+ cause,
+ url,
+ requestBodyValues,
+ isRetryable: true
+ // retry when network error
+ });
+ }
+ }
+ return error;
+ }
+ function removeUndefinedEntries(record) {
+ return Object.fromEntries(
+ Object.entries(record).filter(([_key, value]) => value != null)
+ );
+ }
+ var suspectProtoRx = /"__proto__"\s*:/;
+ var suspectConstructorRx = /"constructor"\s*:/;
+ function _parse(text) {
+ const obj = JSON.parse(text);
+ if (obj === null || typeof obj !== "object") {
+ return obj;
+ }
+ if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
+ return obj;
+ }
+ return filter(obj);
+ }
+ function filter(obj) {
+ let next = [obj];
+ while (next.length) {
+ const nodes = next;
+ next = [];
+ for (const node of nodes) {
+ if (Object.prototype.hasOwnProperty.call(node, "__proto__")) {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ if (Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ for (const key in node) {
+ const value = node[key];
+ if (value && typeof value === "object") {
+ next.push(value);
+ }
+ }
+ }
+ }
+ return obj;
+ }
+ function secureJsonParse(text) {
+ const { stackTraceLimit } = Error;
+ Error.stackTraceLimit = 0;
+ try {
+ return _parse(text);
+ } finally {
+ Error.stackTraceLimit = stackTraceLimit;
+ }
+ }
+ var validatorSymbol = Symbol.for("vercel.ai.validator");
+ function validator(validate) {
+ return { [validatorSymbol]: true, validate };
+ }
+ function isValidator(value) {
+ return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
+ }
+ function asValidator(value) {
+ return isValidator(value) ? value : standardSchemaValidator(value);
+ }
+ function standardSchemaValidator(standardSchema) {
+ return validator(async (value) => {
+ const result = await standardSchema["~standard"].validate(value);
+ return result.issues == null ? { success: true, value: result.value } : {
+ success: false,
+ error: new TypeValidationError({
+ value,
+ cause: result.issues
+ })
+ };
+ });
+ }
+ async function validateTypes({
+ value,
+ schema
+ }) {
+ const result = await safeValidateTypes({ value, schema });
+ if (!result.success) {
+ throw TypeValidationError.wrap({ value, cause: result.error });
+ }
+ return result.value;
+ }
+ async function safeValidateTypes({
+ value,
+ schema
+ }) {
+ const validator2 = asValidator(schema);
+ try {
+ if (validator2.validate == null) {
+ return { success: true, value, rawValue: value };
+ }
+ const result = await validator2.validate(value);
+ if (result.success) {
+ return { success: true, value: result.value, rawValue: value };
+ }
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: result.error }),
+ rawValue: value
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: error }),
+ rawValue: value
+ };
+ }
+ }
+ async function parseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = secureJsonParse(text);
+ if (schema == null) {
+ return value;
+ }
+ return validateTypes({ value, schema });
+ } catch (error) {
+ if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
+ throw error;
+ }
+ throw new JSONParseError({ text, cause: error });
+ }
+ }
+ async function safeParseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = secureJsonParse(text);
+ if (schema == null) {
+ return { success: true, value, rawValue: value };
+ }
+ return await safeValidateTypes({ value, schema });
+ } catch (error) {
+ return {
+ success: false,
+ error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error }),
+ rawValue: void 0
+ };
+ }
+ }
+ function isParsableJson(input) {
+ try {
+ secureJsonParse(input);
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }
+ function parseJsonEventStream({
+ stream,
+ schema
+ }) {
+ return stream.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(
+ new TransformStream({
+ async transform({ data }, controller) {
+ if (data === "[DONE]") {
+ return;
+ }
+ controller.enqueue(await safeParseJSON({ text: data, schema }));
+ }
+ })
+ );
+ }
+ var getOriginalFetch2 = () => globalThis.fetch;
+ var postJsonToApi = async ({
+ url,
+ headers,
+ body,
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ }) => postToApi({
+ url,
+ headers: __spreadValues({
+ "Content-Type": "application/json"
+ }, headers),
+ body: {
+ content: JSON.stringify(body),
+ values: body
+ },
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ });
+ var postToApi = async ({
+ url,
+ headers = {},
+ body,
+ successfulResponseHandler,
+ failedResponseHandler,
+ abortSignal,
+ fetch = getOriginalFetch2()
+ }) => {
+ try {
+ const response = await fetch(url, {
+ method: "POST",
+ headers: removeUndefinedEntries(headers),
+ body: body.content,
+ signal: abortSignal
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!response.ok) {
+ let errorInformation;
+ try {
+ errorInformation = await failedResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ throw new APICallError({
+ message: "Failed to process error response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ throw errorInformation.value;
+ }
+ try {
+ return await successfulResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (error instanceof Error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ }
+ throw new APICallError({
+ message: "Failed to process successful response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ } catch (error) {
+ throw handleFetchError({ error, url, requestBodyValues: body.values });
+ }
+ };
+ var createJsonErrorResponseHandler = ({
+ errorSchema,
+ errorToMessage,
+ isRetryable
+ }) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const responseHeaders = extractResponseHeaders(response);
+ if (responseBody.trim() === "") {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ try {
+ const parsedError = await parseJSON({
+ text: responseBody,
+ schema: errorSchema
+ });
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: errorToMessage(parsedError),
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ data: parsedError,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
+ })
+ };
+ } catch (parseError) {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ };
+ var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
+ const responseHeaders = extractResponseHeaders(response);
+ if (response.body == null) {
+ throw new EmptyResponseBodyError({});
+ }
+ return {
+ responseHeaders,
+ value: parseJsonEventStream({
+ stream: response.body,
+ schema: chunkSchema
+ })
+ };
+ };
+ var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const parsedResult = await safeParseJSON({
+ text: responseBody,
+ schema: responseSchema
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!parsedResult.success) {
+ throw new APICallError({
+ message: "Invalid JSON response",
+ cause: parsedResult.error,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ url,
+ requestBodyValues
+ });
+ }
+ return {
+ responseHeaders,
+ value: parsedResult.value,
+ rawValue: parsedResult.rawValue
+ };
+ };
+ var schemaSymbol = Symbol.for("vercel.ai.schema");
+ var { btoa, atob } = globalThis;
+ function convertUint8ArrayToBase64(array) {
+ let latin1string = "";
+ for (let i = 0; i < array.length; i++) {
+ latin1string += String.fromCodePoint(array[i]);
+ }
+ return btoa(latin1string);
+ }
+
  // src/schemas/reasoning-details.ts
- import { z } from "zod";
+ import { z } from "zod/v4";
  var ReasoningDetailSummarySchema = z.object({
  type: z.literal("reasoning.summary" /* Summary */),
  summary: z.string()
@@ -56,56 +866,112 @@ var ReasoningDetailsWithUnknownSchema = z.union([
  ]);
  var ReasoningDetailArraySchema = z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
 
- // src/llmgateway-chat-language-model.ts
- import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError
- } from "@ai-sdk/provider";
- import {
- combineHeaders,
- createEventSourceResponseHandler,
- createJsonResponseHandler,
- generateId,
- isParsableJson,
- postJsonToApi
- } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ // src/schemas/error-response.ts
+ import { z as z2 } from "zod/v4";
+ var LLMGatewayErrorResponseSchema = z2.object({
+ error: z2.object({
+ code: z2.union([z2.string(), z2.number()]).nullable().optional().default(null),
+ message: z2.string(),
+ type: z2.string().nullable().optional().default(null),
+ param: z2.any().nullable().optional().default(null)
+ })
+ });
+ var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
+ errorSchema: LLMGatewayErrorResponseSchema,
+ errorToMessage: (data) => data.error.message
+ });
+
+ // src/utils/map-finish-reason.ts
+ function mapLLMGatewayFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
+
+ // src/chat/is-url.ts
+ function isUrl({
+ url,
+ protocols
+ }) {
+ try {
+ const urlObj = new URL(url);
+ return protocols.has(urlObj.protocol);
+ } catch (_) {
+ return false;
+ }
+ }
 
- // src/convert-to-llmgateway-chat-messages.ts
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ // src/chat/file-url-utils.ts
+ function getFileUrl({
+ part,
+ defaultMediaType
+ }) {
+ var _a15, _b;
+ if (part.data instanceof Uint8Array) {
+ const base64 = convertUint8ArrayToBase64(part.data);
+ return `data:${(_a15 = part.mediaType) != null ? _a15 : defaultMediaType};base64,${base64}`;
+ }
+ const stringUrl = part.data.toString();
+ if (isUrl({
+ url: stringUrl,
+ protocols: /* @__PURE__ */ new Set(["http:", "https:"])
+ })) {
+ return stringUrl;
+ }
+ return stringUrl.startsWith("data:") ? stringUrl : `data:${(_b = part.mediaType) != null ? _b : defaultMediaType};base64,${stringUrl}`;
+ }
+
+ // src/chat/convert-to-llmgateway-chat-messages.ts
  function getCacheControl(providerMetadata) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
  const llmgateway = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
- return (_c = (_b = (_a = llmgateway == null ? void 0 : llmgateway.cacheControl) != null ? _a : llmgateway == null ? void 0 : llmgateway.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
+ return (_c = (_b = (_a15 = llmgateway == null ? void 0 : llmgateway.cacheControl) != null ? _a15 : llmgateway == null ? void 0 : llmgateway.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
  }
  function convertToLLMGatewayChatMessages(prompt) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const messages = [];
- for (const { role, content, providerMetadata } of prompt) {
+ for (const { role, content, providerOptions } of prompt) {
  switch (role) {
  case "system": {
  messages.push({
  role: "system",
  content,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  });
  break;
  }
  case "user": {
- if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
+ if (content.length === 1 && ((_a15 = content[0]) == null ? void 0 : _a15.type) === "text") {
+ const cacheControl = (_b = getCacheControl(providerOptions)) != null ? _b : getCacheControl(content[0].providerOptions);
+ const contentWithCacheControl = cacheControl ? [
+ {
+ type: "text",
+ text: content[0].text,
+ cache_control: cacheControl
+ }
+ ] : content[0].text;
  messages.push({
  role: "user",
- content: content[0].text,
- cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
+ content: contentWithCacheControl
  });
  break;
  }
- const messageCacheControl = getCacheControl(providerMetadata);
+ const messageCacheControl = getCacheControl(providerOptions);
  const contentParts = content.map(
  (part) => {
- var _a2, _b2, _c2, _d;
- const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
+ var _a16, _b2, _c2, _d, _e, _f;
+ const cacheControl = (_a16 = getCacheControl(part.providerOptions)) != null ? _a16 : messageCacheControl;
  switch (part.type) {
  case "text":
  return {
@@ -114,33 +980,55 @@ function convertToLLMGatewayChatMessages(prompt) {
  // For text parts, only use part-specific cache control
  cache_control: cacheControl
  };
- case "image":
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${convertUint8ArrayToBase64(
- part.image
- )}`
- },
- // For image parts, use part-specific or message-level cache control
- cache_control: cacheControl
- };
- case "file":
+ case "file": {
+ if ((_b2 = part.mediaType) == null ? void 0 : _b2.startsWith("image/")) {
+ const url = getFileUrl({
+ part,
+ defaultMediaType: "image/jpeg"
+ });
+ return {
+ type: "image_url",
+ image_url: {
+ url
+ },
+ // For image parts, use part-specific or message-level cache control
+ cache_control: cacheControl
+ };
+ }
+ const fileName = String(
+ (_f = (_e = (_d = (_c2 = part.providerOptions) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename) != null ? _e : part.filename) != null ? _f : ""
+ );
+ const fileData = getFileUrl({
+ part,
+ defaultMediaType: "application/pdf"
+ });
+ if (isUrl({
+ url: fileData,
+ protocols: /* @__PURE__ */ new Set(["http:", "https:"])
+ })) {
+ return {
+ type: "file",
+ file: {
+ filename: fileName,
+ file_data: fileData
+ }
+ };
+ }
  return {
  type: "file",
  file: {
- filename: String(
- (_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
- ),
- file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${convertUint8ArrayToBase64(part.data)}` : `data:${part.mimeType};base64,${part.data}`
+ filename: fileName,
+ file_data: fileData
  },
  cache_control: cacheControl
  };
+ }
  default: {
- const _exhaustiveCheck = part;
- throw new Error(
- `Unsupported content part type: ${_exhaustiveCheck}`
- );
+ return {
+ type: "text",
+ text: "",
+ cache_control: cacheControl
+ };
  }
  }
  }
@@ -168,7 +1056,7 @@ function convertToLLMGatewayChatMessages(prompt) {
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -177,23 +1065,14 @@ function convertToLLMGatewayChatMessages(prompt) {
  reasoning += part.text;
  reasoningDetails.push({
  type: "reasoning.text" /* Text */,
- text: part.text,
- signature: part.signature
- });
- break;
- }
- case "redacted-reasoning": {
- reasoningDetails.push({
- type: "reasoning.encrypted" /* Encrypted */,
- data: part.data
+ text: part.text
  });
  break;
  }
  case "file":
  break;
  default: {
- const _exhaustiveCheck = part;
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
@@ -203,95 +1082,188 @@ function convertToLLMGatewayChatMessages(prompt) {
  tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
  reasoning: reasoning || void 0,
  reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
+ const content2 = getToolResultContent(toolResponse);
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result),
- cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
+ content: content2,
+ cache_control: (_c = getCacheControl(providerOptions)) != null ? _c : getCacheControl(toolResponse.providerOptions)
  });
  }
  break;
  }
  default: {
- const _exhaustiveCheck = role;
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
  return messages;
  }
-
- // src/map-llmgateway-chat-logprobs.ts
- function mapLLMGatewayChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ function getToolResultContent(input) {
+ return input.output.type === "text" ? input.output.value : JSON.stringify(input.output.value);
  }
 
- // src/map-llmgateway-finish-reason.ts
- function mapLLMGatewayFinishReason(finishReason) {
- switch (finishReason) {
- case "stop":
- return "stop";
- case "length":
- return "length";
- case "content_filter":
- return "content-filter";
- case "function_call":
- case "tool_calls":
- return "tool-calls";
- default:
- return "unknown";
+ // src/chat/get-tool-choice.ts
+ import { z as z3 } from "zod/v4";
+ var ChatCompletionToolChoiceSchema = z3.union([
+ z3.literal("auto"),
+ z3.literal("none"),
+ z3.literal("required"),
+ z3.object({
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string()
+ })
+ })
+ ]);
+ function getChatCompletionToolChoice(toolChoice) {
+ switch (toolChoice.type) {
+ case "auto":
+ case "none":
+ case "required":
+ return toolChoice.type;
+ case "tool": {
+ return {
+ type: "function",
+ function: { name: toolChoice.toolName }
+ };
+ }
+ default: {
+ toolChoice;
+ throw new Error(`Invalid tool choice type: ${toolChoice}`);
+ }
  }
  }
 
- // src/llmgateway-error.ts
- import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
- var LLMGatewayErrorResponseSchema = z2.object({
- error: z2.object({
- message: z2.string(),
- type: z2.string(),
- param: z2.any().nullable(),
- code: z2.string().nullable()
- })
+ // src/chat/schemas.ts
+ import { z as z5 } from "zod/v4";
+ var LLMGatewayChatCompletionBaseResponseSchema = z5.object({
+ id: z5.string().optional(),
+ model: z5.string().optional(),
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ prompt_tokens_details: z5.object({
+ cached_tokens: z5.number()
+ }).nullish(),
+ completion_tokens: z5.number(),
+ completion_tokens_details: z5.object({
+ reasoning_tokens: z5.number()
+ }).nullish(),
+ total_tokens: z5.number(),
+ cost: z5.number().optional(),
+ cost_details: z5.object({
+ upstream_inference_cost: z5.number().nullish()
+ }).nullish()
+ }).nullish()
  });
- var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
- errorSchema: LLMGatewayErrorResponseSchema,
- errorToMessage: (data) => data.error.message
+ var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
+ choices: z5.array(
+ z5.object({
+ message: z5.object({
+ role: z5.literal("assistant"),
+ content: z5.string().nullable().optional(),
+ reasoning: z5.string().nullable().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ id: z5.string().optional().nullable(),
+ type: z5.literal("function"),
+ function: z5.object({
+ name: z5.string(),
+ arguments: z5.string()
+ })
+ })
+ ).optional()
+ }),
+ index: z5.number().nullish(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullable().optional(),
+ finish_reason: z5.string().optional().nullable()
+ })
+ )
  });
+ var LLMGatewayStreamChatCompletionChunkSchema = z5.union([
+ LLMGatewayChatCompletionBaseResponseSchema.extend({
+ choices: z5.array(
+ z5.object({
+ delta: z5.object({
+ role: z5.enum(["assistant"]).optional(),
+ content: z5.string().nullish(),
+ reasoning: z5.string().nullish().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ index: z5.number().nullish(),
+ id: z5.string().nullish(),
+ type: z5.literal("function").optional(),
+ function: z5.object({
+ name: z5.string().nullish(),
+ arguments: z5.string().nullish()
+ })
+ })
+ ).nullish()
+ }).nullish(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullish(),
+ finish_reason: z5.string().nullable().optional(),
+ index: z5.number().nullish()
+ })
+ )
+ }),
+ LLMGatewayErrorResponseSchema
+ ]);
 
- // src/llmgateway-chat-language-model.ts
- function isFunctionTool(tool) {
- return "parameters" in tool;
- }
+ // src/chat/index.ts
  var LLMGatewayChatLanguageModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.provider = "llmgateway";
  this.defaultObjectGenerationMode = "tool";
+ this.supportedUrls = {
+ "image/*": [
+ /^data:image\/[a-zA-Z]+;base64,/,
+ /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
+ ],
+ // 'text/*': [/^data:text\//, /^https?:\/\/.+$/],
+ "application/*": [/^data:application\//, /^https?:\/\/.+$/]
+ };
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
- get provider() {
- return this.config.provider;
- }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  frequencyPenalty,
@@ -300,12 +1272,10 @@ var LLMGatewayChatLanguageModel = class {
  stopSequences,
  responseFormat,
  topK,
- providerMetadata
+ tools,
+ toolChoice
  }) {
- var _a;
- const type = mode.type;
- const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
- const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+ const baseArgs = __spreadValues(__spreadValues({
  // model id:
  model: this.modelId,
  models: this.settings.models,
@@ -316,7 +1286,7 @@ var LLMGatewayChatLanguageModel = class {
  user: this.settings.user,
  parallel_tool_calls: this.settings.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -331,44 +1301,34 @@ var LLMGatewayChatLanguageModel = class {
  include_reasoning: this.settings.includeReasoning,
  reasoning: this.settings.reasoning,
  usage: this.settings.usage
- }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
- switch (type) {
- case "regular": {
- return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
- }
- case "object-json": {
- return __spreadProps(__spreadValues({}, baseArgs), {
- response_format: { type: "json_object" }
- });
- }
- case "object-tool": {
- return __spreadProps(__spreadValues({}, baseArgs), {
- tool_choice: { type: "function", function: { name: mode.tool.name } },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- });
- }
- // Handle all non-text types with a single default case
- default: {
- const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError({
- functionality: `${_exhaustiveCheck} mode`
- });
- }
+ }, this.config.extraBody), this.settings.extraBody);
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
+ return __spreadProps(__spreadValues({}, baseArgs), {
+ response_format: { type: "json_object" }
+ });
  }
+ if (tools && tools.length > 0) {
+ const mappedTools = tools.filter((tool) => tool.type === "function").map((tool) => ({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.type,
+ parameters: tool.inputSchema
+ }
+ }));
+ return __spreadProps(__spreadValues({}, baseArgs), {
+ tools: mappedTools,
+ tool_choice: toolChoice ? getChatCompletionToolChoice(toolChoice) : void 0
+ });
+ }
+ return baseArgs;
  }
  async doGenerate(options) {
- var _b, _c, _d, _e, _f, _g, _h, _i, _j;
- const args = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi({
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
+ const providerOptions = options.providerOptions || {};
+ const llmgatewayOptions = providerOptions.llmgateway || {};
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+ const { value: response, responseHeaders } = await postJsonToApi({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
@@ -382,46 +1342,31 @@ var LLMGatewayChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
  const choice = response.choices[0];
  if (!choice) {
  throw new Error("No choice in response");
  }
  const usageInfo = response.usage ? {
- promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
- completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
+ inputTokens: (_a15 = response.usage.prompt_tokens) != null ? _a15 : 0,
+ outputTokens: (_b = response.usage.completion_tokens) != null ? _b : 0,
+ totalTokens: ((_c = response.usage.prompt_tokens) != null ? _c : 0) + ((_d = response.usage.completion_tokens) != null ? _d : 0),
+ reasoningTokens: (_f = (_e = response.usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0,
+ cachedInputTokens: (_h = (_g = response.usage.prompt_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : 0
  } : {
- promptTokens: 0,
- completionTokens: 0
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ reasoningTokens: 0,
+ cachedInputTokens: 0
  };
- const providerMetadata = {};
- if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
- providerMetadata.llmgateway = {
- usage: {
- promptTokens: response.usage.prompt_tokens,
- promptTokensDetails: response.usage.prompt_tokens_details ? {
- cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
- } : void 0,
- completionTokens: response.usage.completion_tokens,
- completionTokensDetails: response.usage.completion_tokens_details ? {
- reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
- } : void 0,
- cost: response.usage.cost,
- totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
- }
- };
- }
- const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
- const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
+ const reasoningDetails = (_i = choice.message.reasoning_details) != null ? _i : [];
  const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
- var _a2;
  switch (detail.type) {
  case "reasoning.text" /* Text */: {
  if (detail.text) {
  return {
- type: "text",
- text: detail.text,
- signature: (_a2 = detail.signature) != null ? _a2 : void 0
+ type: "reasoning",
+ text: detail.text
  };
  }
  break;
@@ -429,7 +1374,7 @@ var LLMGatewayChatLanguageModel = class {
  case "reasoning.summary" /* Summary */: {
  if (detail.summary) {
  return {
- type: "text",
+ type: "reasoning",
  text: detail.summary
  };
  }
@@ -438,51 +1383,80 @@ var LLMGatewayChatLanguageModel = class {
  case "reasoning.encrypted" /* Encrypted */: {
  if (detail.data) {
  return {
- type: "redacted",
- data: detail.data
+ type: "reasoning",
+ text: "[REDACTED]"
  };
  }
  break;
  }
  default: {
- detail;
+ const _exhaustiveCheck = detail;
+ return _exhaustiveCheck;
  }
  }
  return null;
  }).filter((p) => p !== null) : choice.message.reasoning ? [
  {
- type: "text",
+ type: "reasoning",
  text: choice.message.reasoning
  }
  ] : [];
- return __spreadValues({
- response: {
- id: response.id,
- modelId: response.model
- },
- text: (_i = choice.message.content) != null ? _i : void 0,
- reasoning,
- toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+ const content = [];
+ content.push(...reasoning);
+ if (choice.message.content) {
+ content.push({
+ type: "text",
+ text: choice.message.content
+ });
+ }
+ if (choice.message.tool_calls) {
+ for (const toolCall of choice.message.tool_calls) {
+ content.push({
+ type: "tool-call",
+ toolCallId: (_j = toolCall.id) != null ? _j : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ input: toolCall.function.arguments
+ });
+ }
+ }
+ return {
+ content,
  finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
  usage: usageInfo,
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
  warnings: [],
- logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
- }, hasProviderMetadata ? { providerMetadata } : {});
+ providerMetadata: {
+ llmgateway: {
+ usage: {
+ promptTokens: (_k = usageInfo.inputTokens) != null ? _k : 0,
+ completionTokens: (_l = usageInfo.outputTokens) != null ? _l : 0,
+ totalTokens: (_m = usageInfo.totalTokens) != null ? _m : 0,
+ cost: (_n = response.usage) == null ? void 0 : _n.cost,
+ promptTokensDetails: {
+ cachedTokens: (_q = (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : 0
+ },
+ completionTokensDetails: {
+ reasoningTokens: (_t = (_s = (_r = response.usage) == null ? void 0 : _r.completion_tokens_details) == null ? void 0 : _s.reasoning_tokens) != null ? _t : 0
+ },
+ costDetails: {
+ upstreamInferenceCost: (_w = (_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? _w : 0
+ }
+ }
+ }
+ },
+ request: { body: args },
+ response: {
+ id: response.id,
+ modelId: response.model,
+ headers: responseHeaders
+ }
+ };
  }
  async doStream(options) {
- var _a, _c;
- const args = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi({
+ var _a15;
+ const providerOptions = options.providerOptions || {};
+ const llmgatewayOptions = providerOptions.llmgateway || {};
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+ const { value: response, responseHeaders } = await postJsonToApi({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
@@ -493,7 +1467,7 @@ var LLMGatewayChatLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? __spreadValues({
  include_usage: true
- }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
+ }, ((_a15 = this.settings.usage) == null ? void 0 : _a15.include) ? { include_usage: true } : {}) : void 0
  }),
  failedResponseHandler: llmgatewayFailedResponseHandler,
  successfulResponseHandler: createEventSourceResponseHandler(
@@ -502,21 +1476,26 @@ var LLMGatewayChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
  const toolCalls = [];
  let finishReason = "other";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: Number.NaN,
+ outputTokens: Number.NaN,
+ totalTokens: Number.NaN,
+ reasoningTokens: Number.NaN,
+ cachedInputTokens: Number.NaN
  };
- let logprobs;
  const llmgatewayUsage = {};
- const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
+ let textStarted = false;
+ let reasoningStarted = false;
+ let textId;
+ let reasoningId;
+ let llmgatewayResponseId;
  return {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
- var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -529,6 +1508,7 @@ var LLMGatewayChatLanguageModel = class {
                return;
              }
              if (value.id) {
+               llmgatewayResponseId = value.id;
                controller.enqueue({
                  type: "response-metadata",
                  id: value.id
@@ -541,20 +1521,23 @@ var LLMGatewayChatLanguageModel = class {
                });
              }
              if (value.usage != null) {
-               usage = {
-                 promptTokens: value.usage.prompt_tokens,
-                 completionTokens: value.usage.completion_tokens
-               };
+               usage.inputTokens = value.usage.prompt_tokens;
+               usage.outputTokens = value.usage.completion_tokens;
+               usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
                llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
                if (value.usage.prompt_tokens_details) {
+                 const cachedInputTokens = (_a16 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a16 : 0;
+                 usage.cachedInputTokens = cachedInputTokens;
                  llmgatewayUsage.promptTokensDetails = {
-                   cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
+                   cachedTokens: cachedInputTokens
                  };
                }
                llmgatewayUsage.completionTokens = value.usage.completion_tokens;
                if (value.usage.completion_tokens_details) {
+                 const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
+                 usage.reasoningTokens = reasoningTokens;
                  llmgatewayUsage.completionTokensDetails = {
-                   reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
+                   reasoningTokens
                  };
                }
                llmgatewayUsage.cost = value.usage.cost;
@@ -568,51 +1551,39 @@ var LLMGatewayChatLanguageModel = class {
                return;
              }
              const delta = choice.delta;
-             if (delta.content != null) {
-               controller.enqueue({
-                 type: "text-delta",
-                 textDelta: delta.content
-               });
-             }
-             if (delta.reasoning != null) {
+             const emitReasoningChunk = (chunkText) => {
+               if (!reasoningStarted) {
+                 reasoningId = llmgatewayResponseId || generateId();
+                 controller.enqueue({
+                   type: "reasoning-start",
+                   id: reasoningId
+                 });
+                 reasoningStarted = true;
+               }
                controller.enqueue({
-                 type: "reasoning",
-                 textDelta: delta.reasoning
+                 type: "reasoning-delta",
+                 delta: chunkText,
+                 id: reasoningId || generateId()
                });
-             }
+             };
              if (delta.reasoning_details && delta.reasoning_details.length > 0) {
                for (const detail of delta.reasoning_details) {
                  switch (detail.type) {
                    case "reasoning.text" /* Text */: {
                      if (detail.text) {
-                       controller.enqueue({
-                         type: "reasoning",
-                         textDelta: detail.text
-                       });
-                     }
-                     if (detail.signature) {
-                       controller.enqueue({
-                         type: "reasoning-signature",
-                         signature: detail.signature
-                       });
+                       emitReasoningChunk(detail.text);
                      }
                      break;
                    }
                    case "reasoning.encrypted" /* Encrypted */: {
                      if (detail.data) {
-                       controller.enqueue({
-                         type: "redacted-reasoning",
-                         data: detail.data
-                       });
+                       emitReasoningChunk("[REDACTED]");
                      }
                      break;
                    }
                    case "reasoning.summary" /* Summary */: {
                      if (detail.summary) {
-                       controller.enqueue({
-                         type: "reasoning",
-                         textDelta: detail.summary
-                       });
+                       emitReasoningChunk(detail.summary);
                      }
                      break;
                    }
@@ -622,19 +1593,27 @@ var LLMGatewayChatLanguageModel = class {
                  }
                }
              }
+             } else if (delta.reasoning != null) {
+               emitReasoningChunk(delta.reasoning);
              }
-             const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
-               choice == null ? void 0 : choice.logprobs
-             );
-             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-               if (logprobs === void 0) {
-                 logprobs = [];
+             if (delta.content != null) {
+               if (!textStarted) {
+                 textId = llmgatewayResponseId || generateId();
+                 controller.enqueue({
+                   type: "text-start",
+                   id: textId
+                 });
+                 textStarted = true;
                }
-               logprobs.push(...mappedLogprobs);
+               controller.enqueue({
+                 type: "text-delta",
+                 delta: delta.content,
+                 id: textId || generateId()
+               });
              }
              if (delta.tool_calls != null) {
                for (const toolCallDelta of delta.tool_calls) {
-                 const index = toolCallDelta.index;
+                 const index = (_c = toolCallDelta.index) != null ? _c : toolCalls.length - 1;
                  if (toolCalls[index] == null) {
                    if (toolCallDelta.type !== "function") {
                      throw new InvalidResponseDataError({
@@ -648,7 +1627,7 @@ var LLMGatewayChatLanguageModel = class {
                        message: `Expected 'id' to be a string.`
                      });
                    }
-                   if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
+                   if (((_d = toolCallDelta.function) == null ? void 0 : _d.name) == null) {
                      throw new InvalidResponseDataError({
                        data: toolCallDelta,
                        message: `Expected 'function.name' to be a string.`
@@ -659,28 +1638,36 @@ var LLMGatewayChatLanguageModel = class {
                      type: "function",
                      function: {
                        name: toolCallDelta.function.name,
-                       arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                       arguments: (_e = toolCallDelta.function.arguments) != null ? _e : ""
                      },
+                     inputStarted: false,
                      sent: false
                    };
                    const toolCall2 = toolCalls[index];
                    if (toolCall2 == null) {
                      throw new Error("Tool call is missing");
                    }
-                   if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
+                   if (((_f = toolCall2.function) == null ? void 0 : _f.name) != null && ((_g = toolCall2.function) == null ? void 0 : _g.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
+                     toolCall2.inputStarted = true;
                      controller.enqueue({
-                       type: "tool-call-delta",
-                       toolCallType: "function",
-                       toolCallId: toolCall2.id,
-                       toolName: toolCall2.function.name,
-                       argsTextDelta: toolCall2.function.arguments
+                       type: "tool-input-start",
+                       id: toolCall2.id,
+                       toolName: toolCall2.function.name
+                     });
+                     controller.enqueue({
+                       type: "tool-input-delta",
+                       id: toolCall2.id,
+                       delta: toolCall2.function.arguments
+                     });
+                     controller.enqueue({
+                       type: "tool-input-end",
+                       id: toolCall2.id
                      });
                      controller.enqueue({
                        type: "tool-call",
-                       toolCallType: "function",
-                       toolCallId: (_g = toolCall2.id) != null ? _g : generateId(),
+                       toolCallId: toolCall2.id,
                        toolName: toolCall2.function.name,
-                       args: toolCall2.function.arguments
+                       input: toolCall2.function.arguments
                      });
                      toolCall2.sent = true;
                    }
@@ -690,23 +1677,28 @@ var LLMGatewayChatLanguageModel = class {
                  if (toolCall == null) {
                    throw new Error("Tool call is missing");
                  }
+                 if (!toolCall.inputStarted) {
+                   toolCall.inputStarted = true;
+                   controller.enqueue({
+                     type: "tool-input-start",
+                     id: toolCall.id,
+                     toolName: toolCall.function.name
+                   });
+                 }
                  if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
                    toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
                  }
                  controller.enqueue({
-                   type: "tool-call-delta",
-                   toolCallType: "function",
-                   toolCallId: toolCall.id,
-                   toolName: toolCall.function.name,
-                   argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                   type: "tool-input-delta",
+                   id: toolCall.id,
+                   delta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
                  });
                  if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                    controller.enqueue({
                      type: "tool-call",
-                     toolCallType: "function",
                      toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
                      toolName: toolCall.function.name,
-                     args: toolCall.function.arguments
+                     input: toolCall.function.arguments
                    });
                    toolCall.sent = true;
                  }
@@ -714,206 +1706,54 @@ var LLMGatewayChatLanguageModel = class {
                }
              }
            },
            flush(controller) {
-             var _a2;
+             var _a16;
              if (finishReason === "tool-calls") {
                for (const toolCall of toolCalls) {
-                 if (!toolCall.sent) {
+                 if (toolCall && !toolCall.sent) {
                    controller.enqueue({
                      type: "tool-call",
-                     toolCallType: "function",
-                     toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+                     toolCallId: (_a16 = toolCall.id) != null ? _a16 : generateId(),
                      toolName: toolCall.function.name,
                      // Coerce invalid arguments to an empty JSON object
-                     args: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
+                     input: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
                    });
                    toolCall.sent = true;
                  }
                }
              }
-             const providerMetadata = {};
-             if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
-               providerMetadata.llmgateway = {
-                 usage: llmgatewayUsage
-               };
+             if (textStarted) {
+               controller.enqueue({
+                 type: "text-end",
+                 id: textId || generateId()
+               });
+             }
+             if (reasoningStarted) {
+               controller.enqueue({
+                 type: "reasoning-end",
+                 id: reasoningId || generateId()
+               });
              }
-             const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
-             controller.enqueue(__spreadValues({
+             controller.enqueue({
                type: "finish",
                finishReason,
-               logprobs,
-               usage
-             }, hasProviderMetadata ? { providerMetadata } : {}));
+               usage,
+               providerMetadata: {
+                 llmgateway: {
+                   usage: llmgatewayUsage
+                 }
+               }
+             });
            }
          })
        ),
-       rawCall: { rawPrompt, rawSettings },
-       rawResponse: { headers: responseHeaders },
-       warnings: []
+       warnings: [],
+       request: { body: args },
+       response: { headers: responseHeaders }
      };
    }
  };
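
For orientation, a minimal sketch of consuming the v2 stream parts that the rewritten doStream above enqueues. This is not part of the package: `model` is assumed to be an already-constructed LLMGatewayChatLanguageModel, and the prompt follows the LanguageModelV2 call shape.

// Sketch only: read parts off the ReadableStream returned by doStream above.
const { stream } = await model.doStream({
  prompt: [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
});
const reader = stream.getReader();
for (;;) {
  const { done, value: part } = await reader.read();
  if (done) break;
  switch (part.type) {
    case "text-delta":
    case "reasoning-delta":
      // v2 parts carry `delta` plus a block `id`; v1 used a single `textDelta`.
      process.stdout.write(part.delta);
      break;
    case "tool-call":
      // v2 renames `args` to `input` and drops `toolCallType`.
      console.log(part.toolName, part.input);
      break;
    case "finish":
      // v2 usage reports inputTokens/outputTokens/totalTokens (plus
      // reasoningTokens and cachedInputTokens); gateway accounting arrives
      // under part.providerMetadata.llmgateway.usage.
      console.log(part.finishReason, part.usage, part.providerMetadata);
      break;
  }
}
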
- var LLMGatewayChatCompletionBaseResponseSchema = z3.object({
-   id: z3.string().optional(),
-   model: z3.string().optional(),
-   usage: z3.object({
-     prompt_tokens: z3.number(),
-     prompt_tokens_details: z3.object({
-       cached_tokens: z3.number()
-     }).nullish(),
-     completion_tokens: z3.number(),
-     completion_tokens_details: z3.object({
-       reasoning_tokens: z3.number()
-     }).nullish(),
-     total_tokens: z3.number(),
-     cost: z3.number().optional()
-   }).nullish()
- });
- var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
-   choices: z3.array(
-     z3.object({
-       message: z3.object({
-         role: z3.literal("assistant"),
-         content: z3.string().nullable().optional(),
-         reasoning: z3.string().nullable().optional(),
-         reasoning_details: ReasoningDetailArraySchema.nullish(),
-         tool_calls: z3.array(
-           z3.object({
-             id: z3.string().optional().nullable(),
-             type: z3.literal("function"),
-             function: z3.object({
-               name: z3.string(),
-               arguments: z3.string()
-             })
-           })
-         ).optional()
-       }),
-       index: z3.number(),
-       logprobs: z3.object({
-         content: z3.array(
-           z3.object({
-             token: z3.string(),
-             logprob: z3.number(),
-             top_logprobs: z3.array(
-               z3.object({
-                 token: z3.string(),
-                 logprob: z3.number()
-               })
-             )
-           })
-         ).nullable()
-       }).nullable().optional(),
-       finish_reason: z3.string().optional().nullable()
-     })
-   )
- });
- var LLMGatewayStreamChatCompletionChunkSchema = z3.union([
-   LLMGatewayChatCompletionBaseResponseSchema.extend({
-     choices: z3.array(
-       z3.object({
-         delta: z3.object({
-           role: z3.enum(["assistant"]).optional(),
-           content: z3.string().nullish(),
-           reasoning: z3.string().nullish().optional(),
-           reasoning_details: ReasoningDetailArraySchema.nullish(),
-           tool_calls: z3.array(
-             z3.object({
-               index: z3.number(),
-               id: z3.string().nullish(),
-               type: z3.literal("function").optional(),
-               function: z3.object({
-                 name: z3.string().nullish(),
-                 arguments: z3.string().nullish()
-               })
-             })
-           ).nullish()
-         }).nullish(),
-         logprobs: z3.object({
-           content: z3.array(
-             z3.object({
-               token: z3.string(),
-               logprob: z3.number(),
-               top_logprobs: z3.array(
-                 z3.object({
-                   token: z3.string(),
-                   logprob: z3.number()
-                 })
-               )
-             })
-           ).nullable()
-         }).nullish(),
-         finish_reason: z3.string().nullable().optional(),
-         index: z3.number()
-       })
-     )
-   }),
-   LLMGatewayErrorResponseSchema
- ]);
- function prepareToolsAndToolChoice(mode) {
-   var _a;
-   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
-   if (tools == null) {
-     return { tools: void 0, tool_choice: void 0 };
-   }
-   const mappedTools = tools.map((tool) => {
-     if (isFunctionTool(tool)) {
-       return {
-         type: "function",
-         function: {
-           name: tool.name,
-           description: tool.description,
-           parameters: tool.parameters
-         }
-       };
-     }
-     return {
-       type: "function",
-       function: {
-         name: tool.name
-       }
-     };
-   });
-   const toolChoice = mode.toolChoice;
-   if (toolChoice == null) {
-     return { tools: mappedTools, tool_choice: void 0 };
-   }
-   const type = toolChoice.type;
-   switch (type) {
-     case "auto":
-     case "none":
-     case "required":
-       return { tools: mappedTools, tool_choice: type };
-     case "tool":
-       return {
-         tools: mappedTools,
-         tool_choice: {
-           type: "function",
-           function: {
-             name: toolChoice.toolName
-           }
-         }
-       };
-     default: {
-       const _exhaustiveCheck = type;
-       throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
-     }
-   }
- }
-
- // src/llmgateway-completion-language-model.ts
- import { UnsupportedFunctionalityError as UnsupportedFunctionalityError3 } from "@ai-sdk/provider";
- import {
-   combineHeaders as combineHeaders2,
-   createEventSourceResponseHandler as createEventSourceResponseHandler2,
-   createJsonResponseHandler as createJsonResponseHandler2,
-   postJsonToApi as postJsonToApi2
- } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";

- // src/convert-to-llmgateway-completion-prompt.ts
- import {
-   InvalidPromptError,
-   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
- } from "@ai-sdk/provider";
+ // src/completion/convert-to-llmgateway-completion-prompt.ts
  function convertToLLMGatewayCompletionPrompt({
    prompt,
    inputFormat,
@@ -934,7 +1774,7 @@ function convertToLLMGatewayCompletionPrompt({
      switch (role) {
        case "system": {
          throw new InvalidPromptError({
-           message: "Unexpected system message in prompt: ${content}",
+           message: `Unexpected system message in prompt: ${content}`,
            prompt
          });
        }
@@ -944,21 +1784,13 @@ function convertToLLMGatewayCompletionPrompt({
              case "text": {
                return part.text;
              }
-             case "image": {
-               throw new UnsupportedFunctionalityError2({
-                 functionality: "images"
-               });
-             }
              case "file": {
-               throw new UnsupportedFunctionalityError2({
+               throw new UnsupportedFunctionalityError({
                  functionality: "file attachments"
                });
              }
              default: {
-               const _exhaustiveCheck = part;
-               throw new Error(
-                 `Unsupported content type: ${_exhaustiveCheck}`
-               );
+               return "";
              }
            }
          }).join("");
@@ -969,39 +1801,38 @@ ${userMessage}
          break;
        }
        case "assistant": {
-         const assistantMessage = content.map((part) => {
-           switch (part.type) {
-             case "text": {
-               return part.text;
-             }
-             case "tool-call": {
-               throw new UnsupportedFunctionalityError2({
-                 functionality: "tool-call messages"
-               });
-             }
-             case "reasoning": {
-               throw new UnsupportedFunctionalityError2({
-                 functionality: "reasoning messages"
-               });
-             }
-             case "redacted-reasoning": {
-               throw new UnsupportedFunctionalityError2({
-                 functionality: "redacted reasoning messages"
-               });
-             }
-             case "file": {
-               throw new UnsupportedFunctionalityError2({
-                 functionality: "file attachments"
-               });
-             }
-             default: {
-               const _exhaustiveCheck = part;
-               throw new Error(
-                 `Unsupported content type: ${_exhaustiveCheck}`
-               );
+         const assistantMessage = content.map(
+           (part) => {
+             switch (part.type) {
+               case "text": {
+                 return part.text;
+               }
+               case "tool-call": {
+                 throw new UnsupportedFunctionalityError({
+                   functionality: "tool-call messages"
+                 });
+               }
+               case "tool-result": {
+                 throw new UnsupportedFunctionalityError({
+                   functionality: "tool-result messages"
+                 });
+               }
+               case "reasoning": {
+                 throw new UnsupportedFunctionalityError({
+                   functionality: "reasoning messages"
+                 });
+               }
+               case "file": {
+                 throw new UnsupportedFunctionalityError({
+                   functionality: "file attachments"
+                 });
+               }
+               default: {
+                 return "";
+               }
              }
            }
-         }).join("");
+         ).join("");
          text += `${assistant}:
${assistantMessage}

@@ -1009,13 +1840,12 @@ ${assistantMessage}
          break;
        }
        case "tool": {
-         throw new UnsupportedFunctionalityError2({
+         throw new UnsupportedFunctionalityError({
            functionality: "tool messages"
          });
        }
        default: {
-         const _exhaustiveCheck = role;
-         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+         break;
        }
      }
    }
@@ -1026,40 +1856,63 @@ ${assistantMessage}
    };
  }

- // src/map-llmgateway-completion-logprobs.ts
- function mapLLMGatewayCompletionLogprobs(logprobs) {
-   return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => {
-     var _a, _b;
-     return {
-       token,
-       logprob: (_a = logprobs.token_logprobs[index]) != null ? _a : 0,
-       topLogprobs: logprobs.top_logprobs ? Object.entries((_b = logprobs.top_logprobs[index]) != null ? _b : {}).map(
-         ([token2, logprob]) => ({
-           token: token2,
-           logprob
-         })
-       ) : []
-     };
-   });
- }
+ // src/completion/schemas.ts
+ import { z as z6 } from "zod/v4";
+ var LLMGatewayCompletionChunkSchema = z6.union([
+   z6.object({
+     id: z6.string().optional(),
+     model: z6.string().optional(),
+     choices: z6.array(
+       z6.object({
+         text: z6.string(),
+         reasoning: z6.string().nullish().optional(),
+         reasoning_details: ReasoningDetailArraySchema.nullish(),
+         finish_reason: z6.string().nullish(),
+         index: z6.number().nullish(),
+         logprobs: z6.object({
+           tokens: z6.array(z6.string()),
+           token_logprobs: z6.array(z6.number()),
+           top_logprobs: z6.array(z6.record(z6.string(), z6.number())).nullable()
+         }).nullable().optional()
+       })
+     ),
+     usage: z6.object({
+       prompt_tokens: z6.number(),
+       prompt_tokens_details: z6.object({
+         cached_tokens: z6.number()
+       }).nullish(),
+       completion_tokens: z6.number(),
+       completion_tokens_details: z6.object({
+         reasoning_tokens: z6.number()
+       }).nullish(),
+       total_tokens: z6.number(),
+       cost: z6.number().optional()
+     }).nullish()
+   }),
+   LLMGatewayErrorResponseSchema
+ ]);

- // src/llmgateway-completion-language-model.ts
+ // src/completion/index.ts
  var LLMGatewayCompletionLanguageModel = class {
    constructor(modelId, settings, config) {
-     this.specificationVersion = "v1";
+     this.specificationVersion = "v2";
+     this.provider = "llmgateway";
+     this.supportedUrls = {
+       "image/*": [
+         /^data:image\/[a-zA-Z]+;base64,/,
+         /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
+       ],
+       "text/*": [/^data:text\//, /^https?:\/\/.+$/],
+       "application/*": [/^data:application\//, /^https?:\/\/.+$/]
+     };
      this.defaultObjectGenerationMode = void 0;
      this.modelId = modelId;
      this.settings = settings;
      this.config = config;
    }
-   get provider() {
-     return this.config.provider;
-   }
    getArgs({
-     mode,
-     inputFormat,
      prompt,
-     maxTokens,
+     maxOutputTokens,
      temperature,
      topP,
      frequencyPenalty,
@@ -1068,16 +1921,24 @@ var LLMGatewayCompletionLanguageModel = class {
      responseFormat,
      topK,
      stopSequences,
-     providerMetadata
+     tools,
+     toolChoice
    }) {
-     var _a, _b;
-     const type = mode.type;
-     const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
      const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
        prompt,
-       inputFormat
+       inputFormat: "prompt"
      });
-     const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+     if (tools == null ? void 0 : tools.length) {
+       throw new UnsupportedFunctionalityError({
+         functionality: "tools"
+       });
+     }
+     if (toolChoice) {
+       throw new UnsupportedFunctionalityError({
+         functionality: "toolChoice"
+       });
+     }
+     return __spreadValues(__spreadValues({
        // model id:
        model: this.modelId,
        models: this.settings.models,
@@ -1087,7 +1948,7 @@ var LLMGatewayCompletionLanguageModel = class {
        suffix: this.settings.suffix,
        user: this.settings.user,
        // standardized settings:
-       max_tokens: maxTokens,
+       max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
@@ -1097,59 +1958,31 @@ var LLMGatewayCompletionLanguageModel = class {
        response_format: responseFormat,
        top_k: topK,
        // prompt:
-       prompt: completionPrompt
-     }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
-     switch (type) {
-       case "regular": {
-         if ((_b = mode.tools) == null ? void 0 : _b.length) {
-           throw new UnsupportedFunctionalityError3({
-             functionality: "tools"
-           });
-         }
-         if (mode.toolChoice) {
-           throw new UnsupportedFunctionalityError3({
-             functionality: "toolChoice"
-           });
-         }
-         return baseArgs;
-       }
-       case "object-json": {
-         throw new UnsupportedFunctionalityError3({
-           functionality: "object-json mode"
-         });
-       }
-       case "object-tool": {
-         throw new UnsupportedFunctionalityError3({
-           functionality: "object-tool mode"
-         });
-       }
-       // Handle all non-text types with a single default case
-       default: {
-         const _exhaustiveCheck = type;
-         throw new UnsupportedFunctionalityError3({
-           functionality: `${_exhaustiveCheck} mode`
-         });
-       }
-     }
+       prompt: completionPrompt,
+       // LLMGateway specific settings:
+       include_reasoning: this.settings.includeReasoning,
+       reasoning: this.settings.reasoning
+     }, this.config.extraBody), this.settings.extraBody);
    }
    async doGenerate(options) {
-     var _b, _c, _d, _e, _f;
-     const args = this.getArgs(options);
-     const { responseHeaders, value: response } = await postJsonToApi2({
+     var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
+     const providerOptions = options.providerOptions || {};
+     const llmgatewayOptions = providerOptions.llmgateway || {};
+     const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+     const { value: response, responseHeaders } = await postJsonToApi({
        url: this.config.url({
          path: "/completions",
          modelId: this.modelId
        }),
-       headers: combineHeaders2(this.config.headers(), options.headers),
+       headers: combineHeaders(this.config.headers(), options.headers),
        body: args,
        failedResponseHandler: llmgatewayFailedResponseHandler,
-       successfulResponseHandler: createJsonResponseHandler2(
+       successfulResponseHandler: createJsonResponseHandler(
          LLMGatewayCompletionChunkSchema
        ),
        abortSignal: options.abortSignal,
        fetch: this.config.fetch
      });
-     const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
      if ("error" in response) {
        throw new Error(`${response.error.message}`);
      }
@@ -1158,54 +1991,62 @@ var LLMGatewayCompletionLanguageModel = class {
        throw new Error("No choice in LLMGateway completion response");
      }
      return {
-       response: {
-         id: response.id,
-         modelId: response.model
-       },
-       text: (_b = choice.text) != null ? _b : "",
-       reasoning: choice.reasoning || void 0,
+       content: [
+         {
+           type: "text",
+           text: (_a15 = choice.text) != null ? _a15 : ""
+         }
+       ],
+       finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
        usage: {
-         promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
-         completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
+         inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : 0,
+         outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : 0,
+         totalTokens: ((_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : 0) + ((_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : 0),
+         reasoningTokens: (_l = (_k = (_j = response.usage) == null ? void 0 : _j.completion_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : 0,
+         cachedInputTokens: (_o = (_n = (_m = response.usage) == null ? void 0 : _m.prompt_tokens_details) == null ? void 0 : _n.cached_tokens) != null ? _o : 0
        },
-       finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
-       logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
-       rawCall: { rawPrompt, rawSettings },
-       rawResponse: { headers: responseHeaders },
-       warnings: []
+       warnings: [],
+       response: {
+         headers: responseHeaders
+       }
      };
    }
    async doStream(options) {
-     const args = this.getArgs(options);
-     const { responseHeaders, value: response } = await postJsonToApi2({
+     const providerOptions = options.providerOptions || {};
+     const llmgatewayOptions = providerOptions.llmgateway || {};
+     const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+     const { value: response, responseHeaders } = await postJsonToApi({
        url: this.config.url({
          path: "/completions",
          modelId: this.modelId
        }),
-       headers: combineHeaders2(this.config.headers(), options.headers),
-       body: __spreadProps(__spreadValues({}, this.getArgs(options)), {
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: __spreadProps(__spreadValues({}, args), {
          stream: true,
          // only include stream_options when in strict compatibility mode:
          stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
        }),
        failedResponseHandler: llmgatewayFailedResponseHandler,
-       successfulResponseHandler: createEventSourceResponseHandler2(
+       successfulResponseHandler: createEventSourceResponseHandler(
          LLMGatewayCompletionChunkSchema
        ),
        abortSignal: options.abortSignal,
        fetch: this.config.fetch
      });
-     const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
      let finishReason = "other";
-     let usage = {
-       promptTokens: Number.NaN,
-       completionTokens: Number.NaN
+     const usage = {
+       inputTokens: Number.NaN,
+       outputTokens: Number.NaN,
+       totalTokens: Number.NaN,
+       reasoningTokens: Number.NaN,
+       cachedInputTokens: Number.NaN
      };
-     let logprobs;
+     const llmgatewayUsage = {};
      return {
        stream: response.pipeThrough(
          new TransformStream({
            transform(chunk, controller) {
+             var _a15, _b;
              if (!chunk.success) {
                finishReason = "error";
                controller.enqueue({ type: "error", error: chunk.error });
@@ -1218,10 +2059,27 @@ var LLMGatewayCompletionLanguageModel = class {
                return;
              }
              if (value.usage != null) {
-               usage = {
-                 promptTokens: value.usage.prompt_tokens,
-                 completionTokens: value.usage.completion_tokens
-               };
+               usage.inputTokens = value.usage.prompt_tokens;
+               usage.outputTokens = value.usage.completion_tokens;
+               usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
+               llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
+               if (value.usage.prompt_tokens_details) {
+                 const cachedInputTokens = (_a15 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a15 : 0;
+                 usage.cachedInputTokens = cachedInputTokens;
+                 llmgatewayUsage.promptTokensDetails = {
+                   cachedTokens: cachedInputTokens
+                 };
+               }
+               llmgatewayUsage.completionTokens = value.usage.completion_tokens;
+               if (value.usage.completion_tokens_details) {
+                 const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
+                 usage.reasoningTokens = reasoningTokens;
+                 llmgatewayUsage.completionTokensDetails = {
+                   reasoningTokens
+                 };
+               }
+               llmgatewayUsage.cost = value.usage.cost;
+               llmgatewayUsage.totalTokens = value.usage.total_tokens;
              }
              const choice = value.choices[0];
              if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1230,60 +2088,31 @@ var LLMGatewayCompletionLanguageModel = class {
              if ((choice == null ? void 0 : choice.text) != null) {
                controller.enqueue({
                  type: "text-delta",
-                 textDelta: choice.text
+                 delta: choice.text,
+                 id: generateId()
                });
              }
-             const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
-               choice == null ? void 0 : choice.logprobs
-             );
-             if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-               if (logprobs === void 0) {
-                 logprobs = [];
-               }
-               logprobs.push(...mappedLogprobs);
-             }
            },
            flush(controller) {
              controller.enqueue({
                type: "finish",
                finishReason,
-               logprobs,
-               usage
+               usage,
+               providerMetadata: {
+                 llmgateway: {
+                   usage: llmgatewayUsage
+                 }
+               }
              });
            }
          })
        ),
-       rawCall: { rawPrompt, rawSettings },
-       rawResponse: { headers: responseHeaders },
-       warnings: []
+       response: {
+         headers: responseHeaders
+       }
      };
    }
  };
- var LLMGatewayCompletionChunkSchema = z4.union([
-   z4.object({
-     id: z4.string().optional(),
-     model: z4.string().optional(),
-     choices: z4.array(
-       z4.object({
-         text: z4.string(),
-         reasoning: z4.string().nullish().optional(),
-         reasoning_details: ReasoningDetailArraySchema.nullish(),
-         finish_reason: z4.string().nullish(),
-         index: z4.number(),
-         logprobs: z4.object({
-           tokens: z4.array(z4.string()),
-           token_logprobs: z4.array(z4.number()),
-           top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
-         }).nullable().optional()
-       })
-     ),
-     usage: z4.object({
-       prompt_tokens: z4.number(),
-       completion_tokens: z4.number()
-     }).optional().nullable()
-   }),
-   LLMGatewayErrorResponseSchema
- ]);
 export {
   LLMGatewayChatLanguageModel,
   LLMGatewayCompletionLanguageModel
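
For completeness, a minimal end-to-end sketch of driving the rewritten models through the AI SDK. The `createLLMGateway` factory and the `providerMetadata` promise on the streamText result are assumed from AI SDK 5 conventions and the package's public entry point (this chunk exports only the two model classes); the model id is illustrative. Keys under providerOptions.llmgateway are spread verbatim into the request body by the doGenerate/doStream implementations above.

// Sketch only; names flagged above as assumptions are not taken from this diff.
import { streamText } from "ai";
import { createLLMGateway } from "@llmgateway/ai-sdk-provider";

const llmgateway = createLLMGateway({ apiKey: process.env.LLM_GATEWAY_API_KEY });

const result = streamText({
  model: llmgateway("openai/gpt-4o"), // illustrative model id
  prompt: "Hello!",
  // merged into the request body by getArgs/doStream above:
  providerOptions: { llmgateway: { include_reasoning: true } }
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}
// gateway usage accounting attached by flush() above:
console.log((await result.providerMetadata)?.llmgateway?.usage);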