@llmgateway/ai-sdk-provider 1.0.3 → 2.0.0

package/dist/index.mjs CHANGED
@@ -17,24 +17,876 @@ var __spreadValues = (a, b) => {
  return a;
  };
  var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
- var __objRest = (source, exclude) => {
- var target = {};
- for (var prop in source)
- if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
- target[prop] = source[prop];
- if (source != null && __getOwnPropSymbols)
- for (var prop of __getOwnPropSymbols(source)) {
- if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
- target[prop] = source[prop];
+
+ // node_modules/.pnpm/@ai-sdk+provider@2.0.0-beta.1/node_modules/@ai-sdk/provider/dist/index.mjs
+ var marker = "vercel.ai.error";
+ var symbol = Symbol.for(marker);
+ var _a;
+ var _AISDKError = class _AISDKError2 extends Error {
+ /**
+ * Creates an AI SDK Error.
+ *
+ * @param {Object} params - The parameters for creating the error.
+ * @param {string} params.name - The name of the error.
+ * @param {string} params.message - The error message.
+ * @param {unknown} [params.cause] - The underlying cause of the error.
+ */
+ constructor({
+ name: name14,
+ message,
+ cause
+ }) {
+ super(message);
+ this[_a] = true;
+ this.name = name14;
+ this.cause = cause;
+ }
+ /**
+ * Checks if the given error is an AI SDK Error.
+ * @param {unknown} error - The error to check.
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
+ */
+ static isInstance(error) {
+ return _AISDKError2.hasMarker(error, marker);
+ }
+ static hasMarker(error, marker15) {
+ const markerSymbol = Symbol.for(marker15);
+ return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
+ }
+ };
+ _a = symbol;
+ var AISDKError = _AISDKError;
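
A quick sketch of the marker-based check that `AISDKError` uses above; it relies on the shared `Symbol.for("vercel.ai.error")` key rather than `instanceof`, so it keeps working when a bundle contains duplicate copies of the class (the results below follow directly from the code in this diff):

  const err = new AISDKError({ name: "AI_CustomError", message: "boom" });
  console.log(err instanceof Error); // true
  console.log(AISDKError.isInstance(err)); // true: looks up the symbol marker, not the prototype chain
  console.log(AISDKError.hasMarker(err, "vercel.ai.error")); // true
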
+ var name = "AI_APICallError";
+ var marker2 = `vercel.ai.error.${name}`;
+ var symbol2 = Symbol.for(marker2);
+ var _a2;
+ var APICallError = class extends AISDKError {
+ constructor({
+ message,
+ url,
+ requestBodyValues,
+ statusCode,
+ responseHeaders,
+ responseBody,
+ cause,
+ isRetryable = statusCode != null && (statusCode === 408 || // request timeout
+ statusCode === 409 || // conflict
+ statusCode === 429 || // too many requests
+ statusCode >= 500),
+ // server error
+ data
+ }) {
+ super({ name, message, cause });
+ this[_a2] = true;
+ this.url = url;
+ this.requestBodyValues = requestBodyValues;
+ this.statusCode = statusCode;
+ this.responseHeaders = responseHeaders;
+ this.responseBody = responseBody;
+ this.isRetryable = isRetryable;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker2);
+ }
+ };
+ _a2 = symbol2;
+ var name2 = "AI_EmptyResponseBodyError";
+ var marker3 = `vercel.ai.error.${name2}`;
+ var symbol3 = Symbol.for(marker3);
+ var _a3;
+ var EmptyResponseBodyError = class extends AISDKError {
+ // used in isInstance
+ constructor({ message = "Empty response body" } = {}) {
+ super({ name: name2, message });
+ this[_a3] = true;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker3);
+ }
+ };
+ _a3 = symbol3;
+ function getErrorMessage(error) {
+ if (error == null) {
+ return "unknown error";
+ }
+ if (typeof error === "string") {
+ return error;
+ }
+ if (error instanceof Error) {
+ return error.message;
+ }
+ return JSON.stringify(error);
+ }
+ var name3 = "AI_InvalidArgumentError";
+ var marker4 = `vercel.ai.error.${name3}`;
+ var symbol4 = Symbol.for(marker4);
+ var _a4;
+ var InvalidArgumentError = class extends AISDKError {
+ constructor({
+ message,
+ cause,
+ argument
+ }) {
+ super({ name: name3, message, cause });
+ this[_a4] = true;
+ this.argument = argument;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker4);
+ }
+ };
+ _a4 = symbol4;
+ var name4 = "AI_InvalidPromptError";
+ var marker5 = `vercel.ai.error.${name4}`;
+ var symbol5 = Symbol.for(marker5);
+ var _a5;
+ var InvalidPromptError = class extends AISDKError {
+ constructor({
+ prompt,
+ message,
+ cause
+ }) {
+ super({ name: name4, message: `Invalid prompt: ${message}`, cause });
+ this[_a5] = true;
+ this.prompt = prompt;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker5);
+ }
+ };
+ _a5 = symbol5;
+ var name5 = "AI_InvalidResponseDataError";
+ var marker6 = `vercel.ai.error.${name5}`;
+ var symbol6 = Symbol.for(marker6);
+ var _a6;
+ var InvalidResponseDataError = class extends AISDKError {
+ constructor({
+ data,
+ message = `Invalid response data: ${JSON.stringify(data)}.`
+ }) {
+ super({ name: name5, message });
+ this[_a6] = true;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker6);
+ }
+ };
+ _a6 = symbol6;
+ var name6 = "AI_JSONParseError";
+ var marker7 = `vercel.ai.error.${name6}`;
+ var symbol7 = Symbol.for(marker7);
+ var _a7;
+ var JSONParseError = class extends AISDKError {
+ constructor({ text, cause }) {
+ super({
+ name: name6,
+ message: `JSON parsing failed: Text: ${text}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a7] = true;
+ this.text = text;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker7);
+ }
+ };
+ _a7 = symbol7;
+ var name7 = "AI_LoadAPIKeyError";
+ var marker8 = `vercel.ai.error.${name7}`;
+ var symbol8 = Symbol.for(marker8);
+ var _a8;
+ var LoadAPIKeyError = class extends AISDKError {
+ // used in isInstance
+ constructor({ message }) {
+ super({ name: name7, message });
+ this[_a8] = true;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker8);
+ }
+ };
+ _a8 = symbol8;
+ var name8 = "AI_LoadSettingError";
+ var marker9 = `vercel.ai.error.${name8}`;
+ var symbol9 = Symbol.for(marker9);
+ var _a9;
+ _a9 = symbol9;
+ var name9 = "AI_NoContentGeneratedError";
+ var marker10 = `vercel.ai.error.${name9}`;
+ var symbol10 = Symbol.for(marker10);
+ var _a10;
+ _a10 = symbol10;
+ var name10 = "AI_NoSuchModelError";
+ var marker11 = `vercel.ai.error.${name10}`;
+ var symbol11 = Symbol.for(marker11);
+ var _a11;
+ _a11 = symbol11;
+ var name11 = "AI_TooManyEmbeddingValuesForCallError";
+ var marker12 = `vercel.ai.error.${name11}`;
+ var symbol12 = Symbol.for(marker12);
+ var _a12;
+ _a12 = symbol12;
+ var name12 = "AI_TypeValidationError";
+ var marker13 = `vercel.ai.error.${name12}`;
+ var symbol13 = Symbol.for(marker13);
+ var _a13;
+ var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
+ constructor({ value, cause }) {
+ super({
+ name: name12,
+ message: `Type validation failed: Value: ${JSON.stringify(value)}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a13] = true;
+ this.value = value;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker13);
+ }
+ /**
+ * Wraps an error into a TypeValidationError.
+ * If the cause is already a TypeValidationError with the same value, it returns the cause.
+ * Otherwise, it creates a new TypeValidationError.
+ *
+ * @param {Object} params - The parameters for wrapping the error.
+ * @param {unknown} params.value - The value that failed validation.
+ * @param {unknown} params.cause - The original error or cause of the validation failure.
+ * @returns {TypeValidationError} A TypeValidationError instance.
+ */
+ static wrap({
+ value,
+ cause
+ }) {
+ return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
+ }
+ };
+ _a13 = symbol13;
+ var TypeValidationError = _TypeValidationError;
+ var name13 = "AI_UnsupportedFunctionalityError";
+ var marker14 = `vercel.ai.error.${name13}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var UnsupportedFunctionalityError = class extends AISDKError {
+ constructor({
+ functionality,
+ message = `'${functionality}' functionality not supported.`
+ }) {
+ super({ name: name13, message });
+ this[_a14] = true;
+ this.functionality = functionality;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;
+
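
For reference, the retry semantics encoded by `APICallError`'s constructor default above: 408, 409, 429, and any status of 500 or above are considered retryable, everything else is not. A small illustrative sketch (the URL is a placeholder, not a real endpoint):

  const rateLimited = new APICallError({
    message: "rate limited",
    url: "https://gateway.example/v1/chat/completions", // hypothetical URL
    requestBodyValues: {},
    statusCode: 429,
  });
  console.log(rateLimited.isRetryable); // true

  const badRequest = new APICallError({
    message: "bad request",
    url: "https://gateway.example/v1/chat/completions", // hypothetical URL
    requestBodyValues: {},
    statusCode: 400,
  });
  console.log(badRequest.isRetryable); // false
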
+ // node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/index.js
+ var ParseError = class extends Error {
+ constructor(message, options) {
+ super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line;
+ }
+ };
+ function noop(_arg) {
+ }
+ function createParser(callbacks) {
+ if (typeof callbacks == "function")
+ throw new TypeError(
+ "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?"
+ );
+ const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks;
+ let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = "";
+ function feed(newChunk) {
+ const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`);
+ for (const line of complete)
+ parseLine(line);
+ incompleteLine = incomplete, isFirstChunk = false;
+ }
+ function parseLine(line) {
+ if (line === "") {
+ dispatchEvent();
+ return;
+ }
+ if (line.startsWith(":")) {
+ onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1));
+ return;
+ }
+ const fieldSeparatorIndex = line.indexOf(":");
+ if (fieldSeparatorIndex !== -1) {
+ const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset);
+ processField(field, value, line);
+ return;
  }
- return target;
+ processField(line, "", line);
+ }
+ function processField(field, value, line) {
+ switch (field) {
+ case "event":
+ eventType = value;
+ break;
+ case "data":
+ data = `${data}${value}
+ `;
+ break;
+ case "id":
+ id = value.includes("\0") ? void 0 : value;
+ break;
+ case "retry":
+ /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError(
+ new ParseError(`Invalid \`retry\` value: "${value}"`, {
+ type: "invalid-retry",
+ value,
+ line
+ })
+ );
+ break;
+ default:
+ onError(
+ new ParseError(
+ `Unknown field "${field.length > 20 ? `${field.slice(0, 20)}\u2026` : field}"`,
+ { type: "unknown-field", field, value, line }
+ )
+ );
+ break;
+ }
+ }
+ function dispatchEvent() {
+ data.length > 0 && onEvent({
+ id,
+ event: eventType || void 0,
+ // If the data buffer's last character is a U+000A LINE FEED (LF) character,
+ // then remove the last character from the data buffer.
+ data: data.endsWith(`
+ `) ? data.slice(0, -1) : data
+ }), id = void 0, data = "", eventType = "";
+ }
+ function reset(options = {}) {
+ incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = "";
+ }
+ return { feed, reset };
+ }
+ function splitLines(chunk) {
+ const lines = [];
+ let incompleteLine = "", searchIndex = 0;
+ for (; searchIndex < chunk.length; ) {
+ const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(`
+ `, searchIndex);
+ let lineEnd = -1;
+ if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) {
+ incompleteLine = chunk.slice(searchIndex);
+ break;
+ } else {
+ const line = chunk.slice(searchIndex, lineEnd);
+ lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === `
+ ` && searchIndex++;
+ }
+ }
+ return [lines, incompleteLine];
+ }
+
+ // node_modules/.pnpm/eventsource-parser@3.0.3/node_modules/eventsource-parser/dist/stream.js
+ var EventSourceParserStream = class extends TransformStream {
+ constructor({ onError, onRetry, onComment } = {}) {
+ let parser;
+ super({
+ start(controller) {
+ parser = createParser({
+ onEvent: (event) => {
+ controller.enqueue(event);
+ },
+ onError(error) {
+ onError === "terminate" ? controller.error(error) : typeof onError == "function" && onError(error);
+ },
+ onRetry,
+ onComment
+ });
+ },
+ transform(chunk) {
+ parser.feed(chunk);
+ }
+ });
+ }
  };
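
A minimal sketch of how the vendored parser above consumes Server-Sent Events; chunk boundaries are arbitrary, and an event is dispatched only once a blank line terminates it (per the SSE format):

  const parser = createParser({
    onEvent(event) {
      console.log(event.data); // "hello"
    },
  });
  parser.feed("data: hel"); // incomplete line is buffered
  parser.feed("lo\n\n"); // blank line dispatches the buffered event
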

- // src/llmgateway-facade.ts
- import { loadApiKey, withoutTrailingSlash } from "@ai-sdk/provider-utils";
+ // node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ import * as z4 from "zod/v4";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/Options.js
+ var ignoreOverride = Symbol("Let zodToJsonSchema decide on which parser to use");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/selectParser.js
+ import { ZodFirstPartyTypeKind as ZodFirstPartyTypeKind3 } from "zod";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/array.js
+ import { ZodFirstPartyTypeKind } from "zod";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/record.js
+ import { ZodFirstPartyTypeKind as ZodFirstPartyTypeKind2 } from "zod";
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/string.js
+ var ALPHA_NUMERIC = new Set("ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvxyz0123456789");
+
+ // node_modules/.pnpm/zod-to-json-schema@3.24.5_zod@3.25.34/node_modules/zod-to-json-schema/dist/esm/parsers/object.js
+ import { ZodOptional } from "zod";
+
+ // node_modules/.pnpm/@ai-sdk+provider-utils@3.0.0-beta.5_zod@3.25.34/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ function combineHeaders(...headers) {
+ return headers.reduce(
+ (combinedHeaders, currentHeaders) => __spreadValues(__spreadValues({}, combinedHeaders), currentHeaders != null ? currentHeaders : {}),
+ {}
+ );
+ }
+ function extractResponseHeaders(response) {
+ return Object.fromEntries([...response.headers]);
+ }
+ var createIdGenerator = ({
+ prefix,
+ size = 16,
+ alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ separator = "-"
+ } = {}) => {
+ const generator = () => {
+ const alphabetLength = alphabet.length;
+ const chars = new Array(size);
+ for (let i = 0; i < size; i++) {
+ chars[i] = alphabet[Math.random() * alphabetLength | 0];
+ }
+ return chars.join("");
+ };
+ if (prefix == null) {
+ return generator;
+ }
+ if (alphabet.includes(separator)) {
+ throw new InvalidArgumentError({
+ argument: "separator",
+ message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
+ });
+ }
+ return () => `${prefix}${separator}${generator()}`;
+ };
+ var generateId = createIdGenerator();
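
The ID helper above backs the `generateId()` fallback used for tool-call IDs later in this file. A short usage sketch (the prefix is illustrative):

  const generateCallId = createIdGenerator({ prefix: "call", size: 8 });
  console.log(generateCallId()); // e.g. "call-Ab3xK9Qp" (random suffix)
  // A separator that appears in the alphabet throws InvalidArgumentError:
  // createIdGenerator({ prefix: "call", separator: "a" }) // throws
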
+ function isAbortError(error) {
+ return error instanceof Error && (error.name === "AbortError" || error.name === "ResponseAborted" || // Next.js
+ error.name === "TimeoutError");
+ }
+ var FETCH_FAILED_ERROR_MESSAGES = ["fetch failed", "failed to fetch"];
+ function handleFetchError({
+ error,
+ url,
+ requestBodyValues
+ }) {
+ if (isAbortError(error)) {
+ return error;
+ }
+ if (error instanceof TypeError && FETCH_FAILED_ERROR_MESSAGES.includes(error.message.toLowerCase())) {
+ const cause = error.cause;
+ if (cause != null) {
+ return new APICallError({
+ message: `Cannot connect to API: ${cause.message}`,
+ cause,
+ url,
+ requestBodyValues,
+ isRetryable: true
+ // retry when network error
+ });
+ }
+ }
+ return error;
+ }
+ function removeUndefinedEntries(record) {
+ return Object.fromEntries(
+ Object.entries(record).filter(([_key, value]) => value != null)
+ );
+ }
+ function loadApiKey({
+ apiKey,
+ environmentVariableName,
+ apiKeyParameterName = "apiKey",
+ description
+ }) {
+ if (typeof apiKey === "string") {
+ return apiKey;
+ }
+ if (apiKey != null) {
+ throw new LoadAPIKeyError({
+ message: `${description} API key must be a string.`
+ });
+ }
+ if (typeof process === "undefined") {
+ throw new LoadAPIKeyError({
+ message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
+ });
+ }
+ apiKey = process.env[environmentVariableName];
+ if (apiKey == null) {
+ throw new LoadAPIKeyError({
+ message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
+ });
+ }
+ if (typeof apiKey !== "string") {
+ throw new LoadAPIKeyError({
+ message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
+ });
+ }
+ return apiKey;
+ }
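
A sketch of the lookup order `loadApiKey` implements: an explicit string wins, otherwise the named environment variable is read, and a missing or non-string value raises `LoadAPIKeyError`. The variable name below is assumed for illustration, not necessarily the one this provider uses:

  const apiKey = loadApiKey({
    apiKey: undefined, // fall through to the environment
    environmentVariableName: "LLM_GATEWAY_API_KEY", // hypothetical name
    description: "LLMGateway",
  });
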
+ var suspectProtoRx = /"__proto__"\s*:/;
+ var suspectConstructorRx = /"constructor"\s*:/;
+ function _parse(text) {
+ const obj = JSON.parse(text);
+ if (obj === null || typeof obj !== "object") {
+ return obj;
+ }
+ if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
+ return obj;
+ }
+ return filter(obj);
+ }
+ function filter(obj) {
+ let next = [obj];
+ while (next.length) {
+ const nodes = next;
+ next = [];
+ for (const node of nodes) {
+ if (Object.prototype.hasOwnProperty.call(node, "__proto__")) {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ if (Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ for (const key in node) {
+ const value = node[key];
+ if (value && typeof value === "object") {
+ next.push(value);
+ }
+ }
+ }
+ }
+ return obj;
+ }
+ function secureJsonParse(text) {
+ const { stackTraceLimit } = Error;
+ Error.stackTraceLimit = 0;
+ try {
+ return _parse(text);
+ } finally {
+ Error.stackTraceLimit = stackTraceLimit;
+ }
+ }
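
The `secureJsonParse` wrapper above refuses payloads that could be used for prototype pollution; a small sketch of that behavior:

  console.log(secureJsonParse('{"a":1}')); // { a: 1 }
  // Own "__proto__" keys survive JSON.parse, so they are detected and rejected:
  // secureJsonParse('{"__proto__":{"polluted":true}}') // throws SyntaxError
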
+ var validatorSymbol = Symbol.for("vercel.ai.validator");
+ function validator(validate) {
+ return { [validatorSymbol]: true, validate };
+ }
+ function isValidator(value) {
+ return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
+ }
+ function asValidator(value) {
+ return isValidator(value) ? value : standardSchemaValidator(value);
+ }
+ function standardSchemaValidator(standardSchema) {
+ return validator(async (value) => {
+ const result = await standardSchema["~standard"].validate(value);
+ return result.issues == null ? { success: true, value: result.value } : {
+ success: false,
+ error: new TypeValidationError({
+ value,
+ cause: result.issues
+ })
+ };
+ });
+ }
+ async function validateTypes({
+ value,
+ schema
+ }) {
+ const result = await safeValidateTypes({ value, schema });
+ if (!result.success) {
+ throw TypeValidationError.wrap({ value, cause: result.error });
+ }
+ return result.value;
+ }
+ async function safeValidateTypes({
+ value,
+ schema
+ }) {
+ const validator2 = asValidator(schema);
+ try {
+ if (validator2.validate == null) {
+ return { success: true, value, rawValue: value };
+ }
+ const result = await validator2.validate(value);
+ if (result.success) {
+ return { success: true, value: result.value, rawValue: value };
+ }
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: result.error }),
+ rawValue: value
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: error }),
+ rawValue: value
+ };
+ }
+ }
+ async function parseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = secureJsonParse(text);
+ if (schema == null) {
+ return value;
+ }
+ return validateTypes({ value, schema });
+ } catch (error) {
+ if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
+ throw error;
+ }
+ throw new JSONParseError({ text, cause: error });
+ }
+ }
+ async function safeParseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = secureJsonParse(text);
+ if (schema == null) {
+ return { success: true, value, rawValue: value };
+ }
+ return await safeValidateTypes({ value, schema });
+ } catch (error) {
+ return {
+ success: false,
+ error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error }),
+ rawValue: void 0
+ };
+ }
+ }
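
`safeParseJSON` accepts any Standard Schema validator, which includes the zod v4 schemas used throughout this bundle. A minimal sketch with an illustrative payload:

  import * as z from "zod/v4";

  const result = await safeParseJSON({
    text: '{"model":"gpt-4o"}',
    schema: z.object({ model: z.string() }),
  });
  if (result.success) {
    console.log(result.value.model); // "gpt-4o"
  }
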
+ function isParsableJson(input) {
+ try {
+ secureJsonParse(input);
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }
+ function parseJsonEventStream({
+ stream,
+ schema
+ }) {
+ return stream.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).pipeThrough(
+ new TransformStream({
+ async transform({ data }, controller) {
+ if (data === "[DONE]") {
+ return;
+ }
+ controller.enqueue(await safeParseJSON({ text: data, schema }));
+ }
+ })
+ );
+ }
+ var getOriginalFetch2 = () => globalThis.fetch;
+ var postJsonToApi = async ({
+ url,
+ headers,
+ body,
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ }) => postToApi({
+ url,
+ headers: __spreadValues({
+ "Content-Type": "application/json"
+ }, headers),
+ body: {
+ content: JSON.stringify(body),
+ values: body
+ },
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ });
+ var postToApi = async ({
+ url,
+ headers = {},
+ body,
+ successfulResponseHandler,
+ failedResponseHandler,
+ abortSignal,
+ fetch = getOriginalFetch2()
+ }) => {
+ try {
+ const response = await fetch(url, {
+ method: "POST",
+ headers: removeUndefinedEntries(headers),
+ body: body.content,
+ signal: abortSignal
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!response.ok) {
+ let errorInformation;
+ try {
+ errorInformation = await failedResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ throw new APICallError({
+ message: "Failed to process error response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ throw errorInformation.value;
+ }
+ try {
+ return await successfulResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (error instanceof Error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ }
+ throw new APICallError({
+ message: "Failed to process successful response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ } catch (error) {
+ throw handleFetchError({ error, url, requestBodyValues: body.values });
+ }
+ };
+ var createJsonErrorResponseHandler = ({
+ errorSchema,
+ errorToMessage,
+ isRetryable
+ }) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const responseHeaders = extractResponseHeaders(response);
+ if (responseBody.trim() === "") {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ try {
+ const parsedError = await parseJSON({
+ text: responseBody,
+ schema: errorSchema
+ });
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: errorToMessage(parsedError),
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ data: parsedError,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
+ })
+ };
+ } catch (parseError) {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ };
+ var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
+ const responseHeaders = extractResponseHeaders(response);
+ if (response.body == null) {
+ throw new EmptyResponseBodyError({});
+ }
+ return {
+ responseHeaders,
+ value: parseJsonEventStream({
+ stream: response.body,
+ schema: chunkSchema
+ })
+ };
+ };
+ var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const parsedResult = await safeParseJSON({
+ text: responseBody,
+ schema: responseSchema
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!parsedResult.success) {
+ throw new APICallError({
+ message: "Invalid JSON response",
+ cause: parsedResult.error,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ url,
+ requestBodyValues
+ });
+ }
+ return {
+ responseHeaders,
+ value: parsedResult.value,
+ rawValue: parsedResult.rawValue
+ };
+ };
+ var schemaSymbol = Symbol.for("vercel.ai.schema");
+ var { btoa, atob } = globalThis;
+ function convertUint8ArrayToBase64(array) {
+ let latin1string = "";
+ for (let i = 0; i < array.length; i++) {
+ latin1string += String.fromCodePoint(array[i]);
+ }
+ return btoa(latin1string);
+ }
+ function withoutTrailingSlash(url) {
+ return url == null ? void 0 : url.replace(/\/$/, "");
+ }

  // src/schemas/reasoning-details.ts
- import { z } from "zod";
+ import { z } from "zod/v4";
  var ReasoningDetailSummarySchema = z.object({
  type: z.literal("reasoning.summary" /* Summary */),
  summary: z.string()
@@ -59,56 +911,112 @@ var ReasoningDetailsWithUnknownSchema = z.union([
  ]);
  var ReasoningDetailArraySchema = z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));

- // src/llmgateway-chat-language-model.ts
- import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError
- } from "@ai-sdk/provider";
- import {
- combineHeaders,
- createEventSourceResponseHandler,
- createJsonResponseHandler,
- generateId,
- isParsableJson,
- postJsonToApi
- } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ // src/schemas/error-response.ts
+ import { z as z2 } from "zod/v4";
+ var LLMGatewayErrorResponseSchema = z2.object({
+ error: z2.object({
+ code: z2.union([z2.string(), z2.number()]).nullable().optional().default(null),
+ message: z2.string(),
+ type: z2.string().nullable().optional().default(null),
+ param: z2.any().nullable().optional().default(null)
+ })
+ });
+ var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
+ errorSchema: LLMGatewayErrorResponseSchema,
+ errorToMessage: (data) => data.error.message
+ });
+
+ // src/utils/map-finish-reason.ts
+ function mapLLMGatewayFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
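
The mapping above normalizes LLMGateway/OpenAI-style finish reasons to the AI SDK vocabulary; anything unrecognized (including a missing value) falls through to "unknown":

  console.log(mapLLMGatewayFinishReason("tool_calls")); // "tool-calls"
  console.log(mapLLMGatewayFinishReason(undefined)); // "unknown" (default branch)
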
+
+ // src/chat/is-url.ts
+ function isUrl({
+ url,
+ protocols
+ }) {
+ try {
+ const urlObj = new URL(url);
+ return protocols.has(urlObj.protocol);
+ } catch (_) {
+ return false;
+ }
+ }
+
+ // src/chat/file-url-utils.ts
+ function getFileUrl({
+ part,
+ defaultMediaType
+ }) {
+ var _a15, _b;
+ if (part.data instanceof Uint8Array) {
+ const base64 = convertUint8ArrayToBase64(part.data);
+ return `data:${(_a15 = part.mediaType) != null ? _a15 : defaultMediaType};base64,${base64}`;
+ }
+ const stringUrl = part.data.toString();
+ if (isUrl({
+ url: stringUrl,
+ protocols: /* @__PURE__ */ new Set(["http:", "https:"])
+ })) {
+ return stringUrl;
+ }
+ return stringUrl.startsWith("data:") ? stringUrl : `data:${(_b = part.mediaType) != null ? _b : defaultMediaType};base64,${stringUrl}`;
+ }

- // src/convert-to-llmgateway-chat-messages.ts
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ // src/chat/convert-to-llmgateway-chat-messages.ts
  function getCacheControl(providerMetadata) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const anthropic = providerMetadata == null ? void 0 : providerMetadata.anthropic;
  const llmgateway2 = providerMetadata == null ? void 0 : providerMetadata.llmgateway;
- return (_c = (_b = (_a = llmgateway2 == null ? void 0 : llmgateway2.cacheControl) != null ? _a : llmgateway2 == null ? void 0 : llmgateway2.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
+ return (_c = (_b = (_a15 = llmgateway2 == null ? void 0 : llmgateway2.cacheControl) != null ? _a15 : llmgateway2 == null ? void 0 : llmgateway2.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
  }
  function convertToLLMGatewayChatMessages(prompt) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const messages = [];
- for (const { role, content, providerMetadata } of prompt) {
+ for (const { role, content, providerOptions } of prompt) {
  switch (role) {
  case "system": {
  messages.push({
  role: "system",
  content,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  });
  break;
  }
  case "user": {
- if (content.length === 1 && ((_a = content[0]) == null ? void 0 : _a.type) === "text") {
+ if (content.length === 1 && ((_a15 = content[0]) == null ? void 0 : _a15.type) === "text") {
+ const cacheControl = (_b = getCacheControl(providerOptions)) != null ? _b : getCacheControl(content[0].providerOptions);
+ const contentWithCacheControl = cacheControl ? [
+ {
+ type: "text",
+ text: content[0].text,
+ cache_control: cacheControl
+ }
+ ] : content[0].text;
  messages.push({
  role: "user",
- content: content[0].text,
- cache_control: (_b = getCacheControl(providerMetadata)) != null ? _b : getCacheControl(content[0].providerMetadata)
+ content: contentWithCacheControl
  });
  break;
  }
- const messageCacheControl = getCacheControl(providerMetadata);
+ const messageCacheControl = getCacheControl(providerOptions);
  const contentParts = content.map(
  (part) => {
- var _a2, _b2, _c2, _d;
- const cacheControl = (_a2 = getCacheControl(part.providerMetadata)) != null ? _a2 : messageCacheControl;
+ var _a16, _b2, _c2, _d, _e, _f;
+ const cacheControl = (_a16 = getCacheControl(part.providerOptions)) != null ? _a16 : messageCacheControl;
  switch (part.type) {
  case "text":
  return {
@@ -117,33 +1025,55 @@ function convertToLLMGatewayChatMessages(prompt) {
  // For text parts, only use part-specific cache control
  cache_control: cacheControl
  };
- case "image":
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_b2 = part.mimeType) != null ? _b2 : "image/jpeg"};base64,${convertUint8ArrayToBase64(
- part.image
- )}`
- },
- // For image parts, use part-specific or message-level cache control
- cache_control: cacheControl
- };
- case "file":
+ case "file": {
+ if ((_b2 = part.mediaType) == null ? void 0 : _b2.startsWith("image/")) {
+ const url = getFileUrl({
+ part,
+ defaultMediaType: "image/jpeg"
+ });
+ return {
+ type: "image_url",
+ image_url: {
+ url
+ },
+ // For image parts, use part-specific or message-level cache control
+ cache_control: cacheControl
+ };
+ }
+ const fileName = String(
+ (_f = (_e = (_d = (_c2 = part.providerOptions) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename) != null ? _e : part.filename) != null ? _f : ""
+ );
+ const fileData = getFileUrl({
+ part,
+ defaultMediaType: "application/pdf"
+ });
+ if (isUrl({
+ url: fileData,
+ protocols: /* @__PURE__ */ new Set(["http:", "https:"])
+ })) {
+ return {
+ type: "file",
+ file: {
+ filename: fileName,
+ file_data: fileData
+ }
+ };
+ }
  return {
  type: "file",
  file: {
- filename: String(
- (_d = (_c2 = part.providerMetadata) == null ? void 0 : _c2.llmgateway) == null ? void 0 : _d.filename
- ),
- file_data: part.data instanceof Uint8Array ? `data:${part.mimeType};base64,${convertUint8ArrayToBase64(part.data)}` : `data:${part.mimeType};base64,${part.data}`
+ filename: fileName,
+ file_data: fileData
  },
  cache_control: cacheControl
  };
+ }
  default: {
- const _exhaustiveCheck = part;
- throw new Error(
- `Unsupported content part type: ${_exhaustiveCheck}`
- );
+ return {
+ type: "text",
+ text: "",
+ cache_control: cacheControl
+ };
  }
  }
  }
@@ -171,7 +1101,7 @@ function convertToLLMGatewayChatMessages(prompt) {
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -180,23 +1110,14 @@ function convertToLLMGatewayChatMessages(prompt) {
  reasoning += part.text;
  reasoningDetails.push({
  type: "reasoning.text" /* Text */,
- text: part.text,
- signature: part.signature
- });
- break;
- }
- case "redacted-reasoning": {
- reasoningDetails.push({
- type: "reasoning.encrypted" /* Encrypted */,
- data: part.data
+ text: part.text
  });
  break;
  }
  case "file":
  break;
  default: {
- const _exhaustiveCheck = part;
- throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
@@ -206,95 +1127,188 @@ function convertToLLMGatewayChatMessages(prompt) {
  tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
  reasoning: reasoning || void 0,
  reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
+ const content2 = getToolResultContent(toolResponse);
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result),
- cache_control: (_c = getCacheControl(providerMetadata)) != null ? _c : getCacheControl(toolResponse.providerMetadata)
+ content: content2,
+ cache_control: (_c = getCacheControl(providerOptions)) != null ? _c : getCacheControl(toolResponse.providerOptions)
  });
  }
  break;
  }
  default: {
- const _exhaustiveCheck = role;
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
  return messages;
  }
-
- // src/map-llmgateway-chat-logprobs.ts
- function mapLLMGatewayChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ function getToolResultContent(input) {
+ return input.output.type === "text" ? input.output.value : JSON.stringify(input.output.value);
  }
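
A sketch of the conversion above on a tiny prompt (the output shape follows directly from the code; values are illustrative):

  const messages = convertToLLMGatewayChatMessages([
    { role: "system", content: "You are terse." },
    { role: "user", content: [{ type: "text", text: "Hi" }] },
  ]);
  // A single text part without cache control collapses to a plain string:
  // [{ role: "system", content: "You are terse.", cache_control: undefined },
  //  { role: "user", content: "Hi" }]
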

- // src/map-llmgateway-finish-reason.ts
- function mapLLMGatewayFinishReason(finishReason) {
- switch (finishReason) {
- case "stop":
- return "stop";
- case "length":
- return "length";
- case "content_filter":
- return "content-filter";
- case "function_call":
- case "tool_calls":
- return "tool-calls";
- default:
- return "unknown";
+ // src/chat/get-tool-choice.ts
+ import { z as z3 } from "zod/v4";
+ var ChatCompletionToolChoiceSchema = z3.union([
+ z3.literal("auto"),
+ z3.literal("none"),
+ z3.literal("required"),
+ z3.object({
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string()
+ })
+ })
+ ]);
+ function getChatCompletionToolChoice(toolChoice) {
+ switch (toolChoice.type) {
+ case "auto":
+ case "none":
+ case "required":
+ return toolChoice.type;
+ case "tool": {
+ return {
+ type: "function",
+ function: { name: toolChoice.toolName }
+ };
+ }
+ default: {
+ toolChoice;
+ throw new Error(`Invalid tool choice type: ${toolChoice}`);
+ }
  }
  }
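
The AI SDK v2 tool-choice union maps onto the OpenAI-compatible wire format as follows (both results follow from the switch above):

  console.log(getChatCompletionToolChoice({ type: "auto" })); // "auto"
  console.log(getChatCompletionToolChoice({ type: "tool", toolName: "get_weather" }));
  // -> { type: "function", function: { name: "get_weather" } }
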

- // src/llmgateway-error.ts
- import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
- var LLMGatewayErrorResponseSchema = z2.object({
- error: z2.object({
- message: z2.string(),
- type: z2.string(),
- param: z2.any().nullable(),
- code: z2.string().nullable()
- })
+ // src/chat/schemas.ts
+ import { z as z5 } from "zod/v4";
+ var LLMGatewayChatCompletionBaseResponseSchema = z5.object({
+ id: z5.string().optional(),
+ model: z5.string().optional(),
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ prompt_tokens_details: z5.object({
+ cached_tokens: z5.number()
+ }).nullish(),
+ completion_tokens: z5.number(),
+ completion_tokens_details: z5.object({
+ reasoning_tokens: z5.number()
+ }).nullish(),
+ total_tokens: z5.number(),
+ cost: z5.number().optional(),
+ cost_details: z5.object({
+ upstream_inference_cost: z5.number().nullish()
+ }).nullish()
+ }).nullish()
  });
- var llmgatewayFailedResponseHandler = createJsonErrorResponseHandler({
- errorSchema: LLMGatewayErrorResponseSchema,
- errorToMessage: (data) => data.error.message
+ var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
+ choices: z5.array(
+ z5.object({
+ message: z5.object({
+ role: z5.literal("assistant"),
+ content: z5.string().nullable().optional(),
+ reasoning: z5.string().nullable().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ id: z5.string().optional().nullable(),
+ type: z5.literal("function"),
+ function: z5.object({
+ name: z5.string(),
+ arguments: z5.string()
+ })
+ })
+ ).optional()
+ }),
+ index: z5.number().nullish(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullable().optional(),
+ finish_reason: z5.string().optional().nullable()
+ })
+ )
  });
+ var LLMGatewayStreamChatCompletionChunkSchema = z5.union([
+ LLMGatewayChatCompletionBaseResponseSchema.extend({
+ choices: z5.array(
+ z5.object({
+ delta: z5.object({
+ role: z5.enum(["assistant"]).optional(),
+ content: z5.string().nullish(),
+ reasoning: z5.string().nullish().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ index: z5.number().nullish(),
+ id: z5.string().nullish(),
+ type: z5.literal("function").optional(),
+ function: z5.object({
+ name: z5.string().nullish(),
+ arguments: z5.string().nullish()
+ })
+ })
+ ).nullish()
+ }).nullish(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullish(),
+ finish_reason: z5.string().nullable().optional(),
+ index: z5.number().nullish()
+ })
+ )
+ }),
+ LLMGatewayErrorResponseSchema
+ ]);
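
A quick sanity check of the streaming chunk schema above with an illustrative delta payload; note the union also admits LLMGateway error bodies, so in-stream errors parse instead of throwing:

  const chunk = LLMGatewayStreamChatCompletionChunkSchema.parse({
    choices: [{ delta: { content: "Hel" }, index: 0 }],
  });
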

- // src/llmgateway-chat-language-model.ts
- function isFunctionTool(tool) {
- return "parameters" in tool;
- }
+ // src/chat/index.ts
  var LLMGatewayChatLanguageModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.provider = "llmgateway";
  this.defaultObjectGenerationMode = "tool";
+ this.supportedUrls = {
+ "image/*": [
+ /^data:image\/[a-zA-Z]+;base64,/,
+ /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
+ ],
+ // 'text/*': [/^data:text\//, /^https?:\/\/.+$/],
+ "application/*": [/^data:application\//, /^https?:\/\/.+$/]
+ };
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
- get provider() {
- return this.config.provider;
- }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  frequencyPenalty,
@@ -303,12 +1317,10 @@ var LLMGatewayChatLanguageModel = class {
  stopSequences,
  responseFormat,
  topK,
- providerMetadata
+ tools,
+ toolChoice
  }) {
- var _a;
- const type = mode.type;
- const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
- const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+ const baseArgs = __spreadValues(__spreadValues({
  // model id:
  model: this.modelId,
  models: this.settings.models,
@@ -319,7 +1331,7 @@ var LLMGatewayChatLanguageModel = class {
  user: this.settings.user,
  parallel_tool_calls: this.settings.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -334,44 +1346,34 @@ var LLMGatewayChatLanguageModel = class {
  include_reasoning: this.settings.includeReasoning,
  reasoning: this.settings.reasoning,
  usage: this.settings.usage
- }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
- switch (type) {
- case "regular": {
- return __spreadValues(__spreadValues({}, baseArgs), prepareToolsAndToolChoice(mode));
- }
- case "object-json": {
- return __spreadProps(__spreadValues({}, baseArgs), {
- response_format: { type: "json_object" }
- });
- }
- case "object-tool": {
- return __spreadProps(__spreadValues({}, baseArgs), {
- tool_choice: { type: "function", function: { name: mode.tool.name } },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- });
- }
- // Handle all non-text types with a single default case
- default: {
- const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError({
- functionality: `${_exhaustiveCheck} mode`
- });
- }
+ }, this.config.extraBody), this.settings.extraBody);
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
+ return __spreadProps(__spreadValues({}, baseArgs), {
+ response_format: { type: "json_object" }
+ });
  }
+ if (tools && tools.length > 0) {
+ const mappedTools = tools.filter((tool) => tool.type === "function").map((tool) => ({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
1361
+ parameters: tool.inputSchema
1362
+ }
1363
+ }));
1364
+ return __spreadProps(__spreadValues({}, baseArgs), {
1365
+ tools: mappedTools,
1366
+ tool_choice: toolChoice ? getChatCompletionToolChoice(toolChoice) : void 0
1367
+ });
1368
+ }
1369
+ return baseArgs;
370
1370
  }
371
1371
  async doGenerate(options) {
372
- var _b, _c, _d, _e, _f, _g, _h, _i, _j;
373
- const args = this.getArgs(options);
374
- const { responseHeaders, value: response } = await postJsonToApi({
1372
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
1373
+ const providerOptions = options.providerOptions || {};
1374
+ const llmgatewayOptions = providerOptions.llmgateway || {};
1375
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
1376
+ const { value: response, responseHeaders } = await postJsonToApi({
375
1377
  url: this.config.url({
376
1378
  path: "/chat/completions",
377
1379
  modelId: this.modelId
@@ -385,46 +1387,31 @@ var LLMGatewayChatLanguageModel = class {
385
1387
  abortSignal: options.abortSignal,
386
1388
  fetch: this.config.fetch
387
1389
  });
388
- const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
389
1390
  const choice = response.choices[0];
390
1391
  if (!choice) {
391
1392
  throw new Error("No choice in response");
392
1393
  }
393
1394
  const usageInfo = response.usage ? {
394
- promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
395
- completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
1395
+ inputTokens: (_a15 = response.usage.prompt_tokens) != null ? _a15 : 0,
1396
+ outputTokens: (_b = response.usage.completion_tokens) != null ? _b : 0,
1397
+ totalTokens: ((_c = response.usage.prompt_tokens) != null ? _c : 0) + ((_d = response.usage.completion_tokens) != null ? _d : 0),
1398
+ reasoningTokens: (_f = (_e = response.usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0,
1399
+ cachedInputTokens: (_h = (_g = response.usage.prompt_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : 0
396
1400
  } : {
397
- promptTokens: 0,
398
- completionTokens: 0
1401
+ inputTokens: 0,
1402
+ outputTokens: 0,
1403
+ totalTokens: 0,
1404
+ reasoningTokens: 0,
1405
+ cachedInputTokens: 0
399
1406
  };
400
- const providerMetadata = {};
401
- if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
402
- providerMetadata.llmgateway = {
403
- usage: {
404
- promptTokens: response.usage.prompt_tokens,
405
- promptTokensDetails: response.usage.prompt_tokens_details ? {
406
- cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
407
- } : void 0,
408
- completionTokens: response.usage.completion_tokens,
409
- completionTokensDetails: response.usage.completion_tokens_details ? {
410
- reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
411
- } : void 0,
412
- cost: response.usage.cost,
413
- totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
414
- }
415
- };
416
- }
417
- const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
418
- const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
1407
+ const reasoningDetails = (_i = choice.message.reasoning_details) != null ? _i : [];
419
1408
  const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
420
- var _a2;
421
1409
  switch (detail.type) {
422
1410
  case "reasoning.text" /* Text */: {
423
1411
  if (detail.text) {
424
1412
  return {
425
- type: "text",
426
- text: detail.text,
427
- signature: (_a2 = detail.signature) != null ? _a2 : void 0
1413
+ type: "reasoning",
1414
+ text: detail.text
428
1415
  };
429
1416
  }
430
1417
  break;
@@ -432,7 +1419,7 @@ var LLMGatewayChatLanguageModel = class {
432
1419
  case "reasoning.summary" /* Summary */: {
433
1420
  if (detail.summary) {
434
1421
  return {
435
- type: "text",
1422
+ type: "reasoning",
436
1423
  text: detail.summary
437
1424
  };
438
1425
  }
@@ -441,51 +1428,80 @@ var LLMGatewayChatLanguageModel = class {
441
1428
  case "reasoning.encrypted" /* Encrypted */: {
442
1429
  if (detail.data) {
443
1430
  return {
444
- type: "redacted",
445
- data: detail.data
1431
+ type: "reasoning",
1432
+ text: "[REDACTED]"
446
1433
  };
447
1434
  }
448
1435
  break;
449
1436
  }
450
1437
  default: {
451
- detail;
1438
+ const _exhaustiveCheck = detail;
1439
+ return _exhaustiveCheck;
452
1440
  }
453
1441
  }
454
1442
  return null;
455
1443
  }).filter((p) => p !== null) : choice.message.reasoning ? [
456
1444
  {
457
- type: "text",
1445
+ type: "reasoning",
458
1446
  text: choice.message.reasoning
459
1447
  }
460
1448
  ] : [];
461
- return __spreadValues({
462
- response: {
463
- id: response.id,
464
- modelId: response.model
465
- },
466
- text: (_i = choice.message.content) != null ? _i : void 0,
467
- reasoning,
468
- toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
469
- var _a2;
470
- return {
471
- toolCallType: "function",
472
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
1449
+ const content = [];
1450
+ content.push(...reasoning);
1451
+ if (choice.message.content) {
1452
+ content.push({
1453
+ type: "text",
1454
+ text: choice.message.content
1455
+ });
1456
+ }
1457
+ if (choice.message.tool_calls) {
1458
+ for (const toolCall of choice.message.tool_calls) {
1459
+ content.push({
1460
+ type: "tool-call",
1461
+ toolCallId: (_j = toolCall.id) != null ? _j : generateId(),
473
1462
  toolName: toolCall.function.name,
474
- args: toolCall.function.arguments
475
- };
476
- }),
1463
+ input: toolCall.function.arguments
1464
+ });
1465
+ }
1466
+ }
1467
+ return {
1468
+ content,
477
1469
  finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
478
1470
  usage: usageInfo,
479
- rawCall: { rawPrompt, rawSettings },
480
- rawResponse: { headers: responseHeaders },
481
1471
  warnings: [],
482
- logprobs: mapLLMGatewayChatLogProbsOutput(choice.logprobs)
483
- }, hasProviderMetadata ? { providerMetadata } : {});
1472
+ providerMetadata: {
1473
+ llmgateway: {
1474
+ usage: {
1475
+ promptTokens: (_k = usageInfo.inputTokens) != null ? _k : 0,
1476
+ completionTokens: (_l = usageInfo.outputTokens) != null ? _l : 0,
1477
+ totalTokens: (_m = usageInfo.totalTokens) != null ? _m : 0,
1478
+ cost: (_n = response.usage) == null ? void 0 : _n.cost,
1479
+ promptTokensDetails: {
1480
+ cachedTokens: (_q = (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : 0
1481
+ },
1482
+ completionTokensDetails: {
1483
+ reasoningTokens: (_t = (_s = (_r = response.usage) == null ? void 0 : _r.completion_tokens_details) == null ? void 0 : _s.reasoning_tokens) != null ? _t : 0
1484
+ },
1485
+ costDetails: {
1486
+ upstreamInferenceCost: (_w = (_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? _w : 0
1487
+ }
1488
+ }
1489
+ }
1490
+ },
1491
+ request: { body: args },
1492
+ response: {
1493
+ id: response.id,
1494
+ modelId: response.model,
1495
+ headers: responseHeaders
1496
+ }
1497
+ };
484
1498
  }
485
1499
  async doStream(options) {
486
- var _a, _c;
487
- const args = this.getArgs(options);
488
- const { responseHeaders, value: response } = await postJsonToApi({
1500
+ var _a15;
1501
+ const providerOptions = options.providerOptions || {};
1502
+ const llmgatewayOptions = providerOptions.llmgateway || {};
1503
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
1504
+ const { value: response, responseHeaders } = await postJsonToApi({
489
1505
  url: this.config.url({
490
1506
  path: "/chat/completions",
491
1507
  modelId: this.modelId
@@ -496,7 +1512,7 @@ var LLMGatewayChatLanguageModel = class {
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? __spreadValues({
  include_usage: true
- }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
+ }, ((_a15 = this.settings.usage) == null ? void 0 : _a15.include) ? { include_usage: true } : {}) : void 0
  }),
  failedResponseHandler: llmgatewayFailedResponseHandler,
  successfulResponseHandler: createEventSourceResponseHandler(
@@ -505,21 +1521,26 @@ var LLMGatewayChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
  const toolCalls = [];
  let finishReason = "other";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: Number.NaN,
+ outputTokens: Number.NaN,
+ totalTokens: Number.NaN,
+ reasoningTokens: Number.NaN,
+ cachedInputTokens: Number.NaN
  };
- let logprobs;
  const llmgatewayUsage = {};
- const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
+ let textStarted = false;
+ let reasoningStarted = false;
+ let textId;
+ let reasoningId;
+ let llmgatewayResponseId;
  return {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
- var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -532,6 +1553,7 @@ var LLMGatewayChatLanguageModel = class {
  return;
  }
  if (value.id) {
+ llmgatewayResponseId = value.id;
  controller.enqueue({
  type: "response-metadata",
  id: value.id
@@ -544,20 +1566,23 @@ var LLMGatewayChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
  llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
  if (value.usage.prompt_tokens_details) {
+ const cachedInputTokens = (_a16 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a16 : 0;
+ usage.cachedInputTokens = cachedInputTokens;
  llmgatewayUsage.promptTokensDetails = {
- cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
+ cachedTokens: cachedInputTokens
  };
  }
  llmgatewayUsage.completionTokens = value.usage.completion_tokens;
  if (value.usage.completion_tokens_details) {
+ const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
+ usage.reasoningTokens = reasoningTokens;
  llmgatewayUsage.completionTokensDetails = {
- reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
+ reasoningTokens
  };
  }
  llmgatewayUsage.cost = value.usage.cost;
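
Note: usage reporting changes shape here. The v1 pair { promptTokens, completionTokens } becomes { inputTokens, outputTokens, totalTokens, reasoningTokens, cachedInputTokens }, while the gateway's own counters (including cost) keep their old names under providerMetadata.llmgateway.usage. A sketch of the mapping, assuming only the field names visible in this hunk:

// Sketch: mapping the raw API usage payload to the v2 usage shape kept above.
interface RawUsage {
  prompt_tokens: number;
  completion_tokens: number;
  prompt_tokens_details?: { cached_tokens?: number } | null;
  completion_tokens_details?: { reasoning_tokens?: number } | null;
  cost?: number;
}

function toV2Usage(raw: RawUsage) {
  return {
    inputTokens: raw.prompt_tokens,
    outputTokens: raw.completion_tokens,
    // totalTokens is derived exactly as in the transform above.
    totalTokens: raw.prompt_tokens + raw.completion_tokens,
    reasoningTokens: raw.completion_tokens_details?.reasoning_tokens ?? 0,
    cachedInputTokens: raw.prompt_tokens_details?.cached_tokens ?? 0,
  };
}
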
@@ -571,51 +1596,39 @@ var LLMGatewayChatLanguageModel = class {
  return;
  }
  const delta = choice.delta;
- if (delta.content != null) {
- controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
- });
- }
- if (delta.reasoning != null) {
+ const emitReasoningChunk = (chunkText) => {
+ if (!reasoningStarted) {
+ reasoningId = llmgatewayResponseId || generateId();
+ controller.enqueue({
+ type: "reasoning-start",
+ id: reasoningId
+ });
+ reasoningStarted = true;
+ }
  controller.enqueue({
- type: "reasoning",
- textDelta: delta.reasoning
+ type: "reasoning-delta",
+ delta: chunkText,
+ id: reasoningId || generateId()
  });
- }
+ };
  if (delta.reasoning_details && delta.reasoning_details.length > 0) {
  for (const detail of delta.reasoning_details) {
  switch (detail.type) {
  case "reasoning.text" /* Text */: {
  if (detail.text) {
- controller.enqueue({
- type: "reasoning",
- textDelta: detail.text
- });
- }
- if (detail.signature) {
- controller.enqueue({
- type: "reasoning-signature",
- signature: detail.signature
- });
+ emitReasoningChunk(detail.text);
  }
  break;
  }
  case "reasoning.encrypted" /* Encrypted */: {
  if (detail.data) {
- controller.enqueue({
- type: "redacted-reasoning",
- data: detail.data
- });
+ emitReasoningChunk("[REDACTED]");
  }
  break;
  }
  case "reasoning.summary" /* Summary */: {
  if (detail.summary) {
- controller.enqueue({
- type: "reasoning",
- textDelta: detail.summary
- });
+ emitReasoningChunk(detail.summary);
  }
  break;
  }
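
Note: reasoning now streams as a start/delta/end lifecycle keyed by an id (the response id when available) instead of bare reasoning parts. Two behavior changes are visible above: reasoning-signature parts are dropped, and encrypted reasoning details surface as a literal "[REDACTED]" delta rather than a redacted-reasoning part. A sketch of a consumer for the new part types:

// Sketch: accumulating the reasoning parts emitted by the transform above.
type ReasoningPart =
  | { type: "reasoning-start"; id: string }
  | { type: "reasoning-delta"; id: string; delta: string }
  | { type: "reasoning-end"; id: string };

const reasoningById = new Map<string, string>();

function onReasoningPart(part: ReasoningPart): void {
  switch (part.type) {
    case "reasoning-start":
      reasoningById.set(part.id, "");
      break;
    case "reasoning-delta":
      // "[REDACTED]" deltas stand in for encrypted reasoning details.
      reasoningById.set(part.id, (reasoningById.get(part.id) ?? "") + part.delta);
      break;
    case "reasoning-end":
      break;
  }
}
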
@@ -625,19 +1638,27 @@ var LLMGatewayChatLanguageModel = class {
  }
  }
  }
+ } else if (delta.reasoning != null) {
+ emitReasoningChunk(delta.reasoning);
  }
- const mappedLogprobs = mapLLMGatewayChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) {
- logprobs = [];
+ if (delta.content != null) {
+ if (!textStarted) {
+ textId = llmgatewayResponseId || generateId();
+ controller.enqueue({
+ type: "text-start",
+ id: textId
+ });
+ textStarted = true;
  }
- logprobs.push(...mappedLogprobs);
+ controller.enqueue({
+ type: "text-delta",
+ delta: delta.content,
+ id: textId || generateId()
+ });
  }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
- const index = toolCallDelta.index;
+ const index = (_c = toolCallDelta.index) != null ? _c : toolCalls.length - 1;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
  throw new InvalidResponseDataError({
@@ -651,7 +1672,7 @@ var LLMGatewayChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
+ if (((_d = toolCallDelta.function) == null ? void 0 : _d.name) == null) {
  throw new InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -662,28 +1683,36 @@ var LLMGatewayChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+ arguments: (_e = toolCallDelta.function.arguments) != null ? _e : ""
  },
+ inputStarted: false,
  sent: false
  };
  const toolCall2 = toolCalls[index];
  if (toolCall2 == null) {
  throw new Error("Tool call is missing");
  }
- if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
+ if (((_f = toolCall2.function) == null ? void 0 : _f.name) != null && ((_g = toolCall2.function) == null ? void 0 : _g.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
+ toolCall2.inputStarted = true;
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-start",
+ id: toolCall2.id,
+ toolName: toolCall2.function.name
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
  });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_g = toolCall2.id) != null ? _g : generateId(),
+ toolCallId: toolCall2.id,
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.sent = true;
  }
@@ -693,23 +1722,28 @@ var LLMGatewayChatLanguageModel = class {
  if (toolCall == null) {
  throw new Error("Tool call is missing");
  }
+ if (!toolCall.inputStarted) {
+ toolCall.inputStarted = true;
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCall.id,
+ toolName: toolCall.function.name
+ });
+ }
  if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
  toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
  });
  if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.sent = true;
  }
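
Note: tool-call streaming gets the same lifecycle treatment — the single tool-call-delta part is split into tool-input-start / tool-input-delta / tool-input-end, and the final tool-call part renames args to input (still a JSON string). A sketch that accumulates inputs per id under those assumptions:

// Sketch: accumulating streamed tool inputs from the parts emitted above.
type ToolStreamPart =
  | { type: "tool-input-start"; id: string; toolName: string }
  | { type: "tool-input-delta"; id: string; delta: string }
  | { type: "tool-input-end"; id: string }
  | { type: "tool-call"; toolCallId: string; toolName: string; input: string };

const pendingInputs = new Map<string, string>();

function onToolPart(part: ToolStreamPart): void {
  if (part.type === "tool-input-start") {
    pendingInputs.set(part.id, "");
  } else if (part.type === "tool-input-delta") {
    pendingInputs.set(part.id, (pendingInputs.get(part.id) ?? "") + part.delta);
  } else if (part.type === "tool-call") {
    // `input` is the complete JSON argument string (coerced to "{}" when invalid).
    const args = JSON.parse(part.input);
    console.log(part.toolName, args);
  }
}
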
@@ -717,206 +1751,54 @@ var LLMGatewayChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a2;
+ var _a16;
  if (finishReason === "tool-calls") {
  for (const toolCall of toolCalls) {
- if (!toolCall.sent) {
+ if (toolCall && !toolCall.sent) {
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+ toolCallId: (_a16 = toolCall.id) != null ? _a16 : generateId(),
  toolName: toolCall.function.name,
  // Coerce invalid arguments to an empty JSON object
- args: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
+ input: isParsableJson(toolCall.function.arguments) ? toolCall.function.arguments : "{}"
  });
  toolCall.sent = true;
  }
  }
  }
- const providerMetadata = {};
- if (shouldIncludeUsageAccounting && (llmgatewayUsage.totalTokens !== void 0 || llmgatewayUsage.cost !== void 0 || llmgatewayUsage.promptTokensDetails !== void 0 || llmgatewayUsage.completionTokensDetails !== void 0)) {
- providerMetadata.llmgateway = {
- usage: llmgatewayUsage
- };
+ if (textStarted) {
+ controller.enqueue({
+ type: "text-end",
+ id: textId || generateId()
+ });
+ }
+ if (reasoningStarted) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: reasoningId || generateId()
+ });
  }
- const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
- controller.enqueue(__spreadValues({
+ controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
- usage
- }, hasProviderMetadata ? { providerMetadata } : {}));
+ usage,
+ providerMetadata: {
+ llmgateway: {
+ usage: llmgatewayUsage
+ }
+ }
+ });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings: []
+ warnings: [],
+ request: { body: args },
+ response: { headers: responseHeaders }
  };
  }
  };
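
Note: two details in the flush logic above. Open text and reasoning blocks are now closed explicitly with text-end / reasoning-end, and the finish part always carries providerMetadata.llmgateway.usage — in 1.x that metadata appeared only when settings.usage.include was set. A sketch of reading the gateway usage off the finish part:

// Sketch: pulling gateway-level usage (including cost) from the finish part.
interface FinishPart {
  type: "finish";
  finishReason: string;
  usage: { inputTokens: number; outputTokens: number; totalTokens: number };
  providerMetadata?: {
    llmgateway?: { usage?: { cost?: number; totalTokens?: number } };
  };
}

function logCost(part: FinishPart): void {
  // No opt-in flag is needed anymore; the metadata is always attached.
  const cost = part.providerMetadata?.llmgateway?.usage?.cost;
  if (cost !== undefined) {
    console.log(`request cost: $${cost}`);
  }
}
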
- var LLMGatewayChatCompletionBaseResponseSchema = z3.object({
- id: z3.string().optional(),
- model: z3.string().optional(),
- usage: z3.object({
- prompt_tokens: z3.number(),
- prompt_tokens_details: z3.object({
- cached_tokens: z3.number()
- }).nullish(),
- completion_tokens: z3.number(),
- completion_tokens_details: z3.object({
- reasoning_tokens: z3.number()
- }).nullish(),
- total_tokens: z3.number(),
- cost: z3.number().optional()
- }).nullish()
- });
- var LLMGatewayNonStreamChatCompletionResponseSchema = LLMGatewayChatCompletionBaseResponseSchema.extend({
- choices: z3.array(
- z3.object({
- message: z3.object({
- role: z3.literal("assistant"),
- content: z3.string().nullable().optional(),
- reasoning: z3.string().nullable().optional(),
- reasoning_details: ReasoningDetailArraySchema.nullish(),
- tool_calls: z3.array(
- z3.object({
- id: z3.string().optional().nullable(),
- type: z3.literal("function"),
- function: z3.object({
- name: z3.string(),
- arguments: z3.string()
- })
- })
- ).optional()
- }),
- index: z3.number(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
- })
- )
- })
- ).nullable()
- }).nullable().optional(),
- finish_reason: z3.string().optional().nullable()
- })
- )
- });
- var LLMGatewayStreamChatCompletionChunkSchema = z3.union([
- LLMGatewayChatCompletionBaseResponseSchema.extend({
- choices: z3.array(
- z3.object({
- delta: z3.object({
- role: z3.enum(["assistant"]).optional(),
- content: z3.string().nullish(),
- reasoning: z3.string().nullish().optional(),
- reasoning_details: ReasoningDetailArraySchema.nullish(),
- tool_calls: z3.array(
- z3.object({
- index: z3.number(),
- id: z3.string().nullish(),
- type: z3.literal("function").optional(),
- function: z3.object({
- name: z3.string().nullish(),
- arguments: z3.string().nullish()
- })
- })
- ).nullish()
- }).nullish(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
- finish_reason: z3.string().nullable().optional(),
- index: z3.number()
- })
- )
- }),
- LLMGatewayErrorResponseSchema
- ]);
- function prepareToolsAndToolChoice(mode) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
- if (tools == null) {
- return { tools: void 0, tool_choice: void 0 };
- }
- const mappedTools = tools.map((tool) => {
- if (isFunctionTool(tool)) {
- return {
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- }
- };
- }
- return {
- type: "function",
- function: {
- name: tool.name
- }
- };
- });
- const toolChoice = mode.toolChoice;
- if (toolChoice == null) {
- return { tools: mappedTools, tool_choice: void 0 };
- }
- const type = toolChoice.type;
- switch (type) {
- case "auto":
- case "none":
- case "required":
- return { tools: mappedTools, tool_choice: type };
- case "tool":
- return {
- tools: mappedTools,
- tool_choice: {
- type: "function",
- function: {
- name: toolChoice.toolName
- }
- }
- };
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported tool choice type: ${_exhaustiveCheck}`);
- }
- }
- }
-
- // src/llmgateway-completion-language-model.ts
- import { UnsupportedFunctionalityError as UnsupportedFunctionalityError3 } from "@ai-sdk/provider";
- import {
- combineHeaders as combineHeaders2,
- createEventSourceResponseHandler as createEventSourceResponseHandler2,
- createJsonResponseHandler as createJsonResponseHandler2,
- postJsonToApi as postJsonToApi2
- } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";

- // src/convert-to-llmgateway-completion-prompt.ts
- import {
- InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError2
- } from "@ai-sdk/provider";
+ // src/completion/convert-to-llmgateway-completion-prompt.ts
  function convertToLLMGatewayCompletionPrompt({
  prompt,
  inputFormat,
@@ -937,7 +1819,7 @@ function convertToLLMGatewayCompletionPrompt({
  switch (role) {
  case "system": {
  throw new InvalidPromptError({
- message: "Unexpected system message in prompt: ${content}",
+ message: `Unexpected system message in prompt: ${content}`,
  prompt
  });
  }
@@ -947,21 +1829,13 @@ function convertToLLMGatewayCompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new UnsupportedFunctionalityError2({
- functionality: "images"
- });
- }
  case "file": {
- throw new UnsupportedFunctionalityError2({
+ throw new UnsupportedFunctionalityError({
  functionality: "file attachments"
  });
  }
  default: {
- const _exhaustiveCheck = part;
- throw new Error(
- `Unsupported content type: ${_exhaustiveCheck}`
- );
+ return "";
  }
  }
  }).join("");
@@ -972,39 +1846,38 @@ ${userMessage}
  break;
  }
  case "assistant": {
- const assistantMessage = content.map((part) => {
- switch (part.type) {
- case "text": {
- return part.text;
- }
- case "tool-call": {
- throw new UnsupportedFunctionalityError2({
- functionality: "tool-call messages"
- });
- }
- case "reasoning": {
- throw new UnsupportedFunctionalityError2({
- functionality: "reasoning messages"
- });
- }
- case "redacted-reasoning": {
- throw new UnsupportedFunctionalityError2({
- functionality: "redacted reasoning messages"
- });
- }
- case "file": {
- throw new UnsupportedFunctionalityError2({
- functionality: "file attachments"
- });
- }
- default: {
- const _exhaustiveCheck = part;
- throw new Error(
- `Unsupported content type: ${_exhaustiveCheck}`
- );
+ const assistantMessage = content.map(
+ (part) => {
+ switch (part.type) {
+ case "text": {
+ return part.text;
+ }
+ case "tool-call": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "tool-call messages"
+ });
+ }
+ case "tool-result": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "tool-result messages"
+ });
+ }
+ case "reasoning": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "reasoning messages"
+ });
+ }
+ case "file": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "file attachments"
+ });
+ }
+ default: {
+ return "";
+ }
  }
  }
- }).join("");
+ ).join("");
  text += `${assistant}:
${assistantMessage}

@@ -1012,13 +1885,12 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError2({
+ throw new UnsupportedFunctionalityError({
  functionality: "tool messages"
  });
  }
  default: {
- const _exhaustiveCheck = role;
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ break;
  }
  }
  }
@@ -1029,40 +1901,63 @@ ${assistantMessage}
  };
  }

- // src/map-llmgateway-completion-logprobs.ts
- function mapLLMGatewayCompletionLogprobs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => {
- var _a, _b;
- return {
- token,
- logprob: (_a = logprobs.token_logprobs[index]) != null ? _a : 0,
- topLogprobs: logprobs.top_logprobs ? Object.entries((_b = logprobs.top_logprobs[index]) != null ? _b : {}).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- };
- });
- }
+ // src/completion/schemas.ts
+ import { z as z6 } from "zod/v4";
+ var LLMGatewayCompletionChunkSchema = z6.union([
+ z6.object({
+ id: z6.string().optional(),
+ model: z6.string().optional(),
+ choices: z6.array(
+ z6.object({
+ text: z6.string(),
+ reasoning: z6.string().nullish().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ finish_reason: z6.string().nullish(),
+ index: z6.number().nullish(),
+ logprobs: z6.object({
+ tokens: z6.array(z6.string()),
+ token_logprobs: z6.array(z6.number()),
+ top_logprobs: z6.array(z6.record(z6.string(), z6.number())).nullable()
+ }).nullable().optional()
+ })
+ ),
+ usage: z6.object({
+ prompt_tokens: z6.number(),
+ prompt_tokens_details: z6.object({
+ cached_tokens: z6.number()
+ }).nullish(),
+ completion_tokens: z6.number(),
+ completion_tokens_details: z6.object({
+ reasoning_tokens: z6.number()
+ }).nullish(),
+ total_tokens: z6.number(),
+ cost: z6.number().optional()
+ }).nullish()
+ }),
+ LLMGatewayErrorResponseSchema
+ ]);
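
Note: the completion schemas move to src/completion/schemas.ts and are now built against the zod/v4 entry point instead of plain zod. To my knowledge that subpath ships with zod 3.25 and later, so consumers pinning older zod releases may need to upgrade — worth verifying against this version's declared dependencies. The import change in isolation:

// Sketch: the import style the schema file switches to (usage of z is unchanged).
import { z } from "zod/v4";

// A pared-down slice of the usage schema from the hunk above.
const UsageSchema = z.object({
  prompt_tokens: z.number(),
  completion_tokens: z.number(),
  total_tokens: z.number(),
  cost: z.number().optional(),
});

export type Usage = z.infer<typeof UsageSchema>;
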

- // src/llmgateway-completion-language-model.ts
+ // src/completion/index.ts
  var LLMGatewayCompletionLanguageModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.provider = "llmgateway";
+ this.supportedUrls = {
+ "image/*": [
+ /^data:image\/[a-zA-Z]+;base64,/,
+ /^https?:\/\/.+\.(jpg|jpeg|png|gif|webp)$/i
+ ],
+ "text/*": [/^data:text\//, /^https?:\/\/.+$/],
+ "application/*": [/^data:application\//, /^https?:\/\/.+$/]
+ };
  this.defaultObjectGenerationMode = void 0;
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
- get provider() {
- return this.config.provider;
- }
  getArgs({
- mode,
- inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  frequencyPenalty,
@@ -1071,16 +1966,24 @@ var LLMGatewayCompletionLanguageModel = class {
  responseFormat,
  topK,
  stopSequences,
- providerMetadata
+ tools,
+ toolChoice
  }) {
- var _a, _b;
- const type = mode.type;
- const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.llmgateway) != null ? _a : {};
  const { prompt: completionPrompt } = convertToLLMGatewayCompletionPrompt({
  prompt,
- inputFormat
+ inputFormat: "prompt"
  });
- const baseArgs = __spreadValues(__spreadValues(__spreadValues({
+ if (tools == null ? void 0 : tools.length) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "tools"
+ });
+ }
+ if (toolChoice) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "toolChoice"
+ });
+ }
+ return __spreadValues(__spreadValues({
  // model id:
  model: this.modelId,
  models: this.settings.models,
@@ -1090,7 +1993,7 @@ var LLMGatewayCompletionLanguageModel = class {
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1100,59 +2003,31 @@ var LLMGatewayCompletionLanguageModel = class {
  response_format: responseFormat,
  top_k: topK,
  // prompt:
- prompt: completionPrompt
- }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
- switch (type) {
- case "regular": {
- if ((_b = mode.tools) == null ? void 0 : _b.length) {
- throw new UnsupportedFunctionalityError3({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new UnsupportedFunctionalityError3({
- functionality: "toolChoice"
- });
- }
- return baseArgs;
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError3({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new UnsupportedFunctionalityError3({
- functionality: "object-tool mode"
- });
- }
- // Handle all non-text types with a single default case
- default: {
- const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError3({
- functionality: `${_exhaustiveCheck} mode`
- });
- }
- }
+ prompt: completionPrompt,
+ // LLMGateway specific settings:
+ include_reasoning: this.settings.includeReasoning,
+ reasoning: this.settings.reasoning
+ }, this.config.extraBody), this.settings.extraBody);
  }
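
Note: getArgs now receives the v2 call options directly: maxOutputTokens replaces maxTokens (still serialized as max_tokens), the old mode switch is gone, tools and toolChoice are rejected up front, and the gateway-specific includeReasoning / reasoning settings are written into the body. A sketch of the resulting request body — the model id and the flattened prompt rendering are illustrative assumptions:

// Sketch: the kind of body getArgs now assembles for /completions.
const completionPrompt = "user:\nHello\n\nassistant:\n"; // assumed chat-to-text rendering
const body = {
  model: "openai/gpt-4o-mini", // hypothetical model id
  prompt: completionPrompt,
  max_tokens: 256, // mapped from the renamed maxOutputTokens option
  include_reasoning: true, // from settings.includeReasoning
};
console.log(JSON.stringify(body, null, 2));
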
  async doGenerate(options) {
- var _b, _c, _d, _e, _f;
- const args = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi2({
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
+ const providerOptions = options.providerOptions || {};
+ const llmgatewayOptions = providerOptions.llmgateway || {};
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+ const { value: response, responseHeaders } = await postJsonToApi({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: combineHeaders2(this.config.headers(), options.headers),
+ headers: combineHeaders(this.config.headers(), options.headers),
  body: args,
  failedResponseHandler: llmgatewayFailedResponseHandler,
- successfulResponseHandler: createJsonResponseHandler2(
+ successfulResponseHandler: createJsonResponseHandler(
  LLMGatewayCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
  if ("error" in response) {
  throw new Error(`${response.error.message}`);
  }
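
Note: both doGenerate and doStream now spread providerOptions.llmgateway over the computed arguments, so gateway-specific body fields can be injected per request rather than per model. A usage sketch, assuming an AI SDK release matching the v2 provider spec and the conventional callable provider instance; the model id and the extra user field are illustrative:

// Sketch: per-request gateway options merged verbatim into the JSON body.
import { generateText } from "ai";
import { createLLMGateway } from "@llmgateway/ai-sdk-provider";

const llmgateway = createLLMGateway({ apiKey: process.env.LLM_GATEWAY_API_KEY });

const { text } = await generateText({
  model: llmgateway("openai/gpt-4o-mini"),
  prompt: "Say hello.",
  providerOptions: {
    llmgateway: { user: "customer-123" }, // spread into the request body as-is
  },
});
console.log(text);
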
@@ -1161,54 +2036,62 @@ var LLMGatewayCompletionLanguageModel = class {
  throw new Error("No choice in LLMGateway completion response");
  }
  return {
- response: {
- id: response.id,
- modelId: response.model
- },
- text: (_b = choice.text) != null ? _b : "",
- reasoning: choice.reasoning || void 0,
+ content: [
+ {
+ type: "text",
+ text: (_a15 = choice.text) != null ? _a15 : ""
+ }
+ ],
+ finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
- completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0
+ inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : 0,
+ outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : 0,
+ totalTokens: ((_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : 0) + ((_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : 0),
+ reasoningTokens: (_l = (_k = (_j = response.usage) == null ? void 0 : _j.completion_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : 0,
+ cachedInputTokens: (_o = (_n = (_m = response.usage) == null ? void 0 : _m.prompt_tokens_details) == null ? void 0 : _n.cached_tokens) != null ? _o : 0
  },
- finishReason: mapLLMGatewayFinishReason(choice.finish_reason),
- logprobs: mapLLMGatewayCompletionLogprobs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings: []
+ warnings: [],
+ response: {
+ headers: responseHeaders
+ }
  };
  }
  async doStream(options) {
- const args = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi2({
+ const providerOptions = options.providerOptions || {};
+ const llmgatewayOptions = providerOptions.llmgateway || {};
+ const args = __spreadValues(__spreadValues({}, this.getArgs(options)), llmgatewayOptions);
+ const { value: response, responseHeaders } = await postJsonToApi({
  url: this.config.url({
  path: "/completions",
  modelId: this.modelId
  }),
- headers: combineHeaders2(this.config.headers(), options.headers),
- body: __spreadProps(__spreadValues({}, this.getArgs(options)), {
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body: __spreadProps(__spreadValues({}, args), {
  stream: true,
  // only include stream_options when in strict compatibility mode:
  stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
  }),
  failedResponseHandler: llmgatewayFailedResponseHandler,
- successfulResponseHandler: createEventSourceResponseHandler2(
+ successfulResponseHandler: createEventSourceResponseHandler(
  LLMGatewayCompletionChunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const _a = args, { prompt: rawPrompt } = _a, rawSettings = __objRest(_a, ["prompt"]);
  let finishReason = "other";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: Number.NaN,
+ outputTokens: Number.NaN,
+ totalTokens: Number.NaN,
+ reasoningTokens: Number.NaN,
+ cachedInputTokens: Number.NaN
  };
- let logprobs;
+ const llmgatewayUsage = {};
  return {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
+ var _a15, _b;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1221,10 +2104,27 @@ var LLMGatewayCompletionLanguageModel = class {
  return;
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.prompt_tokens + value.usage.completion_tokens;
+ llmgatewayUsage.promptTokens = value.usage.prompt_tokens;
+ if (value.usage.prompt_tokens_details) {
+ const cachedInputTokens = (_a15 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a15 : 0;
+ usage.cachedInputTokens = cachedInputTokens;
+ llmgatewayUsage.promptTokensDetails = {
+ cachedTokens: cachedInputTokens
+ };
+ }
+ llmgatewayUsage.completionTokens = value.usage.completion_tokens;
+ if (value.usage.completion_tokens_details) {
+ const reasoningTokens = (_b = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b : 0;
+ usage.reasoningTokens = reasoningTokens;
+ llmgatewayUsage.completionTokensDetails = {
+ reasoningTokens
+ };
+ }
+ llmgatewayUsage.cost = value.usage.cost;
+ llmgatewayUsage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1233,69 +2133,40 @@ var LLMGatewayCompletionLanguageModel = class {
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
  type: "text-delta",
- textDelta: choice.text
+ delta: choice.text,
+ id: generateId()
  });
  }
- const mappedLogprobs = mapLLMGatewayCompletionLogprobs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) {
- logprobs = [];
- }
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
- usage
+ usage,
+ providerMetadata: {
+ llmgateway: {
+ usage: llmgatewayUsage
+ }
+ }
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings: []
+ response: {
+ headers: responseHeaders
+ }
  };
  }
  };
- var LLMGatewayCompletionChunkSchema = z4.union([
- z4.object({
- id: z4.string().optional(),
- model: z4.string().optional(),
- choices: z4.array(
- z4.object({
- text: z4.string(),
- reasoning: z4.string().nullish().optional(),
- reasoning_details: ReasoningDetailArraySchema.nullish(),
- finish_reason: z4.string().nullish(),
- index: z4.number(),
- logprobs: z4.object({
- tokens: z4.array(z4.string()),
- token_logprobs: z4.array(z4.number()),
- top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
- }).nullable().optional()
- })
- ),
- usage: z4.object({
- prompt_tokens: z4.number(),
- completion_tokens: z4.number()
- }).optional().nullable()
- }),
- LLMGatewayErrorResponseSchema
- ]);

- // src/llmgateway-facade.ts
+ // src/facade.ts
  var LLMGateway = class {
  /**
  * Creates a new LLMGateway provider instance.
  */
  constructor(options = {}) {
- var _a, _b;
- this.baseURL = (_b = withoutTrailingSlash((_a = options.baseURL) != null ? _a : options.baseURL)) != null ? _b : "https://api.llmgateway.io/v1";
+ var _a15, _b;
+ this.baseURL = (_b = withoutTrailingSlash((_a15 = options.baseURL) != null ? _a15 : options.baseUrl)) != null ? _b : "https://api.llmgateway.io/v1";
  this.apiKey = options.apiKey;
  this.headers = options.headers;
  }
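
Note: the constructor now falls back to options.baseUrl (lower-case url) when baseURL is absent. The 1.x expression read options.baseURL ?? options.baseURL — the same property twice — so the lower-case spelling was silently ignored; after this change both spellings should work. A sketch with a hypothetical self-hosted endpoint:

// Sketch: both baseURL spellings are honored after this change.
import { createLLMGateway } from "@llmgateway/ai-sdk-provider";

// Hypothetical self-hosted gateway; withoutTrailingSlash strips the trailing slash.
export const viaUpper = createLLMGateway({ baseURL: "https://gateway.internal/v1/" });
export const viaLower = createLLMGateway({ baseUrl: "https://gateway.internal/v1" });
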
@@ -1305,7 +2176,7 @@ var LLMGateway = class {
  headers: () => __spreadValues({
  Authorization: `Bearer ${loadApiKey({
  apiKey: this.apiKey,
- environmentVariableName: "LLMGATEWAY_API_KEY",
+ environmentVariableName: "LLM_GATEWAY_API_KEY",
  description: "LLMGateway"
  })}`
  }, this.headers)
@@ -1329,16 +2200,15 @@ var LLMGateway = class {
  }
  };

- // src/llmgateway-provider.ts
- import { loadApiKey as loadApiKey2, withoutTrailingSlash as withoutTrailingSlash2 } from "@ai-sdk/provider-utils";
+ // src/provider.ts
  function createLLMGateway(options = {}) {
- var _a, _b, _c;
- const baseURL = (_b = withoutTrailingSlash2((_a = options.baseURL) != null ? _a : options.baseURL)) != null ? _b : "https://api.llmgateway.io/v1";
+ var _a15, _b, _c;
+ const baseURL = (_b = withoutTrailingSlash((_a15 = options.baseURL) != null ? _a15 : options.baseUrl)) != null ? _b : "https://api.llmgateway.io/v1";
  const compatibility = (_c = options.compatibility) != null ? _c : "compatible";
  const getHeaders = () => __spreadValues({
- Authorization: `Bearer ${loadApiKey2({
+ Authorization: `Bearer ${loadApiKey({
  apiKey: options.apiKey,
- environmentVariableName: "LLMGATEWAY_API_KEY",
+ environmentVariableName: "LLM_GATEWAY_API_KEY",
  description: "LLMGateway"
  })}`
  }, options.headers);
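
Note: the API-key environment variable is renamed from LLMGATEWAY_API_KEY to LLM_GATEWAY_API_KEY in both the facade and createLLMGateway, so deployments still exporting only the old name will fail with a missing-key error from loadApiKey. One possible migration bridge (purely a suggestion, not part of the package):

// Sketch: bridging the renamed variable during rollout.
import { createLLMGateway } from "@llmgateway/ai-sdk-provider";

// Read the old name if the new one is unset (remove once deployments are updated).
if (!process.env.LLM_GATEWAY_API_KEY && process.env.LLMGATEWAY_API_KEY) {
  process.env.LLM_GATEWAY_API_KEY = process.env.LLMGATEWAY_API_KEY;
}

const llmgateway = createLLMGateway(); // loadApiKey now resolves LLM_GATEWAY_API_KEY
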