@mastra/rag 0.1.19-alpha.2 → 0.1.19-alpha.4

package/dist/index.cjs CHANGED
@@ -1,14 +1,3980 @@
 'use strict';
 
 var llamaindex = require('llamaindex');
+var zod = require('zod');
 var nodeHtmlBetterParser = require('node-html-better-parser');
 var jsTiktoken = require('js-tiktoken');
 var relevance = require('@mastra/core/relevance');
 var tools = require('@mastra/core/tools');
-var zod = require('zod');
 var ai = require('ai');
 
-// src/document/document.ts
+var __create = Object.create;
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __commonJS = (cb, mod) => function __require() {
+  return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  __defProp(target, "default", { value: mod, enumerable: true }) ,
+  mod
+));
+
+// ../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js
+var require_secure_json_parse = __commonJS({
+  "../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js"(exports, module) {
+    var hasBuffer = typeof Buffer !== "undefined";
+    var suspectProtoRx = /"(?:_|\\u005[Ff])(?:_|\\u005[Ff])(?:p|\\u0070)(?:r|\\u0072)(?:o|\\u006[Ff])(?:t|\\u0074)(?:o|\\u006[Ff])(?:_|\\u005[Ff])(?:_|\\u005[Ff])"\s*:/;
+    var suspectConstructorRx = /"(?:c|\\u0063)(?:o|\\u006[Ff])(?:n|\\u006[Ee])(?:s|\\u0073)(?:t|\\u0074)(?:r|\\u0072)(?:u|\\u0075)(?:c|\\u0063)(?:t|\\u0074)(?:o|\\u006[Ff])(?:r|\\u0072)"\s*:/;
+    function _parse(text, reviver, options) {
+      if (options == null) {
+        if (reviver !== null && typeof reviver === "object") {
+          options = reviver;
+          reviver = void 0;
+        }
+      }
+      if (hasBuffer && Buffer.isBuffer(text)) {
+        text = text.toString();
+      }
+      if (text && text.charCodeAt(0) === 65279) {
+        text = text.slice(1);
+      }
+      const obj = JSON.parse(text, reviver);
+      if (obj === null || typeof obj !== "object") {
+        return obj;
+      }
+      const protoAction = options && options.protoAction || "error";
+      const constructorAction = options && options.constructorAction || "error";
+      if (protoAction === "ignore" && constructorAction === "ignore") {
+        return obj;
+      }
+      if (protoAction !== "ignore" && constructorAction !== "ignore") {
+        if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
+          return obj;
+        }
+      } else if (protoAction !== "ignore" && constructorAction === "ignore") {
+        if (suspectProtoRx.test(text) === false) {
+          return obj;
+        }
+      } else {
+        if (suspectConstructorRx.test(text) === false) {
+          return obj;
+        }
+      }
+      return filter(obj, { protoAction, constructorAction, safe: options && options.safe });
+    }
+    function filter(obj, { protoAction = "error", constructorAction = "error", safe } = {}) {
+      let next = [obj];
+      while (next.length) {
+        const nodes = next;
+        next = [];
+        for (const node of nodes) {
+          if (protoAction !== "ignore" && Object.prototype.hasOwnProperty.call(node, "__proto__")) {
+            if (safe === true) {
+              return null;
+            } else if (protoAction === "error") {
+              throw new SyntaxError("Object contains forbidden prototype property");
+            }
+            delete node.__proto__;
+          }
+          if (constructorAction !== "ignore" && Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
+            if (safe === true) {
+              return null;
+            } else if (constructorAction === "error") {
+              throw new SyntaxError("Object contains forbidden prototype property");
+            }
+            delete node.constructor;
+          }
+          for (const key in node) {
+            const value = node[key];
+            if (value && typeof value === "object") {
+              next.push(value);
+            }
+          }
+        }
+      }
+      return obj;
+    }
+    function parse2(text, reviver, options) {
+      const stackTraceLimit = Error.stackTraceLimit;
+      Error.stackTraceLimit = 0;
+      try {
+        return _parse(text, reviver, options);
+      } finally {
+        Error.stackTraceLimit = stackTraceLimit;
+      }
+    }
+    function safeParse(text, reviver) {
+      const stackTraceLimit = Error.stackTraceLimit;
+      Error.stackTraceLimit = 0;
+      try {
+        return _parse(text, reviver, { safe: true });
+      } catch (_e) {
+        return null;
+      } finally {
+        Error.stackTraceLimit = stackTraceLimit;
+      }
+    }
+    module.exports = parse2;
+    module.exports.default = parse2;
+    module.exports.parse = parse2;
+    module.exports.safeParse = safeParse;
+    module.exports.scan = filter;
+  }
+});
+
+// ../../node_modules/.pnpm/@ai-sdk+provider@1.1.2/node_modules/@ai-sdk/provider/dist/index.mjs
+var marker = "vercel.ai.error";
+var symbol = Symbol.for(marker);
+var _a;
+var _AISDKError = class _AISDKError2 extends Error {
+  /**
+   * Creates an AI SDK Error.
+   *
+   * @param {Object} params - The parameters for creating the error.
+   * @param {string} params.name - The name of the error.
+   * @param {string} params.message - The error message.
+   * @param {unknown} [params.cause] - The underlying cause of the error.
+   */
+  constructor({
+    name: name14,
+    message,
+    cause
+  }) {
+    super(message);
+    this[_a] = true;
+    this.name = name14;
+    this.cause = cause;
+  }
+  /**
+   * Checks if the given error is an AI SDK Error.
+   * @param {unknown} error - The error to check.
+   * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
+   */
+  static isInstance(error) {
+    return _AISDKError2.hasMarker(error, marker);
+  }
+  static hasMarker(error, marker15) {
+    const markerSymbol = Symbol.for(marker15);
+    return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
+  }
+};
+_a = symbol;
+var AISDKError = _AISDKError;
+var name = "AI_APICallError";
+var marker2 = `vercel.ai.error.${name}`;
+var symbol2 = Symbol.for(marker2);
+var _a2;
+var APICallError = class extends AISDKError {
+  constructor({
+    message,
+    url,
+    requestBodyValues,
+    statusCode,
+    responseHeaders,
+    responseBody,
+    cause,
+    isRetryable = statusCode != null && (statusCode === 408 || // request timeout
+    statusCode === 409 || // conflict
+    statusCode === 429 || // too many requests
+    statusCode >= 500),
+    // server error
+    data
+  }) {
+    super({ name, message, cause });
+    this[_a2] = true;
+    this.url = url;
+    this.requestBodyValues = requestBodyValues;
+    this.statusCode = statusCode;
+    this.responseHeaders = responseHeaders;
+    this.responseBody = responseBody;
+    this.isRetryable = isRetryable;
+    this.data = data;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker2);
+  }
+};
+_a2 = symbol2;
+var name2 = "AI_EmptyResponseBodyError";
+var marker3 = `vercel.ai.error.${name2}`;
+var symbol3 = Symbol.for(marker3);
+var _a3;
+var EmptyResponseBodyError = class extends AISDKError {
+  // used in isInstance
+  constructor({ message = "Empty response body" } = {}) {
+    super({ name: name2, message });
+    this[_a3] = true;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker3);
+  }
+};
+_a3 = symbol3;
+function getErrorMessage(error) {
+  if (error == null) {
+    return "unknown error";
+  }
+  if (typeof error === "string") {
+    return error;
+  }
+  if (error instanceof Error) {
+    return error.message;
+  }
+  return JSON.stringify(error);
+}
+var name3 = "AI_InvalidArgumentError";
+var marker4 = `vercel.ai.error.${name3}`;
+var symbol4 = Symbol.for(marker4);
+var _a4;
+var InvalidArgumentError = class extends AISDKError {
+  constructor({
+    message,
+    cause,
+    argument
+  }) {
+    super({ name: name3, message, cause });
+    this[_a4] = true;
+    this.argument = argument;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker4);
+  }
+};
+_a4 = symbol4;
+var name4 = "AI_InvalidPromptError";
+var marker5 = `vercel.ai.error.${name4}`;
+var symbol5 = Symbol.for(marker5);
+var _a5;
+var InvalidPromptError = class extends AISDKError {
+  constructor({
+    prompt,
+    message,
+    cause
+  }) {
+    super({ name: name4, message: `Invalid prompt: ${message}`, cause });
+    this[_a5] = true;
+    this.prompt = prompt;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker5);
+  }
+};
+_a5 = symbol5;
+var name5 = "AI_InvalidResponseDataError";
+var marker6 = `vercel.ai.error.${name5}`;
+var symbol6 = Symbol.for(marker6);
+var _a6;
+var InvalidResponseDataError = class extends AISDKError {
+  constructor({
+    data,
+    message = `Invalid response data: ${JSON.stringify(data)}.`
+  }) {
+    super({ name: name5, message });
+    this[_a6] = true;
+    this.data = data;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker6);
+  }
+};
+_a6 = symbol6;
+var name6 = "AI_JSONParseError";
+var marker7 = `vercel.ai.error.${name6}`;
+var symbol7 = Symbol.for(marker7);
+var _a7;
+var JSONParseError = class extends AISDKError {
+  constructor({ text, cause }) {
+    super({
+      name: name6,
+      message: `JSON parsing failed: Text: ${text}.
+Error message: ${getErrorMessage(cause)}`,
+      cause
+    });
+    this[_a7] = true;
+    this.text = text;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker7);
+  }
+};
+_a7 = symbol7;
+var name7 = "AI_LoadAPIKeyError";
+var marker8 = `vercel.ai.error.${name7}`;
+var symbol8 = Symbol.for(marker8);
+var _a8;
+var LoadAPIKeyError = class extends AISDKError {
+  // used in isInstance
+  constructor({ message }) {
+    super({ name: name7, message });
+    this[_a8] = true;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker8);
+  }
+};
+_a8 = symbol8;
+var name11 = "AI_TooManyEmbeddingValuesForCallError";
+var marker12 = `vercel.ai.error.${name11}`;
+var symbol12 = Symbol.for(marker12);
+var _a12;
+var TooManyEmbeddingValuesForCallError = class extends AISDKError {
+  constructor(options) {
+    super({
+      name: name11,
+      message: `Too many values for a single embedding call. The ${options.provider} model "${options.modelId}" can only embed up to ${options.maxEmbeddingsPerCall} values per call, but ${options.values.length} values were provided.`
+    });
+    this[_a12] = true;
+    this.provider = options.provider;
+    this.modelId = options.modelId;
+    this.maxEmbeddingsPerCall = options.maxEmbeddingsPerCall;
+    this.values = options.values;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker12);
+  }
+};
+_a12 = symbol12;
+var name12 = "AI_TypeValidationError";
+var marker13 = `vercel.ai.error.${name12}`;
+var symbol13 = Symbol.for(marker13);
+var _a13;
+var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
+  constructor({ value, cause }) {
+    super({
+      name: name12,
+      message: `Type validation failed: Value: ${JSON.stringify(value)}.
+Error message: ${getErrorMessage(cause)}`,
+      cause
+    });
+    this[_a13] = true;
+    this.value = value;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker13);
+  }
+  /**
+   * Wraps an error into a TypeValidationError.
+   * If the cause is already a TypeValidationError with the same value, it returns the cause.
+   * Otherwise, it creates a new TypeValidationError.
+   *
+   * @param {Object} params - The parameters for wrapping the error.
+   * @param {unknown} params.value - The value that failed validation.
+   * @param {unknown} params.cause - The original error or cause of the validation failure.
+   * @returns {TypeValidationError} A TypeValidationError instance.
+   */
+  static wrap({
+    value,
+    cause
+  }) {
+    return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
+  }
+};
+_a13 = symbol13;
+var TypeValidationError = _TypeValidationError;
+var name13 = "AI_UnsupportedFunctionalityError";
+var marker14 = `vercel.ai.error.${name13}`;
+var symbol14 = Symbol.for(marker14);
+var _a14;
+var UnsupportedFunctionalityError = class extends AISDKError {
+  constructor({
+    functionality,
+    message = `'${functionality}' functionality not supported.`
+  }) {
+    super({ name: name13, message });
+    this[_a14] = true;
+    this.functionality = functionality;
+  }
+  static isInstance(error) {
+    return AISDKError.hasMarker(error, marker14);
+  }
+};
+_a14 = symbol14;
+
+// ../../node_modules/.pnpm/nanoid@3.3.11/node_modules/nanoid/non-secure/index.js
+var customAlphabet = (alphabet, defaultSize = 21) => {
+  return (size = defaultSize) => {
+    let id = "";
+    let i = size | 0;
+    while (i--) {
+      id += alphabet[Math.random() * alphabet.length | 0];
+    }
+    return id;
+  };
+};
+
+// ../../node_modules/.pnpm/@ai-sdk+provider-utils@2.2.6_zod@3.24.2/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+var import_secure_json_parse = __toESM(require_secure_json_parse());
+function combineHeaders(...headers) {
+  return headers.reduce(
+    (combinedHeaders, currentHeaders) => ({
+      ...combinedHeaders,
+      ...currentHeaders != null ? currentHeaders : {}
+    }),
+    {}
+  );
+}
+function createEventSourceParserStream() {
+  let buffer = "";
+  let event = void 0;
+  let data = [];
+  let lastEventId = void 0;
+  let retry = void 0;
+  function parseLine(line, controller) {
+    if (line === "") {
+      dispatchEvent(controller);
+      return;
+    }
+    if (line.startsWith(":")) {
+      return;
+    }
+    const colonIndex = line.indexOf(":");
+    if (colonIndex === -1) {
+      handleField(line, "");
+      return;
+    }
+    const field = line.slice(0, colonIndex);
+    const valueStart = colonIndex + 1;
+    const value = valueStart < line.length && line[valueStart] === " " ? line.slice(valueStart + 1) : line.slice(valueStart);
+    handleField(field, value);
+  }
+  function dispatchEvent(controller) {
+    if (data.length > 0) {
+      controller.enqueue({
+        event,
+        data: data.join("\n"),
+        id: lastEventId,
+        retry
+      });
+      data = [];
+      event = void 0;
+      retry = void 0;
+    }
+  }
+  function handleField(field, value) {
+    switch (field) {
+      case "event":
+        event = value;
+        break;
+      case "data":
+        data.push(value);
+        break;
+      case "id":
+        lastEventId = value;
+        break;
+      case "retry":
+        const parsedRetry = parseInt(value, 10);
+        if (!isNaN(parsedRetry)) {
+          retry = parsedRetry;
+        }
+        break;
+    }
+  }
+  return new TransformStream({
+    transform(chunk, controller) {
+      const { lines, incompleteLine } = splitLines(buffer, chunk);
+      buffer = incompleteLine;
+      for (let i = 0; i < lines.length; i++) {
+        parseLine(lines[i], controller);
+      }
+    },
+    flush(controller) {
+      parseLine(buffer, controller);
+      dispatchEvent(controller);
+    }
+  });
+}
+function splitLines(buffer, chunk) {
+  const lines = [];
+  let currentLine = buffer;
+  for (let i = 0; i < chunk.length; ) {
+    const char = chunk[i++];
+    if (char === "\n") {
+      lines.push(currentLine);
+      currentLine = "";
+    } else if (char === "\r") {
+      lines.push(currentLine);
+      currentLine = "";
+      if (chunk[i + 1] === "\n") {
+        i++;
+      }
+    } else {
+      currentLine += char;
+    }
+  }
+  return { lines, incompleteLine: currentLine };
+}
+function extractResponseHeaders(response) {
+  const headers = {};
+  response.headers.forEach((value, key) => {
+    headers[key] = value;
+  });
+  return headers;
+}
+var createIdGenerator = ({
+  prefix,
+  size: defaultSize = 16,
+  alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+  separator = "-"
+} = {}) => {
+  const generator = customAlphabet(alphabet, defaultSize);
+  if (prefix == null) {
+    return generator;
+  }
+  if (alphabet.includes(separator)) {
+    throw new InvalidArgumentError({
+      argument: "separator",
+      message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
+    });
+  }
+  return (size) => `${prefix}${separator}${generator(size)}`;
+};
+var generateId = createIdGenerator();
+function removeUndefinedEntries(record) {
+  return Object.fromEntries(
+    Object.entries(record).filter(([_key, value]) => value != null)
+  );
+}
+function isAbortError(error) {
+  return error instanceof Error && (error.name === "AbortError" || error.name === "TimeoutError");
+}
+function loadApiKey({
+  apiKey,
+  environmentVariableName,
+  apiKeyParameterName = "apiKey",
+  description
+}) {
+  if (typeof apiKey === "string") {
+    return apiKey;
+  }
+  if (apiKey != null) {
+    throw new LoadAPIKeyError({
+      message: `${description} API key must be a string.`
+    });
+  }
+  if (typeof process === "undefined") {
+    throw new LoadAPIKeyError({
+      message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
+    });
+  }
+  apiKey = process.env[environmentVariableName];
+  if (apiKey == null) {
+    throw new LoadAPIKeyError({
+      message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
+    });
+  }
+  if (typeof apiKey !== "string") {
+    throw new LoadAPIKeyError({
+      message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
+    });
+  }
+  return apiKey;
+}
+var validatorSymbol = Symbol.for("vercel.ai.validator");
+function validator(validate) {
+  return { [validatorSymbol]: true, validate };
+}
+function isValidator(value) {
+  return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
+}
+function asValidator(value) {
+  return isValidator(value) ? value : zodValidator(value);
+}
+function zodValidator(zodSchema) {
+  return validator((value) => {
+    const result = zodSchema.safeParse(value);
+    return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+  });
+}
+function validateTypes({
+  value,
+  schema: inputSchema
+}) {
+  const result = safeValidateTypes({ value, schema: inputSchema });
+  if (!result.success) {
+    throw TypeValidationError.wrap({ value, cause: result.error });
+  }
+  return result.value;
+}
+function safeValidateTypes({
+  value,
+  schema
+}) {
+  const validator2 = asValidator(schema);
+  try {
+    if (validator2.validate == null) {
+      return { success: true, value };
+    }
+    const result = validator2.validate(value);
+    if (result.success) {
+      return result;
+    }
+    return {
+      success: false,
+      error: TypeValidationError.wrap({ value, cause: result.error })
+    };
+  } catch (error) {
+    return {
+      success: false,
+      error: TypeValidationError.wrap({ value, cause: error })
+    };
+  }
+}
+function parseJSON({
+  text,
+  schema
+}) {
+  try {
+    const value = import_secure_json_parse.default.parse(text);
+    if (schema == null) {
+      return value;
+    }
+    return validateTypes({ value, schema });
+  } catch (error) {
+    if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
+      throw error;
+    }
+    throw new JSONParseError({ text, cause: error });
+  }
+}
+function safeParseJSON({
+  text,
+  schema
+}) {
+  try {
+    const value = import_secure_json_parse.default.parse(text);
+    if (schema == null) {
+      return { success: true, value, rawValue: value };
+    }
+    const validationResult = safeValidateTypes({ value, schema });
+    return validationResult.success ? { ...validationResult, rawValue: value } : validationResult;
+  } catch (error) {
+    return {
+      success: false,
+      error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error })
+    };
+  }
+}
+function isParsableJson(input) {
+  try {
+    import_secure_json_parse.default.parse(input);
+    return true;
+  } catch (e) {
+    return false;
+  }
+}
+function parseProviderOptions({
+  provider,
+  providerOptions,
+  schema
+}) {
+  if ((providerOptions == null ? void 0 : providerOptions[provider]) == null) {
+    return void 0;
+  }
+  const parsedProviderOptions = safeValidateTypes({
+    value: providerOptions[provider],
+    schema
+  });
+  if (!parsedProviderOptions.success) {
+    throw new InvalidArgumentError({
+      argument: "providerOptions",
+      message: `invalid ${provider} provider options`,
+      cause: parsedProviderOptions.error
+    });
+  }
+  return parsedProviderOptions.value;
+}
+var getOriginalFetch2 = () => globalThis.fetch;
+var postJsonToApi = async ({
+  url,
+  headers,
+  body,
+  failedResponseHandler,
+  successfulResponseHandler,
+  abortSignal,
+  fetch
+}) => postToApi({
+  url,
+  headers: {
+    "Content-Type": "application/json",
+    ...headers
+  },
+  body: {
+    content: JSON.stringify(body),
+    values: body
+  },
+  failedResponseHandler,
+  successfulResponseHandler,
+  abortSignal,
+  fetch
+});
+var postFormDataToApi = async ({
+  url,
+  headers,
+  formData,
+  failedResponseHandler,
+  successfulResponseHandler,
+  abortSignal,
+  fetch
+}) => postToApi({
+  url,
+  headers,
+  body: {
+    content: formData,
+    values: Object.fromEntries(formData.entries())
+  },
+  failedResponseHandler,
+  successfulResponseHandler,
+  abortSignal,
+  fetch
+});
+var postToApi = async ({
+  url,
+  headers = {},
+  body,
+  successfulResponseHandler,
+  failedResponseHandler,
+  abortSignal,
+  fetch = getOriginalFetch2()
+}) => {
+  try {
+    const response = await fetch(url, {
+      method: "POST",
+      headers: removeUndefinedEntries(headers),
+      body: body.content,
+      signal: abortSignal
+    });
+    const responseHeaders = extractResponseHeaders(response);
+    if (!response.ok) {
+      let errorInformation;
+      try {
+        errorInformation = await failedResponseHandler({
+          response,
+          url,
+          requestBodyValues: body.values
+        });
+      } catch (error) {
+        if (isAbortError(error) || APICallError.isInstance(error)) {
+          throw error;
+        }
+        throw new APICallError({
+          message: "Failed to process error response",
+          cause: error,
+          statusCode: response.status,
+          url,
+          responseHeaders,
+          requestBodyValues: body.values
+        });
+      }
+      throw errorInformation.value;
+    }
+    try {
+      return await successfulResponseHandler({
+        response,
+        url,
+        requestBodyValues: body.values
+      });
+    } catch (error) {
+      if (error instanceof Error) {
+        if (isAbortError(error) || APICallError.isInstance(error)) {
+          throw error;
+        }
+      }
+      throw new APICallError({
+        message: "Failed to process successful response",
+        cause: error,
+        statusCode: response.status,
+        url,
+        responseHeaders,
+        requestBodyValues: body.values
+      });
+    }
+  } catch (error) {
+    if (isAbortError(error)) {
+      throw error;
+    }
+    if (error instanceof TypeError && error.message === "fetch failed") {
+      const cause = error.cause;
+      if (cause != null) {
+        throw new APICallError({
+          message: `Cannot connect to API: ${cause.message}`,
+          cause,
+          url,
+          requestBodyValues: body.values,
+          isRetryable: true
+          // retry when network error
+        });
+      }
+    }
+    throw error;
+  }
+};
+var createJsonErrorResponseHandler = ({
+  errorSchema,
+  errorToMessage,
+  isRetryable
+}) => async ({ response, url, requestBodyValues }) => {
+  const responseBody = await response.text();
+  const responseHeaders = extractResponseHeaders(response);
+  if (responseBody.trim() === "") {
+    return {
+      responseHeaders,
+      value: new APICallError({
+        message: response.statusText,
+        url,
+        requestBodyValues,
+        statusCode: response.status,
+        responseHeaders,
+        responseBody,
+        isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+      })
+    };
+  }
+  try {
+    const parsedError = parseJSON({
+      text: responseBody,
+      schema: errorSchema
+    });
+    return {
+      responseHeaders,
+      value: new APICallError({
+        message: errorToMessage(parsedError),
+        url,
+        requestBodyValues,
+        statusCode: response.status,
+        responseHeaders,
+        responseBody,
+        data: parsedError,
+        isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
+      })
+    };
+  } catch (parseError) {
+    return {
+      responseHeaders,
+      value: new APICallError({
+        message: response.statusText,
+        url,
+        requestBodyValues,
+        statusCode: response.status,
+        responseHeaders,
+        responseBody,
+        isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+      })
+    };
+  }
+};
+var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
+  const responseHeaders = extractResponseHeaders(response);
+  if (response.body == null) {
+    throw new EmptyResponseBodyError({});
+  }
+  return {
+    responseHeaders,
+    value: response.body.pipeThrough(new TextDecoderStream()).pipeThrough(createEventSourceParserStream()).pipeThrough(
+      new TransformStream({
+        transform({ data }, controller) {
+          if (data === "[DONE]") {
+            return;
+          }
+          controller.enqueue(
+            safeParseJSON({
+              text: data,
+              schema: chunkSchema
+            })
+          );
+        }
+      })
+    )
+  };
+};
+var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
+  const responseBody = await response.text();
+  const parsedResult = safeParseJSON({
+    text: responseBody,
+    schema: responseSchema
+  });
+  const responseHeaders = extractResponseHeaders(response);
+  if (!parsedResult.success) {
+    throw new APICallError({
+      message: "Invalid JSON response",
+      cause: parsedResult.error,
+      statusCode: response.status,
+      responseHeaders,
+      responseBody,
+      url,
+      requestBodyValues
+    });
+  }
+  return {
+    responseHeaders,
+    value: parsedResult.value,
+    rawValue: parsedResult.rawValue
+  };
+};
+var { btoa, atob } = globalThis;
+function convertBase64ToUint8Array(base64String) {
+  const base64Url = base64String.replace(/-/g, "+").replace(/_/g, "/");
+  const latin1string = atob(base64Url);
+  return Uint8Array.from(latin1string, (byte) => byte.codePointAt(0));
+}
+function convertUint8ArrayToBase64(array) {
+  let latin1string = "";
+  for (let i = 0; i < array.length; i++) {
+    latin1string += String.fromCodePoint(array[i]);
+  }
+  return btoa(latin1string);
+}
+function withoutTrailingSlash(url) {
+  return url == null ? void 0 : url.replace(/\/$/, "");
+}
+function convertToOpenAIChatMessages({
+  prompt,
+  useLegacyFunctionCalling = false,
+  systemMessageMode = "system"
+}) {
+  const messages = [];
+  const warnings = [];
+  for (const { role, content } of prompt) {
+    switch (role) {
+      case "system": {
+        switch (systemMessageMode) {
+          case "system": {
+            messages.push({ role: "system", content });
+            break;
+          }
+          case "developer": {
+            messages.push({ role: "developer", content });
+            break;
+          }
+          case "remove": {
+            warnings.push({
+              type: "other",
+              message: "system messages are removed for this model"
+            });
+            break;
+          }
+          default: {
+            const _exhaustiveCheck = systemMessageMode;
+            throw new Error(
+              `Unsupported system message mode: ${_exhaustiveCheck}`
+            );
+          }
+        }
+        break;
+      }
+      case "user": {
+        if (content.length === 1 && content[0].type === "text") {
+          messages.push({ role: "user", content: content[0].text });
+          break;
+        }
+        messages.push({
+          role: "user",
+          content: content.map((part, index) => {
+            var _a15, _b, _c, _d;
+            switch (part.type) {
+              case "text": {
+                return { type: "text", text: part.text };
+              }
+              case "image": {
+                return {
+                  type: "image_url",
+                  image_url: {
+                    url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
+                    // OpenAI specific extension: image detail
+                    detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+                  }
+                };
+              }
+              case "file": {
+                if (part.data instanceof URL) {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: "'File content parts with URL data' functionality not supported."
+                  });
+                }
+                switch (part.mimeType) {
+                  case "audio/wav": {
+                    return {
+                      type: "input_audio",
+                      input_audio: { data: part.data, format: "wav" }
+                    };
+                  }
+                  case "audio/mp3":
+                  case "audio/mpeg": {
+                    return {
+                      type: "input_audio",
+                      input_audio: { data: part.data, format: "mp3" }
+                    };
+                  }
+                  case "application/pdf": {
+                    return {
+                      type: "file",
+                      file: {
+                        filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+                        file_data: `data:application/pdf;base64,${part.data}`
+                      }
+                    };
+                  }
+                  default: {
+                    throw new UnsupportedFunctionalityError({
+                      functionality: `File content part type ${part.mimeType} in user messages`
+                    });
+                  }
+                }
+              }
+            }
+          })
+        });
+        break;
+      }
+      case "assistant": {
+        let text = "";
+        const toolCalls = [];
+        for (const part of content) {
+          switch (part.type) {
+            case "text": {
+              text += part.text;
+              break;
+            }
+            case "tool-call": {
+              toolCalls.push({
+                id: part.toolCallId,
+                type: "function",
+                function: {
+                  name: part.toolName,
+                  arguments: JSON.stringify(part.args)
+                }
+              });
+              break;
+            }
+          }
+        }
+        if (useLegacyFunctionCalling) {
+          if (toolCalls.length > 1) {
+            throw new UnsupportedFunctionalityError({
+              functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
+            });
+          }
+          messages.push({
+            role: "assistant",
+            content: text,
+            function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
+          });
+        } else {
+          messages.push({
+            role: "assistant",
+            content: text,
+            tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+          });
+        }
+        break;
+      }
+      case "tool": {
+        for (const toolResponse of content) {
+          if (useLegacyFunctionCalling) {
+            messages.push({
+              role: "function",
+              name: toolResponse.toolName,
+              content: JSON.stringify(toolResponse.result)
+            });
+          } else {
+            messages.push({
+              role: "tool",
+              tool_call_id: toolResponse.toolCallId,
+              content: JSON.stringify(toolResponse.result)
+            });
+          }
+        }
+        break;
+      }
+      default: {
+        const _exhaustiveCheck = role;
+        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+      }
+    }
+  }
+  return { messages, warnings };
+}
+function mapOpenAIChatLogProbsOutput(logprobs) {
+  var _a15, _b;
+  return (_b = (_a15 = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a15.map(({ token, logprob, top_logprobs }) => ({
+    token,
+    logprob,
+    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
+      token: token2,
+      logprob: logprob2
+    })) : []
+  }))) != null ? _b : void 0;
+}
+function mapOpenAIFinishReason(finishReason) {
+  switch (finishReason) {
+    case "stop":
+      return "stop";
+    case "length":
+      return "length";
+    case "content_filter":
+      return "content-filter";
+    case "function_call":
+    case "tool_calls":
+      return "tool-calls";
+    default:
+      return "unknown";
+  }
+}
+var openaiErrorDataSchema = zod.z.object({
+  error: zod.z.object({
+    message: zod.z.string(),
+    // The additional information below is handled loosely to support
+    // OpenAI-compatible providers that have slightly different error
+    // responses:
+    type: zod.z.string().nullish(),
+    param: zod.z.any().nullish(),
+    code: zod.z.union([zod.z.string(), zod.z.number()]).nullish()
+  })
+});
+var openaiFailedResponseHandler = createJsonErrorResponseHandler({
+  errorSchema: openaiErrorDataSchema,
+  errorToMessage: (data) => data.error.message
+});
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
+}
+function prepareTools({
+  mode,
+  useLegacyFunctionCalling = false,
+  structuredOutputs
+}) {
+  var _a15;
+  const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
+  const toolWarnings = [];
+  if (tools == null) {
+    return { tools: void 0, tool_choice: void 0, toolWarnings };
+  }
+  const toolChoice = mode.toolChoice;
+  if (useLegacyFunctionCalling) {
+    const openaiFunctions = [];
+    for (const tool of tools) {
+      if (tool.type === "provider-defined") {
+        toolWarnings.push({ type: "unsupported-tool", tool });
+      } else {
+        openaiFunctions.push({
+          name: tool.name,
+          description: tool.description,
+          parameters: tool.parameters
+        });
+      }
+    }
+    if (toolChoice == null) {
+      return {
+        functions: openaiFunctions,
+        function_call: void 0,
+        toolWarnings
+      };
+    }
+    const type2 = toolChoice.type;
+    switch (type2) {
+      case "auto":
+      case "none":
+      case void 0:
+        return {
+          functions: openaiFunctions,
+          function_call: void 0,
+          toolWarnings
+        };
+      case "required":
+        throw new UnsupportedFunctionalityError({
+          functionality: "useLegacyFunctionCalling and toolChoice: required"
+        });
+      default:
+        return {
+          functions: openaiFunctions,
+          function_call: { name: toolChoice.toolName },
+          toolWarnings
+        };
+    }
+  }
+  const openaiTools2 = [];
+  for (const tool of tools) {
+    if (tool.type === "provider-defined") {
+      toolWarnings.push({ type: "unsupported-tool", tool });
+    } else {
+      openaiTools2.push({
+        type: "function",
+        function: {
+          name: tool.name,
+          description: tool.description,
+          parameters: tool.parameters,
+          strict: structuredOutputs ? true : void 0
+        }
+      });
+    }
+  }
+  if (toolChoice == null) {
+    return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+  }
+  const type = toolChoice.type;
+  switch (type) {
+    case "auto":
+    case "none":
+    case "required":
+      return { tools: openaiTools2, tool_choice: type, toolWarnings };
+    case "tool":
+      return {
+        tools: openaiTools2,
+        tool_choice: {
+          type: "function",
+          function: {
+            name: toolChoice.toolName
+          }
+        },
+        toolWarnings
+      };
+    default: {
+      const _exhaustiveCheck = type;
+      throw new UnsupportedFunctionalityError({
+        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+      });
+    }
+  }
+}
+var OpenAIChatLanguageModel = class {
+  constructor(modelId, settings, config) {
+    this.specificationVersion = "v1";
+    this.modelId = modelId;
+    this.settings = settings;
+    this.config = config;
+  }
+  get supportsStructuredOutputs() {
+    var _a15;
+    return (_a15 = this.settings.structuredOutputs) != null ? _a15 : isReasoningModel(this.modelId);
+  }
+  get defaultObjectGenerationMode() {
+    if (isAudioModel(this.modelId)) {
+      return "tool";
+    }
+    return this.supportsStructuredOutputs ? "json" : "tool";
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  get supportsImageUrls() {
+    return !this.settings.downloadImages;
+  }
+  getArgs({
+    mode,
+    prompt,
+    maxTokens,
+    temperature,
+    topP,
+    topK,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    responseFormat,
+    seed,
+    providerMetadata
+  }) {
+    var _a15, _b, _c, _d, _e, _f, _g, _h;
+    const type = mode.type;
+    const warnings = [];
+    if (topK != null) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "topK"
+      });
+    }
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "responseFormat",
+        details: "JSON response format schema is only supported with structuredOutputs"
+      });
+    }
+    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
+    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
+      throw new UnsupportedFunctionalityError({
+        functionality: "useLegacyFunctionCalling with parallelToolCalls"
+      });
+    }
+    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
+      throw new UnsupportedFunctionalityError({
+        functionality: "structuredOutputs with useLegacyFunctionCalling"
+      });
+    }
+    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+      {
+        prompt,
+        useLegacyFunctionCalling,
+        systemMessageMode: getSystemMessageMode(this.modelId)
+      }
+    );
+    warnings.push(...messageWarnings);
+    const baseArgs = {
+      // model id:
+      model: this.modelId,
+      // model specific settings:
+      logit_bias: this.settings.logitBias,
+      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+      user: this.settings.user,
+      parallel_tool_calls: this.settings.parallelToolCalls,
+      // standardized settings:
+      max_tokens: maxTokens,
+      temperature,
+      top_p: topP,
+      frequency_penalty: frequencyPenalty,
+      presence_penalty: presencePenalty,
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
+        type: "json_schema",
+        json_schema: {
+          schema: responseFormat.schema,
+          strict: true,
+          name: (_a15 = responseFormat.name) != null ? _a15 : "response",
+          description: responseFormat.description
+        }
+      } : { type: "json_object" } : void 0,
+      stop: stopSequences,
+      seed,
+      // openai specific settings:
+      // TODO remove in next major version; we auto-map maxTokens now
+      max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
+      store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
+      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+      prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
+      reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+      // messages:
+      messages
+    };
+    if (isReasoningModel(this.modelId)) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_p != null) {
+        baseArgs.top_p = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "topP",
+          details: "topP is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.frequency_penalty != null) {
+        baseArgs.frequency_penalty = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "frequencyPenalty",
+          details: "frequencyPenalty is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.presence_penalty != null) {
+        baseArgs.presence_penalty = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "presencePenalty",
+          details: "presencePenalty is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.logit_bias != null) {
+        baseArgs.logit_bias = void 0;
+        warnings.push({
+          type: "other",
+          message: "logitBias is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.logprobs != null) {
+        baseArgs.logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "logprobs is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.top_logprobs != null) {
+        baseArgs.top_logprobs = void 0;
+        warnings.push({
+          type: "other",
+          message: "topLogprobs is not supported for reasoning models"
+        });
+      }
+      if (baseArgs.max_tokens != null) {
+        if (baseArgs.max_completion_tokens == null) {
+          baseArgs.max_completion_tokens = baseArgs.max_tokens;
+        }
+        baseArgs.max_tokens = void 0;
+      }
+    }
+    switch (type) {
+      case "regular": {
+        const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
+          mode,
+          useLegacyFunctionCalling,
+          structuredOutputs: this.supportsStructuredOutputs
+        });
+        return {
+          args: {
+            ...baseArgs,
+            tools,
+            tool_choice,
+            functions,
+            function_call
+          },
+          warnings: [...warnings, ...toolWarnings]
+        };
+      }
+      case "object-json": {
+        return {
+          args: {
+            ...baseArgs,
+            response_format: this.supportsStructuredOutputs && mode.schema != null ? {
+              type: "json_schema",
+              json_schema: {
+                schema: mode.schema,
+                strict: true,
+                name: (_h = mode.name) != null ? _h : "response",
+                description: mode.description
+              }
+            } : { type: "json_object" }
+          },
+          warnings
+        };
+      }
+      case "object-tool": {
+        return {
+          args: useLegacyFunctionCalling ? {
+            ...baseArgs,
+            function_call: {
+              name: mode.tool.name
+            },
+            functions: [
+              {
+                name: mode.tool.name,
+                description: mode.tool.description,
+                parameters: mode.tool.parameters
+              }
+            ]
+          } : {
+            ...baseArgs,
+            tool_choice: {
+              type: "function",
+              function: { name: mode.tool.name }
+            },
+            tools: [
+              {
+                type: "function",
+                function: {
+                  name: mode.tool.name,
+                  description: mode.tool.description,
+                  parameters: mode.tool.parameters,
+                  strict: this.supportsStructuredOutputs ? true : void 0
+                }
+              }
+            ]
+          },
+          warnings
+        };
+      }
+      default: {
+        const _exhaustiveCheck = type;
+        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+      }
+    }
+  }
+  async doGenerate(options) {
+    var _a15, _b, _c, _d, _e, _f, _g, _h;
+    const { args: body, warnings } = this.getArgs(options);
+    const {
+      responseHeaders,
+      value: response,
+      rawValue: rawResponse
+    } = await postJsonToApi({
+      url: this.config.url({
+        path: "/chat/completions",
+        modelId: this.modelId
+      }),
+      headers: combineHeaders(this.config.headers(), options.headers),
+      body,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler(
+        openaiChatResponseSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const { messages: rawPrompt, ...rawSettings } = body;
+    const choice = response.choices[0];
+    const completionTokenDetails = (_a15 = response.usage) == null ? void 0 : _a15.completion_tokens_details;
+    const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+    const providerMetadata = { openai: {} };
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
+      providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+    }
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+      providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+    }
+    if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+      providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+    }
+    if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
+      providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+    }
+    return {
+      text: (_c = choice.message.content) != null ? _c : void 0,
+      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
+        {
+          toolCallType: "function",
+          toolCallId: generateId(),
+          toolName: choice.message.function_call.name,
+          args: choice.message.function_call.arguments
+        }
+      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+        var _a22;
+        return {
+          toolCallType: "function",
+          toolCallId: (_a22 = toolCall.id) != null ? _a22 : generateId(),
+          toolName: toolCall.function.name,
+          args: toolCall.function.arguments
+        };
+      }),
+      finishReason: mapOpenAIFinishReason(choice.finish_reason),
+      usage: {
+        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
+        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+      },
+      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders, body: rawResponse },
+      request: { body: JSON.stringify(body) },
+      response: getResponseMetadata(response),
+      warnings,
+      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
+      providerMetadata
+    };
+  }
+  async doStream(options) {
+    if (this.settings.simulateStreaming) {
+      const result = await this.doGenerate(options);
+      const simulatedStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue({ type: "response-metadata", ...result.response });
+          if (result.text) {
+            controller.enqueue({
+              type: "text-delta",
+              textDelta: result.text
+            });
+          }
+          if (result.toolCalls) {
+            for (const toolCall of result.toolCalls) {
+              controller.enqueue({
+                type: "tool-call-delta",
+                toolCallType: "function",
+                toolCallId: toolCall.toolCallId,
+                toolName: toolCall.toolName,
+                argsTextDelta: toolCall.args
+              });
+              controller.enqueue({
+                type: "tool-call",
+                ...toolCall
+              });
+            }
+          }
+          controller.enqueue({
+            type: "finish",
+            finishReason: result.finishReason,
+            usage: result.usage,
+            logprobs: result.logprobs,
+            providerMetadata: result.providerMetadata
+          });
+          controller.close();
+        }
+      });
+      return {
+        stream: simulatedStream,
+        rawCall: result.rawCall,
+        rawResponse: result.rawResponse,
+        warnings: result.warnings
+      };
+    }
+    const { args, warnings } = this.getArgs(options);
+    const body = {
+      ...args,
+      stream: true,
+      // only include stream_options when in strict compatibility mode:
+      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+    };
+    const { responseHeaders, value: response } = await postJsonToApi({
+      url: this.config.url({
+        path: "/chat/completions",
+        modelId: this.modelId
+      }),
+      headers: combineHeaders(this.config.headers(), options.headers),
+      body,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: createEventSourceResponseHandler(
+        openaiChatChunkSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const { messages: rawPrompt, ...rawSettings } = args;
+    const toolCalls = [];
+    let finishReason = "unknown";
+    let usage = {
+      promptTokens: void 0,
+      completionTokens: void 0
+    };
+    let logprobs;
+    let isFirstChunk = true;
+    const { useLegacyFunctionCalling } = this.settings;
+    const providerMetadata = { openai: {} };
+    return {
+      stream: response.pipeThrough(
+        new TransformStream({
+          transform(chunk, controller) {
+            var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            if (!chunk.success) {
+              finishReason = "error";
+              controller.enqueue({ type: "error", error: chunk.error });
+              return;
+            }
+            const value = chunk.value;
+            if ("error" in value) {
+              finishReason = "error";
+              controller.enqueue({ type: "error", error: value.error });
+              return;
+            }
+            if (isFirstChunk) {
+              isFirstChunk = false;
+              controller.enqueue({
+                type: "response-metadata",
+                ...getResponseMetadata(value)
+              });
+            }
+            if (value.usage != null) {
+              const {
+                prompt_tokens,
+                completion_tokens,
+                prompt_tokens_details,
+                completion_tokens_details
+              } = value.usage;
+              usage = {
+                promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
+                completionTokens: completion_tokens != null ? completion_tokens : void 0
+              };
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+                providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+              }
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+                providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+              }
+              if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+                providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+              }
+              if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+                providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+              }
+            }
+            const choice = value.choices[0];
+            if ((choice == null ? void 0 : choice.finish_reason) != null) {
+              finishReason = mapOpenAIFinishReason(choice.finish_reason);
+            }
+            if ((choice == null ? void 0 : choice.delta) == null) {
+              return;
+            }
+            const delta = choice.delta;
+            if (delta.content != null) {
+              controller.enqueue({
+                type: "text-delta",
+                textDelta: delta.content
+              });
+            }
+            const mappedLogprobs = mapOpenAIChatLogProbsOutput(
+              choice == null ? void 0 : choice.logprobs
+            );
+            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+              if (logprobs === void 0) logprobs = [];
+              logprobs.push(...mappedLogprobs);
+            }
+            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
+              {
+                type: "function",
+                id: generateId(),
+                function: delta.function_call,
+                index: 0
+              }
+            ] : delta.tool_calls;
+            if (mappedToolCalls != null) {
+              for (const toolCallDelta of mappedToolCalls) {
+                const index = toolCallDelta.index;
+                if (toolCalls[index] == null) {
+                  if (toolCallDelta.type !== "function") {
+                    throw new InvalidResponseDataError({
+                      data: toolCallDelta,
+                      message: `Expected 'function' type.`
+                    });
+                  }
+                  if (toolCallDelta.id == null) {
+                    throw new InvalidResponseDataError({
+                      data: toolCallDelta,
+                      message: `Expected 'id' to be a string.`
+                    });
+                  }
+                  if (((_a15 = toolCallDelta.function) == null ? void 0 : _a15.name) == null) {
+                    throw new InvalidResponseDataError({
+                      data: toolCallDelta,
+                      message: `Expected 'function.name' to be a string.`
+                    });
+                  }
+                  toolCalls[index] = {
+                    id: toolCallDelta.id,
+                    type: "function",
+                    function: {
+                      name: toolCallDelta.function.name,
+                      arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                    },
+                    hasFinished: false
+                  };
+                  const toolCall2 = toolCalls[index];
+                  if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+                    if (toolCall2.function.arguments.length > 0) {
+                      controller.enqueue({
+                        type: "tool-call-delta",
+                        toolCallType: "function",
+                        toolCallId: toolCall2.id,
+                        toolName: toolCall2.function.name,
+                        argsTextDelta: toolCall2.function.arguments
+                      });
+                    }
+                    if (isParsableJson(toolCall2.function.arguments)) {
+                      controller.enqueue({
+                        type: "tool-call",
+                        toolCallType: "function",
+                        toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+                        toolName: toolCall2.function.name,
+                        args: toolCall2.function.arguments
+                      });
+                      toolCall2.hasFinished = true;
+                    }
+                  }
+                  continue;
+                }
+                const toolCall = toolCalls[index];
+                if (toolCall.hasFinished) {
+                  continue;
+                }
+                if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+                  toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+                }
+                controller.enqueue({
+                  type: "tool-call-delta",
+                  toolCallType: "function",
+                  toolCallId: toolCall.id,
+                  toolName: toolCall.function.name,
+                  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                });
+                if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                  controller.enqueue({
+                    type: "tool-call",
+                    toolCallType: "function",
+                    toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+                    toolName: toolCall.function.name,
+                    args: toolCall.function.arguments
+                  });
+                  toolCall.hasFinished = true;
+                }
+              }
+            }
+          },
+          flush(controller) {
+            var _a15, _b;
+            controller.enqueue({
+              type: "finish",
+              finishReason,
+              logprobs,
+              usage: {
+                promptTokens: (_a15 = usage.promptTokens) != null ? _a15 : NaN,
+                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+              },
+              ...providerMetadata != null ? { providerMetadata } : {}
+            });
+          }
+        })
+      ),
+      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
+      request: { body: JSON.stringify(body) },
+      warnings
+    };
+  }
+};
1832
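+ // Editor's note (summary of the chat streaming transform above; not shipped code):
+ // tool-call deltas are accumulated per choice `index`; a final "tool-call"
+ // event is emitted as soon as the concatenated arguments parse as JSON
+ // (isParsableJson), and later deltas for that index are then skipped via
+ // `hasFinished`.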
+ var openaiTokenUsageSchema = zod.z.object({
1833
+ prompt_tokens: zod.z.number().nullish(),
1834
+ completion_tokens: zod.z.number().nullish(),
1835
+ prompt_tokens_details: zod.z.object({
1836
+ cached_tokens: zod.z.number().nullish()
1837
+ }).nullish(),
1838
+ completion_tokens_details: zod.z.object({
1839
+ reasoning_tokens: zod.z.number().nullish(),
1840
+ accepted_prediction_tokens: zod.z.number().nullish(),
1841
+ rejected_prediction_tokens: zod.z.number().nullish()
1842
+ }).nullish()
1843
+ }).nullish();
1844
+ var openaiChatResponseSchema = zod.z.object({
1845
+ id: zod.z.string().nullish(),
1846
+ created: zod.z.number().nullish(),
1847
+ model: zod.z.string().nullish(),
1848
+ choices: zod.z.array(
1849
+ zod.z.object({
1850
+ message: zod.z.object({
1851
+ role: zod.z.literal("assistant").nullish(),
1852
+ content: zod.z.string().nullish(),
1853
+ function_call: zod.z.object({
1854
+ arguments: zod.z.string(),
1855
+ name: zod.z.string()
1856
+ }).nullish(),
1857
+ tool_calls: zod.z.array(
1858
+ zod.z.object({
1859
+ id: zod.z.string().nullish(),
1860
+ type: zod.z.literal("function"),
1861
+ function: zod.z.object({
1862
+ name: zod.z.string(),
1863
+ arguments: zod.z.string()
1864
+ })
1865
+ })
1866
+ ).nullish()
1867
+ }),
1868
+ index: zod.z.number(),
1869
+ logprobs: zod.z.object({
1870
+ content: zod.z.array(
1871
+ zod.z.object({
1872
+ token: zod.z.string(),
1873
+ logprob: zod.z.number(),
1874
+ top_logprobs: zod.z.array(
1875
+ zod.z.object({
1876
+ token: zod.z.string(),
1877
+ logprob: zod.z.number()
1878
+ })
1879
+ )
1880
+ })
1881
+ ).nullable()
1882
+ }).nullish(),
1883
+ finish_reason: zod.z.string().nullish()
1884
+ })
1885
+ ),
1886
+ usage: openaiTokenUsageSchema
1887
+ });
1888
+ var openaiChatChunkSchema = zod.z.union([
1889
+ zod.z.object({
1890
+ id: zod.z.string().nullish(),
1891
+ created: zod.z.number().nullish(),
1892
+ model: zod.z.string().nullish(),
1893
+ choices: zod.z.array(
1894
+ zod.z.object({
1895
+ delta: zod.z.object({
1896
+ role: zod.z.enum(["assistant"]).nullish(),
1897
+ content: zod.z.string().nullish(),
1898
+ function_call: zod.z.object({
1899
+ name: zod.z.string().optional(),
1900
+ arguments: zod.z.string().optional()
1901
+ }).nullish(),
1902
+ tool_calls: zod.z.array(
1903
+ zod.z.object({
1904
+ index: zod.z.number(),
1905
+ id: zod.z.string().nullish(),
1906
+ type: zod.z.literal("function").optional(),
1907
+ function: zod.z.object({
1908
+ name: zod.z.string().nullish(),
1909
+ arguments: zod.z.string().nullish()
1910
+ })
1911
+ })
1912
+ ).nullish()
1913
+ }).nullish(),
1914
+ logprobs: zod.z.object({
1915
+ content: zod.z.array(
1916
+ zod.z.object({
1917
+ token: zod.z.string(),
1918
+ logprob: zod.z.number(),
1919
+ top_logprobs: zod.z.array(
1920
+ zod.z.object({
1921
+ token: zod.z.string(),
1922
+ logprob: zod.z.number()
1923
+ })
1924
+ )
1925
+ })
1926
+ ).nullable()
1927
+ }).nullish(),
1928
+ finish_reason: zod.z.string().nullable().optional(),
1929
+ index: zod.z.number()
1930
+ })
1931
+ ),
1932
+ usage: openaiTokenUsageSchema
1933
+ }),
1934
+ openaiErrorDataSchema
1935
+ ]);
1936
+ function isReasoningModel(modelId) {
1937
+ return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
1938
+ }
1939
+ function isAudioModel(modelId) {
1940
+ return modelId.startsWith("gpt-4o-audio-preview");
1941
+ }
1942
+ function getSystemMessageMode(modelId) {
1943
+ var _a15, _b;
1944
+ if (!isReasoningModel(modelId)) {
1945
+ return "system";
1946
+ }
1947
+ return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
1948
+ }
1949
+ var reasoningModels = {
1950
+ "o1-mini": {
1951
+ systemMessageMode: "remove"
1952
+ },
1953
+ "o1-mini-2024-09-12": {
1954
+ systemMessageMode: "remove"
1955
+ },
1956
+ "o1-preview": {
1957
+ systemMessageMode: "remove"
1958
+ },
1959
+ "o1-preview-2024-09-12": {
1960
+ systemMessageMode: "remove"
1961
+ },
1962
+ "o3-mini": {
1963
+ systemMessageMode: "developer"
1964
+ },
1965
+ "o3-mini-2025-01-31": {
1966
+ systemMessageMode: "developer"
1967
+ }
1968
+ };
1969
+ function convertToOpenAICompletionPrompt({
1970
+ prompt,
1971
+ inputFormat,
1972
+ user = "user",
1973
+ assistant = "assistant"
1974
+ }) {
1975
+ if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
1976
+ return { prompt: prompt[0].content[0].text };
1977
+ }
1978
+ let text = "";
1979
+ if (prompt[0].role === "system") {
1980
+ text += `${prompt[0].content}
1981
+
1982
+ `;
1983
+ prompt = prompt.slice(1);
1984
+ }
1985
+ for (const { role, content } of prompt) {
1986
+ switch (role) {
1987
+ case "system": {
1988
+ throw new InvalidPromptError({
1989
+ message: `Unexpected system message in prompt: ${content}`,
1990
+ prompt
1991
+ });
1992
+ }
1993
+ case "user": {
1994
+ const userMessage = content.map((part) => {
1995
+ switch (part.type) {
1996
+ case "text": {
1997
+ return part.text;
1998
+ }
1999
+ case "image": {
2000
+ throw new UnsupportedFunctionalityError({
2001
+ functionality: "images"
2002
+ });
2003
+ }
2004
+ }
2005
+ }).join("");
2006
+ text += `${user}:
2007
+ ${userMessage}
2008
+
2009
+ `;
2010
+ break;
2011
+ }
2012
+ case "assistant": {
2013
+ const assistantMessage = content.map((part) => {
2014
+ switch (part.type) {
2015
+ case "text": {
2016
+ return part.text;
2017
+ }
2018
+ case "tool-call": {
2019
+ throw new UnsupportedFunctionalityError({
2020
+ functionality: "tool-call messages"
2021
+ });
2022
+ }
2023
+ }
2024
+ }).join("");
2025
+ text += `${assistant}:
2026
+ ${assistantMessage}
2027
+
2028
+ `;
2029
+ break;
2030
+ }
2031
+ case "tool": {
2032
+ throw new UnsupportedFunctionalityError({
2033
+ functionality: "tool messages"
2034
+ });
2035
+ }
2036
+ default: {
2037
+ const _exhaustiveCheck = role;
2038
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2039
+ }
2040
+ }
2041
+ }
2042
+ text += `${assistant}:
2043
+ `;
2044
+ return {
2045
+ prompt: text,
2046
+ stopSequences: [`
2047
+ ${user}:`]
2048
+ };
2049
+ }
2050
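+ // Editor's note (illustrative sketch, not part of the published bundle):
+ // convertToOpenAICompletionPrompt flattens a chat-style prompt into a single
+ // completion prompt with role prefixes, e.g. for
+ //   [{ role: "user", content: [{ type: "text", text: "Hi" }] },
+ //    { role: "assistant", content: [{ type: "text", text: "Hello" }] }]
+ // it returns
+ //   { prompt: "user:\nHi\n\nassistant:\nHello\n\nassistant:\n",
+ //     stopSequences: ["\nuser:"] }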
+ function mapOpenAICompletionLogProbs(logprobs) {
2051
+ return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
2052
+ token,
2053
+ logprob: logprobs.token_logprobs[index],
2054
+ topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
2055
+ ([token2, logprob]) => ({
2056
+ token: token2,
2057
+ logprob
2058
+ })
2059
+ ) : []
2060
+ }));
2061
+ }
2062
+ var OpenAICompletionLanguageModel = class {
2063
+ constructor(modelId, settings, config) {
2064
+ this.specificationVersion = "v1";
2065
+ this.defaultObjectGenerationMode = void 0;
2066
+ this.modelId = modelId;
2067
+ this.settings = settings;
2068
+ this.config = config;
2069
+ }
2070
+ get provider() {
2071
+ return this.config.provider;
2072
+ }
2073
+ getArgs({
2074
+ mode,
2075
+ inputFormat,
2076
+ prompt,
2077
+ maxTokens,
2078
+ temperature,
2079
+ topP,
2080
+ topK,
2081
+ frequencyPenalty,
2082
+ presencePenalty,
2083
+ stopSequences: userStopSequences,
2084
+ responseFormat,
2085
+ seed
2086
+ }) {
2087
+ var _a15;
2088
+ const type = mode.type;
2089
+ const warnings = [];
2090
+ if (topK != null) {
2091
+ warnings.push({
2092
+ type: "unsupported-setting",
2093
+ setting: "topK"
2094
+ });
2095
+ }
2096
+ if (responseFormat != null && responseFormat.type !== "text") {
2097
+ warnings.push({
2098
+ type: "unsupported-setting",
2099
+ setting: "responseFormat",
2100
+ details: "JSON response format is not supported."
2101
+ });
2102
+ }
2103
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
2104
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
2105
+ const baseArgs = {
2106
+ // model id:
2107
+ model: this.modelId,
2108
+ // model specific settings:
2109
+ echo: this.settings.echo,
2110
+ logit_bias: this.settings.logitBias,
2111
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
2112
+ suffix: this.settings.suffix,
2113
+ user: this.settings.user,
2114
+ // standardized settings:
2115
+ max_tokens: maxTokens,
2116
+ temperature,
2117
+ top_p: topP,
2118
+ frequency_penalty: frequencyPenalty,
2119
+ presence_penalty: presencePenalty,
2120
+ seed,
2121
+ // prompt:
2122
+ prompt: completionPrompt,
2123
+ // stop sequences:
2124
+ stop: stop.length > 0 ? stop : void 0
2125
+ };
2126
+ switch (type) {
2127
+ case "regular": {
2128
+ if ((_a15 = mode.tools) == null ? void 0 : _a15.length) {
2129
+ throw new UnsupportedFunctionalityError({
2130
+ functionality: "tools"
2131
+ });
2132
+ }
2133
+ if (mode.toolChoice) {
2134
+ throw new UnsupportedFunctionalityError({
2135
+ functionality: "toolChoice"
2136
+ });
2137
+ }
2138
+ return { args: baseArgs, warnings };
2139
+ }
2140
+ case "object-json": {
2141
+ throw new UnsupportedFunctionalityError({
2142
+ functionality: "object-json mode"
2143
+ });
2144
+ }
2145
+ case "object-tool": {
2146
+ throw new UnsupportedFunctionalityError({
2147
+ functionality: "object-tool mode"
2148
+ });
2149
+ }
2150
+ default: {
2151
+ const _exhaustiveCheck = type;
2152
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2153
+ }
2154
+ }
2155
+ }
2156
+ async doGenerate(options) {
2157
+ const { args, warnings } = this.getArgs(options);
2158
+ const {
2159
+ responseHeaders,
2160
+ value: response,
2161
+ rawValue: rawResponse
2162
+ } = await postJsonToApi({
2163
+ url: this.config.url({
2164
+ path: "/completions",
2165
+ modelId: this.modelId
2166
+ }),
2167
+ headers: combineHeaders(this.config.headers(), options.headers),
2168
+ body: args,
2169
+ failedResponseHandler: openaiFailedResponseHandler,
2170
+ successfulResponseHandler: createJsonResponseHandler(
2171
+ openaiCompletionResponseSchema
2172
+ ),
2173
+ abortSignal: options.abortSignal,
2174
+ fetch: this.config.fetch
2175
+ });
2176
+ const { prompt: rawPrompt, ...rawSettings } = args;
2177
+ const choice = response.choices[0];
2178
+ return {
2179
+ text: choice.text,
2180
+ usage: {
2181
+ promptTokens: response.usage.prompt_tokens,
2182
+ completionTokens: response.usage.completion_tokens
2183
+ },
2184
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
2185
+ logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
2186
+ rawCall: { rawPrompt, rawSettings },
2187
+ rawResponse: { headers: responseHeaders, body: rawResponse },
2188
+ response: getResponseMetadata(response),
2189
+ warnings,
2190
+ request: { body: JSON.stringify(args) }
2191
+ };
2192
+ }
2193
+ async doStream(options) {
2194
+ const { args, warnings } = this.getArgs(options);
2195
+ const body = {
2196
+ ...args,
2197
+ stream: true,
2198
+ // only include stream_options when in strict compatibility mode:
2199
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
2200
+ };
2201
+ const { responseHeaders, value: response } = await postJsonToApi({
2202
+ url: this.config.url({
2203
+ path: "/completions",
2204
+ modelId: this.modelId
2205
+ }),
2206
+ headers: combineHeaders(this.config.headers(), options.headers),
2207
+ body,
2208
+ failedResponseHandler: openaiFailedResponseHandler,
2209
+ successfulResponseHandler: createEventSourceResponseHandler(
2210
+ openaiCompletionChunkSchema
2211
+ ),
2212
+ abortSignal: options.abortSignal,
2213
+ fetch: this.config.fetch
2214
+ });
2215
+ const { prompt: rawPrompt, ...rawSettings } = args;
2216
+ let finishReason = "unknown";
2217
+ let usage = {
2218
+ promptTokens: Number.NaN,
2219
+ completionTokens: Number.NaN
2220
+ };
2221
+ let logprobs;
2222
+ let isFirstChunk = true;
2223
+ return {
2224
+ stream: response.pipeThrough(
2225
+ new TransformStream({
2226
+ transform(chunk, controller) {
2227
+ if (!chunk.success) {
2228
+ finishReason = "error";
2229
+ controller.enqueue({ type: "error", error: chunk.error });
2230
+ return;
2231
+ }
2232
+ const value = chunk.value;
2233
+ if ("error" in value) {
2234
+ finishReason = "error";
2235
+ controller.enqueue({ type: "error", error: value.error });
2236
+ return;
2237
+ }
2238
+ if (isFirstChunk) {
2239
+ isFirstChunk = false;
2240
+ controller.enqueue({
2241
+ type: "response-metadata",
2242
+ ...getResponseMetadata(value)
2243
+ });
2244
+ }
2245
+ if (value.usage != null) {
2246
+ usage = {
2247
+ promptTokens: value.usage.prompt_tokens,
2248
+ completionTokens: value.usage.completion_tokens
2249
+ };
2250
+ }
2251
+ const choice = value.choices[0];
2252
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
2253
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
2254
+ }
2255
+ if ((choice == null ? void 0 : choice.text) != null) {
2256
+ controller.enqueue({
2257
+ type: "text-delta",
2258
+ textDelta: choice.text
2259
+ });
2260
+ }
2261
+ const mappedLogprobs = mapOpenAICompletionLogProbs(
2262
+ choice == null ? void 0 : choice.logprobs
2263
+ );
2264
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
2265
+ if (logprobs === void 0) logprobs = [];
2266
+ logprobs.push(...mappedLogprobs);
2267
+ }
2268
+ },
2269
+ flush(controller) {
2270
+ controller.enqueue({
2271
+ type: "finish",
2272
+ finishReason,
2273
+ logprobs,
2274
+ usage
2275
+ });
2276
+ }
2277
+ })
2278
+ ),
2279
+ rawCall: { rawPrompt, rawSettings },
2280
+ rawResponse: { headers: responseHeaders },
2281
+ warnings,
2282
+ request: { body: JSON.stringify(body) }
2283
+ };
2284
+ }
2285
+ };
2286
+ var openaiCompletionResponseSchema = zod.z.object({
2287
+ id: zod.z.string().nullish(),
2288
+ created: zod.z.number().nullish(),
2289
+ model: zod.z.string().nullish(),
2290
+ choices: zod.z.array(
2291
+ zod.z.object({
2292
+ text: zod.z.string(),
2293
+ finish_reason: zod.z.string(),
2294
+ logprobs: zod.z.object({
2295
+ tokens: zod.z.array(zod.z.string()),
2296
+ token_logprobs: zod.z.array(zod.z.number()),
2297
+ top_logprobs: zod.z.array(zod.z.record(zod.z.string(), zod.z.number())).nullable()
2298
+ }).nullish()
2299
+ })
2300
+ ),
2301
+ usage: zod.z.object({
2302
+ prompt_tokens: zod.z.number(),
2303
+ completion_tokens: zod.z.number()
2304
+ })
2305
+ });
2306
+ var openaiCompletionChunkSchema = zod.z.union([
2307
+ zod.z.object({
2308
+ id: zod.z.string().nullish(),
2309
+ created: zod.z.number().nullish(),
2310
+ model: zod.z.string().nullish(),
2311
+ choices: zod.z.array(
2312
+ zod.z.object({
2313
+ text: zod.z.string(),
2314
+ finish_reason: zod.z.string().nullish(),
2315
+ index: zod.z.number(),
2316
+ logprobs: zod.z.object({
2317
+ tokens: zod.z.array(zod.z.string()),
2318
+ token_logprobs: zod.z.array(zod.z.number()),
2319
+ top_logprobs: zod.z.array(zod.z.record(zod.z.string(), zod.z.number())).nullable()
2320
+ }).nullish()
2321
+ })
2322
+ ),
2323
+ usage: zod.z.object({
2324
+ prompt_tokens: zod.z.number(),
2325
+ completion_tokens: zod.z.number()
2326
+ }).nullish()
2327
+ }),
2328
+ openaiErrorDataSchema
2329
+ ]);
2330
+ var OpenAIEmbeddingModel = class {
2331
+ constructor(modelId, settings, config) {
2332
+ this.specificationVersion = "v1";
2333
+ this.modelId = modelId;
2334
+ this.settings = settings;
2335
+ this.config = config;
2336
+ }
2337
+ get provider() {
2338
+ return this.config.provider;
2339
+ }
2340
+ get maxEmbeddingsPerCall() {
2341
+ var _a15;
2342
+ return (_a15 = this.settings.maxEmbeddingsPerCall) != null ? _a15 : 2048;
2343
+ }
2344
+ get supportsParallelCalls() {
2345
+ var _a15;
2346
+ return (_a15 = this.settings.supportsParallelCalls) != null ? _a15 : true;
2347
+ }
2348
+ async doEmbed({
2349
+ values,
2350
+ headers,
2351
+ abortSignal
2352
+ }) {
2353
+ if (values.length > this.maxEmbeddingsPerCall) {
2354
+ throw new TooManyEmbeddingValuesForCallError({
2355
+ provider: this.provider,
2356
+ modelId: this.modelId,
2357
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
2358
+ values
2359
+ });
2360
+ }
2361
+ const { responseHeaders, value: response } = await postJsonToApi({
2362
+ url: this.config.url({
2363
+ path: "/embeddings",
2364
+ modelId: this.modelId
2365
+ }),
2366
+ headers: combineHeaders(this.config.headers(), headers),
2367
+ body: {
2368
+ model: this.modelId,
2369
+ input: values,
2370
+ encoding_format: "float",
2371
+ dimensions: this.settings.dimensions,
2372
+ user: this.settings.user
2373
+ },
2374
+ failedResponseHandler: openaiFailedResponseHandler,
2375
+ successfulResponseHandler: createJsonResponseHandler(
2376
+ openaiTextEmbeddingResponseSchema
2377
+ ),
2378
+ abortSignal,
2379
+ fetch: this.config.fetch
2380
+ });
2381
+ return {
2382
+ embeddings: response.data.map((item) => item.embedding),
2383
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
2384
+ rawResponse: { headers: responseHeaders }
2385
+ };
2386
+ }
2387
+ };
2388
+ var openaiTextEmbeddingResponseSchema = zod.z.object({
2389
+ data: zod.z.array(zod.z.object({ embedding: zod.z.array(zod.z.number()) })),
2390
+ usage: zod.z.object({ prompt_tokens: zod.z.number() }).nullish()
2391
+ });
2392
+ var modelMaxImagesPerCall = {
2393
+ "dall-e-3": 1,
2394
+ "dall-e-2": 10
2395
+ };
2396
+ var OpenAIImageModel = class {
2397
+ constructor(modelId, settings, config) {
2398
+ this.modelId = modelId;
2399
+ this.settings = settings;
2400
+ this.config = config;
2401
+ this.specificationVersion = "v1";
2402
+ }
2403
+ get maxImagesPerCall() {
2404
+ var _a15, _b;
2405
+ return (_b = (_a15 = this.settings.maxImagesPerCall) != null ? _a15 : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
2406
+ }
2407
+ get provider() {
2408
+ return this.config.provider;
2409
+ }
2410
+ async doGenerate({
2411
+ prompt,
2412
+ n,
2413
+ size,
2414
+ aspectRatio,
2415
+ seed,
2416
+ providerOptions,
2417
+ headers,
2418
+ abortSignal
2419
+ }) {
2420
+ var _a15, _b, _c, _d;
2421
+ const warnings = [];
2422
+ if (aspectRatio != null) {
2423
+ warnings.push({
2424
+ type: "unsupported-setting",
2425
+ setting: "aspectRatio",
2426
+ details: "This model does not support aspect ratio. Use `size` instead."
2427
+ });
2428
+ }
2429
+ if (seed != null) {
2430
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
2431
+ }
2432
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
2433
+ const { value: response, responseHeaders } = await postJsonToApi({
2434
+ url: this.config.url({
2435
+ path: "/images/generations",
2436
+ modelId: this.modelId
2437
+ }),
2438
+ headers: combineHeaders(this.config.headers(), headers),
2439
+ body: {
2440
+ model: this.modelId,
2441
+ prompt,
2442
+ n,
2443
+ size,
2444
+ ...(_d = providerOptions.openai) != null ? _d : {},
2445
+ response_format: "b64_json"
2446
+ },
2447
+ failedResponseHandler: openaiFailedResponseHandler,
2448
+ successfulResponseHandler: createJsonResponseHandler(
2449
+ openaiImageResponseSchema
2450
+ ),
2451
+ abortSignal,
2452
+ fetch: this.config.fetch
2453
+ });
2454
+ return {
2455
+ images: response.data.map((item) => item.b64_json),
2456
+ warnings,
2457
+ response: {
2458
+ timestamp: currentDate,
2459
+ modelId: this.modelId,
2460
+ headers: responseHeaders
2461
+ }
2462
+ };
2463
+ }
2464
+ };
2465
+ var openaiImageResponseSchema = zod.z.object({
2466
+ data: zod.z.array(zod.z.object({ b64_json: zod.z.string() }))
2467
+ });
2468
+ var OpenAIProviderOptionsSchema = zod.z.object({
2469
+ include: zod.z.array(zod.z.string()).optional().describe(
2470
+ "Additional information to include in the transcription response."
2471
+ ),
2472
+ language: zod.z.string().optional().describe("The language of the input audio in ISO-639-1 format."),
2473
+ prompt: zod.z.string().optional().describe(
2474
+ "An optional text to guide the model's style or continue a previous audio segment."
2475
+ ),
2476
+ temperature: zod.z.number().min(0).max(1).optional().default(0).describe("The sampling temperature, between 0 and 1."),
2477
+ timestampGranularities: zod.z.array(zod.z.enum(["word", "segment"])).optional().default(["segment"]).describe(
2478
+ "The timestamp granularities to populate for this transcription."
2479
+ )
2480
+ });
2481
+ var languageMap = {
2482
+ afrikaans: "af",
2483
+ arabic: "ar",
2484
+ armenian: "hy",
2485
+ azerbaijani: "az",
2486
+ belarusian: "be",
2487
+ bosnian: "bs",
2488
+ bulgarian: "bg",
2489
+ catalan: "ca",
2490
+ chinese: "zh",
2491
+ croatian: "hr",
2492
+ czech: "cs",
2493
+ danish: "da",
2494
+ dutch: "nl",
2495
+ english: "en",
2496
+ estonian: "et",
2497
+ finnish: "fi",
2498
+ french: "fr",
2499
+ galician: "gl",
2500
+ german: "de",
2501
+ greek: "el",
2502
+ hebrew: "he",
2503
+ hindi: "hi",
2504
+ hungarian: "hu",
2505
+ icelandic: "is",
2506
+ indonesian: "id",
2507
+ italian: "it",
2508
+ japanese: "ja",
2509
+ kannada: "kn",
2510
+ kazakh: "kk",
2511
+ korean: "ko",
2512
+ latvian: "lv",
2513
+ lithuanian: "lt",
2514
+ macedonian: "mk",
2515
+ malay: "ms",
2516
+ marathi: "mr",
2517
+ maori: "mi",
2518
+ nepali: "ne",
2519
+ norwegian: "no",
2520
+ persian: "fa",
2521
+ polish: "pl",
2522
+ portuguese: "pt",
2523
+ romanian: "ro",
2524
+ russian: "ru",
2525
+ serbian: "sr",
2526
+ slovak: "sk",
2527
+ slovenian: "sl",
2528
+ spanish: "es",
2529
+ swahili: "sw",
2530
+ swedish: "sv",
2531
+ tagalog: "tl",
2532
+ tamil: "ta",
2533
+ thai: "th",
2534
+ turkish: "tr",
2535
+ ukrainian: "uk",
2536
+ urdu: "ur",
2537
+ vietnamese: "vi",
2538
+ welsh: "cy"
2539
+ };
2540
+ var OpenAITranscriptionModel = class {
2541
+ constructor(modelId, config) {
2542
+ this.modelId = modelId;
2543
+ this.config = config;
2544
+ this.specificationVersion = "v1";
2545
+ }
2546
+ get provider() {
2547
+ return this.config.provider;
2548
+ }
2549
+ getArgs({
2550
+ audio,
2551
+ mediaType,
2552
+ providerOptions
2553
+ }) {
2554
+ const warnings = [];
2555
+ const openAIOptions = parseProviderOptions({
2556
+ provider: "openai",
2557
+ providerOptions,
2558
+ schema: OpenAIProviderOptionsSchema
2559
+ });
2560
+ const formData = new FormData();
2561
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
2562
+ formData.append("model", this.modelId);
2563
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
2564
+ if (openAIOptions) {
2565
+ const transcriptionModelOptions = {
2566
+ include: openAIOptions.include,
2567
+ language: openAIOptions.language,
2568
+ prompt: openAIOptions.prompt,
2569
+ temperature: openAIOptions.temperature,
2570
+ timestamp_granularities: openAIOptions.timestampGranularities
2571
+ };
2572
+ for (const key in transcriptionModelOptions) {
2573
+ const value = transcriptionModelOptions[key];
2574
+ if (value !== void 0) {
2575
+ formData.append(key, value);
2576
+ }
2577
+ }
2578
+ }
2579
+ return {
2580
+ formData,
2581
+ warnings
2582
+ };
2583
+ }
2584
+ async doGenerate(options) {
2585
+ var _a15, _b, _c, _d, _e, _f;
2586
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
2587
+ const { formData, warnings } = this.getArgs(options);
2588
+ const {
2589
+ value: response,
2590
+ responseHeaders,
2591
+ rawValue: rawResponse
2592
+ } = await postFormDataToApi({
2593
+ url: this.config.url({
2594
+ path: "/audio/transcriptions",
2595
+ modelId: this.modelId
2596
+ }),
2597
+ headers: combineHeaders(this.config.headers(), options.headers),
2598
+ formData,
2599
+ failedResponseHandler: openaiFailedResponseHandler,
2600
+ successfulResponseHandler: createJsonResponseHandler(
2601
+ openaiTranscriptionResponseSchema
2602
+ ),
2603
+ abortSignal: options.abortSignal,
2604
+ fetch: this.config.fetch
2605
+ });
2606
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
2607
+ return {
2608
+ text: response.text,
2609
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
2610
+ text: word.word,
2611
+ startSecond: word.start,
2612
+ endSecond: word.end
2613
+ }))) != null ? _e : [],
2614
+ language,
2615
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
2616
+ warnings,
2617
+ response: {
2618
+ timestamp: currentDate,
2619
+ modelId: this.modelId,
2620
+ headers: responseHeaders,
2621
+ body: rawResponse
2622
+ }
2623
+ };
2624
+ }
2625
+ };
2626
+ var openaiTranscriptionResponseSchema = zod.z.object({
2627
+ text: zod.z.string(),
2628
+ language: zod.z.string().nullish(),
2629
+ duration: zod.z.number().nullish(),
2630
+ words: zod.z.array(
2631
+ zod.z.object({
2632
+ word: zod.z.string(),
2633
+ start: zod.z.number(),
2634
+ end: zod.z.number()
2635
+ })
2636
+ ).nullish()
2637
+ });
2638
+ function convertToOpenAIResponsesMessages({
2639
+ prompt,
2640
+ systemMessageMode
2641
+ }) {
2642
+ const messages = [];
2643
+ const warnings = [];
2644
+ for (const { role, content } of prompt) {
2645
+ switch (role) {
2646
+ case "system": {
2647
+ switch (systemMessageMode) {
2648
+ case "system": {
2649
+ messages.push({ role: "system", content });
2650
+ break;
2651
+ }
2652
+ case "developer": {
2653
+ messages.push({ role: "developer", content });
2654
+ break;
2655
+ }
2656
+ case "remove": {
2657
+ warnings.push({
2658
+ type: "other",
2659
+ message: "system messages are removed for this model"
2660
+ });
2661
+ break;
2662
+ }
2663
+ default: {
2664
+ const _exhaustiveCheck = systemMessageMode;
2665
+ throw new Error(
2666
+ `Unsupported system message mode: ${_exhaustiveCheck}`
2667
+ );
2668
+ }
2669
+ }
2670
+ break;
2671
+ }
2672
+ case "user": {
2673
+ messages.push({
2674
+ role: "user",
2675
+ content: content.map((part, index) => {
2676
+ var _a15, _b, _c, _d;
2677
+ switch (part.type) {
2678
+ case "text": {
2679
+ return { type: "input_text", text: part.text };
2680
+ }
2681
+ case "image": {
2682
+ return {
2683
+ type: "input_image",
2684
+ image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
2685
+ // OpenAI specific extension: image detail
2686
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
2687
+ };
2688
+ }
2689
+ case "file": {
2690
+ if (part.data instanceof URL) {
2691
+ throw new UnsupportedFunctionalityError({
2692
+ functionality: "File URLs in user messages"
2693
+ });
2694
+ }
2695
+ switch (part.mimeType) {
2696
+ case "application/pdf": {
2697
+ return {
2698
+ type: "input_file",
2699
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
2700
+ file_data: `data:application/pdf;base64,${part.data}`
2701
+ };
2702
+ }
2703
+ default: {
2704
+ throw new UnsupportedFunctionalityError({
2705
+ functionality: "Only PDF files are supported in user messages"
2706
+ });
2707
+ }
2708
+ }
2709
+ }
2710
+ }
2711
+ })
2712
+ });
2713
+ break;
2714
+ }
2715
+ case "assistant": {
2716
+ for (const part of content) {
2717
+ switch (part.type) {
2718
+ case "text": {
2719
+ messages.push({
2720
+ role: "assistant",
2721
+ content: [{ type: "output_text", text: part.text }]
2722
+ });
2723
+ break;
2724
+ }
2725
+ case "tool-call": {
2726
+ messages.push({
2727
+ type: "function_call",
2728
+ call_id: part.toolCallId,
2729
+ name: part.toolName,
2730
+ arguments: JSON.stringify(part.args)
2731
+ });
2732
+ break;
2733
+ }
2734
+ }
2735
+ }
2736
+ break;
2737
+ }
2738
+ case "tool": {
2739
+ for (const part of content) {
2740
+ messages.push({
2741
+ type: "function_call_output",
2742
+ call_id: part.toolCallId,
2743
+ output: JSON.stringify(part.result)
2744
+ });
2745
+ }
2746
+ break;
2747
+ }
2748
+ default: {
2749
+ const _exhaustiveCheck = role;
2750
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2751
+ }
2752
+ }
2753
+ }
2754
+ return { messages, warnings };
2755
+ }
2756
+ function mapOpenAIResponseFinishReason({
2757
+ finishReason,
2758
+ hasToolCalls
2759
+ }) {
2760
+ switch (finishReason) {
2761
+ case void 0:
2762
+ case null:
2763
+ return hasToolCalls ? "tool-calls" : "stop";
2764
+ case "max_output_tokens":
2765
+ return "length";
2766
+ case "content_filter":
2767
+ return "content-filter";
2768
+ default:
2769
+ return hasToolCalls ? "tool-calls" : "unknown";
2770
+ }
2771
+ }
2772
+ function prepareResponsesTools({
2773
+ mode,
2774
+ strict
2775
+ }) {
2776
+ var _a15;
2777
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
2778
+ const toolWarnings = [];
2779
+ if (tools == null) {
2780
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
2781
+ }
2782
+ const toolChoice = mode.toolChoice;
2783
+ const openaiTools2 = [];
2784
+ for (const tool of tools) {
2785
+ switch (tool.type) {
2786
+ case "function":
2787
+ openaiTools2.push({
2788
+ type: "function",
2789
+ name: tool.name,
2790
+ description: tool.description,
2791
+ parameters: tool.parameters,
2792
+ strict: strict ? true : void 0
2793
+ });
2794
+ break;
2795
+ case "provider-defined":
2796
+ switch (tool.id) {
2797
+ case "openai.web_search_preview":
2798
+ openaiTools2.push({
2799
+ type: "web_search_preview",
2800
+ search_context_size: tool.args.searchContextSize,
2801
+ user_location: tool.args.userLocation
2802
+ });
2803
+ break;
2804
+ default:
2805
+ toolWarnings.push({ type: "unsupported-tool", tool });
2806
+ break;
2807
+ }
2808
+ break;
2809
+ default:
2810
+ toolWarnings.push({ type: "unsupported-tool", tool });
2811
+ break;
2812
+ }
2813
+ }
2814
+ if (toolChoice == null) {
2815
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
2816
+ }
2817
+ const type = toolChoice.type;
2818
+ switch (type) {
2819
+ case "auto":
2820
+ case "none":
2821
+ case "required":
2822
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
2823
+ case "tool": {
2824
+ if (toolChoice.toolName === "web_search_preview") {
2825
+ return {
2826
+ tools: openaiTools2,
2827
+ tool_choice: {
2828
+ type: "web_search_preview"
2829
+ },
2830
+ toolWarnings
2831
+ };
2832
+ }
2833
+ return {
2834
+ tools: openaiTools2,
2835
+ tool_choice: {
2836
+ type: "function",
2837
+ name: toolChoice.toolName
2838
+ },
2839
+ toolWarnings
2840
+ };
2841
+ }
2842
+ default: {
2843
+ const _exhaustiveCheck = type;
2844
+ throw new UnsupportedFunctionalityError({
2845
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
2846
+ });
2847
+ }
2848
+ }
2849
+ }
2850
+ var OpenAIResponsesLanguageModel = class {
2851
+ constructor(modelId, config) {
2852
+ this.specificationVersion = "v1";
2853
+ this.defaultObjectGenerationMode = "json";
2854
+ this.modelId = modelId;
2855
+ this.config = config;
2856
+ }
2857
+ get provider() {
2858
+ return this.config.provider;
2859
+ }
2860
+ getArgs({
2861
+ mode,
2862
+ maxTokens,
2863
+ temperature,
2864
+ stopSequences,
2865
+ topP,
2866
+ topK,
2867
+ presencePenalty,
2868
+ frequencyPenalty,
2869
+ seed,
2870
+ prompt,
2871
+ providerMetadata,
2872
+ responseFormat
2873
+ }) {
2874
+ var _a15, _b, _c;
2875
+ const warnings = [];
2876
+ const modelConfig = getResponsesModelConfig(this.modelId);
2877
+ const type = mode.type;
2878
+ if (topK != null) {
2879
+ warnings.push({
2880
+ type: "unsupported-setting",
2881
+ setting: "topK"
2882
+ });
2883
+ }
2884
+ if (seed != null) {
2885
+ warnings.push({
2886
+ type: "unsupported-setting",
2887
+ setting: "seed"
2888
+ });
2889
+ }
2890
+ if (presencePenalty != null) {
2891
+ warnings.push({
2892
+ type: "unsupported-setting",
2893
+ setting: "presencePenalty"
2894
+ });
2895
+ }
2896
+ if (frequencyPenalty != null) {
2897
+ warnings.push({
2898
+ type: "unsupported-setting",
2899
+ setting: "frequencyPenalty"
2900
+ });
2901
+ }
2902
+ if (stopSequences != null) {
2903
+ warnings.push({
2904
+ type: "unsupported-setting",
2905
+ setting: "stopSequences"
2906
+ });
2907
+ }
2908
+ const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
2909
+ prompt,
2910
+ systemMessageMode: modelConfig.systemMessageMode
2911
+ });
2912
+ warnings.push(...messageWarnings);
2913
+ const openaiOptions = parseProviderOptions({
2914
+ provider: "openai",
2915
+ providerOptions: providerMetadata,
2916
+ schema: openaiResponsesProviderOptionsSchema
2917
+ });
2918
+ const isStrict = (_a15 = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a15 : true;
2919
+ const baseArgs = {
2920
+ model: this.modelId,
2921
+ input: messages,
2922
+ temperature,
2923
+ top_p: topP,
2924
+ max_output_tokens: maxTokens,
2925
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2926
+ text: {
2927
+ format: responseFormat.schema != null ? {
2928
+ type: "json_schema",
2929
+ strict: isStrict,
2930
+ name: (_b = responseFormat.name) != null ? _b : "response",
2931
+ description: responseFormat.description,
2932
+ schema: responseFormat.schema
2933
+ } : { type: "json_object" }
2934
+ }
2935
+ },
2936
+ // provider options:
2937
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2938
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2939
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2940
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
2941
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
2942
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2943
+ // model-specific settings:
2944
+ ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2945
+ reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
2946
+ },
2947
+ ...modelConfig.requiredAutoTruncation && {
2948
+ truncation: "auto"
2949
+ }
2950
+ };
2951
+ if (modelConfig.isReasoningModel) {
2952
+ if (baseArgs.temperature != null) {
2953
+ baseArgs.temperature = void 0;
2954
+ warnings.push({
2955
+ type: "unsupported-setting",
2956
+ setting: "temperature",
2957
+ details: "temperature is not supported for reasoning models"
2958
+ });
2959
+ }
2960
+ if (baseArgs.top_p != null) {
2961
+ baseArgs.top_p = void 0;
2962
+ warnings.push({
2963
+ type: "unsupported-setting",
2964
+ setting: "topP",
2965
+ details: "topP is not supported for reasoning models"
2966
+ });
2967
+ }
2968
+ }
2969
+ switch (type) {
2970
+ case "regular": {
2971
+ const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
2972
+ mode,
2973
+ strict: isStrict
2974
+ // TODO support provider options on tools
2975
+ });
2976
+ return {
2977
+ args: {
2978
+ ...baseArgs,
2979
+ tools,
2980
+ tool_choice
2981
+ },
2982
+ warnings: [...warnings, ...toolWarnings]
2983
+ };
2984
+ }
2985
+ case "object-json": {
2986
+ return {
2987
+ args: {
2988
+ ...baseArgs,
2989
+ text: {
2990
+ format: mode.schema != null ? {
2991
+ type: "json_schema",
2992
+ strict: isStrict,
2993
+ name: (_c = mode.name) != null ? _c : "response",
2994
+ description: mode.description,
2995
+ schema: mode.schema
2996
+ } : { type: "json_object" }
2997
+ }
2998
+ },
2999
+ warnings
3000
+ };
3001
+ }
3002
+ case "object-tool": {
3003
+ return {
3004
+ args: {
3005
+ ...baseArgs,
3006
+ tool_choice: { type: "function", name: mode.tool.name },
3007
+ tools: [
3008
+ {
3009
+ type: "function",
3010
+ name: mode.tool.name,
3011
+ description: mode.tool.description,
3012
+ parameters: mode.tool.parameters,
3013
+ strict: isStrict
3014
+ }
3015
+ ]
3016
+ },
3017
+ warnings
3018
+ };
3019
+ }
3020
+ default: {
3021
+ const _exhaustiveCheck = type;
3022
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
3023
+ }
3024
+ }
3025
+ }
3026
+ async doGenerate(options) {
3027
+ var _a15, _b, _c, _d, _e;
3028
+ const { args: body, warnings } = this.getArgs(options);
3029
+ const {
3030
+ responseHeaders,
3031
+ value: response,
3032
+ rawValue: rawResponse
3033
+ } = await postJsonToApi({
3034
+ url: this.config.url({
3035
+ path: "/responses",
3036
+ modelId: this.modelId
3037
+ }),
3038
+ headers: combineHeaders(this.config.headers(), options.headers),
3039
+ body,
3040
+ failedResponseHandler: openaiFailedResponseHandler,
3041
+ successfulResponseHandler: createJsonResponseHandler(
3042
+ zod.z.object({
3043
+ id: zod.z.string(),
3044
+ created_at: zod.z.number(),
3045
+ model: zod.z.string(),
3046
+ output: zod.z.array(
3047
+ zod.z.discriminatedUnion("type", [
3048
+ zod.z.object({
3049
+ type: zod.z.literal("message"),
3050
+ role: zod.z.literal("assistant"),
3051
+ content: zod.z.array(
3052
+ zod.z.object({
3053
+ type: zod.z.literal("output_text"),
3054
+ text: zod.z.string(),
3055
+ annotations: zod.z.array(
3056
+ zod.z.object({
3057
+ type: zod.z.literal("url_citation"),
3058
+ start_index: zod.z.number(),
3059
+ end_index: zod.z.number(),
3060
+ url: zod.z.string(),
3061
+ title: zod.z.string()
3062
+ })
3063
+ )
3064
+ })
3065
+ )
3066
+ }),
3067
+ zod.z.object({
3068
+ type: zod.z.literal("function_call"),
3069
+ call_id: zod.z.string(),
3070
+ name: zod.z.string(),
3071
+ arguments: zod.z.string()
3072
+ }),
3073
+ zod.z.object({
3074
+ type: zod.z.literal("web_search_call")
3075
+ }),
3076
+ zod.z.object({
3077
+ type: zod.z.literal("computer_call")
3078
+ }),
3079
+ zod.z.object({
3080
+ type: zod.z.literal("reasoning")
3081
+ })
3082
+ ])
3083
+ ),
3084
+ incomplete_details: zod.z.object({ reason: zod.z.string() }).nullable(),
3085
+ usage: usageSchema
3086
+ })
3087
+ ),
3088
+ abortSignal: options.abortSignal,
3089
+ fetch: this.config.fetch
3090
+ });
3091
+ const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
3092
+ const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
3093
+ toolCallType: "function",
3094
+ toolCallId: output.call_id,
3095
+ toolName: output.name,
3096
+ args: output.arguments
3097
+ }));
3098
+ return {
3099
+ text: outputTextElements.map((content) => content.text).join("\n"),
3100
+ sources: outputTextElements.flatMap(
3101
+ (content) => content.annotations.map((annotation) => {
3102
+ var _a22, _b2, _c2;
3103
+ return {
3104
+ sourceType: "url",
3105
+ id: (_c2 = (_b2 = (_a22 = this.config).generateId) == null ? void 0 : _b2.call(_a22)) != null ? _c2 : generateId(),
3106
+ url: annotation.url,
3107
+ title: annotation.title
3108
+ };
3109
+ })
3110
+ ),
3111
+ finishReason: mapOpenAIResponseFinishReason({
3112
+ finishReason: (_a15 = response.incomplete_details) == null ? void 0 : _a15.reason,
3113
+ hasToolCalls: toolCalls.length > 0
3114
+ }),
3115
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
3116
+ usage: {
3117
+ promptTokens: response.usage.input_tokens,
3118
+ completionTokens: response.usage.output_tokens
3119
+ },
3120
+ rawCall: {
3121
+ rawPrompt: void 0,
3122
+ rawSettings: {}
3123
+ },
3124
+ rawResponse: {
3125
+ headers: responseHeaders,
3126
+ body: rawResponse
3127
+ },
3128
+ request: {
3129
+ body: JSON.stringify(body)
3130
+ },
3131
+ response: {
3132
+ id: response.id,
3133
+ timestamp: new Date(response.created_at * 1e3),
3134
+ modelId: response.model
3135
+ },
3136
+ providerMetadata: {
3137
+ openai: {
3138
+ responseId: response.id,
3139
+ cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
3140
+ reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
3141
+ }
3142
+ },
3143
+ warnings
3144
+ };
3145
+ }
3146
+ async doStream(options) {
3147
+ const { args: body, warnings } = this.getArgs(options);
3148
+ const { responseHeaders, value: response } = await postJsonToApi({
3149
+ url: this.config.url({
3150
+ path: "/responses",
3151
+ modelId: this.modelId
3152
+ }),
3153
+ headers: combineHeaders(this.config.headers(), options.headers),
3154
+ body: {
3155
+ ...body,
3156
+ stream: true
3157
+ },
3158
+ failedResponseHandler: openaiFailedResponseHandler,
3159
+ successfulResponseHandler: createEventSourceResponseHandler(
3160
+ openaiResponsesChunkSchema
3161
+ ),
3162
+ abortSignal: options.abortSignal,
3163
+ fetch: this.config.fetch
3164
+ });
3165
+ const self = this;
3166
+ let finishReason = "unknown";
3167
+ let promptTokens = NaN;
3168
+ let completionTokens = NaN;
3169
+ let cachedPromptTokens = null;
3170
+ let reasoningTokens = null;
3171
+ let responseId = null;
3172
+ const ongoingToolCalls = {};
3173
+ let hasToolCalls = false;
3174
+ return {
3175
+ stream: response.pipeThrough(
3176
+ new TransformStream({
3177
+ transform(chunk, controller) {
3178
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
3179
+ if (!chunk.success) {
3180
+ finishReason = "error";
3181
+ controller.enqueue({ type: "error", error: chunk.error });
3182
+ return;
3183
+ }
3184
+ const value = chunk.value;
3185
+ if (isResponseOutputItemAddedChunk(value)) {
3186
+ if (value.item.type === "function_call") {
3187
+ ongoingToolCalls[value.output_index] = {
3188
+ toolName: value.item.name,
3189
+ toolCallId: value.item.call_id
3190
+ };
3191
+ controller.enqueue({
3192
+ type: "tool-call-delta",
3193
+ toolCallType: "function",
3194
+ toolCallId: value.item.call_id,
3195
+ toolName: value.item.name,
3196
+ argsTextDelta: value.item.arguments
3197
+ });
3198
+ }
3199
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
3200
+ const toolCall = ongoingToolCalls[value.output_index];
3201
+ if (toolCall != null) {
3202
+ controller.enqueue({
3203
+ type: "tool-call-delta",
3204
+ toolCallType: "function",
3205
+ toolCallId: toolCall.toolCallId,
3206
+ toolName: toolCall.toolName,
3207
+ argsTextDelta: value.delta
3208
+ });
3209
+ }
3210
+ } else if (isResponseCreatedChunk(value)) {
3211
+ responseId = value.response.id;
3212
+ controller.enqueue({
3213
+ type: "response-metadata",
3214
+ id: value.response.id,
3215
+ timestamp: new Date(value.response.created_at * 1e3),
3216
+ modelId: value.response.model
3217
+ });
3218
+ } else if (isTextDeltaChunk(value)) {
3219
+ controller.enqueue({
3220
+ type: "text-delta",
3221
+ textDelta: value.delta
3222
+ });
3223
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
3224
+ ongoingToolCalls[value.output_index] = void 0;
3225
+ hasToolCalls = true;
3226
+ controller.enqueue({
3227
+ type: "tool-call",
3228
+ toolCallType: "function",
3229
+ toolCallId: value.item.call_id,
3230
+ toolName: value.item.name,
3231
+ args: value.item.arguments
3232
+ });
3233
+ } else if (isResponseFinishedChunk(value)) {
3234
+ finishReason = mapOpenAIResponseFinishReason({
3235
+ finishReason: (_a15 = value.response.incomplete_details) == null ? void 0 : _a15.reason,
3236
+ hasToolCalls
3237
+ });
3238
+ promptTokens = value.response.usage.input_tokens;
3239
+ completionTokens = value.response.usage.output_tokens;
3240
+ cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
3241
+ reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
3242
+ } else if (isResponseAnnotationAddedChunk(value)) {
3243
+ controller.enqueue({
3244
+ type: "source",
3245
+ source: {
3246
+ sourceType: "url",
3247
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId(),
3248
+ url: value.annotation.url,
3249
+ title: value.annotation.title
3250
+ }
3251
+ });
3252
+ }
3253
+ },
3254
+ flush(controller) {
3255
+ controller.enqueue({
3256
+ type: "finish",
3257
+ finishReason,
3258
+ usage: { promptTokens, completionTokens },
3259
+ ...(cachedPromptTokens != null || reasoningTokens != null) && {
3260
+ providerMetadata: {
3261
+ openai: {
3262
+ responseId,
3263
+ cachedPromptTokens,
3264
+ reasoningTokens
3265
+ }
3266
+ }
3267
+ }
3268
+ });
3269
+ }
3270
+ })
3271
+ ),
3272
+ rawCall: {
3273
+ rawPrompt: void 0,
3274
+ rawSettings: {}
3275
+ },
3276
+ rawResponse: { headers: responseHeaders },
3277
+ request: { body: JSON.stringify(body) },
3278
+ warnings
3279
+ };
3280
+ }
3281
+ };
3282
+ var usageSchema = zod.z.object({
3283
+ input_tokens: zod.z.number(),
3284
+ input_tokens_details: zod.z.object({ cached_tokens: zod.z.number().nullish() }).nullish(),
3285
+ output_tokens: zod.z.number(),
3286
+ output_tokens_details: zod.z.object({ reasoning_tokens: zod.z.number().nullish() }).nullish()
3287
+ });
3288
+ var textDeltaChunkSchema = zod.z.object({
3289
+ type: zod.z.literal("response.output_text.delta"),
3290
+ delta: zod.z.string()
3291
+ });
3292
+ var responseFinishedChunkSchema = zod.z.object({
3293
+ type: zod.z.enum(["response.completed", "response.incomplete"]),
3294
+ response: zod.z.object({
3295
+ incomplete_details: zod.z.object({ reason: zod.z.string() }).nullish(),
3296
+ usage: usageSchema
3297
+ })
3298
+ });
3299
+ var responseCreatedChunkSchema = zod.z.object({
3300
+ type: zod.z.literal("response.created"),
3301
+ response: zod.z.object({
3302
+ id: zod.z.string(),
3303
+ created_at: zod.z.number(),
3304
+ model: zod.z.string()
3305
+ })
3306
+ });
3307
+ var responseOutputItemDoneSchema = zod.z.object({
3308
+ type: zod.z.literal("response.output_item.done"),
3309
+ output_index: zod.z.number(),
3310
+ item: zod.z.discriminatedUnion("type", [
3311
+ zod.z.object({
3312
+ type: zod.z.literal("message")
3313
+ }),
3314
+ zod.z.object({
3315
+ type: zod.z.literal("function_call"),
3316
+ id: zod.z.string(),
3317
+ call_id: zod.z.string(),
3318
+ name: zod.z.string(),
3319
+ arguments: zod.z.string(),
3320
+ status: zod.z.literal("completed")
3321
+ })
3322
+ ])
3323
+ });
3324
+ var responseFunctionCallArgumentsDeltaSchema = zod.z.object({
3325
+ type: zod.z.literal("response.function_call_arguments.delta"),
3326
+ item_id: zod.z.string(),
3327
+ output_index: zod.z.number(),
3328
+ delta: zod.z.string()
3329
+ });
3330
+ var responseOutputItemAddedSchema = zod.z.object({
3331
+ type: zod.z.literal("response.output_item.added"),
3332
+ output_index: zod.z.number(),
3333
+ item: zod.z.discriminatedUnion("type", [
3334
+ zod.z.object({
3335
+ type: zod.z.literal("message")
3336
+ }),
3337
+ zod.z.object({
3338
+ type: zod.z.literal("function_call"),
3339
+ id: zod.z.string(),
3340
+ call_id: zod.z.string(),
3341
+ name: zod.z.string(),
3342
+ arguments: zod.z.string()
3343
+ })
3344
+ ])
3345
+ });
3346
+ var responseAnnotationAddedSchema = zod.z.object({
3347
+ type: zod.z.literal("response.output_text.annotation.added"),
3348
+ annotation: zod.z.object({
3349
+ type: zod.z.literal("url_citation"),
3350
+ url: zod.z.string(),
3351
+ title: zod.z.string()
3352
+ })
3353
+ });
3354
+ var openaiResponsesChunkSchema = zod.z.union([
3355
+ textDeltaChunkSchema,
3356
+ responseFinishedChunkSchema,
3357
+ responseCreatedChunkSchema,
3358
+ responseOutputItemDoneSchema,
3359
+ responseFunctionCallArgumentsDeltaSchema,
3360
+ responseOutputItemAddedSchema,
3361
+ responseAnnotationAddedSchema,
3362
+ zod.z.object({ type: zod.z.string() }).passthrough()
3363
+ // fallback for unknown chunks
3364
+ ]);
3365
+ function isTextDeltaChunk(chunk) {
3366
+ return chunk.type === "response.output_text.delta";
3367
+ }
3368
+ function isResponseOutputItemDoneChunk(chunk) {
3369
+ return chunk.type === "response.output_item.done";
3370
+ }
3371
+ function isResponseFinishedChunk(chunk) {
3372
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
3373
+ }
3374
+ function isResponseCreatedChunk(chunk) {
3375
+ return chunk.type === "response.created";
3376
+ }
3377
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
3378
+ return chunk.type === "response.function_call_arguments.delta";
3379
+ }
3380
+ function isResponseOutputItemAddedChunk(chunk) {
3381
+ return chunk.type === "response.output_item.added";
3382
+ }
3383
+ function isResponseAnnotationAddedChunk(chunk) {
3384
+ return chunk.type === "response.output_text.annotation.added";
3385
+ }
3386
+ function getResponsesModelConfig(modelId) {
3387
+ if (modelId.startsWith("o")) {
3388
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
3389
+ return {
3390
+ isReasoningModel: true,
3391
+ systemMessageMode: "remove",
3392
+ requiredAutoTruncation: false
3393
+ };
3394
+ }
3395
+ return {
3396
+ isReasoningModel: true,
3397
+ systemMessageMode: "developer",
3398
+ requiredAutoTruncation: false
3399
+ };
3400
+ }
3401
+ return {
3402
+ isReasoningModel: false,
3403
+ systemMessageMode: "system",
3404
+ requiredAutoTruncation: false
3405
+ };
3406
+ }
3407
+ var openaiResponsesProviderOptionsSchema = zod.z.object({
3408
+ metadata: zod.z.any().nullish(),
3409
+ parallelToolCalls: zod.z.boolean().nullish(),
3410
+ previousResponseId: zod.z.string().nullish(),
3411
+ store: zod.z.boolean().nullish(),
3412
+ user: zod.z.string().nullish(),
3413
+ reasoningEffort: zod.z.string().nullish(),
3414
+ strictSchemas: zod.z.boolean().nullish(),
3415
+ instructions: zod.z.string().nullish()
3416
+ });
3417
+ var WebSearchPreviewParameters = zod.z.object({});
3418
+ function webSearchPreviewTool({
3419
+ searchContextSize,
3420
+ userLocation
3421
+ } = {}) {
3422
+ return {
3423
+ type: "provider-defined",
3424
+ id: "openai.web_search_preview",
3425
+ args: {
3426
+ searchContextSize,
3427
+ userLocation
3428
+ },
3429
+ parameters: WebSearchPreviewParameters
3430
+ };
3431
+ }
3432
+ var openaiTools = {
3433
+ webSearchPreview: webSearchPreviewTool
3434
+ };
3435
+ function createOpenAI(options = {}) {
3436
+ var _a15, _b, _c;
3437
+ const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://api.openai.com/v1";
3438
+ const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
3439
+ const providerName = (_c = options.name) != null ? _c : "openai";
3440
+ const getHeaders = () => ({
3441
+ Authorization: `Bearer ${loadApiKey({
3442
+ apiKey: options.apiKey,
3443
+ environmentVariableName: "OPENAI_API_KEY",
3444
+ description: "OpenAI"
3445
+ })}`,
3446
+ "OpenAI-Organization": options.organization,
3447
+ "OpenAI-Project": options.project,
3448
+ ...options.headers
3449
+ });
3450
+ const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
3451
+ provider: `${providerName}.chat`,
3452
+ url: ({ path }) => `${baseURL}${path}`,
3453
+ headers: getHeaders,
3454
+ compatibility,
3455
+ fetch: options.fetch
3456
+ });
3457
+ const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
3458
+ provider: `${providerName}.completion`,
3459
+ url: ({ path }) => `${baseURL}${path}`,
3460
+ headers: getHeaders,
3461
+ compatibility,
3462
+ fetch: options.fetch
3463
+ });
3464
+ const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
3465
+ provider: `${providerName}.embedding`,
3466
+ url: ({ path }) => `${baseURL}${path}`,
3467
+ headers: getHeaders,
3468
+ fetch: options.fetch
3469
+ });
3470
+ const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
3471
+ provider: `${providerName}.image`,
3472
+ url: ({ path }) => `${baseURL}${path}`,
3473
+ headers: getHeaders,
3474
+ fetch: options.fetch
3475
+ });
3476
+ const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
3477
+ provider: `${providerName}.transcription`,
3478
+ url: ({ path }) => `${baseURL}${path}`,
3479
+ headers: getHeaders,
3480
+ fetch: options.fetch
3481
+ });
3482
+ const createLanguageModel = (modelId, settings) => {
3483
+ if (new.target) {
3484
+ throw new Error(
3485
+ "The OpenAI model function cannot be called with the new keyword."
3486
+ );
3487
+ }
3488
+ if (modelId === "gpt-3.5-turbo-instruct") {
3489
+ return createCompletionModel(
3490
+ modelId,
3491
+ settings
3492
+ );
3493
+ }
3494
+ return createChatModel(modelId, settings);
3495
+ };
3496
+ const createResponsesModel = (modelId) => {
3497
+ return new OpenAIResponsesLanguageModel(modelId, {
3498
+ provider: `${providerName}.responses`,
3499
+ url: ({ path }) => `${baseURL}${path}`,
3500
+ headers: getHeaders,
3501
+ fetch: options.fetch
3502
+ });
3503
+ };
3504
+ const provider = function(modelId, settings) {
3505
+ return createLanguageModel(modelId, settings);
3506
+ };
3507
+ provider.languageModel = createLanguageModel;
3508
+ provider.chat = createChatModel;
3509
+ provider.completion = createCompletionModel;
3510
+ provider.responses = createResponsesModel;
3511
+ provider.embedding = createEmbeddingModel;
3512
+ provider.textEmbedding = createEmbeddingModel;
3513
+ provider.textEmbeddingModel = createEmbeddingModel;
3514
+ provider.image = createImageModel;
3515
+ provider.imageModel = createImageModel;
3516
+ provider.transcription = createTranscriptionModel;
3517
+ provider.transcriptionModel = createTranscriptionModel;
3518
+ provider.tools = openaiTools;
3519
+ return provider;
3520
+ }
3521
+ createOpenAI({
3522
+ compatibility: "strict"
3523
+ // strict for OpenAI API
3524
+ });
3525
+
3526
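+ // Editor's note (hedged usage sketch, not part of the published bundle; the
+ // model ids below are assumptions for illustration): the provider returned by
+ // createOpenAI is callable and routes model ids to the matching model class.
+ //   const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
+ //   openai("gpt-4o");                            // -> OpenAIChatLanguageModel
+ //   openai("gpt-3.5-turbo-instruct");            // -> OpenAICompletionLanguageModel
+ //   openai.embedding("text-embedding-3-small");  // -> OpenAIEmbeddingModel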
+ // src/document/extractors/types.ts
3527
+ var STRIP_REGEX = /(\r\n|\n|\r)/gm;
3528
+ var openai2 = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
3529
+ var baseLLM = openai2("gpt-4o");
3530
+
3531
+ // src/document/extractors/title.ts
3532
+ var TitleExtractor = class extends llamaindex.BaseExtractor {
3533
+ /**
3534
+ * MastraLanguageModel instance.
3535
+ * @type {MastraLanguageModel}
3536
+ */
3537
+ llm;
3538
+ /**
3539
+ * Can work for a mixture of text and non-text nodes.
3540
+ * @type {boolean}
3541
+ * @default false
3542
+ */
3543
+ isTextNodeOnly = false;
3544
+ /**
3545
+ * Number of nodes to extract titles from.
3546
+ * @type {number}
3547
+ * @default 5
3548
+ */
3549
+ nodes = 5;
3550
+ /**
3551
+ * The prompt template to use for the title extractor.
3552
+ * @type {string}
3553
+ */
3554
+ nodeTemplate;
3555
+ /**
3556
+ * The prompt template to merge extracted titles with.
3557
+ * @type {string}
3558
+ */
3559
+ combineTemplate;
3560
+ /**
3561
+ * Constructor for the TitleExtractor class.
3562
+ * @param {MastraLanguageModel} llm MastraLanguageModel instance.
3563
+ * @param {number} nodes Number of nodes to extract titles from.
3564
+ * @param {TitleExtractorPrompt} nodeTemplate The prompt template to use for the title extractor.
3565
+ * @param {string} combineTemplate The prompt template to merge extracted titles with.
3566
+ */
3567
+ constructor(options) {
3568
+ super();
3569
+ this.llm = options?.llm ?? baseLLM;
3570
+ this.nodes = options?.nodes ?? 5;
3571
+ this.nodeTemplate = options?.nodeTemplate ? new llamaindex.PromptTemplate({
3572
+ templateVars: ["context"],
3573
+ template: options.nodeTemplate
3574
+ }) : llamaindex.defaultTitleExtractorPromptTemplate;
3575
+ this.combineTemplate = options?.combineTemplate ? new llamaindex.PromptTemplate({
3576
+ templateVars: ["context"],
3577
+ template: options.combineTemplate
3578
+ }) : llamaindex.defaultTitleCombinePromptTemplate;
3579
+ }
3580
+ /**
3581
+ * Extract titles from a list of nodes.
3582
+ * @param {BaseNode[]} nodes Nodes to extract titles from.
3583
+ * @returns {Promise<BaseNode<ExtractTitle>[]>} Titles extracted from the nodes.
3584
+ */
3585
+ async extract(nodes) {
3586
+ const results = new Array(nodes.length);
3587
+ const nodesToExtractTitle = [];
3588
+ const nodeIndexes = [];
3589
+ nodes.forEach((node, idx) => {
3590
+ const text = node.getContent(this.metadataMode);
3591
+ if (!text || text.trim() === "") {
3592
+ results[idx] = { documentTitle: "" };
3593
+ } else {
3594
+ nodesToExtractTitle.push(node);
3595
+ nodeIndexes.push(idx);
3596
+ }
3597
+ });
3598
+ if (nodesToExtractTitle.length) {
3599
+ const filteredNodes = this.filterNodes(nodesToExtractTitle);
3600
+ if (filteredNodes.length) {
3601
+ const nodesByDocument = this.separateNodesByDocument(filteredNodes);
3602
+ const titlesByDocument = await this.extractTitles(nodesByDocument);
3603
+ filteredNodes.forEach((node, i) => {
3604
+ const nodeIndex = nodeIndexes[i];
3605
+ const groupKey = node.sourceNode?.nodeId ?? node.id_;
3606
+ if (typeof nodeIndex === "number") {
3607
+ results[nodeIndex] = {
3608
+ documentTitle: titlesByDocument[groupKey] ?? ""
3609
+ };
3610
+ }
3611
+ });
3612
+ }
3613
+ }
3614
+ return results;
3615
+ }
3616
+ filterNodes(nodes) {
3617
+ return nodes.filter((node) => {
3618
+ if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
3619
+ return false;
3620
+ }
3621
+ return true;
3622
+ });
3623
+ }
3624
+ separateNodesByDocument(nodes) {
3625
+ const nodesByDocument = {};
3626
+ for (const node of nodes) {
3627
+ const groupKey = node.sourceNode?.nodeId ?? node.id_;
3628
+ nodesByDocument[groupKey] = nodesByDocument[groupKey] || [];
3629
+ nodesByDocument[groupKey].push(node);
3630
+ }
3631
+ return nodesByDocument;
3632
+ }
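To illustrate the grouping above: nodes that share a source are keyed by that source's nodeId, while orphan nodes fall back to their own id_ (the ids below are made up):

// separateNodesByDocument([chunkA, chunkB, chunkC]) where chunkA and chunkB
// both have sourceNode.nodeId === "doc-1" and chunkC has no sourceNode:
// => { "doc-1": [chunkA, chunkB], "node-c": [chunkC] } // "node-c" is chunkC.id_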
3633
+ async extractTitles(nodesByDocument) {
3634
+ const titlesByDocument = {};
3635
+ for (const [key, nodes] of Object.entries(nodesByDocument)) {
3636
+ const titleCandidates = await this.getTitlesCandidates(nodes);
3637
+ const combinedTitles = titleCandidates.join(", ");
3638
+ const completion = await this.llm.doGenerate({
3639
+ inputFormat: "messages",
3640
+ mode: { type: "regular" },
3641
+ prompt: [
3642
+ {
3643
+ role: "user",
3644
+ content: [
3645
+ {
3646
+ type: "text",
3647
+ text: this.combineTemplate.format({
3648
+ context: combinedTitles
3649
+ })
3650
+ }
3651
+ ]
3652
+ }
3653
+ ]
3654
+ });
3655
+ let title = "";
3656
+ if (typeof completion.text === "string") {
3657
+ title = completion.text.trim();
3658
+ } else {
3659
+ console.warn("Title extraction LLM output was not a string:", completion.text);
3660
+ }
3661
+ titlesByDocument[key] = title;
3662
+ }
3663
+ return titlesByDocument;
3664
+ }
3665
+ async getTitlesCandidates(nodes) {
3666
+ const titleJobs = nodes.map(async (node) => {
3667
+ const completion = await this.llm.doGenerate({
3668
+ inputFormat: "messages",
3669
+ mode: { type: "regular" },
3670
+ prompt: [
3671
+ {
3672
+ role: "user",
3673
+ content: [
3674
+ {
3675
+ type: "text",
3676
+ text: this.nodeTemplate.format({
3677
+ context: node.getContent(llamaindex.MetadataMode.ALL)
3678
+ })
3679
+ }
3680
+ ]
3681
+ }
3682
+ ]
3683
+ });
3684
+ if (typeof completion.text === "string") {
3685
+ return completion.text.trim();
3686
+ } else {
3687
+ console.warn("Title candidate extraction LLM output was not a string:", completion.text);
3688
+ return "";
3689
+ }
3690
+ });
3691
+ return await Promise.all(titleJobs);
3692
+ }
3693
+ };
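A hedged usage sketch for the class above; the node text is invented, and the call is assumed to run inside an async context:

const titleExtractor = new TitleExtractor(); // defaults: baseLLM, nodes = 5
const titleNodes = [new llamaindex.TextNode({ text: "Retrieval-augmented generation combines search with LLMs." })];
const titleResults = await titleExtractor.extract(titleNodes);
// => [{ documentTitle: "..." }]; nodes with empty text come back as { documentTitle: "" }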
3694
+ var SummaryExtractor = class extends llamaindex.BaseExtractor {
3695
+ /**
3696
+ * MastraLanguageModel instance.
3697
+ * @type {MastraLanguageModel}
3698
+ */
3699
+ llm;
3700
+ /**
3701
+ * List of summaries to extract: 'self', 'prev', 'next'
3702
+ * @type {string[]}
3703
+ */
3704
+ summaries;
3705
+ /**
3706
+ * The prompt template to use for the summary extractor.
3707
+ * @type {string}
3708
+ */
3709
+ promptTemplate;
3710
+ selfSummary;
3711
+ prevSummary;
3712
+ nextSummary;
3713
+ constructor(options) {
3714
+ const summaries = options?.summaries ?? ["self"];
3715
+ if (summaries && !summaries.some((s) => ["self", "prev", "next"].includes(s)))
3716
+ throw new Error("Summaries must be one of 'self', 'prev', 'next'");
3717
+ super();
3718
+ this.llm = options?.llm ?? baseLLM;
3719
+ this.summaries = summaries;
3720
+ this.promptTemplate = options?.promptTemplate ? new llamaindex.PromptTemplate({
3721
+ templateVars: ["context"],
3722
+ template: options.promptTemplate
3723
+ }) : llamaindex.defaultSummaryPrompt;
3724
+ this.selfSummary = summaries?.includes("self") ?? false;
3725
+ this.prevSummary = summaries?.includes("prev") ?? false;
3726
+ this.nextSummary = summaries?.includes("next") ?? false;
3727
+ }
3728
+ /**
3729
+ * Extract summary from a node.
3730
+ * @param {BaseNode} node Node to extract summary from.
3731
+ * @returns {Promise<string>} Summary extracted from the node.
3732
+ */
3733
+ async generateNodeSummary(node) {
3734
+ const text = node.getContent(this.metadataMode);
3735
+ if (!text || text.trim() === "") {
3736
+ return "";
3737
+ }
3738
+ if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
3739
+ return "";
3740
+ }
3741
+ const context = node.getContent(this.metadataMode);
3742
+ const prompt = this.promptTemplate.format({
3743
+ context
3744
+ });
3745
+ const result = await this.llm.doGenerate({
3746
+ inputFormat: "messages",
3747
+ mode: { type: "regular" },
3748
+ prompt: [
3749
+ {
3750
+ role: "user",
3751
+ content: [{ type: "text", text: prompt }]
3752
+ }
3753
+ ]
3754
+ });
3755
+ let summary = "";
3756
+ if (typeof result.text === "string") {
3757
+ summary = result.text.trim();
3758
+ } else {
3759
+ console.warn("Summary extraction LLM output was not a string:", result.text);
3760
+ }
3761
+ return summary.replace(STRIP_REGEX, "");
3762
+ }
3763
+ /**
3764
+ * Extract summaries from a list of nodes.
3765
+ * @param {BaseNode[]} nodes Nodes to extract summaries from.
3766
+ * @returns {Promise<ExtractSummary[]>} Summaries extracted from the nodes.
3767
+ */
3768
+ async extract(nodes) {
3769
+ if (!nodes.every((n) => n instanceof llamaindex.TextNode)) throw new Error("Only `TextNode` is allowed for `Summary` extractor");
3770
+ const nodeSummaries = await Promise.all(nodes.map((node) => this.generateNodeSummary(node)));
3771
+ const metadataList = nodes.map(() => ({}));
3772
+ for (let i = 0; i < nodes.length; i++) {
3773
+ if (i > 0 && this.prevSummary && nodeSummaries[i - 1]) {
3774
+ metadataList[i]["prevSectionSummary"] = nodeSummaries[i - 1];
3775
+ }
3776
+ if (i < nodes.length - 1 && this.nextSummary && nodeSummaries[i + 1]) {
3777
+ metadataList[i]["nextSectionSummary"] = nodeSummaries[i + 1];
3778
+ }
3779
+ if (this.selfSummary && nodeSummaries[i]) {
3780
+ metadataList[i]["sectionSummary"] = nodeSummaries[i];
3781
+ }
3782
+ }
3783
+ return metadataList;
3784
+ }
3785
+ };
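A similar sketch for SummaryExtractor; note that extract() throws unless every input is a TextNode:

const summaryExtractor = new SummaryExtractor({ summaries: ["self", "prev", "next"] });
const summaryNodes = [new llamaindex.TextNode({ text: "Chunk one." }), new llamaindex.TextNode({ text: "Chunk two." })];
const summaryMeta = await summaryExtractor.extract(summaryNodes);
// => per-node objects with sectionSummary, plus prevSectionSummary / nextSectionSummary where neighbors exist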
3786
+ var QuestionsAnsweredExtractor = class extends llamaindex.BaseExtractor {
3787
+ /**
3788
+ * MastraLanguageModel instance.
3789
+ * @type {MastraLanguageModel}
3790
+ */
3791
+ llm;
3792
+ /**
3793
+ * Number of questions to generate.
3794
+ * @type {number}
3795
+ * @default 5
3796
+ */
3797
+ questions = 5;
3798
+ /**
3799
+ * The prompt template to use for the question extractor.
3800
+ * @type {string}
3801
+ */
3802
+ promptTemplate;
3803
+ /**
3804
+ * Whether to use metadata for embeddings only.
3805
+ * @type {boolean}
3806
+ * @default false
3807
+ */
3808
+ embeddingOnly = false;
3809
+ /**
3810
+ * Constructor for the QuestionsAnsweredExtractor class.
3811
+ * @param {MastraLanguageModel} llm MastraLanguageModel instance.
3812
+ * @param {number} questions Number of questions to generate.
3813
+ * @param {QuestionExtractPrompt['template']} promptTemplate Optional custom prompt template (should include {context}).
3814
+ * @param {boolean} embeddingOnly Whether to use metadata for embeddings only.
3815
+ */
3816
+ constructor(options) {
3817
+ if (options?.questions && options.questions < 1) throw new Error("Questions must be greater than 0");
3818
+ super();
3819
+ this.llm = options?.llm ?? baseLLM;
3820
+ this.questions = options?.questions ?? 5;
3821
+ this.promptTemplate = options?.promptTemplate ? new llamaindex.PromptTemplate({
3822
+ templateVars: ["numQuestions", "context"],
3823
+ template: options.promptTemplate
3824
+ }).partialFormat({
3825
+ numQuestions: "5"
3826
+ }) : llamaindex.defaultQuestionExtractPrompt;
3827
+ this.embeddingOnly = options?.embeddingOnly ?? false;
3828
+ }
3829
+ /**
3830
+ * Extract answered questions from a node.
3831
+ * @param {BaseNode} node Node to extract questions from.
3832
+ * @returns {Promise<Array<ExtractQuestion> | Array<{}>>} Questions extracted from the node.
3833
+ */
3834
+ async extractQuestionsFromNode(node) {
3835
+ const text = node.getContent(this.metadataMode);
3836
+ if (!text || text.trim() === "") {
3837
+ return { questionsThisExcerptCanAnswer: "" };
3838
+ }
3839
+ if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
3840
+ return { questionsThisExcerptCanAnswer: "" };
3841
+ }
3842
+ const contextStr = node.getContent(this.metadataMode);
3843
+ const prompt = this.promptTemplate.format({
3844
+ context: contextStr,
3845
+ numQuestions: this.questions.toString()
3846
+ });
3847
+ const questions = await this.llm.doGenerate({
3848
+ inputFormat: "messages",
3849
+ mode: { type: "regular" },
3850
+ prompt: [
3851
+ {
3852
+ role: "user",
3853
+ content: [{ type: "text", text: prompt }]
3854
+ }
3855
+ ]
3856
+ });
3857
+ let result = "";
3858
+ try {
3859
+ if (typeof questions.text === "string") {
3860
+ result = questions.text.replace(STRIP_REGEX, "").trim();
3861
+ } else {
3862
+ console.warn("Question extraction LLM output was not a string:", questions.text);
3863
+ }
3864
+ } catch (err) {
3865
+ console.warn("Question extraction failed:", err);
3866
+ }
3867
+ return {
3868
+ questionsThisExcerptCanAnswer: result
3869
+ };
3870
+ }
3871
+ /**
3872
+ * Extract answered questions from a list of nodes.
3873
+ * @param {BaseNode[]} nodes Nodes to extract questions from.
3874
+ * @returns {Promise<Array<ExtractQuestion> | Array<{}>>} Questions extracted from the nodes.
3875
+ */
3876
+ async extract(nodes) {
3877
+ const results = await Promise.all(nodes.map((node) => this.extractQuestionsFromNode(node)));
3878
+ return results;
3879
+ }
3880
+ };
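A sketch for QuestionsAnsweredExtractor; the question count and node text are arbitrary:

const questionExtractor = new QuestionsAnsweredExtractor({ questions: 3 });
const questionMeta = await questionExtractor.extract([new llamaindex.TextNode({ text: "Mastra ships a RAG toolkit." })]);
// => [{ questionsThisExcerptCanAnswer: "..." }]; extraction failures degrade to "" instead of throwing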
3881
+ var KeywordExtractor = class extends llamaindex.BaseExtractor {
3882
+ /**
3883
+ * MastraLanguageModel instance.
3884
+ * @type {MastraLanguageModel}
3885
+ */
3886
+ llm;
3887
+ /**
3888
+ * Number of keywords to extract.
3889
+ * @type {number}
3890
+ * @default 5
3891
+ */
3892
+ keywords = 5;
3893
+ /**
3894
+ * The prompt template to use for the keyword extractor.
3895
+ * @type {string}
3896
+ */
3897
+ promptTemplate;
3898
+ /**
3899
+ * Constructor for the KeywordExtractor class.
3900
+ * @param {MastraLanguageModel} llm MastraLanguageModel instance.
3901
+ * @param {number} keywords Number of keywords to extract.
3902
+ * @param {string} [promptTemplate] Optional custom prompt template (must include {context})
3903
+ * @throws {Error} If keywords is less than 1.
3904
+ */
3905
+ constructor(options) {
3906
+ if (options?.keywords && options.keywords < 1) throw new Error("Keywords must be greater than 0");
3907
+ super();
3908
+ this.llm = options?.llm ?? baseLLM;
3909
+ this.keywords = options?.keywords ?? 5;
3910
+ this.promptTemplate = options?.promptTemplate ? new llamaindex.PromptTemplate({
3911
+ templateVars: ["context", "maxKeywords"],
3912
+ template: options.promptTemplate
3913
+ }) : llamaindex.defaultKeywordExtractPrompt;
3914
+ }
3915
+ /**
3916
+ * Extract keywords from a node. Returns an object with a comma-separated string of keywords, or an empty string if extraction fails.
3917
+ * Adds error handling for malformed/empty LLM output.
3918
+ * @param node Node to extract keywords from.
3919
+ * @returns Keywords extracted from the node.
3920
+ */
3924
+ async extractKeywordsFromNodes(node) {
3925
+ const text = node.getContent(this.metadataMode);
3926
+ if (!text || text.trim() === "") {
3927
+ return { excerptKeywords: "" };
3928
+ }
3929
+ if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
3930
+ return { excerptKeywords: "" };
3931
+ }
3932
+ let keywords = "";
3933
+ try {
3934
+ const completion = await this.llm.doGenerate({
3935
+ inputFormat: "messages",
3936
+ mode: { type: "regular" },
3937
+ prompt: [
3938
+ {
3939
+ role: "user",
3940
+ content: [
3941
+ {
3942
+ type: "text",
3943
+ text: this.promptTemplate.format({
3944
+ context: node.getContent(llamaindex.MetadataMode.ALL),
3945
+ maxKeywords: this.keywords.toString()
3946
+ })
3947
+ }
3948
+ ]
3949
+ }
3950
+ ]
3951
+ });
3952
+ if (typeof completion.text === "string") {
3953
+ keywords = completion.text.trim();
3954
+ } else {
3955
+ console.warn("Keyword extraction LLM output was not a string:", completion.text);
3956
+ }
3957
+ } catch (err) {
3958
+ console.warn("Keyword extraction failed:", err);
3959
+ }
3960
+ return { excerptKeywords: keywords };
3961
+ }
3967
+ /**
3968
+ * Extract keywords from an array of nodes. Always returns an array (may be empty).
3969
+ * @param nodes Nodes to extract keywords from.
3970
+ * @returns Array of keyword extraction results.
3971
+ */
3972
+ async extract(nodes) {
3973
+ if (!Array.isArray(nodes) || nodes.length === 0) return [];
3974
+ const results = await Promise.all(nodes.map((node) => this.extractKeywordsFromNodes(node)));
3975
+ return results;
3976
+ }
3977
+ };
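And a sketch for KeywordExtractor, which short-circuits to [] on empty input:

const keywordExtractor = new KeywordExtractor({ keywords: 8 });
const keywordMeta = await keywordExtractor.extract([new llamaindex.TextNode({ text: "vector store, embeddings, chunking" })]);
// => [{ excerptKeywords: "..." }]; a comma-separated string, or "" when the LLM output was unusable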
12
3978
 
13
3979
  // src/document/types.ts
14
3980
  var Language = /* @__PURE__ */ ((Language2) => {
@@ -510,7 +4476,7 @@ var HTMLSectionTransformer = class {
510
4476
  headersToSplitOn;
511
4477
  options;
512
4478
  constructor(headersToSplitOn, options = {}) {
513
- this.headersToSplitOn = Object.fromEntries(headersToSplitOn.map(([tag, name]) => [tag.toLowerCase(), name]));
4479
+ this.headersToSplitOn = Object.fromEntries(headersToSplitOn.map(([tag, name14]) => [tag.toLowerCase(), name14]));
514
4480
  this.options = options;
515
4481
  }
516
4482
  splitText(text) {
@@ -1104,7 +5070,7 @@ var MarkdownHeaderTransformer = class {
1104
5070
  continue;
1105
5071
  }
1106
5072
  let headerMatched = false;
1107
- for (const [sep, name] of this.headersToSplitOn) {
5073
+ for (const [sep, name14] of this.headersToSplitOn) {
1108
5074
  if (strippedLine.startsWith(sep) && (strippedLine.length === sep.length || strippedLine[sep.length] === " ")) {
1109
5075
  headerMatched = true;
1110
5076
  if (currentContent.length > 0) {
@@ -1114,7 +5080,7 @@ var MarkdownHeaderTransformer = class {
1114
5080
  });
1115
5081
  currentContent = [];
1116
5082
  }
1117
- if (name !== null) {
5083
+ if (name14 !== null) {
1118
5084
  const currentHeaderLevel = (sep.match(/#/g) || []).length;
1119
5085
  while (headerStack.length > 0 && headerStack?.[headerStack.length - 1].level >= currentHeaderLevel) {
1120
5086
  const poppedHeader = headerStack.pop();
@@ -1124,11 +5090,11 @@ var MarkdownHeaderTransformer = class {
1124
5090
  }
1125
5091
  const header = {
1126
5092
  level: currentHeaderLevel,
1127
- name,
5093
+ name: name14,
1128
5094
  data: strippedLine.slice(sep.length).trim()
1129
5095
  };
1130
5096
  headerStack.push(header);
1131
- initialMetadata[name] = header.data;
5097
+ initialMetadata[name14] = header.data;
1132
5098
  }
1133
5099
  linesWithMetadata.push({
1134
5100
  content: line,
@@ -1295,27 +5261,27 @@ var MDocument = class _MDocument {
1295
5261
  async extractMetadata({ title, summary, questions, keywords }) {
1296
5262
  const transformations = [];
1297
5263
  if (typeof summary !== "undefined") {
1298
- transformations.push(new llamaindex.SummaryExtractor(typeof summary === "boolean" ? {} : summary));
5264
+ transformations.push(new SummaryExtractor(typeof summary === "boolean" ? {} : summary));
1299
5265
  }
1300
5266
  if (typeof questions !== "undefined") {
1301
- transformations.push(new llamaindex.QuestionsAnsweredExtractor(typeof questions === "boolean" ? {} : questions));
5267
+ transformations.push(new QuestionsAnsweredExtractor(typeof questions === "boolean" ? {} : questions));
1302
5268
  }
1303
5269
  if (typeof keywords !== "undefined") {
1304
- transformations.push(new llamaindex.KeywordExtractor(typeof keywords === "boolean" ? {} : keywords));
5270
+ transformations.push(new KeywordExtractor(typeof keywords === "boolean" ? {} : keywords));
1305
5271
  }
1306
5272
  if (typeof title !== "undefined") {
1307
- transformations.push(new llamaindex.TitleExtractor(typeof title === "boolean" ? {} : title));
5273
+ transformations.push(new TitleExtractor(typeof title === "boolean" ? {} : title));
1308
5274
  this.chunks = this.chunks.map(
1309
- (doc, i) => new llamaindex.Document({
5275
+ (doc) => doc?.metadata?.docId ? new llamaindex.Document({
1310
5276
  ...doc,
1311
5277
  relationships: {
1312
5278
  [llamaindex.NodeRelationship.SOURCE]: {
1313
- nodeId: `doc-${i}`,
5279
+ nodeId: doc.metadata.docId,
1314
5280
  nodeType: llamaindex.ObjectType.DOCUMENT,
1315
5281
  metadata: doc.metadata
1316
5282
  }
1317
5283
  }
1318
- })
5284
+ }) : doc
1319
5285
  );
1320
5286
  }
1321
5287
  const pipeline = new llamaindex.IngestionPipeline({