@mastra/rag 0.1.19-alpha.3 → 0.1.19-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
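A comparable diff can be reproduced locally with npm (v7 or later), assuming both versions are still published to the registry: npm diff --diff=@mastra/rag@0.1.19-alpha.3 --diff=@mastra/rag@0.1.19-alpha.5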
package/dist/index.cjs CHANGED
@@ -1,14 +1,4118 @@
  'use strict';

  var llamaindex = require('llamaindex');
+ var zod = require('zod');
  var nodeHtmlBetterParser = require('node-html-better-parser');
  var jsTiktoken = require('js-tiktoken');
  var relevance = require('@mastra/core/relevance');
  var tools = require('@mastra/core/tools');
- var zod = require('zod');
  var ai = require('ai');

- // src/document/document.ts
+ var __create = Object.create;
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __commonJS = (cb, mod) => function __require() {
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
+ };
+ var __copyProps = (to, from, except, desc) => {
+ if (from && typeof from === "object" || typeof from === "function") {
+ for (let key of __getOwnPropNames(from))
+ if (!__hasOwnProp.call(to, key) && key !== except)
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+ }
+ return to;
+ };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+ // If the importer is in node compatibility mode or this is not an ESM
+ // file that has been converted to a CommonJS file using a Babel-
+ // compatible transform (i.e. "__esModule" has not been set), then set
+ // "default" to the CommonJS "module.exports" for node compatibility.
+ __defProp(target, "default", { value: mod, enumerable: true }) ,
+ mod
+ ));
+
+ // ../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js
+ var require_secure_json_parse = __commonJS({
+ "../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js"(exports, module) {
+ var hasBuffer = typeof Buffer !== "undefined";
+ var suspectProtoRx = /"(?:_|\\u005[Ff])(?:_|\\u005[Ff])(?:p|\\u0070)(?:r|\\u0072)(?:o|\\u006[Ff])(?:t|\\u0074)(?:o|\\u006[Ff])(?:_|\\u005[Ff])(?:_|\\u005[Ff])"\s*:/;
+ var suspectConstructorRx = /"(?:c|\\u0063)(?:o|\\u006[Ff])(?:n|\\u006[Ee])(?:s|\\u0073)(?:t|\\u0074)(?:r|\\u0072)(?:u|\\u0075)(?:c|\\u0063)(?:t|\\u0074)(?:o|\\u006[Ff])(?:r|\\u0072)"\s*:/;
+ function _parse(text, reviver, options) {
+ if (options == null) {
+ if (reviver !== null && typeof reviver === "object") {
+ options = reviver;
+ reviver = void 0;
+ }
+ }
+ if (hasBuffer && Buffer.isBuffer(text)) {
+ text = text.toString();
+ }
+ if (text && text.charCodeAt(0) === 65279) {
+ text = text.slice(1);
+ }
+ const obj = JSON.parse(text, reviver);
+ if (obj === null || typeof obj !== "object") {
+ return obj;
+ }
+ const protoAction = options && options.protoAction || "error";
+ const constructorAction = options && options.constructorAction || "error";
+ if (protoAction === "ignore" && constructorAction === "ignore") {
+ return obj;
+ }
+ if (protoAction !== "ignore" && constructorAction !== "ignore") {
+ if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
+ return obj;
+ }
+ } else if (protoAction !== "ignore" && constructorAction === "ignore") {
+ if (suspectProtoRx.test(text) === false) {
+ return obj;
+ }
+ } else {
+ if (suspectConstructorRx.test(text) === false) {
+ return obj;
+ }
+ }
+ return filter(obj, { protoAction, constructorAction, safe: options && options.safe });
+ }
+ function filter(obj, { protoAction = "error", constructorAction = "error", safe } = {}) {
+ let next = [obj];
+ while (next.length) {
+ const nodes = next;
+ next = [];
+ for (const node of nodes) {
+ if (protoAction !== "ignore" && Object.prototype.hasOwnProperty.call(node, "__proto__")) {
+ if (safe === true) {
+ return null;
+ } else if (protoAction === "error") {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ delete node.__proto__;
+ }
+ if (constructorAction !== "ignore" && Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
+ if (safe === true) {
+ return null;
+ } else if (constructorAction === "error") {
+ throw new SyntaxError("Object contains forbidden prototype property");
+ }
+ delete node.constructor;
+ }
+ for (const key in node) {
+ const value = node[key];
+ if (value && typeof value === "object") {
+ next.push(value);
+ }
+ }
+ }
+ }
+ return obj;
+ }
+ function parse2(text, reviver, options) {
+ const stackTraceLimit = Error.stackTraceLimit;
+ Error.stackTraceLimit = 0;
+ try {
+ return _parse(text, reviver, options);
+ } finally {
+ Error.stackTraceLimit = stackTraceLimit;
+ }
+ }
+ function safeParse(text, reviver) {
+ const stackTraceLimit = Error.stackTraceLimit;
+ Error.stackTraceLimit = 0;
+ try {
+ return _parse(text, reviver, { safe: true });
+ } catch (_e) {
+ return null;
+ } finally {
+ Error.stackTraceLimit = stackTraceLimit;
+ }
+ }
+ module.exports = parse2;
+ module.exports.default = parse2;
+ module.exports.parse = parse2;
+ module.exports.safeParse = safeParse;
+ module.exports.scan = filter;
+ }
+ });
+
+ // ../../node_modules/.pnpm/@ai-sdk+provider@1.1.3/node_modules/@ai-sdk/provider/dist/index.mjs
+ var marker = "vercel.ai.error";
+ var symbol = Symbol.for(marker);
+ var _a;
+ var _AISDKError = class _AISDKError2 extends Error {
+ /**
+ * Creates an AI SDK Error.
+ *
+ * @param {Object} params - The parameters for creating the error.
+ * @param {string} params.name - The name of the error.
+ * @param {string} params.message - The error message.
+ * @param {unknown} [params.cause] - The underlying cause of the error.
+ */
+ constructor({
+ name: name14,
+ message,
+ cause
+ }) {
+ super(message);
+ this[_a] = true;
+ this.name = name14;
+ this.cause = cause;
+ }
+ /**
+ * Checks if the given error is an AI SDK Error.
+ * @param {unknown} error - The error to check.
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
+ */
+ static isInstance(error) {
+ return _AISDKError2.hasMarker(error, marker);
+ }
+ static hasMarker(error, marker15) {
+ const markerSymbol = Symbol.for(marker15);
+ return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
+ }
+ };
+ _a = symbol;
+ var AISDKError = _AISDKError;
+ var name = "AI_APICallError";
+ var marker2 = `vercel.ai.error.${name}`;
+ var symbol2 = Symbol.for(marker2);
+ var _a2;
+ var APICallError = class extends AISDKError {
+ constructor({
+ message,
+ url,
+ requestBodyValues,
+ statusCode,
+ responseHeaders,
+ responseBody,
+ cause,
+ isRetryable = statusCode != null && (statusCode === 408 || // request timeout
+ statusCode === 409 || // conflict
+ statusCode === 429 || // too many requests
+ statusCode >= 500),
+ // server error
+ data
+ }) {
+ super({ name, message, cause });
+ this[_a2] = true;
+ this.url = url;
+ this.requestBodyValues = requestBodyValues;
+ this.statusCode = statusCode;
+ this.responseHeaders = responseHeaders;
+ this.responseBody = responseBody;
+ this.isRetryable = isRetryable;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker2);
+ }
+ };
+ _a2 = symbol2;
+ var name2 = "AI_EmptyResponseBodyError";
+ var marker3 = `vercel.ai.error.${name2}`;
+ var symbol3 = Symbol.for(marker3);
+ var _a3;
+ var EmptyResponseBodyError = class extends AISDKError {
+ // used in isInstance
+ constructor({ message = "Empty response body" } = {}) {
+ super({ name: name2, message });
+ this[_a3] = true;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker3);
+ }
+ };
+ _a3 = symbol3;
+ function getErrorMessage(error) {
+ if (error == null) {
+ return "unknown error";
+ }
+ if (typeof error === "string") {
+ return error;
+ }
+ if (error instanceof Error) {
+ return error.message;
+ }
+ return JSON.stringify(error);
+ }
+ var name3 = "AI_InvalidArgumentError";
+ var marker4 = `vercel.ai.error.${name3}`;
+ var symbol4 = Symbol.for(marker4);
+ var _a4;
+ var InvalidArgumentError = class extends AISDKError {
+ constructor({
+ message,
+ cause,
+ argument
+ }) {
+ super({ name: name3, message, cause });
+ this[_a4] = true;
+ this.argument = argument;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker4);
+ }
+ };
+ _a4 = symbol4;
+ var name4 = "AI_InvalidPromptError";
+ var marker5 = `vercel.ai.error.${name4}`;
+ var symbol5 = Symbol.for(marker5);
+ var _a5;
+ var InvalidPromptError = class extends AISDKError {
+ constructor({
+ prompt,
+ message,
+ cause
+ }) {
+ super({ name: name4, message: `Invalid prompt: ${message}`, cause });
+ this[_a5] = true;
+ this.prompt = prompt;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker5);
+ }
+ };
+ _a5 = symbol5;
+ var name5 = "AI_InvalidResponseDataError";
+ var marker6 = `vercel.ai.error.${name5}`;
+ var symbol6 = Symbol.for(marker6);
+ var _a6;
+ var InvalidResponseDataError = class extends AISDKError {
+ constructor({
+ data,
+ message = `Invalid response data: ${JSON.stringify(data)}.`
+ }) {
+ super({ name: name5, message });
+ this[_a6] = true;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker6);
+ }
+ };
+ _a6 = symbol6;
+ var name6 = "AI_JSONParseError";
+ var marker7 = `vercel.ai.error.${name6}`;
+ var symbol7 = Symbol.for(marker7);
+ var _a7;
+ var JSONParseError = class extends AISDKError {
+ constructor({ text, cause }) {
+ super({
+ name: name6,
+ message: `JSON parsing failed: Text: ${text}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a7] = true;
+ this.text = text;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker7);
+ }
+ };
+ _a7 = symbol7;
+ var name7 = "AI_LoadAPIKeyError";
+ var marker8 = `vercel.ai.error.${name7}`;
+ var symbol8 = Symbol.for(marker8);
+ var _a8;
+ var LoadAPIKeyError = class extends AISDKError {
+ // used in isInstance
+ constructor({ message }) {
+ super({ name: name7, message });
+ this[_a8] = true;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker8);
+ }
+ };
+ _a8 = symbol8;
+ var name11 = "AI_TooManyEmbeddingValuesForCallError";
+ var marker12 = `vercel.ai.error.${name11}`;
+ var symbol12 = Symbol.for(marker12);
+ var _a12;
+ var TooManyEmbeddingValuesForCallError = class extends AISDKError {
+ constructor(options) {
+ super({
+ name: name11,
+ message: `Too many values for a single embedding call. The ${options.provider} model "${options.modelId}" can only embed up to ${options.maxEmbeddingsPerCall} values per call, but ${options.values.length} values were provided.`
+ });
+ this[_a12] = true;
+ this.provider = options.provider;
+ this.modelId = options.modelId;
+ this.maxEmbeddingsPerCall = options.maxEmbeddingsPerCall;
+ this.values = options.values;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker12);
+ }
+ };
+ _a12 = symbol12;
+ var name12 = "AI_TypeValidationError";
+ var marker13 = `vercel.ai.error.${name12}`;
+ var symbol13 = Symbol.for(marker13);
+ var _a13;
+ var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
+ constructor({ value, cause }) {
+ super({
+ name: name12,
+ message: `Type validation failed: Value: ${JSON.stringify(value)}.
+ Error message: ${getErrorMessage(cause)}`,
+ cause
+ });
+ this[_a13] = true;
+ this.value = value;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker13);
+ }
+ /**
+ * Wraps an error into a TypeValidationError.
+ * If the cause is already a TypeValidationError with the same value, it returns the cause.
+ * Otherwise, it creates a new TypeValidationError.
+ *
+ * @param {Object} params - The parameters for wrapping the error.
+ * @param {unknown} params.value - The value that failed validation.
+ * @param {unknown} params.cause - The original error or cause of the validation failure.
+ * @returns {TypeValidationError} A TypeValidationError instance.
+ */
+ static wrap({
+ value,
+ cause
+ }) {
+ return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
+ }
+ };
+ _a13 = symbol13;
+ var TypeValidationError = _TypeValidationError;
+ var name13 = "AI_UnsupportedFunctionalityError";
+ var marker14 = `vercel.ai.error.${name13}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var UnsupportedFunctionalityError = class extends AISDKError {
+ constructor({
+ functionality,
+ message = `'${functionality}' functionality not supported.`
+ }) {
+ super({ name: name13, message });
+ this[_a14] = true;
+ this.functionality = functionality;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;
+
+ // ../../node_modules/.pnpm/nanoid@3.3.11/node_modules/nanoid/non-secure/index.js
+ var customAlphabet = (alphabet, defaultSize = 21) => {
+ return (size = defaultSize) => {
+ let id = "";
+ let i = size | 0;
+ while (i--) {
+ id += alphabet[Math.random() * alphabet.length | 0];
+ }
+ return id;
+ };
+ };
+
+ // ../../node_modules/.pnpm/@ai-sdk+provider-utils@2.2.7_zod@3.24.3/node_modules/@ai-sdk/provider-utils/dist/index.mjs
+ var import_secure_json_parse = __toESM(require_secure_json_parse());
+ function combineHeaders(...headers) {
+ return headers.reduce(
+ (combinedHeaders, currentHeaders) => ({
+ ...combinedHeaders,
+ ...currentHeaders != null ? currentHeaders : {}
+ }),
+ {}
+ );
+ }
+ function createEventSourceParserStream() {
+ let buffer = "";
+ let event = void 0;
+ let data = [];
+ let lastEventId = void 0;
+ let retry = void 0;
+ function parseLine(line, controller) {
+ if (line === "") {
+ dispatchEvent(controller);
+ return;
+ }
+ if (line.startsWith(":")) {
+ return;
+ }
+ const colonIndex = line.indexOf(":");
+ if (colonIndex === -1) {
+ handleField(line, "");
+ return;
+ }
+ const field = line.slice(0, colonIndex);
+ const valueStart = colonIndex + 1;
+ const value = valueStart < line.length && line[valueStart] === " " ? line.slice(valueStart + 1) : line.slice(valueStart);
+ handleField(field, value);
+ }
+ function dispatchEvent(controller) {
+ if (data.length > 0) {
+ controller.enqueue({
+ event,
+ data: data.join("\n"),
+ id: lastEventId,
+ retry
+ });
+ data = [];
+ event = void 0;
+ retry = void 0;
+ }
+ }
+ function handleField(field, value) {
+ switch (field) {
+ case "event":
+ event = value;
+ break;
+ case "data":
+ data.push(value);
+ break;
+ case "id":
+ lastEventId = value;
+ break;
+ case "retry":
+ const parsedRetry = parseInt(value, 10);
+ if (!isNaN(parsedRetry)) {
+ retry = parsedRetry;
+ }
+ break;
+ }
+ }
+ return new TransformStream({
+ transform(chunk, controller) {
+ const { lines, incompleteLine } = splitLines(buffer, chunk);
+ buffer = incompleteLine;
+ for (let i = 0; i < lines.length; i++) {
+ parseLine(lines[i], controller);
+ }
+ },
+ flush(controller) {
+ parseLine(buffer, controller);
+ dispatchEvent(controller);
+ }
+ });
+ }
+ function splitLines(buffer, chunk) {
+ const lines = [];
+ let currentLine = buffer;
+ for (let i = 0; i < chunk.length; ) {
+ const char = chunk[i++];
+ if (char === "\n") {
+ lines.push(currentLine);
+ currentLine = "";
+ } else if (char === "\r") {
+ lines.push(currentLine);
+ currentLine = "";
+ if (chunk[i + 1] === "\n") {
+ i++;
+ }
+ } else {
+ currentLine += char;
+ }
+ }
+ return { lines, incompleteLine: currentLine };
+ }
+ function extractResponseHeaders(response) {
+ const headers = {};
+ response.headers.forEach((value, key) => {
+ headers[key] = value;
+ });
+ return headers;
+ }
+ var createIdGenerator = ({
+ prefix,
+ size: defaultSize = 16,
+ alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+ separator = "-"
+ } = {}) => {
+ const generator = customAlphabet(alphabet, defaultSize);
+ if (prefix == null) {
+ return generator;
+ }
+ if (alphabet.includes(separator)) {
+ throw new InvalidArgumentError({
+ argument: "separator",
+ message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
+ });
+ }
+ return (size) => `${prefix}${separator}${generator(size)}`;
+ };
+ var generateId = createIdGenerator();
+ function removeUndefinedEntries(record) {
+ return Object.fromEntries(
+ Object.entries(record).filter(([_key, value]) => value != null)
+ );
+ }
+ function isAbortError(error) {
+ return error instanceof Error && (error.name === "AbortError" || error.name === "TimeoutError");
+ }
+ function loadApiKey({
+ apiKey,
+ environmentVariableName,
+ apiKeyParameterName = "apiKey",
+ description
+ }) {
+ if (typeof apiKey === "string") {
+ return apiKey;
+ }
+ if (apiKey != null) {
+ throw new LoadAPIKeyError({
+ message: `${description} API key must be a string.`
+ });
+ }
+ if (typeof process === "undefined") {
+ throw new LoadAPIKeyError({
+ message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`
+ });
+ }
+ apiKey = process.env[environmentVariableName];
+ if (apiKey == null) {
+ throw new LoadAPIKeyError({
+ message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
+ });
+ }
+ if (typeof apiKey !== "string") {
+ throw new LoadAPIKeyError({
+ message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
+ });
+ }
+ return apiKey;
+ }
+ var validatorSymbol = Symbol.for("vercel.ai.validator");
+ function validator(validate) {
+ return { [validatorSymbol]: true, validate };
+ }
+ function isValidator(value) {
+ return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
+ }
+ function asValidator(value) {
+ return isValidator(value) ? value : zodValidator(value);
+ }
+ function zodValidator(zodSchema) {
+ return validator((value) => {
+ const result = zodSchema.safeParse(value);
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+ });
+ }
+ function validateTypes({
+ value,
+ schema: inputSchema
+ }) {
+ const result = safeValidateTypes({ value, schema: inputSchema });
+ if (!result.success) {
+ throw TypeValidationError.wrap({ value, cause: result.error });
+ }
+ return result.value;
+ }
+ function safeValidateTypes({
+ value,
+ schema
+ }) {
+ const validator2 = asValidator(schema);
+ try {
+ if (validator2.validate == null) {
+ return { success: true, value };
+ }
+ const result = validator2.validate(value);
+ if (result.success) {
+ return result;
+ }
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: result.error })
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: TypeValidationError.wrap({ value, cause: error })
+ };
+ }
+ }
+ function parseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = import_secure_json_parse.default.parse(text);
+ if (schema == null) {
+ return value;
+ }
+ return validateTypes({ value, schema });
+ } catch (error) {
+ if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
+ throw error;
+ }
+ throw new JSONParseError({ text, cause: error });
+ }
+ }
+ function safeParseJSON({
+ text,
+ schema
+ }) {
+ try {
+ const value = import_secure_json_parse.default.parse(text);
+ if (schema == null) {
+ return { success: true, value, rawValue: value };
+ }
+ const validationResult = safeValidateTypes({ value, schema });
+ return validationResult.success ? { ...validationResult, rawValue: value } : validationResult;
+ } catch (error) {
+ return {
+ success: false,
+ error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error })
+ };
+ }
+ }
+ function isParsableJson(input) {
+ try {
+ import_secure_json_parse.default.parse(input);
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }
+ function parseProviderOptions({
+ provider,
+ providerOptions,
+ schema
+ }) {
+ if ((providerOptions == null ? void 0 : providerOptions[provider]) == null) {
+ return void 0;
+ }
+ const parsedProviderOptions = safeValidateTypes({
+ value: providerOptions[provider],
+ schema
+ });
+ if (!parsedProviderOptions.success) {
+ throw new InvalidArgumentError({
+ argument: "providerOptions",
+ message: `invalid ${provider} provider options`,
+ cause: parsedProviderOptions.error
+ });
+ }
+ return parsedProviderOptions.value;
+ }
+ var getOriginalFetch2 = () => globalThis.fetch;
+ var postJsonToApi = async ({
+ url,
+ headers,
+ body,
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ }) => postToApi({
+ url,
+ headers: {
+ "Content-Type": "application/json",
+ ...headers
+ },
+ body: {
+ content: JSON.stringify(body),
+ values: body
+ },
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ });
+ var postFormDataToApi = async ({
+ url,
+ headers,
+ formData,
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ }) => postToApi({
+ url,
+ headers,
+ body: {
+ content: formData,
+ values: Object.fromEntries(formData.entries())
+ },
+ failedResponseHandler,
+ successfulResponseHandler,
+ abortSignal,
+ fetch
+ });
+ var postToApi = async ({
+ url,
+ headers = {},
+ body,
+ successfulResponseHandler,
+ failedResponseHandler,
+ abortSignal,
+ fetch = getOriginalFetch2()
+ }) => {
+ try {
+ const response = await fetch(url, {
+ method: "POST",
+ headers: removeUndefinedEntries(headers),
+ body: body.content,
+ signal: abortSignal
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!response.ok) {
+ let errorInformation;
+ try {
+ errorInformation = await failedResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ throw new APICallError({
+ message: "Failed to process error response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ throw errorInformation.value;
+ }
+ try {
+ return await successfulResponseHandler({
+ response,
+ url,
+ requestBodyValues: body.values
+ });
+ } catch (error) {
+ if (error instanceof Error) {
+ if (isAbortError(error) || APICallError.isInstance(error)) {
+ throw error;
+ }
+ }
+ throw new APICallError({
+ message: "Failed to process successful response",
+ cause: error,
+ statusCode: response.status,
+ url,
+ responseHeaders,
+ requestBodyValues: body.values
+ });
+ }
+ } catch (error) {
+ if (isAbortError(error)) {
+ throw error;
+ }
+ if (error instanceof TypeError && error.message === "fetch failed") {
+ const cause = error.cause;
+ if (cause != null) {
+ throw new APICallError({
+ message: `Cannot connect to API: ${cause.message}`,
+ cause,
+ url,
+ requestBodyValues: body.values,
+ isRetryable: true
+ // retry when network error
+ });
+ }
+ }
+ throw error;
+ }
+ };
+ var createJsonErrorResponseHandler = ({
+ errorSchema,
+ errorToMessage,
+ isRetryable
+ }) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const responseHeaders = extractResponseHeaders(response);
+ if (responseBody.trim() === "") {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ try {
+ const parsedError = parseJSON({
+ text: responseBody,
+ schema: errorSchema
+ });
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: errorToMessage(parsedError),
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ data: parsedError,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
+ })
+ };
+ } catch (parseError) {
+ return {
+ responseHeaders,
+ value: new APICallError({
+ message: response.statusText,
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
+ })
+ };
+ }
+ };
+ var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
+ const responseHeaders = extractResponseHeaders(response);
+ if (response.body == null) {
+ throw new EmptyResponseBodyError({});
+ }
+ return {
+ responseHeaders,
+ value: response.body.pipeThrough(new TextDecoderStream()).pipeThrough(createEventSourceParserStream()).pipeThrough(
+ new TransformStream({
+ transform({ data }, controller) {
+ if (data === "[DONE]") {
+ return;
+ }
+ controller.enqueue(
+ safeParseJSON({
+ text: data,
+ schema: chunkSchema
+ })
+ );
+ }
+ })
+ )
+ };
+ };
+ var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const parsedResult = safeParseJSON({
+ text: responseBody,
+ schema: responseSchema
+ });
+ const responseHeaders = extractResponseHeaders(response);
+ if (!parsedResult.success) {
+ throw new APICallError({
+ message: "Invalid JSON response",
+ cause: parsedResult.error,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody,
+ url,
+ requestBodyValues
+ });
+ }
+ return {
+ responseHeaders,
+ value: parsedResult.value,
+ rawValue: parsedResult.rawValue
+ };
+ };
+ var createBinaryResponseHandler = () => async ({ response, url, requestBodyValues }) => {
+ const responseHeaders = extractResponseHeaders(response);
+ if (!response.body) {
+ throw new APICallError({
+ message: "Response body is empty",
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody: void 0
+ });
+ }
+ try {
+ const buffer = await response.arrayBuffer();
+ return {
+ responseHeaders,
+ value: new Uint8Array(buffer)
+ };
+ } catch (error) {
+ throw new APICallError({
+ message: "Failed to read response as array buffer",
+ url,
+ requestBodyValues,
+ statusCode: response.status,
+ responseHeaders,
+ responseBody: void 0,
+ cause: error
+ });
+ }
+ };
+ var { btoa, atob } = globalThis;
+ function convertBase64ToUint8Array(base64String) {
+ const base64Url = base64String.replace(/-/g, "+").replace(/_/g, "/");
+ const latin1string = atob(base64Url);
+ return Uint8Array.from(latin1string, (byte) => byte.codePointAt(0));
+ }
+ function convertUint8ArrayToBase64(array) {
+ let latin1string = "";
+ for (let i = 0; i < array.length; i++) {
+ latin1string += String.fromCodePoint(array[i]);
+ }
+ return btoa(latin1string);
+ }
+ function withoutTrailingSlash(url) {
+ return url == null ? void 0 : url.replace(/\/$/, "");
+ }
+ function convertToOpenAIChatMessages({
+ prompt,
+ useLegacyFunctionCalling = false,
+ systemMessageMode = "system"
+ }) {
+ const messages = [];
+ const warnings = [];
+ for (const { role, content } of prompt) {
+ switch (role) {
+ case "system": {
+ switch (systemMessageMode) {
+ case "system": {
+ messages.push({ role: "system", content });
+ break;
+ }
+ case "developer": {
+ messages.push({ role: "developer", content });
+ break;
+ }
+ case "remove": {
+ warnings.push({
+ type: "other",
+ message: "system messages are removed for this model"
+ });
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = systemMessageMode;
+ throw new Error(
+ `Unsupported system message mode: ${_exhaustiveCheck}`
+ );
+ }
+ }
+ break;
+ }
+ case "user": {
+ if (content.length === 1 && content[0].type === "text") {
+ messages.push({ role: "user", content: content[0].text });
+ break;
+ }
+ messages.push({
+ role: "user",
+ content: content.map((part, index) => {
+ var _a15, _b, _c, _d;
+ switch (part.type) {
+ case "text": {
+ return { type: "text", text: part.text };
+ }
+ case "image": {
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
+ // OpenAI specific extension: image detail
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+ }
+ };
+ }
+ case "file": {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "'File content parts with URL data' functionality not supported."
+ });
+ }
+ switch (part.mimeType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: { data: part.data, format: "wav" }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: { data: part.data, format: "mp3" }
+ };
+ }
+ case "application/pdf": {
+ return {
+ type: "file",
+ file: {
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ }
+ default: {
+ throw new UnsupportedFunctionalityError({
+ functionality: `File content part type ${part.mimeType} in user messages`
+ });
+ }
+ }
+ }
+ }
+ })
+ });
+ break;
+ }
+ case "assistant": {
+ let text = "";
+ const toolCalls = [];
+ for (const part of content) {
+ switch (part.type) {
+ case "text": {
+ text += part.text;
+ break;
+ }
+ case "tool-call": {
+ toolCalls.push({
+ id: part.toolCallId,
+ type: "function",
+ function: {
+ name: part.toolName,
+ arguments: JSON.stringify(part.args)
+ }
+ });
+ break;
+ }
+ }
+ }
+ if (useLegacyFunctionCalling) {
+ if (toolCalls.length > 1) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
+ });
+ }
+ messages.push({
+ role: "assistant",
+ content: text,
+ function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
+ });
+ } else {
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
+ }
+ break;
+ }
+ case "tool": {
+ for (const toolResponse of content) {
+ if (useLegacyFunctionCalling) {
+ messages.push({
+ role: "function",
+ name: toolResponse.toolName,
+ content: JSON.stringify(toolResponse.result)
+ });
+ } else {
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
+ }
+ }
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ return { messages, warnings };
+ }
+ function mapOpenAIChatLogProbsOutput(logprobs) {
+ var _a15, _b;
+ return (_b = (_a15 = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a15.map(({ token, logprob, top_logprobs }) => ({
+ token,
+ logprob,
+ topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
+ token: token2,
+ logprob: logprob2
+ })) : []
+ }))) != null ? _b : void 0;
+ }
+ function mapOpenAIFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
+ var openaiErrorDataSchema = zod.z.object({
+ error: zod.z.object({
+ message: zod.z.string(),
+ // The additional information below is handled loosely to support
+ // OpenAI-compatible providers that have slightly different error
+ // responses:
+ type: zod.z.string().nullish(),
+ param: zod.z.any().nullish(),
+ code: zod.z.union([zod.z.string(), zod.z.number()]).nullish()
+ })
+ });
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
+ errorSchema: openaiErrorDataSchema,
+ errorToMessage: (data) => data.error.message
+ });
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
+ }
+ function prepareTools({
+ mode,
+ useLegacyFunctionCalling = false,
+ structuredOutputs
+ }) {
+ var _a15;
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
+ const toolWarnings = [];
+ if (tools == null) {
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
+ }
+ const toolChoice = mode.toolChoice;
+ if (useLegacyFunctionCalling) {
+ const openaiFunctions = [];
+ for (const tool of tools) {
+ if (tool.type === "provider-defined") {
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ } else {
+ openaiFunctions.push({
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.parameters
+ });
+ }
+ }
+ if (toolChoice == null) {
+ return {
+ functions: openaiFunctions,
+ function_call: void 0,
+ toolWarnings
+ };
+ }
+ const type2 = toolChoice.type;
+ switch (type2) {
+ case "auto":
+ case "none":
+ case void 0:
+ return {
+ functions: openaiFunctions,
+ function_call: void 0,
+ toolWarnings
+ };
+ case "required":
+ throw new UnsupportedFunctionalityError({
+ functionality: "useLegacyFunctionCalling and toolChoice: required"
+ });
+ default:
+ return {
+ functions: openaiFunctions,
+ function_call: { name: toolChoice.toolName },
+ toolWarnings
+ };
+ }
+ }
+ const openaiTools2 = [];
+ for (const tool of tools) {
+ if (tool.type === "provider-defined") {
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ } else {
+ openaiTools2.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.parameters,
+ strict: structuredOutputs ? true : void 0
+ }
+ });
+ }
+ }
+ if (toolChoice == null) {
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+ }
+ const type = toolChoice.type;
+ switch (type) {
+ case "auto":
+ case "none":
+ case "required":
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
+ case "tool":
+ return {
+ tools: openaiTools2,
+ tool_choice: {
+ type: "function",
+ function: {
+ name: toolChoice.toolName
+ }
+ },
+ toolWarnings
+ };
+ default: {
+ const _exhaustiveCheck = type;
+ throw new UnsupportedFunctionalityError({
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ });
+ }
+ }
+ }
+ var OpenAIChatLanguageModel = class {
+ constructor(modelId, settings, config) {
+ this.specificationVersion = "v1";
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ }
+ get supportsStructuredOutputs() {
+ var _a15;
+ return (_a15 = this.settings.structuredOutputs) != null ? _a15 : isReasoningModel(this.modelId);
+ }
+ get defaultObjectGenerationMode() {
+ if (isAudioModel(this.modelId)) {
+ return "tool";
+ }
+ return this.supportsStructuredOutputs ? "json" : "tool";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ get supportsImageUrls() {
+ return !this.settings.downloadImages;
+ }
+ getArgs({
+ mode,
+ prompt,
+ maxTokens,
+ temperature,
+ topP,
+ topK,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences,
+ responseFormat,
+ seed,
+ providerMetadata
+ }) {
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
+ const type = mode.type;
+ const warnings = [];
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "responseFormat",
+ details: "JSON response format schema is only supported with structuredOutputs"
+ });
+ }
+ const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
+ if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "useLegacyFunctionCalling with parallelToolCalls"
+ });
+ }
+ if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "structuredOutputs with useLegacyFunctionCalling"
+ });
+ }
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+ {
+ prompt,
+ useLegacyFunctionCalling,
+ systemMessageMode: getSystemMessageMode(this.modelId)
+ }
+ );
+ warnings.push(...messageWarnings);
+ const baseArgs = {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ logit_bias: this.settings.logitBias,
+ logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+ user: this.settings.user,
+ parallel_tool_calls: this.settings.parallelToolCalls,
+ // standardized settings:
+ max_tokens: maxTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_a15 = responseFormat.name) != null ? _a15 : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
+ stop: stopSequences,
+ seed,
+ // openai specific settings:
+ // TODO remove in next major version; we auto-map maxTokens now
+ max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
+ store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
+ metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
+ prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
+ reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // messages:
+ messages
+ };
+ if (isReasoningModel(this.modelId)) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.frequency_penalty != null) {
+ baseArgs.frequency_penalty = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "frequencyPenalty",
+ details: "frequencyPenalty is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.presence_penalty != null) {
+ baseArgs.presence_penalty = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "presencePenalty",
+ details: "presencePenalty is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.logit_bias != null) {
+ baseArgs.logit_bias = void 0;
+ warnings.push({
+ type: "other",
+ message: "logitBias is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.logprobs != null) {
+ baseArgs.logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "logprobs is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_logprobs != null) {
+ baseArgs.top_logprobs = void 0;
+ warnings.push({
+ type: "other",
+ message: "topLogprobs is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.max_tokens != null) {
+ if (baseArgs.max_completion_tokens == null) {
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
+ }
+ baseArgs.max_tokens = void 0;
+ }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
+ });
+ }
+ }
+ switch (type) {
+ case "regular": {
+ const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
+ mode,
+ useLegacyFunctionCalling,
+ structuredOutputs: this.supportsStructuredOutputs
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools,
+ tool_choice,
+ functions,
+ function_call
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
+ }
+ case "object-json": {
+ return {
+ args: {
+ ...baseArgs,
+ response_format: this.supportsStructuredOutputs && mode.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: mode.schema,
+ strict: true,
+ name: (_h = mode.name) != null ? _h : "response",
+ description: mode.description
+ }
+ } : { type: "json_object" }
+ },
+ warnings
+ };
+ }
+ case "object-tool": {
+ return {
+ args: useLegacyFunctionCalling ? {
+ ...baseArgs,
+ function_call: {
+ name: mode.tool.name
+ },
+ functions: [
+ {
+ name: mode.tool.name,
+ description: mode.tool.description,
+ parameters: mode.tool.parameters
+ }
+ ]
+ } : {
+ ...baseArgs,
+ tool_choice: {
+ type: "function",
+ function: { name: mode.tool.name }
+ },
+ tools: [
+ {
+ type: "function",
+ function: {
+ name: mode.tool.name,
+ description: mode.tool.description,
+ parameters: mode.tool.parameters,
+ strict: this.supportsStructuredOutputs ? true : void 0
+ }
+ }
+ ]
+ },
+ warnings
+ };
+ }
+ default: {
+ const _exhaustiveCheck = type;
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ async doGenerate(options) {
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
+ const { args: body, warnings } = this.getArgs(options);
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await postJsonToApi({
+ url: this.config.url({
+ path: "/chat/completions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ openaiChatResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const { messages: rawPrompt, ...rawSettings } = body;
+ const choice = response.choices[0];
+ const completionTokenDetails = (_a15 = response.usage) == null ? void 0 : _a15.completion_tokens_details;
+ const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const providerMetadata = { openai: {} };
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
+ providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+ }
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+ }
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+ }
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
+ providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ }
+ return {
+ text: (_c = choice.message.content) != null ? _c : void 0,
+ toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
+ {
+ toolCallType: "function",
+ toolCallId: generateId(),
+ toolName: choice.message.function_call.name,
+ args: choice.message.function_call.arguments
+ }
+ ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+ var _a22;
+ return {
+ toolCallType: "function",
+ toolCallId: (_a22 = toolCall.id) != null ? _a22 : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ };
+ }),
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
+ usage: {
+ promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
+ completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ },
+ rawCall: { rawPrompt, rawSettings },
+ rawResponse: { headers: responseHeaders, body: rawResponse },
+ request: { body: JSON.stringify(body) },
+ response: getResponseMetadata(response),
+ warnings,
+ logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
+ providerMetadata
+ };
+ }
+ async doStream(options) {
+ if (this.settings.simulateStreaming) {
+ const result = await this.doGenerate(options);
+ const simulatedStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue({ type: "response-metadata", ...result.response });
+ if (result.text) {
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: result.text
+ });
+ }
+ if (result.toolCalls) {
+ for (const toolCall of result.toolCalls) {
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ argsTextDelta: toolCall.args
+ });
+ controller.enqueue({
+ type: "tool-call",
+ ...toolCall
+ });
+ }
+ }
+ controller.enqueue({
+ type: "finish",
+ finishReason: result.finishReason,
+ usage: result.usage,
+ logprobs: result.logprobs,
+ providerMetadata: result.providerMetadata
+ });
+ controller.close();
+ }
+ });
+ return {
+ stream: simulatedStream,
+ rawCall: result.rawCall,
+ rawResponse: result.rawResponse,
+ warnings: result.warnings
+ };
+ }
+ const { args, warnings } = this.getArgs(options);
+ const body = {
+ ...args,
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ };
+ const { responseHeaders, value: response } = await postJsonToApi({
+ url: this.config.url({
+ path: "/chat/completions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createEventSourceResponseHandler(
+ openaiChatChunkSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const { messages: rawPrompt, ...rawSettings } = args;
+ const toolCalls = [];
+ let finishReason = "unknown";
+ let usage = {
+ promptTokens: void 0,
+ completionTokens: void 0
+ };
+ let logprobs;
+ let isFirstChunk = true;
+ const { useLegacyFunctionCalling } = this.settings;
+ const providerMetadata = { openai: {} };
+ return {
+ stream: response.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ if (!chunk.success) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ if ("error" in value) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: value.error });
+ return;
+ }
+ if (isFirstChunk) {
+ isFirstChunk = false;
+ controller.enqueue({
+ type: "response-metadata",
+ ...getResponseMetadata(value)
+ });
+ }
+ if (value.usage != null) {
+ const {
+ prompt_tokens,
+ completion_tokens,
+ prompt_tokens_details,
+ completion_tokens_details
+ } = value.usage;
+ usage = {
+ promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
+ completionTokens: completion_tokens != null ? completion_tokens : void 0
+ };
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+ providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ }
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+ providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ }
+ }
+ const choice = value.choices[0];
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
+ }
+ if ((choice == null ? void 0 : choice.delta) == null) {
+ return;
+ }
+ const delta = choice.delta;
+ if (delta.content != null) {
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: delta.content
+ });
+ }
+ const mappedLogprobs = mapOpenAIChatLogProbsOutput(
+ choice == null ? void 0 : choice.logprobs
+ );
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+ if (logprobs === void 0) logprobs = [];
+ logprobs.push(...mappedLogprobs);
+ }
+ const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
+ {
+ type: "function",
+ id: generateId(),
+ function: delta.function_call,
+ index: 0
+ }
+ ] : delta.tool_calls;
+ if (mappedToolCalls != null) {
+ for (const toolCallDelta of mappedToolCalls) {
+ const index = toolCallDelta.index;
+ if (toolCalls[index] == null) {
+ if (toolCallDelta.type !== "function") {
+ throw new InvalidResponseDataError({
+ data: toolCallDelta,
+ message: `Expected 'function' type.`
+ });
+ }
+ if (toolCallDelta.id == null) {
+ throw new InvalidResponseDataError({
+ data: toolCallDelta,
+ message: `Expected 'id' to be a string.`
+ });
+ }
+ if (((_a15 = toolCallDelta.function) == null ? void 0 : _a15.name) == null) {
+ throw new InvalidResponseDataError({
+ data: toolCallDelta,
+ message: `Expected 'function.name' to be a string.`
+ });
+ }
+ toolCalls[index] = {
+ id: toolCallDelta.id,
+ type: "function",
+ function: {
+ name: toolCallDelta.function.name,
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ },
+ hasFinished: false
+ };
+ const toolCall2 = toolCalls[index];
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (toolCall2.function.arguments.length > 0) {
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall2.id,
+ toolName: toolCall2.function.name,
+ argsTextDelta: toolCall2.function.arguments
+ });
+ }
+ if (isParsableJson(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+ toolName: toolCall2.function.name,
+ args: toolCall2.function.arguments
+ });
+ toolCall2.hasFinished = true;
+ }
+ }
+ continue;
+ }
+ const toolCall = toolCalls[index];
+ if (toolCall.hasFinished) {
+ continue;
+ }
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ }
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall.id,
+ toolName: toolCall.function.name,
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ });
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ toolCall.hasFinished = true;
1845
+ }
1846
+ }
1847
+ }
1848
+ },
1849
+ flush(controller) {
1850
+ var _a15, _b;
1851
+ controller.enqueue({
1852
+ type: "finish",
1853
+ finishReason,
1854
+ logprobs,
1855
+ usage: {
1856
+ promptTokens: (_a15 = usage.promptTokens) != null ? _a15 : NaN,
1857
+ completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
1858
+ },
1859
+ ...providerMetadata != null ? { providerMetadata } : {}
1860
+ });
1861
+ }
1862
+ })
1863
+ ),
1864
+ rawCall: { rawPrompt, rawSettings },
1865
+ rawResponse: { headers: responseHeaders },
1866
+ request: { body: JSON.stringify(body) },
1867
+ warnings
1868
+ };
1869
+ }
1870
+ };
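+ // Note on the chat streaming transform above: tool-call argument deltas are
+ // accumulated per tool call `index`, and a terminal "tool-call" event is
+ // emitted as soon as the buffered arguments parse as valid JSON
+ // (isParsableJson); once that happens, hasFinished suppresses any further
+ // deltas for that index.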
1871
+ var openaiTokenUsageSchema = zod.z.object({
1872
+ prompt_tokens: zod.z.number().nullish(),
1873
+ completion_tokens: zod.z.number().nullish(),
1874
+ prompt_tokens_details: zod.z.object({
1875
+ cached_tokens: zod.z.number().nullish()
1876
+ }).nullish(),
1877
+ completion_tokens_details: zod.z.object({
1878
+ reasoning_tokens: zod.z.number().nullish(),
1879
+ accepted_prediction_tokens: zod.z.number().nullish(),
1880
+ rejected_prediction_tokens: zod.z.number().nullish()
1881
+ }).nullish()
1882
+ }).nullish();
1883
+ var openaiChatResponseSchema = zod.z.object({
1884
+ id: zod.z.string().nullish(),
1885
+ created: zod.z.number().nullish(),
1886
+ model: zod.z.string().nullish(),
1887
+ choices: zod.z.array(
1888
+ zod.z.object({
1889
+ message: zod.z.object({
1890
+ role: zod.z.literal("assistant").nullish(),
1891
+ content: zod.z.string().nullish(),
1892
+ function_call: zod.z.object({
1893
+ arguments: zod.z.string(),
1894
+ name: zod.z.string()
1895
+ }).nullish(),
1896
+ tool_calls: zod.z.array(
1897
+ zod.z.object({
1898
+ id: zod.z.string().nullish(),
1899
+ type: zod.z.literal("function"),
1900
+ function: zod.z.object({
1901
+ name: zod.z.string(),
1902
+ arguments: zod.z.string()
1903
+ })
1904
+ })
1905
+ ).nullish()
1906
+ }),
1907
+ index: zod.z.number(),
1908
+ logprobs: zod.z.object({
1909
+ content: zod.z.array(
1910
+ zod.z.object({
1911
+ token: zod.z.string(),
1912
+ logprob: zod.z.number(),
1913
+ top_logprobs: zod.z.array(
1914
+ zod.z.object({
1915
+ token: zod.z.string(),
1916
+ logprob: zod.z.number()
1917
+ })
1918
+ )
1919
+ })
1920
+ ).nullable()
1921
+ }).nullish(),
1922
+ finish_reason: zod.z.string().nullish()
1923
+ })
1924
+ ),
1925
+ usage: openaiTokenUsageSchema
1926
+ });
1927
+ var openaiChatChunkSchema = zod.z.union([
1928
+ zod.z.object({
1929
+ id: zod.z.string().nullish(),
1930
+ created: zod.z.number().nullish(),
1931
+ model: zod.z.string().nullish(),
1932
+ choices: zod.z.array(
1933
+ zod.z.object({
1934
+ delta: zod.z.object({
1935
+ role: zod.z.enum(["assistant"]).nullish(),
1936
+ content: zod.z.string().nullish(),
1937
+ function_call: zod.z.object({
1938
+ name: zod.z.string().optional(),
1939
+ arguments: zod.z.string().optional()
1940
+ }).nullish(),
1941
+ tool_calls: zod.z.array(
1942
+ zod.z.object({
1943
+ index: zod.z.number(),
1944
+ id: zod.z.string().nullish(),
1945
+ type: zod.z.literal("function").optional(),
1946
+ function: zod.z.object({
1947
+ name: zod.z.string().nullish(),
1948
+ arguments: zod.z.string().nullish()
1949
+ })
1950
+ })
1951
+ ).nullish()
1952
+ }).nullish(),
1953
+ logprobs: zod.z.object({
1954
+ content: zod.z.array(
1955
+ zod.z.object({
1956
+ token: zod.z.string(),
1957
+ logprob: zod.z.number(),
1958
+ top_logprobs: zod.z.array(
1959
+ zod.z.object({
1960
+ token: zod.z.string(),
1961
+ logprob: zod.z.number()
1962
+ })
1963
+ )
1964
+ })
1965
+ ).nullable()
1966
+ }).nullish(),
1967
+ finish_reason: zod.z.string().nullable().optional(),
1968
+ index: zod.z.number()
1969
+ })
1970
+ ),
1971
+ usage: openaiTokenUsageSchema
1972
+ }),
1973
+ openaiErrorDataSchema
1974
+ ]);
1975
+ function isReasoningModel(modelId) {
1976
+ return modelId.startsWith("o");
1977
+ }
1978
+ function isAudioModel(modelId) {
1979
+ return modelId.startsWith("gpt-4o-audio-preview");
1980
+ }
1981
+ function getSystemMessageMode(modelId) {
1982
+ var _a15, _b;
1983
+ if (!isReasoningModel(modelId)) {
1984
+ return "system";
1985
+ }
1986
+ return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
1987
+ }
1988
+ var reasoningModels = {
1989
+ "o1-mini": {
1990
+ systemMessageMode: "remove"
1991
+ },
1992
+ "o1-mini-2024-09-12": {
1993
+ systemMessageMode: "remove"
1994
+ },
1995
+ "o1-preview": {
1996
+ systemMessageMode: "remove"
1997
+ },
1998
+ "o1-preview-2024-09-12": {
1999
+ systemMessageMode: "remove"
2000
+ },
2001
+ "o3-mini": {
2002
+ systemMessageMode: "developer"
2003
+ },
2004
+ "o3-mini-2025-01-31": {
2005
+ systemMessageMode: "developer"
2006
+ }
2007
+ };
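+ // Illustrative results of getSystemMessageMode (model ids assumed):
+ //   getSystemMessageMode("gpt-4o")  -> "system"    (not a reasoning model)
+ //   getSystemMessageMode("o1-mini") -> "remove"    (explicit entry above)
+ //   getSystemMessageMode("o3-mini") -> "developer" (explicit entry above)
+ //   getSystemMessageMode("o1")     -> "developer"  (reasoning-model default)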
2008
+ function convertToOpenAICompletionPrompt({
2009
+ prompt,
2010
+ inputFormat,
2011
+ user = "user",
2012
+ assistant = "assistant"
2013
+ }) {
2014
+ if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
2015
+ return { prompt: prompt[0].content[0].text };
2016
+ }
2017
+ let text = "";
2018
+ if (prompt[0].role === "system") {
2019
+ text += `${prompt[0].content}
2020
+
2021
+ `;
2022
+ prompt = prompt.slice(1);
2023
+ }
2024
+ for (const { role, content } of prompt) {
2025
+ switch (role) {
2026
+ case "system": {
2027
+ throw new InvalidPromptError({
2028
+ message: "Unexpected system message in prompt: ${content}",
2029
+ prompt
2030
+ });
2031
+ }
2032
+ case "user": {
2033
+ const userMessage = content.map((part) => {
2034
+ switch (part.type) {
2035
+ case "text": {
2036
+ return part.text;
2037
+ }
2038
+ case "image": {
2039
+ throw new UnsupportedFunctionalityError({
2040
+ functionality: "images"
2041
+ });
2042
+ }
2043
+ }
2044
+ }).join("");
2045
+ text += `${user}:
2046
+ ${userMessage}
2047
+
2048
+ `;
2049
+ break;
2050
+ }
2051
+ case "assistant": {
2052
+ const assistantMessage = content.map((part) => {
2053
+ switch (part.type) {
2054
+ case "text": {
2055
+ return part.text;
2056
+ }
2057
+ case "tool-call": {
2058
+ throw new UnsupportedFunctionalityError({
2059
+ functionality: "tool-call messages"
2060
+ });
2061
+ }
2062
+ }
2063
+ }).join("");
2064
+ text += `${assistant}:
2065
+ ${assistantMessage}
2066
+
2067
+ `;
2068
+ break;
2069
+ }
2070
+ case "tool": {
2071
+ throw new UnsupportedFunctionalityError({
2072
+ functionality: "tool messages"
2073
+ });
2074
+ }
2075
+ default: {
2076
+ const _exhaustiveCheck = role;
2077
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2078
+ }
2079
+ }
2080
+ }
2081
+ text += `${assistant}:
2082
+ `;
2083
+ return {
2084
+ prompt: text,
2085
+ stopSequences: [`
2086
+ ${user}:`]
2087
+ };
2088
+ }
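+ // Worked example (input assumed, not from the bundle): the chat prompt
+ //   [{ role: "system", content: "Be brief." },
+ //    { role: "user", content: [{ type: "text", text: "Hi" }] }]
+ // is flattened by convertToOpenAICompletionPrompt into
+ //   { prompt: "Be brief.\n\nuser:\nHi\n\nassistant:\n",
+ //     stopSequences: ["\nuser:"] }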
2089
+ function mapOpenAICompletionLogProbs(logprobs) {
2090
+ return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
2091
+ token,
2092
+ logprob: logprobs.token_logprobs[index],
2093
+ topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
2094
+ ([token2, logprob]) => ({
2095
+ token: token2,
2096
+ logprob
2097
+ })
2098
+ ) : []
2099
+ }));
2100
+ }
2101
+ var OpenAICompletionLanguageModel = class {
2102
+ constructor(modelId, settings, config) {
2103
+ this.specificationVersion = "v1";
2104
+ this.defaultObjectGenerationMode = void 0;
2105
+ this.modelId = modelId;
2106
+ this.settings = settings;
2107
+ this.config = config;
2108
+ }
2109
+ get provider() {
2110
+ return this.config.provider;
2111
+ }
2112
+ getArgs({
2113
+ mode,
2114
+ inputFormat,
2115
+ prompt,
2116
+ maxTokens,
2117
+ temperature,
2118
+ topP,
2119
+ topK,
2120
+ frequencyPenalty,
2121
+ presencePenalty,
2122
+ stopSequences: userStopSequences,
2123
+ responseFormat,
2124
+ seed
2125
+ }) {
2126
+ var _a15;
2127
+ const type = mode.type;
2128
+ const warnings = [];
2129
+ if (topK != null) {
2130
+ warnings.push({
2131
+ type: "unsupported-setting",
2132
+ setting: "topK"
2133
+ });
2134
+ }
2135
+ if (responseFormat != null && responseFormat.type !== "text") {
2136
+ warnings.push({
2137
+ type: "unsupported-setting",
2138
+ setting: "responseFormat",
2139
+ details: "JSON response format is not supported."
2140
+ });
2141
+ }
2142
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
2143
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
2144
+ const baseArgs = {
2145
+ // model id:
2146
+ model: this.modelId,
2147
+ // model specific settings:
2148
+ echo: this.settings.echo,
2149
+ logit_bias: this.settings.logitBias,
2150
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
2151
+ suffix: this.settings.suffix,
2152
+ user: this.settings.user,
2153
+ // standardized settings:
2154
+ max_tokens: maxTokens,
2155
+ temperature,
2156
+ top_p: topP,
2157
+ frequency_penalty: frequencyPenalty,
2158
+ presence_penalty: presencePenalty,
2159
+ seed,
2160
+ // prompt:
2161
+ prompt: completionPrompt,
2162
+ // stop sequences:
2163
+ stop: stop.length > 0 ? stop : void 0
2164
+ };
2165
+ switch (type) {
2166
+ case "regular": {
2167
+ if ((_a15 = mode.tools) == null ? void 0 : _a15.length) {
2168
+ throw new UnsupportedFunctionalityError({
2169
+ functionality: "tools"
2170
+ });
2171
+ }
2172
+ if (mode.toolChoice) {
2173
+ throw new UnsupportedFunctionalityError({
2174
+ functionality: "toolChoice"
2175
+ });
2176
+ }
2177
+ return { args: baseArgs, warnings };
2178
+ }
2179
+ case "object-json": {
2180
+ throw new UnsupportedFunctionalityError({
2181
+ functionality: "object-json mode"
2182
+ });
2183
+ }
2184
+ case "object-tool": {
2185
+ throw new UnsupportedFunctionalityError({
2186
+ functionality: "object-tool mode"
2187
+ });
2188
+ }
2189
+ default: {
2190
+ const _exhaustiveCheck = type;
2191
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2192
+ }
2193
+ }
2194
+ }
2195
+ async doGenerate(options) {
2196
+ const { args, warnings } = this.getArgs(options);
2197
+ const {
2198
+ responseHeaders,
2199
+ value: response,
2200
+ rawValue: rawResponse
2201
+ } = await postJsonToApi({
2202
+ url: this.config.url({
2203
+ path: "/completions",
2204
+ modelId: this.modelId
2205
+ }),
2206
+ headers: combineHeaders(this.config.headers(), options.headers),
2207
+ body: args,
2208
+ failedResponseHandler: openaiFailedResponseHandler,
2209
+ successfulResponseHandler: createJsonResponseHandler(
2210
+ openaiCompletionResponseSchema
2211
+ ),
2212
+ abortSignal: options.abortSignal,
2213
+ fetch: this.config.fetch
2214
+ });
2215
+ const { prompt: rawPrompt, ...rawSettings } = args;
2216
+ const choice = response.choices[0];
2217
+ return {
2218
+ text: choice.text,
2219
+ usage: {
2220
+ promptTokens: response.usage.prompt_tokens,
2221
+ completionTokens: response.usage.completion_tokens
2222
+ },
2223
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
2224
+ logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
2225
+ rawCall: { rawPrompt, rawSettings },
2226
+ rawResponse: { headers: responseHeaders, body: rawResponse },
2227
+ response: getResponseMetadata(response),
2228
+ warnings,
2229
+ request: { body: JSON.stringify(args) }
2230
+ };
2231
+ }
2232
+ async doStream(options) {
2233
+ const { args, warnings } = this.getArgs(options);
2234
+ const body = {
2235
+ ...args,
2236
+ stream: true,
2237
+ // only include stream_options when in strict compatibility mode:
2238
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
2239
+ };
2240
+ const { responseHeaders, value: response } = await postJsonToApi({
2241
+ url: this.config.url({
2242
+ path: "/completions",
2243
+ modelId: this.modelId
2244
+ }),
2245
+ headers: combineHeaders(this.config.headers(), options.headers),
2246
+ body,
2247
+ failedResponseHandler: openaiFailedResponseHandler,
2248
+ successfulResponseHandler: createEventSourceResponseHandler(
2249
+ openaiCompletionChunkSchema
2250
+ ),
2251
+ abortSignal: options.abortSignal,
2252
+ fetch: this.config.fetch
2253
+ });
2254
+ const { prompt: rawPrompt, ...rawSettings } = args;
2255
+ let finishReason = "unknown";
2256
+ let usage = {
2257
+ promptTokens: Number.NaN,
2258
+ completionTokens: Number.NaN
2259
+ };
2260
+ let logprobs;
2261
+ let isFirstChunk = true;
2262
+ return {
2263
+ stream: response.pipeThrough(
2264
+ new TransformStream({
2265
+ transform(chunk, controller) {
2266
+ if (!chunk.success) {
2267
+ finishReason = "error";
2268
+ controller.enqueue({ type: "error", error: chunk.error });
2269
+ return;
2270
+ }
2271
+ const value = chunk.value;
2272
+ if ("error" in value) {
2273
+ finishReason = "error";
2274
+ controller.enqueue({ type: "error", error: value.error });
2275
+ return;
2276
+ }
2277
+ if (isFirstChunk) {
2278
+ isFirstChunk = false;
2279
+ controller.enqueue({
2280
+ type: "response-metadata",
2281
+ ...getResponseMetadata(value)
2282
+ });
2283
+ }
2284
+ if (value.usage != null) {
2285
+ usage = {
2286
+ promptTokens: value.usage.prompt_tokens,
2287
+ completionTokens: value.usage.completion_tokens
2288
+ };
2289
+ }
2290
+ const choice = value.choices[0];
2291
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
2292
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
2293
+ }
2294
+ if ((choice == null ? void 0 : choice.text) != null) {
2295
+ controller.enqueue({
2296
+ type: "text-delta",
2297
+ textDelta: choice.text
2298
+ });
2299
+ }
2300
+ const mappedLogprobs = mapOpenAICompletionLogProbs(
2301
+ choice == null ? void 0 : choice.logprobs
2302
+ );
2303
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
2304
+ if (logprobs === void 0) logprobs = [];
2305
+ logprobs.push(...mappedLogprobs);
2306
+ }
2307
+ },
2308
+ flush(controller) {
2309
+ controller.enqueue({
2310
+ type: "finish",
2311
+ finishReason,
2312
+ logprobs,
2313
+ usage
2314
+ });
2315
+ }
2316
+ })
2317
+ ),
2318
+ rawCall: { rawPrompt, rawSettings },
2319
+ rawResponse: { headers: responseHeaders },
2320
+ warnings,
2321
+ request: { body: JSON.stringify(body) }
2322
+ };
2323
+ }
2324
+ };
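+ // Note on the completion model's `logprobs` mapping in getArgs above: a
+ // numeric setting is passed through unchanged, `true` becomes 0 (token
+ // logprobs with no alternatives), and `false`/undefined omit the field.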
2325
+ var openaiCompletionResponseSchema = zod.z.object({
2326
+ id: zod.z.string().nullish(),
2327
+ created: zod.z.number().nullish(),
2328
+ model: zod.z.string().nullish(),
2329
+ choices: zod.z.array(
2330
+ zod.z.object({
2331
+ text: zod.z.string(),
2332
+ finish_reason: zod.z.string(),
2333
+ logprobs: zod.z.object({
2334
+ tokens: zod.z.array(zod.z.string()),
2335
+ token_logprobs: zod.z.array(zod.z.number()),
2336
+ top_logprobs: zod.z.array(zod.z.record(zod.z.string(), zod.z.number())).nullable()
2337
+ }).nullish()
2338
+ })
2339
+ ),
2340
+ usage: zod.z.object({
2341
+ prompt_tokens: zod.z.number(),
2342
+ completion_tokens: zod.z.number()
2343
+ })
2344
+ });
2345
+ var openaiCompletionChunkSchema = zod.z.union([
2346
+ zod.z.object({
2347
+ id: zod.z.string().nullish(),
2348
+ created: zod.z.number().nullish(),
2349
+ model: zod.z.string().nullish(),
2350
+ choices: zod.z.array(
2351
+ zod.z.object({
2352
+ text: zod.z.string(),
2353
+ finish_reason: zod.z.string().nullish(),
2354
+ index: zod.z.number(),
2355
+ logprobs: zod.z.object({
2356
+ tokens: zod.z.array(zod.z.string()),
2357
+ token_logprobs: zod.z.array(zod.z.number()),
2358
+ top_logprobs: zod.z.array(zod.z.record(zod.z.string(), zod.z.number())).nullable()
2359
+ }).nullish()
2360
+ })
2361
+ ),
2362
+ usage: zod.z.object({
2363
+ prompt_tokens: zod.z.number(),
2364
+ completion_tokens: zod.z.number()
2365
+ }).nullish()
2366
+ }),
2367
+ openaiErrorDataSchema
2368
+ ]);
2369
+ var OpenAIEmbeddingModel = class {
2370
+ constructor(modelId, settings, config) {
2371
+ this.specificationVersion = "v1";
2372
+ this.modelId = modelId;
2373
+ this.settings = settings;
2374
+ this.config = config;
2375
+ }
2376
+ get provider() {
2377
+ return this.config.provider;
2378
+ }
2379
+ get maxEmbeddingsPerCall() {
2380
+ var _a15;
2381
+ return (_a15 = this.settings.maxEmbeddingsPerCall) != null ? _a15 : 2048;
2382
+ }
2383
+ get supportsParallelCalls() {
2384
+ var _a15;
2385
+ return (_a15 = this.settings.supportsParallelCalls) != null ? _a15 : true;
2386
+ }
2387
+ async doEmbed({
2388
+ values,
2389
+ headers,
2390
+ abortSignal
2391
+ }) {
2392
+ if (values.length > this.maxEmbeddingsPerCall) {
2393
+ throw new TooManyEmbeddingValuesForCallError({
2394
+ provider: this.provider,
2395
+ modelId: this.modelId,
2396
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
2397
+ values
2398
+ });
2399
+ }
2400
+ const { responseHeaders, value: response } = await postJsonToApi({
2401
+ url: this.config.url({
2402
+ path: "/embeddings",
2403
+ modelId: this.modelId
2404
+ }),
2405
+ headers: combineHeaders(this.config.headers(), headers),
2406
+ body: {
2407
+ model: this.modelId,
2408
+ input: values,
2409
+ encoding_format: "float",
2410
+ dimensions: this.settings.dimensions,
2411
+ user: this.settings.user
2412
+ },
2413
+ failedResponseHandler: openaiFailedResponseHandler,
2414
+ successfulResponseHandler: createJsonResponseHandler(
2415
+ openaiTextEmbeddingResponseSchema
2416
+ ),
2417
+ abortSignal,
2418
+ fetch: this.config.fetch
2419
+ });
2420
+ return {
2421
+ embeddings: response.data.map((item) => item.embedding),
2422
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
2423
+ rawResponse: { headers: responseHeaders }
2424
+ };
2425
+ }
2426
+ };
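+ // Usage sketch (helper assumed, not part of the bundle): doEmbed rejects
+ // batches larger than maxEmbeddingsPerCall (default 2048), so callers must
+ // chunk first, e.g.:
+ //   async function embedAll(model, values) {
+ //     const out = [];
+ //     for (let i = 0; i < values.length; i += model.maxEmbeddingsPerCall) {
+ //       const batch = values.slice(i, i + model.maxEmbeddingsPerCall);
+ //       out.push(...(await model.doEmbed({ values: batch })).embeddings);
+ //     }
+ //     return out;
+ //   }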
2427
+ var openaiTextEmbeddingResponseSchema = zod.z.object({
2428
+ data: zod.z.array(zod.z.object({ embedding: zod.z.array(zod.z.number()) })),
2429
+ usage: zod.z.object({ prompt_tokens: zod.z.number() }).nullish()
2430
+ });
2431
+ var modelMaxImagesPerCall = {
2432
+ "dall-e-3": 1,
2433
+ "dall-e-2": 10
2434
+ };
2435
+ var OpenAIImageModel = class {
2436
+ constructor(modelId, settings, config) {
2437
+ this.modelId = modelId;
2438
+ this.settings = settings;
2439
+ this.config = config;
2440
+ this.specificationVersion = "v1";
2441
+ }
2442
+ get maxImagesPerCall() {
2443
+ var _a15, _b;
2444
+ return (_b = (_a15 = this.settings.maxImagesPerCall) != null ? _a15 : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
2445
+ }
2446
+ get provider() {
2447
+ return this.config.provider;
2448
+ }
2449
+ async doGenerate({
2450
+ prompt,
2451
+ n,
2452
+ size,
2453
+ aspectRatio,
2454
+ seed,
2455
+ providerOptions,
2456
+ headers,
2457
+ abortSignal
2458
+ }) {
2459
+ var _a15, _b, _c, _d;
2460
+ const warnings = [];
2461
+ if (aspectRatio != null) {
2462
+ warnings.push({
2463
+ type: "unsupported-setting",
2464
+ setting: "aspectRatio",
2465
+ details: "This model does not support aspect ratio. Use `size` instead."
2466
+ });
2467
+ }
2468
+ if (seed != null) {
2469
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
2470
+ }
2471
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
2472
+ const { value: response, responseHeaders } = await postJsonToApi({
2473
+ url: this.config.url({
2474
+ path: "/images/generations",
2475
+ modelId: this.modelId
2476
+ }),
2477
+ headers: combineHeaders(this.config.headers(), headers),
2478
+ body: {
2479
+ model: this.modelId,
2480
+ prompt,
2481
+ n,
2482
+ size,
2483
+ ...(_d = providerOptions.openai) != null ? _d : {},
2484
+ response_format: "b64_json"
2485
+ },
2486
+ failedResponseHandler: openaiFailedResponseHandler,
2487
+ successfulResponseHandler: createJsonResponseHandler(
2488
+ openaiImageResponseSchema
2489
+ ),
2490
+ abortSignal,
2491
+ fetch: this.config.fetch
2492
+ });
2493
+ return {
2494
+ images: response.data.map((item) => item.b64_json),
2495
+ warnings,
2496
+ response: {
2497
+ timestamp: currentDate,
2498
+ modelId: this.modelId,
2499
+ headers: responseHeaders
2500
+ }
2501
+ };
2502
+ }
2503
+ };
2504
+ var openaiImageResponseSchema = zod.z.object({
2505
+ data: zod.z.array(zod.z.object({ b64_json: zod.z.string() }))
2506
+ });
2507
+ var openAIProviderOptionsSchema = zod.z.object({
2508
+ include: zod.z.array(zod.z.string()).nullish(),
2509
+ language: zod.z.string().nullish(),
2510
+ prompt: zod.z.string().nullish(),
2511
+ temperature: zod.z.number().min(0).max(1).nullish().default(0),
2512
+ timestampGranularities: zod.z.array(zod.z.enum(["word", "segment"])).nullish().default(["segment"])
2513
+ });
2514
+ var languageMap = {
2515
+ afrikaans: "af",
2516
+ arabic: "ar",
2517
+ armenian: "hy",
2518
+ azerbaijani: "az",
2519
+ belarusian: "be",
2520
+ bosnian: "bs",
2521
+ bulgarian: "bg",
2522
+ catalan: "ca",
2523
+ chinese: "zh",
2524
+ croatian: "hr",
2525
+ czech: "cs",
2526
+ danish: "da",
2527
+ dutch: "nl",
2528
+ english: "en",
2529
+ estonian: "et",
2530
+ finnish: "fi",
2531
+ french: "fr",
2532
+ galician: "gl",
2533
+ german: "de",
2534
+ greek: "el",
2535
+ hebrew: "he",
2536
+ hindi: "hi",
2537
+ hungarian: "hu",
2538
+ icelandic: "is",
2539
+ indonesian: "id",
2540
+ italian: "it",
2541
+ japanese: "ja",
2542
+ kannada: "kn",
2543
+ kazakh: "kk",
2544
+ korean: "ko",
2545
+ latvian: "lv",
2546
+ lithuanian: "lt",
2547
+ macedonian: "mk",
2548
+ malay: "ms",
2549
+ marathi: "mr",
2550
+ maori: "mi",
2551
+ nepali: "ne",
2552
+ norwegian: "no",
2553
+ persian: "fa",
2554
+ polish: "pl",
2555
+ portuguese: "pt",
2556
+ romanian: "ro",
2557
+ russian: "ru",
2558
+ serbian: "sr",
2559
+ slovak: "sk",
2560
+ slovenian: "sl",
2561
+ spanish: "es",
2562
+ swahili: "sw",
2563
+ swedish: "sv",
2564
+ tagalog: "tl",
2565
+ tamil: "ta",
2566
+ thai: "th",
2567
+ turkish: "tr",
2568
+ ukrainian: "uk",
2569
+ urdu: "ur",
2570
+ vietnamese: "vi",
2571
+ welsh: "cy"
2572
+ };
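+ // languageMap translates the full language names returned by the
+ // transcription endpoint (e.g. "english") into ISO 639-1 codes ("en");
+ // names without an entry leave `language` undefined in doGenerate below.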
2573
+ var OpenAITranscriptionModel = class {
2574
+ constructor(modelId, config) {
2575
+ this.modelId = modelId;
2576
+ this.config = config;
2577
+ this.specificationVersion = "v1";
2578
+ }
2579
+ get provider() {
2580
+ return this.config.provider;
2581
+ }
2582
+ getArgs({
2583
+ audio,
2584
+ mediaType,
2585
+ providerOptions
2586
+ }) {
2587
+ var _a15, _b, _c, _d, _e;
2588
+ const warnings = [];
2589
+ const openAIOptions = parseProviderOptions({
2590
+ provider: "openai",
2591
+ providerOptions,
2592
+ schema: openAIProviderOptionsSchema
2593
+ });
2594
+ const formData = new FormData();
2595
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
2596
+ formData.append("model", this.modelId);
2597
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
2598
+ if (openAIOptions) {
2599
+ const transcriptionModelOptions = {
2600
+ include: (_a15 = openAIOptions.include) != null ? _a15 : void 0,
2601
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
2602
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
2603
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
2604
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
2605
+ };
2606
+ for (const key in transcriptionModelOptions) {
2607
+ const value = transcriptionModelOptions[key];
2608
+ if (value !== void 0) {
2609
+ formData.append(key, String(value));
2610
+ }
2611
+ }
2612
+ }
2613
+ return {
2614
+ formData,
2615
+ warnings
2616
+ };
2617
+ }
2618
+ async doGenerate(options) {
2619
+ var _a15, _b, _c, _d, _e, _f;
2620
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
2621
+ const { formData, warnings } = this.getArgs(options);
2622
+ const {
2623
+ value: response,
2624
+ responseHeaders,
2625
+ rawValue: rawResponse
2626
+ } = await postFormDataToApi({
2627
+ url: this.config.url({
2628
+ path: "/audio/transcriptions",
2629
+ modelId: this.modelId
2630
+ }),
2631
+ headers: combineHeaders(this.config.headers(), options.headers),
2632
+ formData,
2633
+ failedResponseHandler: openaiFailedResponseHandler,
2634
+ successfulResponseHandler: createJsonResponseHandler(
2635
+ openaiTranscriptionResponseSchema
2636
+ ),
2637
+ abortSignal: options.abortSignal,
2638
+ fetch: this.config.fetch
2639
+ });
2640
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
2641
+ return {
2642
+ text: response.text,
2643
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
2644
+ text: word.word,
2645
+ startSecond: word.start,
2646
+ endSecond: word.end
2647
+ }))) != null ? _e : [],
2648
+ language,
2649
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
2650
+ warnings,
2651
+ response: {
2652
+ timestamp: currentDate,
2653
+ modelId: this.modelId,
2654
+ headers: responseHeaders,
2655
+ body: rawResponse
2656
+ }
2657
+ };
2658
+ }
2659
+ };
2660
+ var openaiTranscriptionResponseSchema = zod.z.object({
2661
+ text: zod.z.string(),
2662
+ language: zod.z.string().nullish(),
2663
+ duration: zod.z.number().nullish(),
2664
+ words: zod.z.array(
2665
+ zod.z.object({
2666
+ word: zod.z.string(),
2667
+ start: zod.z.number(),
2668
+ end: zod.z.number()
2669
+ })
2670
+ ).nullish()
2671
+ });
2672
+ function convertToOpenAIResponsesMessages({
2673
+ prompt,
2674
+ systemMessageMode
2675
+ }) {
2676
+ const messages = [];
2677
+ const warnings = [];
2678
+ for (const { role, content } of prompt) {
2679
+ switch (role) {
2680
+ case "system": {
2681
+ switch (systemMessageMode) {
2682
+ case "system": {
2683
+ messages.push({ role: "system", content });
2684
+ break;
2685
+ }
2686
+ case "developer": {
2687
+ messages.push({ role: "developer", content });
2688
+ break;
2689
+ }
2690
+ case "remove": {
2691
+ warnings.push({
2692
+ type: "other",
2693
+ message: "system messages are removed for this model"
2694
+ });
2695
+ break;
2696
+ }
2697
+ default: {
2698
+ const _exhaustiveCheck = systemMessageMode;
2699
+ throw new Error(
2700
+ `Unsupported system message mode: ${_exhaustiveCheck}`
2701
+ );
2702
+ }
2703
+ }
2704
+ break;
2705
+ }
2706
+ case "user": {
2707
+ messages.push({
2708
+ role: "user",
2709
+ content: content.map((part, index) => {
2710
+ var _a15, _b, _c, _d;
2711
+ switch (part.type) {
2712
+ case "text": {
2713
+ return { type: "input_text", text: part.text };
2714
+ }
2715
+ case "image": {
2716
+ return {
2717
+ type: "input_image",
2718
+ image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
2719
+ // OpenAI specific extension: image detail
2720
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
2721
+ };
2722
+ }
2723
+ case "file": {
2724
+ if (part.data instanceof URL) {
2725
+ throw new UnsupportedFunctionalityError({
2726
+ functionality: "File URLs in user messages"
2727
+ });
2728
+ }
2729
+ switch (part.mimeType) {
2730
+ case "application/pdf": {
2731
+ return {
2732
+ type: "input_file",
2733
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
2734
+ file_data: `data:application/pdf;base64,${part.data}`
2735
+ };
2736
+ }
2737
+ default: {
2738
+ throw new UnsupportedFunctionalityError({
2739
+ functionality: "Only PDF files are supported in user messages"
2740
+ });
2741
+ }
2742
+ }
2743
+ }
2744
+ }
2745
+ })
2746
+ });
2747
+ break;
2748
+ }
2749
+ case "assistant": {
2750
+ for (const part of content) {
2751
+ switch (part.type) {
2752
+ case "text": {
2753
+ messages.push({
2754
+ role: "assistant",
2755
+ content: [{ type: "output_text", text: part.text }]
2756
+ });
2757
+ break;
2758
+ }
2759
+ case "tool-call": {
2760
+ messages.push({
2761
+ type: "function_call",
2762
+ call_id: part.toolCallId,
2763
+ name: part.toolName,
2764
+ arguments: JSON.stringify(part.args)
2765
+ });
2766
+ break;
2767
+ }
2768
+ }
2769
+ }
2770
+ break;
2771
+ }
2772
+ case "tool": {
2773
+ for (const part of content) {
2774
+ messages.push({
2775
+ type: "function_call_output",
2776
+ call_id: part.toolCallId,
2777
+ output: JSON.stringify(part.result)
2778
+ });
2779
+ }
2780
+ break;
2781
+ }
2782
+ default: {
2783
+ const _exhaustiveCheck = role;
2784
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2785
+ }
2786
+ }
2787
+ }
2788
+ return { messages, warnings };
2789
+ }
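+ // Worked example (input assumed): a tool-result message
+ //   { role: "tool", content: [{ toolCallId: "call_1", result: { ok: true } }] }
+ // becomes the Responses API item
+ //   { type: "function_call_output", call_id: "call_1", output: "{\"ok\":true}" }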
2790
+ function mapOpenAIResponseFinishReason({
2791
+ finishReason,
2792
+ hasToolCalls
2793
+ }) {
2794
+ switch (finishReason) {
2795
+ case void 0:
2796
+ case null:
2797
+ return hasToolCalls ? "tool-calls" : "stop";
2798
+ case "max_output_tokens":
2799
+ return "length";
2800
+ case "content_filter":
2801
+ return "content-filter";
2802
+ default:
2803
+ return hasToolCalls ? "tool-calls" : "unknown";
2804
+ }
2805
+ }
2806
+ function prepareResponsesTools({
2807
+ mode,
2808
+ strict
2809
+ }) {
2810
+ var _a15;
2811
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
2812
+ const toolWarnings = [];
2813
+ if (tools == null) {
2814
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
2815
+ }
2816
+ const toolChoice = mode.toolChoice;
2817
+ const openaiTools2 = [];
2818
+ for (const tool of tools) {
2819
+ switch (tool.type) {
2820
+ case "function":
2821
+ openaiTools2.push({
2822
+ type: "function",
2823
+ name: tool.name,
2824
+ description: tool.description,
2825
+ parameters: tool.parameters,
2826
+ strict: strict ? true : void 0
2827
+ });
2828
+ break;
2829
+ case "provider-defined":
2830
+ switch (tool.id) {
2831
+ case "openai.web_search_preview":
2832
+ openaiTools2.push({
2833
+ type: "web_search_preview",
2834
+ search_context_size: tool.args.searchContextSize,
2835
+ user_location: tool.args.userLocation
2836
+ });
2837
+ break;
2838
+ default:
2839
+ toolWarnings.push({ type: "unsupported-tool", tool });
2840
+ break;
2841
+ }
2842
+ break;
2843
+ default:
2844
+ toolWarnings.push({ type: "unsupported-tool", tool });
2845
+ break;
2846
+ }
2847
+ }
2848
+ if (toolChoice == null) {
2849
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
2850
+ }
2851
+ const type = toolChoice.type;
2852
+ switch (type) {
2853
+ case "auto":
2854
+ case "none":
2855
+ case "required":
2856
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
2857
+ case "tool": {
2858
+ if (toolChoice.toolName === "web_search_preview") {
2859
+ return {
2860
+ tools: openaiTools2,
2861
+ tool_choice: {
2862
+ type: "web_search_preview"
2863
+ },
2864
+ toolWarnings
2865
+ };
2866
+ }
2867
+ return {
2868
+ tools: openaiTools2,
2869
+ tool_choice: {
2870
+ type: "function",
2871
+ name: toolChoice.toolName
2872
+ },
2873
+ toolWarnings
2874
+ };
2875
+ }
2876
+ default: {
2877
+ const _exhaustiveCheck = type;
2878
+ throw new UnsupportedFunctionalityError({
2879
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
2880
+ });
2881
+ }
2882
+ }
2883
+ }
2884
+ var OpenAIResponsesLanguageModel = class {
2885
+ constructor(modelId, config) {
2886
+ this.specificationVersion = "v1";
2887
+ this.defaultObjectGenerationMode = "json";
2888
+ this.supportsStructuredOutputs = true;
2889
+ this.modelId = modelId;
2890
+ this.config = config;
2891
+ }
2892
+ get provider() {
2893
+ return this.config.provider;
2894
+ }
2895
+ getArgs({
2896
+ mode,
2897
+ maxTokens,
2898
+ temperature,
2899
+ stopSequences,
2900
+ topP,
2901
+ topK,
2902
+ presencePenalty,
2903
+ frequencyPenalty,
2904
+ seed,
2905
+ prompt,
2906
+ providerMetadata,
2907
+ responseFormat
2908
+ }) {
2909
+ var _a15, _b, _c;
2910
+ const warnings = [];
2911
+ const modelConfig = getResponsesModelConfig(this.modelId);
2912
+ const type = mode.type;
2913
+ if (topK != null) {
2914
+ warnings.push({
2915
+ type: "unsupported-setting",
2916
+ setting: "topK"
2917
+ });
2918
+ }
2919
+ if (seed != null) {
2920
+ warnings.push({
2921
+ type: "unsupported-setting",
2922
+ setting: "seed"
2923
+ });
2924
+ }
2925
+ if (presencePenalty != null) {
2926
+ warnings.push({
2927
+ type: "unsupported-setting",
2928
+ setting: "presencePenalty"
2929
+ });
2930
+ }
2931
+ if (frequencyPenalty != null) {
2932
+ warnings.push({
2933
+ type: "unsupported-setting",
2934
+ setting: "frequencyPenalty"
2935
+ });
2936
+ }
2937
+ if (stopSequences != null) {
2938
+ warnings.push({
2939
+ type: "unsupported-setting",
2940
+ setting: "stopSequences"
2941
+ });
2942
+ }
2943
+ const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
2944
+ prompt,
2945
+ systemMessageMode: modelConfig.systemMessageMode
2946
+ });
2947
+ warnings.push(...messageWarnings);
2948
+ const openaiOptions = parseProviderOptions({
2949
+ provider: "openai",
2950
+ providerOptions: providerMetadata,
2951
+ schema: openaiResponsesProviderOptionsSchema
2952
+ });
2953
+ const isStrict = (_a15 = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a15 : true;
2954
+ const baseArgs = {
2955
+ model: this.modelId,
2956
+ input: messages,
2957
+ temperature,
2958
+ top_p: topP,
2959
+ max_output_tokens: maxTokens,
2960
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2961
+ text: {
2962
+ format: responseFormat.schema != null ? {
2963
+ type: "json_schema",
2964
+ strict: isStrict,
2965
+ name: (_b = responseFormat.name) != null ? _b : "response",
2966
+ description: responseFormat.description,
2967
+ schema: responseFormat.schema
2968
+ } : { type: "json_object" }
2969
+ }
2970
+ },
2971
+ // provider options:
2972
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2973
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2974
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2975
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
2976
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
2977
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2978
+ // model-specific settings:
2979
+ ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2980
+ reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
2981
+ },
2982
+ ...modelConfig.requiredAutoTruncation && {
2983
+ truncation: "auto"
2984
+ }
2985
+ };
2986
+ if (modelConfig.isReasoningModel) {
2987
+ if (baseArgs.temperature != null) {
2988
+ baseArgs.temperature = void 0;
2989
+ warnings.push({
2990
+ type: "unsupported-setting",
2991
+ setting: "temperature",
2992
+ details: "temperature is not supported for reasoning models"
2993
+ });
2994
+ }
2995
+ if (baseArgs.top_p != null) {
2996
+ baseArgs.top_p = void 0;
2997
+ warnings.push({
2998
+ type: "unsupported-setting",
2999
+ setting: "topP",
3000
+ details: "topP is not supported for reasoning models"
3001
+ });
3002
+ }
3003
+ }
3004
+ switch (type) {
3005
+ case "regular": {
3006
+ const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
3007
+ mode,
3008
+ strict: isStrict
3009
+ // TODO support provider options on tools
3010
+ });
3011
+ return {
3012
+ args: {
3013
+ ...baseArgs,
3014
+ tools,
3015
+ tool_choice
3016
+ },
3017
+ warnings: [...warnings, ...toolWarnings]
3018
+ };
3019
+ }
3020
+ case "object-json": {
3021
+ return {
3022
+ args: {
3023
+ ...baseArgs,
3024
+ text: {
3025
+ format: mode.schema != null ? {
3026
+ type: "json_schema",
3027
+ strict: isStrict,
3028
+ name: (_c = mode.name) != null ? _c : "response",
3029
+ description: mode.description,
3030
+ schema: mode.schema
3031
+ } : { type: "json_object" }
3032
+ }
3033
+ },
3034
+ warnings
3035
+ };
3036
+ }
3037
+ case "object-tool": {
3038
+ return {
3039
+ args: {
3040
+ ...baseArgs,
3041
+ tool_choice: { type: "function", name: mode.tool.name },
3042
+ tools: [
3043
+ {
3044
+ type: "function",
3045
+ name: mode.tool.name,
3046
+ description: mode.tool.description,
3047
+ parameters: mode.tool.parameters,
3048
+ strict: isStrict
3049
+ }
3050
+ ]
3051
+ },
3052
+ warnings
3053
+ };
3054
+ }
3055
+ default: {
3056
+ const _exhaustiveCheck = type;
3057
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
3058
+ }
3059
+ }
3060
+ }
3061
+ async doGenerate(options) {
3062
+ var _a15, _b, _c, _d, _e;
3063
+ const { args: body, warnings } = this.getArgs(options);
3064
+ const {
3065
+ responseHeaders,
3066
+ value: response,
3067
+ rawValue: rawResponse
3068
+ } = await postJsonToApi({
3069
+ url: this.config.url({
3070
+ path: "/responses",
3071
+ modelId: this.modelId
3072
+ }),
3073
+ headers: combineHeaders(this.config.headers(), options.headers),
3074
+ body,
3075
+ failedResponseHandler: openaiFailedResponseHandler,
3076
+ successfulResponseHandler: createJsonResponseHandler(
3077
+ zod.z.object({
3078
+ id: zod.z.string(),
3079
+ created_at: zod.z.number(),
3080
+ model: zod.z.string(),
3081
+ output: zod.z.array(
3082
+ zod.z.discriminatedUnion("type", [
3083
+ zod.z.object({
3084
+ type: zod.z.literal("message"),
3085
+ role: zod.z.literal("assistant"),
3086
+ content: zod.z.array(
3087
+ zod.z.object({
3088
+ type: zod.z.literal("output_text"),
3089
+ text: zod.z.string(),
3090
+ annotations: zod.z.array(
3091
+ zod.z.object({
3092
+ type: zod.z.literal("url_citation"),
3093
+ start_index: zod.z.number(),
3094
+ end_index: zod.z.number(),
3095
+ url: zod.z.string(),
3096
+ title: zod.z.string()
3097
+ })
3098
+ )
3099
+ })
3100
+ )
3101
+ }),
3102
+ zod.z.object({
3103
+ type: zod.z.literal("function_call"),
3104
+ call_id: zod.z.string(),
3105
+ name: zod.z.string(),
3106
+ arguments: zod.z.string()
3107
+ }),
3108
+ zod.z.object({
3109
+ type: zod.z.literal("web_search_call")
3110
+ }),
3111
+ zod.z.object({
3112
+ type: zod.z.literal("computer_call")
3113
+ }),
3114
+ zod.z.object({
3115
+ type: zod.z.literal("reasoning")
3116
+ })
3117
+ ])
3118
+ ),
3119
+ incomplete_details: zod.z.object({ reason: zod.z.string() }).nullable(),
3120
+ usage: usageSchema
3121
+ })
3122
+ ),
3123
+ abortSignal: options.abortSignal,
3124
+ fetch: this.config.fetch
3125
+ });
3126
+ const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
3127
+ const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
3128
+ toolCallType: "function",
3129
+ toolCallId: output.call_id,
3130
+ toolName: output.name,
3131
+ args: output.arguments
3132
+ }));
3133
+ return {
3134
+ text: outputTextElements.map((content) => content.text).join("\n"),
3135
+ sources: outputTextElements.flatMap(
3136
+ (content) => content.annotations.map((annotation) => {
3137
+ var _a22, _b2, _c2;
3138
+ return {
3139
+ sourceType: "url",
3140
+ id: (_c2 = (_b2 = (_a22 = this.config).generateId) == null ? void 0 : _b2.call(_a22)) != null ? _c2 : generateId(),
3141
+ url: annotation.url,
3142
+ title: annotation.title
3143
+ };
3144
+ })
3145
+ ),
3146
+ finishReason: mapOpenAIResponseFinishReason({
3147
+ finishReason: (_a15 = response.incomplete_details) == null ? void 0 : _a15.reason,
3148
+ hasToolCalls: toolCalls.length > 0
3149
+ }),
3150
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
3151
+ usage: {
3152
+ promptTokens: response.usage.input_tokens,
3153
+ completionTokens: response.usage.output_tokens
3154
+ },
3155
+ rawCall: {
3156
+ rawPrompt: void 0,
3157
+ rawSettings: {}
3158
+ },
3159
+ rawResponse: {
3160
+ headers: responseHeaders,
3161
+ body: rawResponse
3162
+ },
3163
+ request: {
3164
+ body: JSON.stringify(body)
3165
+ },
3166
+ response: {
3167
+ id: response.id,
3168
+ timestamp: new Date(response.created_at * 1e3),
3169
+ modelId: response.model
3170
+ },
3171
+ providerMetadata: {
3172
+ openai: {
3173
+ responseId: response.id,
3174
+ cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
3175
+ reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
3176
+ }
3177
+ },
3178
+ warnings
3179
+ };
3180
+ }
3181
+ async doStream(options) {
3182
+ const { args: body, warnings } = this.getArgs(options);
3183
+ const { responseHeaders, value: response } = await postJsonToApi({
3184
+ url: this.config.url({
3185
+ path: "/responses",
3186
+ modelId: this.modelId
3187
+ }),
3188
+ headers: combineHeaders(this.config.headers(), options.headers),
3189
+ body: {
3190
+ ...body,
3191
+ stream: true
3192
+ },
3193
+ failedResponseHandler: openaiFailedResponseHandler,
3194
+ successfulResponseHandler: createEventSourceResponseHandler(
3195
+ openaiResponsesChunkSchema
3196
+ ),
3197
+ abortSignal: options.abortSignal,
3198
+ fetch: this.config.fetch
3199
+ });
3200
+ const self = this;
3201
+ let finishReason = "unknown";
3202
+ let promptTokens = NaN;
3203
+ let completionTokens = NaN;
3204
+ let cachedPromptTokens = null;
3205
+ let reasoningTokens = null;
3206
+ let responseId = null;
3207
+ const ongoingToolCalls = {};
3208
+ let hasToolCalls = false;
3209
+ return {
3210
+ stream: response.pipeThrough(
3211
+ new TransformStream({
3212
+ transform(chunk, controller) {
3213
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
3214
+ if (!chunk.success) {
3215
+ finishReason = "error";
3216
+ controller.enqueue({ type: "error", error: chunk.error });
3217
+ return;
3218
+ }
3219
+ const value = chunk.value;
3220
+ if (isResponseOutputItemAddedChunk(value)) {
3221
+ if (value.item.type === "function_call") {
3222
+ ongoingToolCalls[value.output_index] = {
3223
+ toolName: value.item.name,
3224
+ toolCallId: value.item.call_id
3225
+ };
3226
+ controller.enqueue({
3227
+ type: "tool-call-delta",
3228
+ toolCallType: "function",
3229
+ toolCallId: value.item.call_id,
3230
+ toolName: value.item.name,
3231
+ argsTextDelta: value.item.arguments
3232
+ });
3233
+ }
3234
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
3235
+ const toolCall = ongoingToolCalls[value.output_index];
3236
+ if (toolCall != null) {
3237
+ controller.enqueue({
3238
+ type: "tool-call-delta",
3239
+ toolCallType: "function",
3240
+ toolCallId: toolCall.toolCallId,
3241
+ toolName: toolCall.toolName,
3242
+ argsTextDelta: value.delta
3243
+ });
3244
+ }
3245
+ } else if (isResponseCreatedChunk(value)) {
3246
+ responseId = value.response.id;
3247
+ controller.enqueue({
3248
+ type: "response-metadata",
3249
+ id: value.response.id,
3250
+ timestamp: new Date(value.response.created_at * 1e3),
3251
+ modelId: value.response.model
3252
+ });
3253
+ } else if (isTextDeltaChunk(value)) {
3254
+ controller.enqueue({
3255
+ type: "text-delta",
3256
+ textDelta: value.delta
3257
+ });
3258
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
3259
+ ongoingToolCalls[value.output_index] = void 0;
3260
+ hasToolCalls = true;
3261
+ controller.enqueue({
3262
+ type: "tool-call",
3263
+ toolCallType: "function",
3264
+ toolCallId: value.item.call_id,
3265
+ toolName: value.item.name,
3266
+ args: value.item.arguments
3267
+ });
3268
+ } else if (isResponseFinishedChunk(value)) {
3269
+ finishReason = mapOpenAIResponseFinishReason({
3270
+ finishReason: (_a15 = value.response.incomplete_details) == null ? void 0 : _a15.reason,
3271
+ hasToolCalls
3272
+ });
3273
+ promptTokens = value.response.usage.input_tokens;
3274
+ completionTokens = value.response.usage.output_tokens;
3275
+ cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
3276
+ reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
3277
+ } else if (isResponseAnnotationAddedChunk(value)) {
3278
+ controller.enqueue({
3279
+ type: "source",
3280
+ source: {
3281
+ sourceType: "url",
3282
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId(),
3283
+ url: value.annotation.url,
3284
+ title: value.annotation.title
3285
+ }
3286
+ });
3287
+ }
3288
+ },
3289
+ flush(controller) {
3290
+ controller.enqueue({
3291
+ type: "finish",
3292
+ finishReason,
3293
+ usage: { promptTokens, completionTokens },
3294
+ ...(cachedPromptTokens != null || reasoningTokens != null) && {
3295
+ providerMetadata: {
3296
+ openai: {
3297
+ responseId,
3298
+ cachedPromptTokens,
3299
+ reasoningTokens
3300
+ }
3301
+ }
3302
+ }
3303
+ });
3304
+ }
3305
+ })
3306
+ ),
3307
+ rawCall: {
3308
+ rawPrompt: void 0,
3309
+ rawSettings: {}
3310
+ },
3311
+ rawResponse: { headers: responseHeaders },
3312
+ request: { body: JSON.stringify(body) },
3313
+ warnings
3314
+ };
3315
+ }
3316
+ };
3317
+ var usageSchema = zod.z.object({
3318
+ input_tokens: zod.z.number(),
3319
+ input_tokens_details: zod.z.object({ cached_tokens: zod.z.number().nullish() }).nullish(),
3320
+ output_tokens: zod.z.number(),
3321
+ output_tokens_details: zod.z.object({ reasoning_tokens: zod.z.number().nullish() }).nullish()
3322
+ });
3323
+ var textDeltaChunkSchema = zod.z.object({
3324
+ type: zod.z.literal("response.output_text.delta"),
3325
+ delta: zod.z.string()
3326
+ });
3327
+ var responseFinishedChunkSchema = zod.z.object({
3328
+ type: zod.z.enum(["response.completed", "response.incomplete"]),
3329
+ response: zod.z.object({
3330
+ incomplete_details: zod.z.object({ reason: zod.z.string() }).nullish(),
3331
+ usage: usageSchema
3332
+ })
3333
+ });
3334
+ var responseCreatedChunkSchema = zod.z.object({
3335
+ type: zod.z.literal("response.created"),
3336
+ response: zod.z.object({
3337
+ id: zod.z.string(),
3338
+ created_at: zod.z.number(),
3339
+ model: zod.z.string()
3340
+ })
3341
+ });
3342
+ var responseOutputItemDoneSchema = zod.z.object({
3343
+ type: zod.z.literal("response.output_item.done"),
3344
+ output_index: zod.z.number(),
3345
+ item: zod.z.discriminatedUnion("type", [
3346
+ zod.z.object({
3347
+ type: zod.z.literal("message")
3348
+ }),
3349
+ zod.z.object({
3350
+ type: zod.z.literal("function_call"),
3351
+ id: zod.z.string(),
3352
+ call_id: zod.z.string(),
3353
+ name: zod.z.string(),
3354
+ arguments: zod.z.string(),
3355
+ status: zod.z.literal("completed")
3356
+ })
3357
+ ])
3358
+ });
3359
+ var responseFunctionCallArgumentsDeltaSchema = zod.z.object({
3360
+ type: zod.z.literal("response.function_call_arguments.delta"),
3361
+ item_id: zod.z.string(),
3362
+ output_index: zod.z.number(),
3363
+ delta: zod.z.string()
3364
+ });
3365
+ var responseOutputItemAddedSchema = zod.z.object({
3366
+ type: zod.z.literal("response.output_item.added"),
3367
+ output_index: zod.z.number(),
3368
+ item: zod.z.discriminatedUnion("type", [
3369
+ zod.z.object({
3370
+ type: zod.z.literal("message")
3371
+ }),
3372
+ zod.z.object({
3373
+ type: zod.z.literal("function_call"),
3374
+ id: zod.z.string(),
3375
+ call_id: zod.z.string(),
3376
+ name: zod.z.string(),
3377
+ arguments: zod.z.string()
3378
+ })
3379
+ ])
3380
+ });
3381
+ var responseAnnotationAddedSchema = zod.z.object({
3382
+ type: zod.z.literal("response.output_text.annotation.added"),
3383
+ annotation: zod.z.object({
3384
+ type: zod.z.literal("url_citation"),
3385
+ url: zod.z.string(),
3386
+ title: zod.z.string()
3387
+ })
3388
+ });
3389
+ var openaiResponsesChunkSchema = zod.z.union([
3390
+ textDeltaChunkSchema,
3391
+ responseFinishedChunkSchema,
3392
+ responseCreatedChunkSchema,
3393
+ responseOutputItemDoneSchema,
3394
+ responseFunctionCallArgumentsDeltaSchema,
3395
+ responseOutputItemAddedSchema,
3396
+ responseAnnotationAddedSchema,
3397
+ zod.z.object({ type: zod.z.string() }).passthrough()
3398
+ // fallback for unknown chunks
3399
+ ]);
3400
+ function isTextDeltaChunk(chunk) {
3401
+ return chunk.type === "response.output_text.delta";
3402
+ }
3403
+ function isResponseOutputItemDoneChunk(chunk) {
3404
+ return chunk.type === "response.output_item.done";
3405
+ }
3406
+ function isResponseFinishedChunk(chunk) {
3407
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
3408
+ }
3409
+ function isResponseCreatedChunk(chunk) {
3410
+ return chunk.type === "response.created";
3411
+ }
3412
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
3413
+ return chunk.type === "response.function_call_arguments.delta";
3414
+ }
3415
+ function isResponseOutputItemAddedChunk(chunk) {
3416
+ return chunk.type === "response.output_item.added";
3417
+ }
3418
+ function isResponseAnnotationAddedChunk(chunk) {
3419
+ return chunk.type === "response.output_text.annotation.added";
3420
+ }
3421
+ function getResponsesModelConfig(modelId) {
3422
+ if (modelId.startsWith("o")) {
3423
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
3424
+ return {
3425
+ isReasoningModel: true,
3426
+ systemMessageMode: "remove",
3427
+ requiredAutoTruncation: false
3428
+ };
3429
+ }
3430
+ return {
3431
+ isReasoningModel: true,
3432
+ systemMessageMode: "developer",
3433
+ requiredAutoTruncation: false
3434
+ };
3435
+ }
3436
+ return {
3437
+ isReasoningModel: false,
3438
+ systemMessageMode: "system",
3439
+ requiredAutoTruncation: false
3440
+ };
3441
+ }
3442
+ var openaiResponsesProviderOptionsSchema = zod.z.object({
3443
+ metadata: zod.z.any().nullish(),
3444
+ parallelToolCalls: zod.z.boolean().nullish(),
3445
+ previousResponseId: zod.z.string().nullish(),
3446
+ store: zod.z.boolean().nullish(),
3447
+ user: zod.z.string().nullish(),
3448
+ reasoningEffort: zod.z.string().nullish(),
3449
+ strictSchemas: zod.z.boolean().nullish(),
3450
+ instructions: zod.z.string().nullish()
3451
+ });
3452
+ var WebSearchPreviewParameters = zod.z.object({});
3453
+ function webSearchPreviewTool({
3454
+ searchContextSize,
3455
+ userLocation
3456
+ } = {}) {
3457
+ return {
3458
+ type: "provider-defined",
3459
+ id: "openai.web_search_preview",
3460
+ args: {
3461
+ searchContextSize,
3462
+ userLocation
3463
+ },
3464
+ parameters: WebSearchPreviewParameters
3465
+ };
3466
+ }
3467
+ var openaiTools = {
3468
+ webSearchPreview: webSearchPreviewTool
3469
+ };
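+ // Usage sketch (argument values illustrative):
+ //   openaiTools.webSearchPreview({ searchContextSize: "high" })
+ // yields a provider-defined tool that prepareResponsesTools serializes as
+ //   { type: "web_search_preview", search_context_size: "high",
+ //     user_location: undefined }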
3470
+ var OpenAIProviderOptionsSchema = zod.z.object({
3471
+ instructions: zod.z.string().nullish(),
3472
+ speed: zod.z.number().min(0.25).max(4).default(1).nullish()
3473
+ });
3474
+ var OpenAISpeechModel = class {
3475
+ constructor(modelId, config) {
3476
+ this.modelId = modelId;
3477
+ this.config = config;
3478
+ this.specificationVersion = "v1";
3479
+ }
3480
+ get provider() {
3481
+ return this.config.provider;
3482
+ }
3483
+ getArgs({
3484
+ text,
3485
+ voice = "alloy",
3486
+ outputFormat = "mp3",
3487
+ speed,
3488
+ instructions,
3489
+ providerOptions
3490
+ }) {
3491
+ const warnings = [];
3492
+ const openAIOptions = parseProviderOptions({
3493
+ provider: "openai",
3494
+ providerOptions,
3495
+ schema: OpenAIProviderOptionsSchema
3496
+ });
3497
+ const requestBody = {
3498
+ model: this.modelId,
3499
+ input: text,
3500
+ voice,
3501
+ response_format: "mp3",
3502
+ speed,
3503
+ instructions
3504
+ };
3505
+ if (outputFormat) {
3506
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
3507
+ requestBody.response_format = outputFormat;
3508
+ } else {
3509
+ warnings.push({
3510
+ type: "unsupported-setting",
3511
+ setting: "outputFormat",
3512
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
3513
+ });
3514
+ }
3515
+ }
3516
+ if (openAIOptions) {
3517
+ const speechModelOptions = {};
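+ // Note: speechModelOptions is empty as written, so the loop below is a
+ // no-op; `speed` and `instructions` are already applied directly from the
+ // getArgs parameters above.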
3518
+ for (const key in speechModelOptions) {
3519
+ const value = speechModelOptions[key];
3520
+ if (value !== void 0) {
3521
+ requestBody[key] = value;
3522
+ }
3523
+ }
3524
+ }
3525
+ return {
3526
+ requestBody,
3527
+ warnings
3528
+ };
3529
+ }
3530
+ async doGenerate(options) {
3531
+ var _a15, _b, _c;
3532
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
3533
+ const { requestBody, warnings } = this.getArgs(options);
3534
+ const {
3535
+ value: audio,
3536
+ responseHeaders,
3537
+ rawValue: rawResponse
3538
+ } = await postJsonToApi({
3539
+ url: this.config.url({
3540
+ path: "/audio/speech",
3541
+ modelId: this.modelId
3542
+ }),
3543
+ headers: combineHeaders(this.config.headers(), options.headers),
3544
+ body: requestBody,
3545
+ failedResponseHandler: openaiFailedResponseHandler,
3546
+ successfulResponseHandler: createBinaryResponseHandler(),
3547
+ abortSignal: options.abortSignal,
3548
+ fetch: this.config.fetch
3549
+ });
3550
+ return {
3551
+ audio,
3552
+ warnings,
3553
+ request: {
3554
+ body: JSON.stringify(requestBody)
3555
+ },
3556
+ response: {
3557
+ timestamp: currentDate,
3558
+ modelId: this.modelId,
3559
+ headers: responseHeaders,
3560
+ body: rawResponse
3561
+ }
3562
+ };
3563
+ }
3564
+ };
+ function createOpenAI(options = {}) {
+   var _a15, _b, _c;
+   const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://api.openai.com/v1";
+   const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
+   const providerName = (_c = options.name) != null ? _c : "openai";
+   const getHeaders = () => ({
+     Authorization: `Bearer ${loadApiKey({
+       apiKey: options.apiKey,
+       environmentVariableName: "OPENAI_API_KEY",
+       description: "OpenAI"
+     })}`,
+     "OpenAI-Organization": options.organization,
+     "OpenAI-Project": options.project,
+     ...options.headers
+   });
+   const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+     provider: `${providerName}.chat`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     compatibility,
+     fetch: options.fetch
+   });
+   const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
+     provider: `${providerName}.completion`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     compatibility,
+     fetch: options.fetch
+   });
+   const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+     provider: `${providerName}.embedding`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
+     provider: `${providerName}.image`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
+     provider: `${providerName}.transcription`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
+     provider: `${providerName}.speech`,
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createLanguageModel = (modelId, settings) => {
+     if (new.target) {
+       throw new Error(
+         "The OpenAI model function cannot be called with the new keyword."
+       );
+     }
+     if (modelId === "gpt-3.5-turbo-instruct") {
+       return createCompletionModel(
+         modelId,
+         settings
+       );
+     }
+     return createChatModel(modelId, settings);
+   };
+   const createResponsesModel = (modelId) => {
+     return new OpenAIResponsesLanguageModel(modelId, {
+       provider: `${providerName}.responses`,
+       url: ({ path }) => `${baseURL}${path}`,
+       headers: getHeaders,
+       fetch: options.fetch
+     });
+   };
+   const provider = function(modelId, settings) {
+     return createLanguageModel(modelId, settings);
+   };
+   provider.languageModel = createLanguageModel;
+   provider.chat = createChatModel;
+   provider.completion = createCompletionModel;
+   provider.responses = createResponsesModel;
+   provider.embedding = createEmbeddingModel;
+   provider.textEmbedding = createEmbeddingModel;
+   provider.textEmbeddingModel = createEmbeddingModel;
+   provider.image = createImageModel;
+   provider.imageModel = createImageModel;
+   provider.transcription = createTranscriptionModel;
+   provider.transcriptionModel = createTranscriptionModel;
+   provider.speech = createSpeechModel;
+   provider.speechModel = createSpeechModel;
+   provider.tools = openaiTools;
+   return provider;
+ }
+ createOpenAI({
+   compatibility: "strict"
+   // strict for OpenAI API
+ });
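
// Illustrative sketch (not from the package): the returned provider is both
// callable and a namespace of per-modality factories; the model ids below are
// assumed examples.
var exampleProvider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
var exampleChat = exampleProvider("gpt-4o"); // routed through createLanguageModel to the chat model
var exampleInstruct = exampleProvider("gpt-3.5-turbo-instruct"); // special-cased to the completion model
var exampleEmbeddings = exampleProvider.textEmbeddingModel("text-embedding-3-small");
var exampleTts = exampleProvider.speech("tts-1");
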
+ 
+ // src/document/extractors/types.ts
+ var STRIP_REGEX = /(\r\n|\n|\r)/gm;
+ var openai2 = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
+ var baseLLM = openai2("gpt-4o");
+ 
+ // src/document/extractors/title.ts
+ var TitleExtractor = class extends llamaindex.BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * Can work on a mixture of text and non-text nodes.
+    * @type {boolean}
+    * @default false
+    */
+   isTextNodeOnly = false;
+   /**
+    * Number of nodes to extract titles from.
+    * @type {number}
+    * @default 5
+    */
+   nodes = 5;
+   /**
+    * The prompt template to use for the title extractor.
+    * @type {string}
+    */
+   nodeTemplate;
+   /**
+    * The prompt template used to merge title candidates.
+    * @type {string}
+    */
+   combineTemplate;
+   /**
+    * Constructor for the TitleExtractor class.
+    * @param {MastraLanguageModel} llm MastraLanguageModel instance.
+    * @param {number} nodes Number of nodes to extract titles from.
+    * @param {TitleExtractorPrompt} nodeTemplate The prompt template to use for the title extractor.
+    * @param {string} combineTemplate The prompt template used to merge title candidates.
+    */
+   constructor(options) {
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.nodes = options?.nodes ?? 5;
+     this.nodeTemplate = options?.nodeTemplate ? new llamaindex.PromptTemplate({
+       templateVars: ["context"],
+       template: options.nodeTemplate
+     }) : llamaindex.defaultTitleExtractorPromptTemplate;
+     this.combineTemplate = options?.combineTemplate ? new llamaindex.PromptTemplate({
+       templateVars: ["context"],
+       template: options.combineTemplate
+     }) : llamaindex.defaultTitleCombinePromptTemplate;
+   }
+   /**
+    * Extract titles from a list of nodes.
+    * @param {BaseNode[]} nodes Nodes to extract titles from.
+    * @returns {Promise<BaseNode<ExtractTitle>[]>} Titles extracted from the nodes.
+    */
+   async extract(nodes) {
+     const results = new Array(nodes.length);
+     const nodesToExtractTitle = [];
+     const nodeIndexes = [];
+     nodes.forEach((node, idx) => {
+       const text = node.getContent(this.metadataMode);
+       if (!text || text.trim() === "") {
+         results[idx] = { documentTitle: "" };
+       } else {
+         nodesToExtractTitle.push(node);
+         nodeIndexes.push(idx);
+       }
+     });
+     if (nodesToExtractTitle.length) {
+       const filteredNodes = this.filterNodes(nodesToExtractTitle);
+       if (filteredNodes.length) {
+         const nodesByDocument = this.separateNodesByDocument(filteredNodes);
+         const titlesByDocument = await this.extractTitles(nodesByDocument);
+         filteredNodes.forEach((node, i) => {
+           const nodeIndex = nodeIndexes[i];
+           const groupKey = node.sourceNode?.nodeId ?? node.id_;
+           if (typeof nodeIndex === "number") {
+             results[nodeIndex] = {
+               documentTitle: titlesByDocument[groupKey] ?? ""
+             };
+           }
+         });
+       }
+     }
+     return results;
+   }
+   filterNodes(nodes) {
+     return nodes.filter((node) => {
+       if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
+         return false;
+       }
+       return true;
+     });
+   }
+   separateNodesByDocument(nodes) {
+     const nodesByDocument = {};
+     for (const node of nodes) {
+       const groupKey = node.sourceNode?.nodeId ?? node.id_;
+       nodesByDocument[groupKey] = nodesByDocument[groupKey] || [];
+       nodesByDocument[groupKey].push(node);
+     }
+     return nodesByDocument;
+   }
+   async extractTitles(nodesByDocument) {
+     const titlesByDocument = {};
+     for (const [key, nodes] of Object.entries(nodesByDocument)) {
+       const titleCandidates = await this.getTitlesCandidates(nodes);
+       const combinedTitles = titleCandidates.join(", ");
+       const completion = await this.llm.doGenerate({
+         inputFormat: "messages",
+         mode: { type: "regular" },
+         prompt: [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "text",
+                 text: this.combineTemplate.format({
+                   context: combinedTitles
+                 })
+               }
+             ]
+           }
+         ]
+       });
+       let title = "";
+       if (typeof completion.text === "string") {
+         title = completion.text.trim();
+       } else {
+         console.warn("Title extraction LLM output was not a string:", completion.text);
+       }
+       titlesByDocument[key] = title;
+     }
+     return titlesByDocument;
+   }
+   async getTitlesCandidates(nodes) {
+     const titleJobs = nodes.map(async (node) => {
+       const completion = await this.llm.doGenerate({
+         inputFormat: "messages",
+         mode: { type: "regular" },
+         prompt: [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "text",
+                 text: this.nodeTemplate.format({
+                   context: node.getContent(llamaindex.MetadataMode.ALL)
+                 })
+               }
+             ]
+           }
+         ]
+       });
+       if (typeof completion.text === "string") {
+         return completion.text.trim();
+       } else {
+         console.warn("Title candidate extraction LLM output was not a string:", completion.text);
+         return "";
+       }
+     });
+     return await Promise.all(titleJobs);
+   }
+ };
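
// Illustrative sketch (not from the package): with no llm option the extractor
// falls back to the module-level gpt-4o baseLLM; chunks sharing a source node
// id receive one combined documentTitle.
var exampleTitleExtractor = new TitleExtractor({ nodes: 3 });
// exampleTitleExtractor.extract(textNodes).then((r) => { /* [{ documentTitle }] */ });
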
+ var SummaryExtractor = class extends llamaindex.BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * List of summaries to extract: 'self', 'prev', 'next'
+    * @type {string[]}
+    */
+   summaries;
+   /**
+    * The prompt template to use for the summary extractor.
+    * @type {string}
+    */
+   promptTemplate;
+   selfSummary;
+   prevSummary;
+   nextSummary;
+   constructor(options) {
+     const summaries = options?.summaries ?? ["self"];
+     if (summaries && !summaries.some((s) => ["self", "prev", "next"].includes(s)))
+       throw new Error("Summaries must be one of 'self', 'prev', 'next'");
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.summaries = summaries;
+     this.promptTemplate = options?.promptTemplate ? new llamaindex.PromptTemplate({
+       templateVars: ["context"],
+       template: options.promptTemplate
+     }) : llamaindex.defaultSummaryPrompt;
+     this.selfSummary = summaries?.includes("self") ?? false;
+     this.prevSummary = summaries?.includes("prev") ?? false;
+     this.nextSummary = summaries?.includes("next") ?? false;
+   }
+   /**
+    * Extract summary from a node.
+    * @param {BaseNode} node Node to extract summary from.
+    * @returns {Promise<string>} Summary extracted from the node.
+    */
+   async generateNodeSummary(node) {
+     const text = node.getContent(this.metadataMode);
+     if (!text || text.trim() === "") {
+       return "";
+     }
+     if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
+       return "";
+     }
+     const context = node.getContent(this.metadataMode);
+     const prompt = this.promptTemplate.format({
+       context
+     });
+     const result = await this.llm.doGenerate({
+       inputFormat: "messages",
+       mode: { type: "regular" },
+       prompt: [
+         {
+           role: "user",
+           content: [{ type: "text", text: prompt }]
+         }
+       ]
+     });
+     let summary = "";
+     if (typeof result.text === "string") {
+       summary = result.text.trim();
+     } else {
+       console.warn("Summary extraction LLM output was not a string:", result.text);
+     }
+     return summary.replace(STRIP_REGEX, "");
+   }
+   /**
+    * Extract summaries from a list of nodes.
+    * @param {BaseNode[]} nodes Nodes to extract summaries from.
+    * @returns {Promise<ExtractSummary[]>} Summaries extracted from the nodes.
+    */
+   async extract(nodes) {
+     if (!nodes.every((n) => n instanceof llamaindex.TextNode)) throw new Error("Only `TextNode` is allowed for `Summary` extractor");
+     const nodeSummaries = await Promise.all(nodes.map((node) => this.generateNodeSummary(node)));
+     const metadataList = nodes.map(() => ({}));
+     for (let i = 0; i < nodes.length; i++) {
+       if (i > 0 && this.prevSummary && nodeSummaries[i - 1]) {
+         metadataList[i]["prevSectionSummary"] = nodeSummaries[i - 1];
+       }
+       if (i < nodes.length - 1 && this.nextSummary && nodeSummaries[i + 1]) {
+         metadataList[i]["nextSectionSummary"] = nodeSummaries[i + 1];
+       }
+       if (this.selfSummary && nodeSummaries[i]) {
+         metadataList[i]["sectionSummary"] = nodeSummaries[i];
+       }
+     }
+     return metadataList;
+   }
+ };
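
// Illustrative sketch (not from the package): "self" writes sectionSummary,
// while "prev"/"next" copy neighbouring chunks' summaries into each chunk's
// metadata entry.
var exampleSummaryExtractor = new SummaryExtractor({ summaries: ["self", "prev"] });
// exampleSummaryExtractor.extract(textNodes)
//   .then((meta) => { /* [{ sectionSummary, prevSectionSummary? }, ...] */ });
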
+ var QuestionsAnsweredExtractor = class extends llamaindex.BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * Number of questions to generate.
+    * @type {number}
+    * @default 5
+    */
+   questions = 5;
+   /**
+    * The prompt template to use for the question extractor.
+    * @type {string}
+    */
+   promptTemplate;
+   /**
+    * Whether to use metadata for embeddings only
+    * @type {boolean}
+    * @default false
+    */
+   embeddingOnly = false;
+   /**
+    * Constructor for the QuestionsAnsweredExtractor class.
+    * @param {MastraLanguageModel} llm MastraLanguageModel instance.
+    * @param {number} questions Number of questions to generate.
+    * @param {QuestionExtractPrompt['template']} promptTemplate Optional custom prompt template (should include {context}).
+    * @param {boolean} embeddingOnly Whether to use metadata for embeddings only.
+    */
+   constructor(options) {
+     if (options?.questions && options.questions < 1) throw new Error("Questions must be greater than 0");
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.questions = options?.questions ?? 5;
+     this.promptTemplate = options?.promptTemplate ? new llamaindex.PromptTemplate({
+       templateVars: ["numQuestions", "context"],
+       template: options.promptTemplate
+     }).partialFormat({
+       numQuestions: "5"
+     }) : llamaindex.defaultQuestionExtractPrompt;
+     this.embeddingOnly = options?.embeddingOnly ?? false;
+   }
+   /**
+    * Extract answered questions from a node.
+    * @param {BaseNode} node Node to extract questions from.
+    * @returns {Promise<Array<ExtractQuestion> | Array<{}>>} Questions extracted from the node.
+    */
+   async extractQuestionsFromNode(node) {
+     const text = node.getContent(this.metadataMode);
+     if (!text || text.trim() === "") {
+       return { questionsThisExcerptCanAnswer: "" };
+     }
+     if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
+       return { questionsThisExcerptCanAnswer: "" };
+     }
+     const contextStr = node.getContent(this.metadataMode);
+     const prompt = this.promptTemplate.format({
+       context: contextStr,
+       numQuestions: this.questions.toString()
+     });
+     const questions = await this.llm.doGenerate({
+       inputFormat: "messages",
+       mode: { type: "regular" },
+       prompt: [
+         {
+           role: "user",
+           content: [{ type: "text", text: prompt }]
+         }
+       ]
+     });
+     let result = "";
+     try {
+       if (typeof questions.text === "string") {
+         result = questions.text.replace(STRIP_REGEX, "").trim();
+       } else {
+         console.warn("Question extraction LLM output was not a string:", questions.text);
+       }
+     } catch (err) {
+       console.warn("Question extraction failed:", err);
+     }
+     return {
+       questionsThisExcerptCanAnswer: result
+     };
+   }
+   /**
+    * Extract answered questions from a list of nodes.
+    * @param {BaseNode[]} nodes Nodes to extract questions from.
+    * @returns {Promise<Array<ExtractQuestion> | Array<{}>>} Questions extracted from the nodes.
+    */
+   async extract(nodes) {
+     const results = await Promise.all(nodes.map((node) => this.extractQuestionsFromNode(node)));
+     return results;
+   }
+ };
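
// Illustrative sketch (not from the package): empty or non-text nodes short-
// circuit to an empty questionsThisExcerptCanAnswer string instead of throwing.
var exampleQuestionsExtractor = new QuestionsAnsweredExtractor({ questions: 3 });
// exampleQuestionsExtractor.extract(nodes)
//   .then((r) => { /* [{ questionsThisExcerptCanAnswer }] */ });
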
+ var KeywordExtractor = class extends llamaindex.BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * Number of keywords to extract.
+    * @type {number}
+    * @default 5
+    */
+   keywords = 5;
+   /**
+    * The prompt template to use for the keyword extractor.
+    * @type {string}
+    */
+   promptTemplate;
+   /**
+    * Constructor for the KeywordExtractor class.
+    * @param {MastraLanguageModel} llm MastraLanguageModel instance.
+    * @param {number} keywords Number of keywords to extract.
+    * @param {string} [promptTemplate] Optional custom prompt template (must include {context})
+    * @throws {Error} If keywords is less than 1.
+    */
+   constructor(options) {
+     if (options?.keywords && options.keywords < 1) throw new Error("Keywords must be greater than 0");
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.keywords = options?.keywords ?? 5;
+     this.promptTemplate = options?.promptTemplate ? new llamaindex.PromptTemplate({
+       templateVars: ["context", "maxKeywords"],
+       template: options.promptTemplate
+     }) : llamaindex.defaultKeywordExtractPrompt;
+   }
+   /**
+    * Extract keywords from a node. Returns an object with a comma-separated string of keywords, or an empty string if extraction fails.
+    * Adds error handling for malformed/empty LLM output.
+    * @param node Node to extract keywords from.
+    * @returns Keywords extracted from the node.
+    */
+   async extractKeywordsFromNodes(node) {
+     const text = node.getContent(this.metadataMode);
+     if (!text || text.trim() === "") {
+       return { excerptKeywords: "" };
+     }
+     if (this.isTextNodeOnly && !(node instanceof llamaindex.TextNode)) {
+       return { excerptKeywords: "" };
+     }
+     let keywords = "";
+     try {
+       const completion = await this.llm.doGenerate({
+         inputFormat: "messages",
+         mode: { type: "regular" },
+         prompt: [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "text",
+                 text: this.promptTemplate.format({
+                   context: node.getContent(llamaindex.MetadataMode.ALL),
+                   maxKeywords: this.keywords.toString()
+                 })
+               }
+             ]
+           }
+         ]
+       });
+       if (typeof completion.text === "string") {
+         keywords = completion.text.trim();
+       } else {
+         console.warn("Keyword extraction LLM output was not a string:", completion.text);
+       }
+     } catch (err) {
+       console.warn("Keyword extraction failed:", err);
+     }
+     return { excerptKeywords: keywords };
+   }
+   /**
+    * Extract keywords from an array of nodes. Always returns an array (may be empty).
+    * @param nodes Nodes to extract keywords from.
+    * @returns Array of keyword extraction results.
+    */
+   async extract(nodes) {
+     if (!Array.isArray(nodes) || nodes.length === 0) return [];
+     const results = await Promise.all(nodes.map((node) => this.extractKeywordsFromNodes(node)));
+     return results;
+   }
+ };
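
// Illustrative sketch (not from the package): extraction failures are caught
// and logged, yielding an empty excerptKeywords string rather than rejecting.
var exampleKeywordExtractor = new KeywordExtractor({ keywords: 8 });
// exampleKeywordExtractor.extract(nodes).then((r) => { /* [{ excerptKeywords }] */ });
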
 
  // src/document/types.ts
  var Language = /* @__PURE__ */ ((Language2) => {
@@ -510,7 +4614,7 @@ var HTMLSectionTransformer = class {
    headersToSplitOn;
    options;
    constructor(headersToSplitOn, options = {}) {
-     this.headersToSplitOn = Object.fromEntries(headersToSplitOn.map(([tag, name]) => [tag.toLowerCase(), name]));
+     this.headersToSplitOn = Object.fromEntries(headersToSplitOn.map(([tag, name14]) => [tag.toLowerCase(), name14]));
      this.options = options;
    }
    splitText(text) {
@@ -1104,7 +5208,7 @@ var MarkdownHeaderTransformer = class {
        continue;
      }
      let headerMatched = false;
-     for (const [sep, name] of this.headersToSplitOn) {
+     for (const [sep, name14] of this.headersToSplitOn) {
        if (strippedLine.startsWith(sep) && (strippedLine.length === sep.length || strippedLine[sep.length] === " ")) {
          headerMatched = true;
          if (currentContent.length > 0) {
@@ -1114,7 +5218,7 @@ var MarkdownHeaderTransformer = class {
            });
            currentContent = [];
          }
-         if (name !== null) {
+         if (name14 !== null) {
            const currentHeaderLevel = (sep.match(/#/g) || []).length;
            while (headerStack.length > 0 && headerStack?.[headerStack.length - 1].level >= currentHeaderLevel) {
              const poppedHeader = headerStack.pop();
@@ -1124,11 +5228,11 @@ var MarkdownHeaderTransformer = class {
            }
            const header = {
              level: currentHeaderLevel,
-             name,
+             name: name14,
              data: strippedLine.slice(sep.length).trim()
            };
            headerStack.push(header);
-           initialMetadata[name] = header.data;
+           initialMetadata[name14] = header.data;
          }
          linesWithMetadata.push({
            content: line,
@@ -1295,27 +5399,27 @@ var MDocument = class _MDocument {
    async extractMetadata({ title, summary, questions, keywords }) {
      const transformations = [];
      if (typeof summary !== "undefined") {
-       transformations.push(new llamaindex.SummaryExtractor(typeof summary === "boolean" ? {} : summary));
+       transformations.push(new SummaryExtractor(typeof summary === "boolean" ? {} : summary));
      }
      if (typeof questions !== "undefined") {
-       transformations.push(new llamaindex.QuestionsAnsweredExtractor(typeof questions === "boolean" ? {} : questions));
+       transformations.push(new QuestionsAnsweredExtractor(typeof questions === "boolean" ? {} : questions));
      }
      if (typeof keywords !== "undefined") {
-       transformations.push(new llamaindex.KeywordExtractor(typeof keywords === "boolean" ? {} : keywords));
+       transformations.push(new KeywordExtractor(typeof keywords === "boolean" ? {} : keywords));
      }
      if (typeof title !== "undefined") {
-       transformations.push(new llamaindex.TitleExtractor(typeof title === "boolean" ? {} : title));
+       transformations.push(new TitleExtractor(typeof title === "boolean" ? {} : title));
        this.chunks = this.chunks.map(
-         (doc, i) => new llamaindex.Document({
+         (doc) => doc?.metadata?.docId ? new llamaindex.Document({
            ...doc,
            relationships: {
              [llamaindex.NodeRelationship.SOURCE]: {
-               nodeId: `doc-${i}`,
+               nodeId: doc.metadata.docId,
                nodeType: llamaindex.ObjectType.DOCUMENT,
                metadata: doc.metadata
              }
            }
-         })
+         }) : doc
        );
      }
      const pipeline = new llamaindex.IngestionPipeline({