@mastra/rag 0.1.19-alpha.3 → 0.1.19-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
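The headline change in this range: @mastra/rag stops importing SummaryExtractor, QuestionsAnsweredExtractor, KeywordExtractor, and TitleExtractor from llamaindex and instead pulls in BaseExtractor plus the default prompt templates, implementing those extractors locally. The dist bundle accordingly inlines vendored copies of secure-json-parse, the @ai-sdk/provider error classes, nanoid's non-secure ID generator, @ai-sdk/provider-utils, and an OpenAI chat-completions model wrapper. As a minimal sketch of what the vendored secure-json-parse code below guards against (illustrative only: requiring the upstream package name is an assumption for the sketch; inside the bundle the same code is inlined through require_secure_json_parse()):

// Sketch of the vendored secure-json-parse behavior (see filter()/parse2() below).
// Assumes the upstream fastify/secure-json-parse package is installed standalone;
// in @mastra/rag's dist bundle the module is inlined, not required.
const sjson = require('secure-json-parse');

// Plain JSON.parse keeps a literal "__proto__" key as an own property:
console.log(Object.keys(JSON.parse('{"__proto__": {"admin": true}}'))); // ["__proto__"]

// The vendored parser rejects it, because protoAction defaults to "error":
try {
  sjson.parse('{"__proto__": {"admin": true}}');
} catch (err) {
  console.error(err.message); // "Object contains forbidden prototype property"
}

// safeParse() returns null instead of throwing:
console.log(sjson.safeParse('{"constructor": {"prototype": {}}}')); // null

// protoAction: "remove" silently drops the suspicious key instead:
console.log(sjson.parse('{"a": 1, "__proto__": {}}', { protoAction: 'remove' })); // { a: 1 }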
package/dist/index.js CHANGED
@@ -1,12 +1,4116 @@
1
- import { Document, SummaryExtractor, QuestionsAnsweredExtractor, KeywordExtractor, TitleExtractor, ObjectType, NodeRelationship, IngestionPipeline } from 'llamaindex';
1
+ import { Document, ObjectType, NodeRelationship, IngestionPipeline, BaseExtractor, PromptTemplate, defaultSummaryPrompt, TextNode, defaultQuestionExtractPrompt, defaultKeywordExtractPrompt, MetadataMode, defaultTitleExtractorPromptTemplate, defaultTitleCombinePromptTemplate } from 'llamaindex';
2
+ import { z } from 'zod';
2
3
  import { parse } from 'node-html-better-parser';
3
4
  import { encodingForModel, getEncoding } from 'js-tiktoken';
4
5
  import { CohereRelevanceScorer, MastraAgentRelevanceScorer } from '@mastra/core/relevance';
5
6
  import { createTool } from '@mastra/core/tools';
6
- import { z } from 'zod';
7
7
  import { embed } from 'ai';
8
8
 
9
- // src/document/document.ts
9
+ var __create = Object.create;
10
+ var __defProp = Object.defineProperty;
11
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
12
+ var __getOwnPropNames = Object.getOwnPropertyNames;
13
+ var __getProtoOf = Object.getPrototypeOf;
14
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
15
+ var __commonJS = (cb, mod) => function __require() {
16
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
17
+ };
18
+ var __copyProps = (to, from, except, desc) => {
19
+ if (from && typeof from === "object" || typeof from === "function") {
20
+ for (let key of __getOwnPropNames(from))
21
+ if (!__hasOwnProp.call(to, key) && key !== except)
22
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
23
+ }
24
+ return to;
25
+ };
26
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
27
+ // If the importer is in node compatibility mode or this is not an ESM
28
+ // file that has been converted to a CommonJS file using a Babel-
29
+ // compatible transform (i.e. "__esModule" has not been set), then set
30
+ // "default" to the CommonJS "module.exports" for node compatibility.
31
+ __defProp(target, "default", { value: mod, enumerable: true }) ,
32
+ mod
33
+ ));
34
+
35
+ // ../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js
36
+ var require_secure_json_parse = __commonJS({
37
+ "../../node_modules/.pnpm/secure-json-parse@2.7.0/node_modules/secure-json-parse/index.js"(exports, module) {
38
+ var hasBuffer = typeof Buffer !== "undefined";
39
+ var suspectProtoRx = /"(?:_|\\u005[Ff])(?:_|\\u005[Ff])(?:p|\\u0070)(?:r|\\u0072)(?:o|\\u006[Ff])(?:t|\\u0074)(?:o|\\u006[Ff])(?:_|\\u005[Ff])(?:_|\\u005[Ff])"\s*:/;
40
+ var suspectConstructorRx = /"(?:c|\\u0063)(?:o|\\u006[Ff])(?:n|\\u006[Ee])(?:s|\\u0073)(?:t|\\u0074)(?:r|\\u0072)(?:u|\\u0075)(?:c|\\u0063)(?:t|\\u0074)(?:o|\\u006[Ff])(?:r|\\u0072)"\s*:/;
41
+ function _parse(text, reviver, options) {
42
+ if (options == null) {
43
+ if (reviver !== null && typeof reviver === "object") {
44
+ options = reviver;
45
+ reviver = void 0;
46
+ }
47
+ }
48
+ if (hasBuffer && Buffer.isBuffer(text)) {
49
+ text = text.toString();
50
+ }
51
+ if (text && text.charCodeAt(0) === 65279) {
52
+ text = text.slice(1);
53
+ }
54
+ const obj = JSON.parse(text, reviver);
55
+ if (obj === null || typeof obj !== "object") {
56
+ return obj;
57
+ }
58
+ const protoAction = options && options.protoAction || "error";
59
+ const constructorAction = options && options.constructorAction || "error";
60
+ if (protoAction === "ignore" && constructorAction === "ignore") {
61
+ return obj;
62
+ }
63
+ if (protoAction !== "ignore" && constructorAction !== "ignore") {
64
+ if (suspectProtoRx.test(text) === false && suspectConstructorRx.test(text) === false) {
65
+ return obj;
66
+ }
67
+ } else if (protoAction !== "ignore" && constructorAction === "ignore") {
68
+ if (suspectProtoRx.test(text) === false) {
69
+ return obj;
70
+ }
71
+ } else {
72
+ if (suspectConstructorRx.test(text) === false) {
73
+ return obj;
74
+ }
75
+ }
76
+ return filter(obj, { protoAction, constructorAction, safe: options && options.safe });
77
+ }
78
+ function filter(obj, { protoAction = "error", constructorAction = "error", safe } = {}) {
79
+ let next = [obj];
80
+ while (next.length) {
81
+ const nodes = next;
82
+ next = [];
83
+ for (const node of nodes) {
84
+ if (protoAction !== "ignore" && Object.prototype.hasOwnProperty.call(node, "__proto__")) {
85
+ if (safe === true) {
86
+ return null;
87
+ } else if (protoAction === "error") {
88
+ throw new SyntaxError("Object contains forbidden prototype property");
89
+ }
90
+ delete node.__proto__;
91
+ }
92
+ if (constructorAction !== "ignore" && Object.prototype.hasOwnProperty.call(node, "constructor") && Object.prototype.hasOwnProperty.call(node.constructor, "prototype")) {
93
+ if (safe === true) {
94
+ return null;
95
+ } else if (constructorAction === "error") {
96
+ throw new SyntaxError("Object contains forbidden prototype property");
97
+ }
98
+ delete node.constructor;
99
+ }
100
+ for (const key in node) {
101
+ const value = node[key];
102
+ if (value && typeof value === "object") {
103
+ next.push(value);
104
+ }
105
+ }
106
+ }
107
+ }
108
+ return obj;
109
+ }
110
+ function parse2(text, reviver, options) {
111
+ const stackTraceLimit = Error.stackTraceLimit;
112
+ Error.stackTraceLimit = 0;
113
+ try {
114
+ return _parse(text, reviver, options);
115
+ } finally {
116
+ Error.stackTraceLimit = stackTraceLimit;
117
+ }
118
+ }
119
+ function safeParse(text, reviver) {
120
+ const stackTraceLimit = Error.stackTraceLimit;
121
+ Error.stackTraceLimit = 0;
122
+ try {
123
+ return _parse(text, reviver, { safe: true });
124
+ } catch (_e) {
125
+ return null;
126
+ } finally {
127
+ Error.stackTraceLimit = stackTraceLimit;
128
+ }
129
+ }
130
+ module.exports = parse2;
131
+ module.exports.default = parse2;
132
+ module.exports.parse = parse2;
133
+ module.exports.safeParse = safeParse;
134
+ module.exports.scan = filter;
135
+ }
136
+ });
137
+
138
+ // ../../node_modules/.pnpm/@ai-sdk+provider@1.1.3/node_modules/@ai-sdk/provider/dist/index.mjs
139
+ var marker = "vercel.ai.error";
140
+ var symbol = Symbol.for(marker);
141
+ var _a;
142
+ var _AISDKError = class _AISDKError2 extends Error {
143
+ /**
144
+ * Creates an AI SDK Error.
145
+ *
146
+ * @param {Object} params - The parameters for creating the error.
147
+ * @param {string} params.name - The name of the error.
148
+ * @param {string} params.message - The error message.
149
+ * @param {unknown} [params.cause] - The underlying cause of the error.
150
+ */
151
+ constructor({
152
+ name: name14,
153
+ message,
154
+ cause
155
+ }) {
156
+ super(message);
157
+ this[_a] = true;
158
+ this.name = name14;
159
+ this.cause = cause;
160
+ }
161
+ /**
162
+ * Checks if the given error is an AI SDK Error.
163
+ * @param {unknown} error - The error to check.
164
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
165
+ */
166
+ static isInstance(error) {
167
+ return _AISDKError2.hasMarker(error, marker);
168
+ }
169
+ static hasMarker(error, marker15) {
170
+ const markerSymbol = Symbol.for(marker15);
171
+ return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
172
+ }
173
+ };
174
+ _a = symbol;
175
+ var AISDKError = _AISDKError;
176
+ var name = "AI_APICallError";
177
+ var marker2 = `vercel.ai.error.${name}`;
178
+ var symbol2 = Symbol.for(marker2);
179
+ var _a2;
180
+ var APICallError = class extends AISDKError {
181
+ constructor({
182
+ message,
183
+ url,
184
+ requestBodyValues,
185
+ statusCode,
186
+ responseHeaders,
187
+ responseBody,
188
+ cause,
189
+ isRetryable = statusCode != null && (statusCode === 408 || // request timeout
190
+ statusCode === 409 || // conflict
191
+ statusCode === 429 || // too many requests
192
+ statusCode >= 500),
193
+ // server error
194
+ data
195
+ }) {
196
+ super({ name, message, cause });
197
+ this[_a2] = true;
198
+ this.url = url;
199
+ this.requestBodyValues = requestBodyValues;
200
+ this.statusCode = statusCode;
201
+ this.responseHeaders = responseHeaders;
202
+ this.responseBody = responseBody;
203
+ this.isRetryable = isRetryable;
204
+ this.data = data;
205
+ }
206
+ static isInstance(error) {
207
+ return AISDKError.hasMarker(error, marker2);
208
+ }
209
+ };
210
+ _a2 = symbol2;
211
+ var name2 = "AI_EmptyResponseBodyError";
212
+ var marker3 = `vercel.ai.error.${name2}`;
213
+ var symbol3 = Symbol.for(marker3);
214
+ var _a3;
215
+ var EmptyResponseBodyError = class extends AISDKError {
216
+ // used in isInstance
217
+ constructor({ message = "Empty response body" } = {}) {
218
+ super({ name: name2, message });
219
+ this[_a3] = true;
220
+ }
221
+ static isInstance(error) {
222
+ return AISDKError.hasMarker(error, marker3);
223
+ }
224
+ };
225
+ _a3 = symbol3;
226
+ function getErrorMessage(error) {
227
+ if (error == null) {
228
+ return "unknown error";
229
+ }
230
+ if (typeof error === "string") {
231
+ return error;
232
+ }
233
+ if (error instanceof Error) {
234
+ return error.message;
235
+ }
236
+ return JSON.stringify(error);
237
+ }
238
+ var name3 = "AI_InvalidArgumentError";
239
+ var marker4 = `vercel.ai.error.${name3}`;
240
+ var symbol4 = Symbol.for(marker4);
241
+ var _a4;
242
+ var InvalidArgumentError = class extends AISDKError {
243
+ constructor({
244
+ message,
245
+ cause,
246
+ argument
247
+ }) {
248
+ super({ name: name3, message, cause });
249
+ this[_a4] = true;
250
+ this.argument = argument;
251
+ }
252
+ static isInstance(error) {
253
+ return AISDKError.hasMarker(error, marker4);
254
+ }
255
+ };
256
+ _a4 = symbol4;
257
+ var name4 = "AI_InvalidPromptError";
258
+ var marker5 = `vercel.ai.error.${name4}`;
259
+ var symbol5 = Symbol.for(marker5);
260
+ var _a5;
261
+ var InvalidPromptError = class extends AISDKError {
262
+ constructor({
263
+ prompt,
264
+ message,
265
+ cause
266
+ }) {
267
+ super({ name: name4, message: `Invalid prompt: ${message}`, cause });
268
+ this[_a5] = true;
269
+ this.prompt = prompt;
270
+ }
271
+ static isInstance(error) {
272
+ return AISDKError.hasMarker(error, marker5);
273
+ }
274
+ };
275
+ _a5 = symbol5;
276
+ var name5 = "AI_InvalidResponseDataError";
277
+ var marker6 = `vercel.ai.error.${name5}`;
278
+ var symbol6 = Symbol.for(marker6);
279
+ var _a6;
280
+ var InvalidResponseDataError = class extends AISDKError {
281
+ constructor({
282
+ data,
283
+ message = `Invalid response data: ${JSON.stringify(data)}.`
284
+ }) {
285
+ super({ name: name5, message });
286
+ this[_a6] = true;
287
+ this.data = data;
288
+ }
289
+ static isInstance(error) {
290
+ return AISDKError.hasMarker(error, marker6);
291
+ }
292
+ };
293
+ _a6 = symbol6;
294
+ var name6 = "AI_JSONParseError";
295
+ var marker7 = `vercel.ai.error.${name6}`;
296
+ var symbol7 = Symbol.for(marker7);
297
+ var _a7;
298
+ var JSONParseError = class extends AISDKError {
299
+ constructor({ text, cause }) {
300
+ super({
301
+ name: name6,
302
+ message: `JSON parsing failed: Text: ${text}.
303
+ Error message: ${getErrorMessage(cause)}`,
304
+ cause
305
+ });
306
+ this[_a7] = true;
307
+ this.text = text;
308
+ }
309
+ static isInstance(error) {
310
+ return AISDKError.hasMarker(error, marker7);
311
+ }
312
+ };
313
+ _a7 = symbol7;
314
+ var name7 = "AI_LoadAPIKeyError";
315
+ var marker8 = `vercel.ai.error.${name7}`;
316
+ var symbol8 = Symbol.for(marker8);
317
+ var _a8;
318
+ var LoadAPIKeyError = class extends AISDKError {
319
+ // used in isInstance
320
+ constructor({ message }) {
321
+ super({ name: name7, message });
322
+ this[_a8] = true;
323
+ }
324
+ static isInstance(error) {
325
+ return AISDKError.hasMarker(error, marker8);
326
+ }
327
+ };
328
+ _a8 = symbol8;
329
+ var name11 = "AI_TooManyEmbeddingValuesForCallError";
330
+ var marker12 = `vercel.ai.error.${name11}`;
331
+ var symbol12 = Symbol.for(marker12);
332
+ var _a12;
333
+ var TooManyEmbeddingValuesForCallError = class extends AISDKError {
334
+ constructor(options) {
335
+ super({
336
+ name: name11,
337
+ message: `Too many values for a single embedding call. The ${options.provider} model "${options.modelId}" can only embed up to ${options.maxEmbeddingsPerCall} values per call, but ${options.values.length} values were provided.`
338
+ });
339
+ this[_a12] = true;
340
+ this.provider = options.provider;
341
+ this.modelId = options.modelId;
342
+ this.maxEmbeddingsPerCall = options.maxEmbeddingsPerCall;
343
+ this.values = options.values;
344
+ }
345
+ static isInstance(error) {
346
+ return AISDKError.hasMarker(error, marker12);
347
+ }
348
+ };
349
+ _a12 = symbol12;
350
+ var name12 = "AI_TypeValidationError";
351
+ var marker13 = `vercel.ai.error.${name12}`;
352
+ var symbol13 = Symbol.for(marker13);
353
+ var _a13;
354
+ var _TypeValidationError = class _TypeValidationError2 extends AISDKError {
355
+ constructor({ value, cause }) {
356
+ super({
357
+ name: name12,
358
+ message: `Type validation failed: Value: ${JSON.stringify(value)}.
359
+ Error message: ${getErrorMessage(cause)}`,
360
+ cause
361
+ });
362
+ this[_a13] = true;
363
+ this.value = value;
364
+ }
365
+ static isInstance(error) {
366
+ return AISDKError.hasMarker(error, marker13);
367
+ }
368
+ /**
369
+ * Wraps an error into a TypeValidationError.
370
+ * If the cause is already a TypeValidationError with the same value, it returns the cause.
371
+ * Otherwise, it creates a new TypeValidationError.
372
+ *
373
+ * @param {Object} params - The parameters for wrapping the error.
374
+ * @param {unknown} params.value - The value that failed validation.
375
+ * @param {unknown} params.cause - The original error or cause of the validation failure.
376
+ * @returns {TypeValidationError} A TypeValidationError instance.
377
+ */
378
+ static wrap({
379
+ value,
380
+ cause
381
+ }) {
382
+ return _TypeValidationError2.isInstance(cause) && cause.value === value ? cause : new _TypeValidationError2({ value, cause });
383
+ }
384
+ };
385
+ _a13 = symbol13;
386
+ var TypeValidationError = _TypeValidationError;
387
+ var name13 = "AI_UnsupportedFunctionalityError";
388
+ var marker14 = `vercel.ai.error.${name13}`;
389
+ var symbol14 = Symbol.for(marker14);
390
+ var _a14;
391
+ var UnsupportedFunctionalityError = class extends AISDKError {
392
+ constructor({
393
+ functionality,
394
+ message = `'${functionality}' functionality not supported.`
395
+ }) {
396
+ super({ name: name13, message });
397
+ this[_a14] = true;
398
+ this.functionality = functionality;
399
+ }
400
+ static isInstance(error) {
401
+ return AISDKError.hasMarker(error, marker14);
402
+ }
403
+ };
404
+ _a14 = symbol14;
405
+
406
+ // ../../node_modules/.pnpm/nanoid@3.3.11/node_modules/nanoid/non-secure/index.js
407
+ var customAlphabet = (alphabet, defaultSize = 21) => {
408
+ return (size = defaultSize) => {
409
+ let id = "";
410
+ let i = size | 0;
411
+ while (i--) {
412
+ id += alphabet[Math.random() * alphabet.length | 0];
413
+ }
414
+ return id;
415
+ };
416
+ };
417
+
418
+ // ../../node_modules/.pnpm/@ai-sdk+provider-utils@2.2.7_zod@3.24.3/node_modules/@ai-sdk/provider-utils/dist/index.mjs
419
+ var import_secure_json_parse = __toESM(require_secure_json_parse());
420
+ function combineHeaders(...headers) {
421
+ return headers.reduce(
422
+ (combinedHeaders, currentHeaders) => ({
423
+ ...combinedHeaders,
424
+ ...currentHeaders != null ? currentHeaders : {}
425
+ }),
426
+ {}
427
+ );
428
+ }
429
+ function createEventSourceParserStream() {
430
+ let buffer = "";
431
+ let event = void 0;
432
+ let data = [];
433
+ let lastEventId = void 0;
434
+ let retry = void 0;
435
+ function parseLine(line, controller) {
436
+ if (line === "") {
437
+ dispatchEvent(controller);
438
+ return;
439
+ }
440
+ if (line.startsWith(":")) {
441
+ return;
442
+ }
443
+ const colonIndex = line.indexOf(":");
444
+ if (colonIndex === -1) {
445
+ handleField(line, "");
446
+ return;
447
+ }
448
+ const field = line.slice(0, colonIndex);
449
+ const valueStart = colonIndex + 1;
450
+ const value = valueStart < line.length && line[valueStart] === " " ? line.slice(valueStart + 1) : line.slice(valueStart);
451
+ handleField(field, value);
452
+ }
453
+ function dispatchEvent(controller) {
454
+ if (data.length > 0) {
455
+ controller.enqueue({
456
+ event,
457
+ data: data.join("\n"),
458
+ id: lastEventId,
459
+ retry
460
+ });
461
+ data = [];
462
+ event = void 0;
463
+ retry = void 0;
464
+ }
465
+ }
466
+ function handleField(field, value) {
467
+ switch (field) {
468
+ case "event":
469
+ event = value;
470
+ break;
471
+ case "data":
472
+ data.push(value);
473
+ break;
474
+ case "id":
475
+ lastEventId = value;
476
+ break;
477
+ case "retry":
478
+ const parsedRetry = parseInt(value, 10);
479
+ if (!isNaN(parsedRetry)) {
480
+ retry = parsedRetry;
481
+ }
482
+ break;
483
+ }
484
+ }
485
+ return new TransformStream({
486
+ transform(chunk, controller) {
487
+ const { lines, incompleteLine } = splitLines(buffer, chunk);
488
+ buffer = incompleteLine;
489
+ for (let i = 0; i < lines.length; i++) {
490
+ parseLine(lines[i], controller);
491
+ }
492
+ },
493
+ flush(controller) {
494
+ parseLine(buffer, controller);
495
+ dispatchEvent(controller);
496
+ }
497
+ });
498
+ }
499
+ function splitLines(buffer, chunk) {
500
+ const lines = [];
501
+ let currentLine = buffer;
502
+ for (let i = 0; i < chunk.length; ) {
503
+ const char = chunk[i++];
504
+ if (char === "\n") {
505
+ lines.push(currentLine);
506
+ currentLine = "";
507
+ } else if (char === "\r") {
508
+ lines.push(currentLine);
509
+ currentLine = "";
510
+ if (chunk[i] === "\n") { // CRLF: skip the LF that follows the CR
511
+ i++;
512
+ }
513
+ } else {
514
+ currentLine += char;
515
+ }
516
+ }
517
+ return { lines, incompleteLine: currentLine };
518
+ }
519
+ function extractResponseHeaders(response) {
520
+ const headers = {};
521
+ response.headers.forEach((value, key) => {
522
+ headers[key] = value;
523
+ });
524
+ return headers;
525
+ }
526
+ var createIdGenerator = ({
527
+ prefix,
528
+ size: defaultSize = 16,
529
+ alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
530
+ separator = "-"
531
+ } = {}) => {
532
+ const generator = customAlphabet(alphabet, defaultSize);
533
+ if (prefix == null) {
534
+ return generator;
535
+ }
536
+ if (alphabet.includes(separator)) {
537
+ throw new InvalidArgumentError({
538
+ argument: "separator",
539
+ message: `The separator "${separator}" must not be part of the alphabet "${alphabet}".`
540
+ });
541
+ }
542
+ return (size) => `${prefix}${separator}${generator(size)}`;
543
+ };
544
+ var generateId = createIdGenerator();
545
+ function removeUndefinedEntries(record) {
546
+ return Object.fromEntries(
547
+ Object.entries(record).filter(([_key, value]) => value != null)
548
+ );
549
+ }
550
+ function isAbortError(error) {
551
+ return error instanceof Error && (error.name === "AbortError" || error.name === "TimeoutError");
552
+ }
553
+ function loadApiKey({
554
+ apiKey,
555
+ environmentVariableName,
556
+ apiKeyParameterName = "apiKey",
557
+ description
558
+ }) {
559
+ if (typeof apiKey === "string") {
560
+ return apiKey;
561
+ }
562
+ if (apiKey != null) {
563
+ throw new LoadAPIKeyError({
564
+ message: `${description} API key must be a string.`
565
+ });
566
+ }
567
+ if (typeof process === "undefined") {
568
+ throw new LoadAPIKeyError({
569
+ message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables are not supported in this environment.`
570
+ });
571
+ }
572
+ apiKey = process.env[environmentVariableName];
573
+ if (apiKey == null) {
574
+ throw new LoadAPIKeyError({
575
+ message: `${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or the ${environmentVariableName} environment variable.`
576
+ });
577
+ }
578
+ if (typeof apiKey !== "string") {
579
+ throw new LoadAPIKeyError({
580
+ message: `${description} API key must be a string. The value of the ${environmentVariableName} environment variable is not a string.`
581
+ });
582
+ }
583
+ return apiKey;
584
+ }
585
+ var validatorSymbol = Symbol.for("vercel.ai.validator");
586
+ function validator(validate) {
587
+ return { [validatorSymbol]: true, validate };
588
+ }
589
+ function isValidator(value) {
590
+ return typeof value === "object" && value !== null && validatorSymbol in value && value[validatorSymbol] === true && "validate" in value;
591
+ }
592
+ function asValidator(value) {
593
+ return isValidator(value) ? value : zodValidator(value);
594
+ }
595
+ function zodValidator(zodSchema) {
596
+ return validator((value) => {
597
+ const result = zodSchema.safeParse(value);
598
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
599
+ });
600
+ }
601
+ function validateTypes({
602
+ value,
603
+ schema: inputSchema
604
+ }) {
605
+ const result = safeValidateTypes({ value, schema: inputSchema });
606
+ if (!result.success) {
607
+ throw TypeValidationError.wrap({ value, cause: result.error });
608
+ }
609
+ return result.value;
610
+ }
611
+ function safeValidateTypes({
612
+ value,
613
+ schema
614
+ }) {
615
+ const validator2 = asValidator(schema);
616
+ try {
617
+ if (validator2.validate == null) {
618
+ return { success: true, value };
619
+ }
620
+ const result = validator2.validate(value);
621
+ if (result.success) {
622
+ return result;
623
+ }
624
+ return {
625
+ success: false,
626
+ error: TypeValidationError.wrap({ value, cause: result.error })
627
+ };
628
+ } catch (error) {
629
+ return {
630
+ success: false,
631
+ error: TypeValidationError.wrap({ value, cause: error })
632
+ };
633
+ }
634
+ }
635
+ function parseJSON({
636
+ text,
637
+ schema
638
+ }) {
639
+ try {
640
+ const value = import_secure_json_parse.default.parse(text);
641
+ if (schema == null) {
642
+ return value;
643
+ }
644
+ return validateTypes({ value, schema });
645
+ } catch (error) {
646
+ if (JSONParseError.isInstance(error) || TypeValidationError.isInstance(error)) {
647
+ throw error;
648
+ }
649
+ throw new JSONParseError({ text, cause: error });
650
+ }
651
+ }
652
+ function safeParseJSON({
653
+ text,
654
+ schema
655
+ }) {
656
+ try {
657
+ const value = import_secure_json_parse.default.parse(text);
658
+ if (schema == null) {
659
+ return { success: true, value, rawValue: value };
660
+ }
661
+ const validationResult = safeValidateTypes({ value, schema });
662
+ return validationResult.success ? { ...validationResult, rawValue: value } : validationResult;
663
+ } catch (error) {
664
+ return {
665
+ success: false,
666
+ error: JSONParseError.isInstance(error) ? error : new JSONParseError({ text, cause: error })
667
+ };
668
+ }
669
+ }
670
+ function isParsableJson(input) {
671
+ try {
672
+ import_secure_json_parse.default.parse(input);
673
+ return true;
674
+ } catch (e) {
675
+ return false;
676
+ }
677
+ }
678
+ function parseProviderOptions({
679
+ provider,
680
+ providerOptions,
681
+ schema
682
+ }) {
683
+ if ((providerOptions == null ? void 0 : providerOptions[provider]) == null) {
684
+ return void 0;
685
+ }
686
+ const parsedProviderOptions = safeValidateTypes({
687
+ value: providerOptions[provider],
688
+ schema
689
+ });
690
+ if (!parsedProviderOptions.success) {
691
+ throw new InvalidArgumentError({
692
+ argument: "providerOptions",
693
+ message: `invalid ${provider} provider options`,
694
+ cause: parsedProviderOptions.error
695
+ });
696
+ }
697
+ return parsedProviderOptions.value;
698
+ }
699
+ var getOriginalFetch2 = () => globalThis.fetch;
700
+ var postJsonToApi = async ({
701
+ url,
702
+ headers,
703
+ body,
704
+ failedResponseHandler,
705
+ successfulResponseHandler,
706
+ abortSignal,
707
+ fetch
708
+ }) => postToApi({
709
+ url,
710
+ headers: {
711
+ "Content-Type": "application/json",
712
+ ...headers
713
+ },
714
+ body: {
715
+ content: JSON.stringify(body),
716
+ values: body
717
+ },
718
+ failedResponseHandler,
719
+ successfulResponseHandler,
720
+ abortSignal,
721
+ fetch
722
+ });
723
+ var postFormDataToApi = async ({
724
+ url,
725
+ headers,
726
+ formData,
727
+ failedResponseHandler,
728
+ successfulResponseHandler,
729
+ abortSignal,
730
+ fetch
731
+ }) => postToApi({
732
+ url,
733
+ headers,
734
+ body: {
735
+ content: formData,
736
+ values: Object.fromEntries(formData.entries())
737
+ },
738
+ failedResponseHandler,
739
+ successfulResponseHandler,
740
+ abortSignal,
741
+ fetch
742
+ });
743
+ var postToApi = async ({
744
+ url,
745
+ headers = {},
746
+ body,
747
+ successfulResponseHandler,
748
+ failedResponseHandler,
749
+ abortSignal,
750
+ fetch = getOriginalFetch2()
751
+ }) => {
752
+ try {
753
+ const response = await fetch(url, {
754
+ method: "POST",
755
+ headers: removeUndefinedEntries(headers),
756
+ body: body.content,
757
+ signal: abortSignal
758
+ });
759
+ const responseHeaders = extractResponseHeaders(response);
760
+ if (!response.ok) {
761
+ let errorInformation;
762
+ try {
763
+ errorInformation = await failedResponseHandler({
764
+ response,
765
+ url,
766
+ requestBodyValues: body.values
767
+ });
768
+ } catch (error) {
769
+ if (isAbortError(error) || APICallError.isInstance(error)) {
770
+ throw error;
771
+ }
772
+ throw new APICallError({
773
+ message: "Failed to process error response",
774
+ cause: error,
775
+ statusCode: response.status,
776
+ url,
777
+ responseHeaders,
778
+ requestBodyValues: body.values
779
+ });
780
+ }
781
+ throw errorInformation.value;
782
+ }
783
+ try {
784
+ return await successfulResponseHandler({
785
+ response,
786
+ url,
787
+ requestBodyValues: body.values
788
+ });
789
+ } catch (error) {
790
+ if (error instanceof Error) {
791
+ if (isAbortError(error) || APICallError.isInstance(error)) {
792
+ throw error;
793
+ }
794
+ }
795
+ throw new APICallError({
796
+ message: "Failed to process successful response",
797
+ cause: error,
798
+ statusCode: response.status,
799
+ url,
800
+ responseHeaders,
801
+ requestBodyValues: body.values
802
+ });
803
+ }
804
+ } catch (error) {
805
+ if (isAbortError(error)) {
806
+ throw error;
807
+ }
808
+ if (error instanceof TypeError && error.message === "fetch failed") {
809
+ const cause = error.cause;
810
+ if (cause != null) {
811
+ throw new APICallError({
812
+ message: `Cannot connect to API: ${cause.message}`,
813
+ cause,
814
+ url,
815
+ requestBodyValues: body.values,
816
+ isRetryable: true
817
+ // retry when network error
818
+ });
819
+ }
820
+ }
821
+ throw error;
822
+ }
823
+ };
824
+ var createJsonErrorResponseHandler = ({
825
+ errorSchema,
826
+ errorToMessage,
827
+ isRetryable
828
+ }) => async ({ response, url, requestBodyValues }) => {
829
+ const responseBody = await response.text();
830
+ const responseHeaders = extractResponseHeaders(response);
831
+ if (responseBody.trim() === "") {
832
+ return {
833
+ responseHeaders,
834
+ value: new APICallError({
835
+ message: response.statusText,
836
+ url,
837
+ requestBodyValues,
838
+ statusCode: response.status,
839
+ responseHeaders,
840
+ responseBody,
841
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
842
+ })
843
+ };
844
+ }
845
+ try {
846
+ const parsedError = parseJSON({
847
+ text: responseBody,
848
+ schema: errorSchema
849
+ });
850
+ return {
851
+ responseHeaders,
852
+ value: new APICallError({
853
+ message: errorToMessage(parsedError),
854
+ url,
855
+ requestBodyValues,
856
+ statusCode: response.status,
857
+ responseHeaders,
858
+ responseBody,
859
+ data: parsedError,
860
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response, parsedError)
861
+ })
862
+ };
863
+ } catch (parseError) {
864
+ return {
865
+ responseHeaders,
866
+ value: new APICallError({
867
+ message: response.statusText,
868
+ url,
869
+ requestBodyValues,
870
+ statusCode: response.status,
871
+ responseHeaders,
872
+ responseBody,
873
+ isRetryable: isRetryable == null ? void 0 : isRetryable(response)
874
+ })
875
+ };
876
+ }
877
+ };
878
+ var createEventSourceResponseHandler = (chunkSchema) => async ({ response }) => {
879
+ const responseHeaders = extractResponseHeaders(response);
880
+ if (response.body == null) {
881
+ throw new EmptyResponseBodyError({});
882
+ }
883
+ return {
884
+ responseHeaders,
885
+ value: response.body.pipeThrough(new TextDecoderStream()).pipeThrough(createEventSourceParserStream()).pipeThrough(
886
+ new TransformStream({
887
+ transform({ data }, controller) {
888
+ if (data === "[DONE]") {
889
+ return;
890
+ }
891
+ controller.enqueue(
892
+ safeParseJSON({
893
+ text: data,
894
+ schema: chunkSchema
895
+ })
896
+ );
897
+ }
898
+ })
899
+ )
900
+ };
901
+ };
902
+ var createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
903
+ const responseBody = await response.text();
904
+ const parsedResult = safeParseJSON({
905
+ text: responseBody,
906
+ schema: responseSchema
907
+ });
908
+ const responseHeaders = extractResponseHeaders(response);
909
+ if (!parsedResult.success) {
910
+ throw new APICallError({
911
+ message: "Invalid JSON response",
912
+ cause: parsedResult.error,
913
+ statusCode: response.status,
914
+ responseHeaders,
915
+ responseBody,
916
+ url,
917
+ requestBodyValues
918
+ });
919
+ }
920
+ return {
921
+ responseHeaders,
922
+ value: parsedResult.value,
923
+ rawValue: parsedResult.rawValue
924
+ };
925
+ };
926
+ var createBinaryResponseHandler = () => async ({ response, url, requestBodyValues }) => {
927
+ const responseHeaders = extractResponseHeaders(response);
928
+ if (!response.body) {
929
+ throw new APICallError({
930
+ message: "Response body is empty",
931
+ url,
932
+ requestBodyValues,
933
+ statusCode: response.status,
934
+ responseHeaders,
935
+ responseBody: void 0
936
+ });
937
+ }
938
+ try {
939
+ const buffer = await response.arrayBuffer();
940
+ return {
941
+ responseHeaders,
942
+ value: new Uint8Array(buffer)
943
+ };
944
+ } catch (error) {
945
+ throw new APICallError({
946
+ message: "Failed to read response as array buffer",
947
+ url,
948
+ requestBodyValues,
949
+ statusCode: response.status,
950
+ responseHeaders,
951
+ responseBody: void 0,
952
+ cause: error
953
+ });
954
+ }
955
+ };
956
+ var { btoa, atob } = globalThis;
957
+ function convertBase64ToUint8Array(base64String) {
958
+ const base64Url = base64String.replace(/-/g, "+").replace(/_/g, "/");
959
+ const latin1string = atob(base64Url);
960
+ return Uint8Array.from(latin1string, (byte) => byte.codePointAt(0));
961
+ }
962
+ function convertUint8ArrayToBase64(array) {
963
+ let latin1string = "";
964
+ for (let i = 0; i < array.length; i++) {
965
+ latin1string += String.fromCodePoint(array[i]);
966
+ }
967
+ return btoa(latin1string);
968
+ }
969
+ function withoutTrailingSlash(url) {
970
+ return url == null ? void 0 : url.replace(/\/$/, "");
971
+ }
972
+ function convertToOpenAIChatMessages({
973
+ prompt,
974
+ useLegacyFunctionCalling = false,
975
+ systemMessageMode = "system"
976
+ }) {
977
+ const messages = [];
978
+ const warnings = [];
979
+ for (const { role, content } of prompt) {
980
+ switch (role) {
981
+ case "system": {
982
+ switch (systemMessageMode) {
983
+ case "system": {
984
+ messages.push({ role: "system", content });
985
+ break;
986
+ }
987
+ case "developer": {
988
+ messages.push({ role: "developer", content });
989
+ break;
990
+ }
991
+ case "remove": {
992
+ warnings.push({
993
+ type: "other",
994
+ message: "system messages are removed for this model"
995
+ });
996
+ break;
997
+ }
998
+ default: {
999
+ const _exhaustiveCheck = systemMessageMode;
1000
+ throw new Error(
1001
+ `Unsupported system message mode: ${_exhaustiveCheck}`
1002
+ );
1003
+ }
1004
+ }
1005
+ break;
1006
+ }
1007
+ case "user": {
1008
+ if (content.length === 1 && content[0].type === "text") {
1009
+ messages.push({ role: "user", content: content[0].text });
1010
+ break;
1011
+ }
1012
+ messages.push({
1013
+ role: "user",
1014
+ content: content.map((part, index) => {
1015
+ var _a15, _b, _c, _d;
1016
+ switch (part.type) {
1017
+ case "text": {
1018
+ return { type: "text", text: part.text };
1019
+ }
1020
+ case "image": {
1021
+ return {
1022
+ type: "image_url",
1023
+ image_url: {
1024
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
1025
+ // OpenAI specific extension: image detail
1026
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
1027
+ }
1028
+ };
1029
+ }
1030
+ case "file": {
1031
+ if (part.data instanceof URL) {
1032
+ throw new UnsupportedFunctionalityError({
1033
+ functionality: "'File content parts with URL data' functionality not supported."
1034
+ });
1035
+ }
1036
+ switch (part.mimeType) {
1037
+ case "audio/wav": {
1038
+ return {
1039
+ type: "input_audio",
1040
+ input_audio: { data: part.data, format: "wav" }
1041
+ };
1042
+ }
1043
+ case "audio/mp3":
1044
+ case "audio/mpeg": {
1045
+ return {
1046
+ type: "input_audio",
1047
+ input_audio: { data: part.data, format: "mp3" }
1048
+ };
1049
+ }
1050
+ case "application/pdf": {
1051
+ return {
1052
+ type: "file",
1053
+ file: {
1054
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
1055
+ file_data: `data:application/pdf;base64,${part.data}`
1056
+ }
1057
+ };
1058
+ }
1059
+ default: {
1060
+ throw new UnsupportedFunctionalityError({
1061
+ functionality: `File content part type ${part.mimeType} in user messages`
1062
+ });
1063
+ }
1064
+ }
1065
+ }
1066
+ }
1067
+ })
1068
+ });
1069
+ break;
1070
+ }
1071
+ case "assistant": {
1072
+ let text = "";
1073
+ const toolCalls = [];
1074
+ for (const part of content) {
1075
+ switch (part.type) {
1076
+ case "text": {
1077
+ text += part.text;
1078
+ break;
1079
+ }
1080
+ case "tool-call": {
1081
+ toolCalls.push({
1082
+ id: part.toolCallId,
1083
+ type: "function",
1084
+ function: {
1085
+ name: part.toolName,
1086
+ arguments: JSON.stringify(part.args)
1087
+ }
1088
+ });
1089
+ break;
1090
+ }
1091
+ }
1092
+ }
1093
+ if (useLegacyFunctionCalling) {
1094
+ if (toolCalls.length > 1) {
1095
+ throw new UnsupportedFunctionalityError({
1096
+ functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
1097
+ });
1098
+ }
1099
+ messages.push({
1100
+ role: "assistant",
1101
+ content: text,
1102
+ function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
1103
+ });
1104
+ } else {
1105
+ messages.push({
1106
+ role: "assistant",
1107
+ content: text,
1108
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
1109
+ });
1110
+ }
1111
+ break;
1112
+ }
1113
+ case "tool": {
1114
+ for (const toolResponse of content) {
1115
+ if (useLegacyFunctionCalling) {
1116
+ messages.push({
1117
+ role: "function",
1118
+ name: toolResponse.toolName,
1119
+ content: JSON.stringify(toolResponse.result)
1120
+ });
1121
+ } else {
1122
+ messages.push({
1123
+ role: "tool",
1124
+ tool_call_id: toolResponse.toolCallId,
1125
+ content: JSON.stringify(toolResponse.result)
1126
+ });
1127
+ }
1128
+ }
1129
+ break;
1130
+ }
1131
+ default: {
1132
+ const _exhaustiveCheck = role;
1133
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1134
+ }
1135
+ }
1136
+ }
1137
+ return { messages, warnings };
1138
+ }
1139
+ function mapOpenAIChatLogProbsOutput(logprobs) {
1140
+ var _a15, _b;
1141
+ return (_b = (_a15 = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a15.map(({ token, logprob, top_logprobs }) => ({
1142
+ token,
1143
+ logprob,
1144
+ topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
1145
+ token: token2,
1146
+ logprob: logprob2
1147
+ })) : []
1148
+ }))) != null ? _b : void 0;
1149
+ }
1150
+ function mapOpenAIFinishReason(finishReason) {
1151
+ switch (finishReason) {
1152
+ case "stop":
1153
+ return "stop";
1154
+ case "length":
1155
+ return "length";
1156
+ case "content_filter":
1157
+ return "content-filter";
1158
+ case "function_call":
1159
+ case "tool_calls":
1160
+ return "tool-calls";
1161
+ default:
1162
+ return "unknown";
1163
+ }
1164
+ }
1165
+ var openaiErrorDataSchema = z.object({
1166
+ error: z.object({
1167
+ message: z.string(),
1168
+ // The additional information below is handled loosely to support
1169
+ // OpenAI-compatible providers that have slightly different error
1170
+ // responses:
1171
+ type: z.string().nullish(),
1172
+ param: z.any().nullish(),
1173
+ code: z.union([z.string(), z.number()]).nullish()
1174
+ })
1175
+ });
1176
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
1177
+ errorSchema: openaiErrorDataSchema,
1178
+ errorToMessage: (data) => data.error.message
1179
+ });
1180
+ function getResponseMetadata({
1181
+ id,
1182
+ model,
1183
+ created
1184
+ }) {
1185
+ return {
1186
+ id: id != null ? id : void 0,
1187
+ modelId: model != null ? model : void 0,
1188
+ timestamp: created != null ? new Date(created * 1e3) : void 0
1189
+ };
1190
+ }
1191
+ function prepareTools({
1192
+ mode,
1193
+ useLegacyFunctionCalling = false,
1194
+ structuredOutputs
1195
+ }) {
1196
+ var _a15;
1197
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
1198
+ const toolWarnings = [];
1199
+ if (tools == null) {
1200
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
1201
+ }
1202
+ const toolChoice = mode.toolChoice;
1203
+ if (useLegacyFunctionCalling) {
1204
+ const openaiFunctions = [];
1205
+ for (const tool of tools) {
1206
+ if (tool.type === "provider-defined") {
1207
+ toolWarnings.push({ type: "unsupported-tool", tool });
1208
+ } else {
1209
+ openaiFunctions.push({
1210
+ name: tool.name,
1211
+ description: tool.description,
1212
+ parameters: tool.parameters
1213
+ });
1214
+ }
1215
+ }
1216
+ if (toolChoice == null) {
1217
+ return {
1218
+ functions: openaiFunctions,
1219
+ function_call: void 0,
1220
+ toolWarnings
1221
+ };
1222
+ }
1223
+ const type2 = toolChoice.type;
1224
+ switch (type2) {
1225
+ case "auto":
1226
+ case "none":
1227
+ case void 0:
1228
+ return {
1229
+ functions: openaiFunctions,
1230
+ function_call: void 0,
1231
+ toolWarnings
1232
+ };
1233
+ case "required":
1234
+ throw new UnsupportedFunctionalityError({
1235
+ functionality: "useLegacyFunctionCalling and toolChoice: required"
1236
+ });
1237
+ default:
1238
+ return {
1239
+ functions: openaiFunctions,
1240
+ function_call: { name: toolChoice.toolName },
1241
+ toolWarnings
1242
+ };
1243
+ }
1244
+ }
1245
+ const openaiTools2 = [];
1246
+ for (const tool of tools) {
1247
+ if (tool.type === "provider-defined") {
1248
+ toolWarnings.push({ type: "unsupported-tool", tool });
1249
+ } else {
1250
+ openaiTools2.push({
1251
+ type: "function",
1252
+ function: {
1253
+ name: tool.name,
1254
+ description: tool.description,
1255
+ parameters: tool.parameters,
1256
+ strict: structuredOutputs ? true : void 0
1257
+ }
1258
+ });
1259
+ }
1260
+ }
1261
+ if (toolChoice == null) {
1262
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
1263
+ }
1264
+ const type = toolChoice.type;
1265
+ switch (type) {
1266
+ case "auto":
1267
+ case "none":
1268
+ case "required":
1269
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
1270
+ case "tool":
1271
+ return {
1272
+ tools: openaiTools2,
1273
+ tool_choice: {
1274
+ type: "function",
1275
+ function: {
1276
+ name: toolChoice.toolName
1277
+ }
1278
+ },
1279
+ toolWarnings
1280
+ };
1281
+ default: {
1282
+ const _exhaustiveCheck = type;
1283
+ throw new UnsupportedFunctionalityError({
1284
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
1285
+ });
1286
+ }
1287
+ }
1288
+ }
1289
+ var OpenAIChatLanguageModel = class {
1290
+ constructor(modelId, settings, config) {
1291
+ this.specificationVersion = "v1";
1292
+ this.modelId = modelId;
1293
+ this.settings = settings;
1294
+ this.config = config;
1295
+ }
1296
+ get supportsStructuredOutputs() {
1297
+ var _a15;
1298
+ return (_a15 = this.settings.structuredOutputs) != null ? _a15 : isReasoningModel(this.modelId);
1299
+ }
1300
+ get defaultObjectGenerationMode() {
1301
+ if (isAudioModel(this.modelId)) {
1302
+ return "tool";
1303
+ }
1304
+ return this.supportsStructuredOutputs ? "json" : "tool";
1305
+ }
1306
+ get provider() {
1307
+ return this.config.provider;
1308
+ }
1309
+ get supportsImageUrls() {
1310
+ return !this.settings.downloadImages;
1311
+ }
1312
+ getArgs({
1313
+ mode,
1314
+ prompt,
1315
+ maxTokens,
1316
+ temperature,
1317
+ topP,
1318
+ topK,
1319
+ frequencyPenalty,
1320
+ presencePenalty,
1321
+ stopSequences,
1322
+ responseFormat,
1323
+ seed,
1324
+ providerMetadata
1325
+ }) {
1326
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
1327
+ const type = mode.type;
1328
+ const warnings = [];
1329
+ if (topK != null) {
1330
+ warnings.push({
1331
+ type: "unsupported-setting",
1332
+ setting: "topK"
1333
+ });
1334
+ }
1335
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
1336
+ warnings.push({
1337
+ type: "unsupported-setting",
1338
+ setting: "responseFormat",
1339
+ details: "JSON response format schema is only supported with structuredOutputs"
1340
+ });
1341
+ }
1342
+ const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
1343
+ if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
1344
+ throw new UnsupportedFunctionalityError({
1345
+ functionality: "useLegacyFunctionCalling with parallelToolCalls"
1346
+ });
1347
+ }
1348
+ if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
1349
+ throw new UnsupportedFunctionalityError({
1350
+ functionality: "structuredOutputs with useLegacyFunctionCalling"
1351
+ });
1352
+ }
1353
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
1354
+ {
1355
+ prompt,
1356
+ useLegacyFunctionCalling,
1357
+ systemMessageMode: getSystemMessageMode(this.modelId)
1358
+ }
1359
+ );
1360
+ warnings.push(...messageWarnings);
1361
+ const baseArgs = {
1362
+ // model id:
1363
+ model: this.modelId,
1364
+ // model specific settings:
1365
+ logit_bias: this.settings.logitBias,
1366
+ logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
1367
+ top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1368
+ user: this.settings.user,
1369
+ parallel_tool_calls: this.settings.parallelToolCalls,
1370
+ // standardized settings:
1371
+ max_tokens: maxTokens,
1372
+ temperature,
1373
+ top_p: topP,
1374
+ frequency_penalty: frequencyPenalty,
1375
+ presence_penalty: presencePenalty,
1376
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
1377
+ type: "json_schema",
1378
+ json_schema: {
1379
+ schema: responseFormat.schema,
1380
+ strict: true,
1381
+ name: (_a15 = responseFormat.name) != null ? _a15 : "response",
1382
+ description: responseFormat.description
1383
+ }
1384
+ } : { type: "json_object" } : void 0,
1385
+ stop: stopSequences,
1386
+ seed,
1387
+ // openai specific settings:
1388
+ // TODO remove in next major version; we auto-map maxTokens now
1389
+ max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
1390
+ store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
1391
+ metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
1392
+ prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
1393
+ reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
1394
+ // messages:
1395
+ messages
1396
+ };
1397
+ if (isReasoningModel(this.modelId)) {
1398
+ if (baseArgs.temperature != null) {
1399
+ baseArgs.temperature = void 0;
1400
+ warnings.push({
1401
+ type: "unsupported-setting",
1402
+ setting: "temperature",
1403
+ details: "temperature is not supported for reasoning models"
1404
+ });
1405
+ }
1406
+ if (baseArgs.top_p != null) {
1407
+ baseArgs.top_p = void 0;
1408
+ warnings.push({
1409
+ type: "unsupported-setting",
1410
+ setting: "topP",
1411
+ details: "topP is not supported for reasoning models"
1412
+ });
1413
+ }
1414
+ if (baseArgs.frequency_penalty != null) {
1415
+ baseArgs.frequency_penalty = void 0;
1416
+ warnings.push({
1417
+ type: "unsupported-setting",
1418
+ setting: "frequencyPenalty",
1419
+ details: "frequencyPenalty is not supported for reasoning models"
1420
+ });
1421
+ }
1422
+ if (baseArgs.presence_penalty != null) {
1423
+ baseArgs.presence_penalty = void 0;
1424
+ warnings.push({
1425
+ type: "unsupported-setting",
1426
+ setting: "presencePenalty",
1427
+ details: "presencePenalty is not supported for reasoning models"
1428
+ });
1429
+ }
1430
+ if (baseArgs.logit_bias != null) {
1431
+ baseArgs.logit_bias = void 0;
1432
+ warnings.push({
1433
+ type: "other",
1434
+ message: "logitBias is not supported for reasoning models"
1435
+ });
1436
+ }
1437
+ if (baseArgs.logprobs != null) {
1438
+ baseArgs.logprobs = void 0;
1439
+ warnings.push({
1440
+ type: "other",
1441
+ message: "logprobs is not supported for reasoning models"
1442
+ });
1443
+ }
1444
+ if (baseArgs.top_logprobs != null) {
1445
+ baseArgs.top_logprobs = void 0;
1446
+ warnings.push({
1447
+ type: "other",
1448
+ message: "topLogprobs is not supported for reasoning models"
1449
+ });
1450
+ }
1451
+ if (baseArgs.max_tokens != null) {
1452
+ if (baseArgs.max_completion_tokens == null) {
1453
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
1454
+ }
1455
+ baseArgs.max_tokens = void 0;
1456
+ }
1457
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
1458
+ if (baseArgs.temperature != null) {
1459
+ baseArgs.temperature = void 0;
1460
+ warnings.push({
1461
+ type: "unsupported-setting",
1462
+ setting: "temperature",
1463
+ details: "temperature is not supported for the search preview models and has been removed."
1464
+ });
1465
+ }
1466
+ }
1467
+ switch (type) {
1468
+ case "regular": {
1469
+ const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
1470
+ mode,
1471
+ useLegacyFunctionCalling,
1472
+ structuredOutputs: this.supportsStructuredOutputs
1473
+ });
1474
+ return {
1475
+ args: {
1476
+ ...baseArgs,
1477
+ tools,
1478
+ tool_choice,
1479
+ functions,
1480
+ function_call
1481
+ },
1482
+ warnings: [...warnings, ...toolWarnings]
1483
+ };
1484
+ }
1485
+ case "object-json": {
1486
+ return {
1487
+ args: {
1488
+ ...baseArgs,
1489
+ response_format: this.supportsStructuredOutputs && mode.schema != null ? {
1490
+ type: "json_schema",
1491
+ json_schema: {
1492
+ schema: mode.schema,
1493
+ strict: true,
1494
+ name: (_h = mode.name) != null ? _h : "response",
1495
+ description: mode.description
1496
+ }
1497
+ } : { type: "json_object" }
1498
+ },
1499
+ warnings
1500
+ };
1501
+ }
1502
+ case "object-tool": {
1503
+ return {
1504
+ args: useLegacyFunctionCalling ? {
1505
+ ...baseArgs,
1506
+ function_call: {
1507
+ name: mode.tool.name
1508
+ },
1509
+ functions: [
1510
+ {
1511
+ name: mode.tool.name,
1512
+ description: mode.tool.description,
1513
+ parameters: mode.tool.parameters
1514
+ }
1515
+ ]
1516
+ } : {
1517
+ ...baseArgs,
1518
+ tool_choice: {
1519
+ type: "function",
1520
+ function: { name: mode.tool.name }
1521
+ },
1522
+ tools: [
1523
+ {
1524
+ type: "function",
1525
+ function: {
1526
+ name: mode.tool.name,
1527
+ description: mode.tool.description,
1528
+ parameters: mode.tool.parameters,
1529
+ strict: this.supportsStructuredOutputs ? true : void 0
1530
+ }
1531
+ }
1532
+ ]
1533
+ },
1534
+ warnings
1535
+ };
1536
+ }
1537
+ default: {
1538
+ const _exhaustiveCheck = type;
1539
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
1540
+ }
1541
+ }
1542
+ }
1543
+ async doGenerate(options) {
1544
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
1545
+ const { args: body, warnings } = this.getArgs(options);
1546
+ const {
1547
+ responseHeaders,
1548
+ value: response,
1549
+ rawValue: rawResponse
1550
+ } = await postJsonToApi({
1551
+ url: this.config.url({
1552
+ path: "/chat/completions",
1553
+ modelId: this.modelId
1554
+ }),
1555
+ headers: combineHeaders(this.config.headers(), options.headers),
1556
+ body,
1557
+ failedResponseHandler: openaiFailedResponseHandler,
1558
+ successfulResponseHandler: createJsonResponseHandler(
1559
+ openaiChatResponseSchema
1560
+ ),
1561
+ abortSignal: options.abortSignal,
1562
+ fetch: this.config.fetch
1563
+ });
1564
+ const { messages: rawPrompt, ...rawSettings } = body;
1565
+ const choice = response.choices[0];
1566
+ const completionTokenDetails = (_a15 = response.usage) == null ? void 0 : _a15.completion_tokens_details;
1567
+ const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
1568
+ const providerMetadata = { openai: {} };
1569
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
1570
+ providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
1571
+ }
1572
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
1573
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
1574
+ }
1575
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
1576
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
1577
+ }
1578
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
1579
+ providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
1580
+ }
1581
+ return {
1582
+ text: (_c = choice.message.content) != null ? _c : void 0,
1583
+ toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
1584
+ {
1585
+ toolCallType: "function",
1586
+ toolCallId: generateId(),
1587
+ toolName: choice.message.function_call.name,
1588
+ args: choice.message.function_call.arguments
1589
+ }
1590
+ ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
1591
+ var _a22;
1592
+ return {
1593
+ toolCallType: "function",
1594
+ toolCallId: (_a22 = toolCall.id) != null ? _a22 : generateId(),
1595
+ toolName: toolCall.function.name,
1596
+ args: toolCall.function.arguments
1597
+ };
1598
+ }),
1599
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
1600
+ usage: {
1601
+ promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
1602
+ completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
1603
+ },
1604
+ rawCall: { rawPrompt, rawSettings },
1605
+ rawResponse: { headers: responseHeaders, body: rawResponse },
1606
+ request: { body: JSON.stringify(body) },
1607
+ response: getResponseMetadata(response),
1608
+ warnings,
1609
+ logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
1610
+ providerMetadata
1611
+ };
1612
+ }
1613
+ async doStream(options) {
1614
+ if (this.settings.simulateStreaming) {
1615
+ const result = await this.doGenerate(options);
1616
+ const simulatedStream = new ReadableStream({
1617
+ start(controller) {
1618
+ controller.enqueue({ type: "response-metadata", ...result.response });
1619
+ if (result.text) {
1620
+ controller.enqueue({
1621
+ type: "text-delta",
1622
+ textDelta: result.text
1623
+ });
1624
+ }
1625
+ if (result.toolCalls) {
1626
+ for (const toolCall of result.toolCalls) {
1627
+ controller.enqueue({
1628
+ type: "tool-call-delta",
1629
+ toolCallType: "function",
1630
+ toolCallId: toolCall.toolCallId,
1631
+ toolName: toolCall.toolName,
1632
+ argsTextDelta: toolCall.args
1633
+ });
1634
+ controller.enqueue({
1635
+ type: "tool-call",
1636
+ ...toolCall
1637
+ });
1638
+ }
1639
+ }
1640
+ controller.enqueue({
1641
+ type: "finish",
1642
+ finishReason: result.finishReason,
1643
+ usage: result.usage,
1644
+ logprobs: result.logprobs,
1645
+ providerMetadata: result.providerMetadata
+ });
+ controller.close();
+ }
+ });
+ return {
+ stream: simulatedStream,
+ rawCall: result.rawCall,
+ rawResponse: result.rawResponse,
+ warnings: result.warnings
+ };
+ }
+ const { args, warnings } = this.getArgs(options);
+ const body = {
+ ...args,
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ };
+ const { responseHeaders, value: response } = await postJsonToApi({
+ url: this.config.url({
+ path: "/chat/completions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createEventSourceResponseHandler(
+ openaiChatChunkSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const { messages: rawPrompt, ...rawSettings } = args;
+ const toolCalls = [];
+ let finishReason = "unknown";
+ let usage = {
+ promptTokens: void 0,
+ completionTokens: void 0
+ };
+ let logprobs;
+ let isFirstChunk = true;
+ const { useLegacyFunctionCalling } = this.settings;
+ const providerMetadata = { openai: {} };
+ return {
+ stream: response.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ if (!chunk.success) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ if ("error" in value) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: value.error });
+ return;
+ }
+ if (isFirstChunk) {
+ isFirstChunk = false;
+ controller.enqueue({
+ type: "response-metadata",
+ ...getResponseMetadata(value)
+ });
+ }
+ if (value.usage != null) {
+ const {
+ prompt_tokens,
+ completion_tokens,
+ prompt_tokens_details,
+ completion_tokens_details
+ } = value.usage;
+ usage = {
+ promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
+ completionTokens: completion_tokens != null ? completion_tokens : void 0
+ };
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+ providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ }
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+ providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ }
+ }
+ const choice = value.choices[0];
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
+ }
+ if ((choice == null ? void 0 : choice.delta) == null) {
+ return;
+ }
+ const delta = choice.delta;
+ if (delta.content != null) {
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: delta.content
+ });
+ }
+ const mappedLogprobs = mapOpenAIChatLogProbsOutput(
+ choice == null ? void 0 : choice.logprobs
+ );
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+ if (logprobs === void 0) logprobs = [];
+ logprobs.push(...mappedLogprobs);
+ }
+ const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
+ {
+ type: "function",
+ id: generateId(),
+ function: delta.function_call,
+ index: 0
+ }
+ ] : delta.tool_calls;
+ if (mappedToolCalls != null) {
+ for (const toolCallDelta of mappedToolCalls) {
+ const index = toolCallDelta.index;
+ if (toolCalls[index] == null) {
+ if (toolCallDelta.type !== "function") {
+ throw new InvalidResponseDataError({
+ data: toolCallDelta,
+ message: `Expected 'function' type.`
+ });
+ }
+ if (toolCallDelta.id == null) {
+ throw new InvalidResponseDataError({
+ data: toolCallDelta,
+ message: `Expected 'id' to be a string.`
+ });
+ }
+ if (((_a15 = toolCallDelta.function) == null ? void 0 : _a15.name) == null) {
+ throw new InvalidResponseDataError({
+ data: toolCallDelta,
+ message: `Expected 'function.name' to be a string.`
+ });
+ }
+ toolCalls[index] = {
+ id: toolCallDelta.id,
+ type: "function",
+ function: {
+ name: toolCallDelta.function.name,
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ },
+ hasFinished: false
+ };
+ const toolCall2 = toolCalls[index];
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (toolCall2.function.arguments.length > 0) {
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall2.id,
+ toolName: toolCall2.function.name,
+ argsTextDelta: toolCall2.function.arguments
+ });
+ }
+ if (isParsableJson(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_e = toolCall2.id) != null ? _e : generateId(),
+ toolName: toolCall2.function.name,
+ args: toolCall2.function.arguments
+ });
+ toolCall2.hasFinished = true;
+ }
+ }
+ continue;
+ }
+ const toolCall = toolCalls[index];
+ if (toolCall.hasFinished) {
+ continue;
+ }
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ }
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall.id,
+ toolName: toolCall.function.name,
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ });
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_l = toolCall.id) != null ? _l : generateId(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ toolCall.hasFinished = true;
+ }
+ }
+ }
+ },
+ flush(controller) {
+ var _a15, _b;
+ controller.enqueue({
+ type: "finish",
+ finishReason,
+ logprobs,
+ usage: {
+ promptTokens: (_a15 = usage.promptTokens) != null ? _a15 : NaN,
+ completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+ },
+ ...providerMetadata != null ? { providerMetadata } : {}
+ });
+ }
+ })
+ ),
+ rawCall: { rawPrompt, rawSettings },
+ rawResponse: { headers: responseHeaders },
+ request: { body: JSON.stringify(body) },
+ warnings
+ };
+ }
+ };
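The streaming transform above assembles tool calls incrementally: argument text arrives as deltas, and a final "tool-call" part is emitted only once the accumulated arguments parse as JSON. A minimal standalone sketch of that accumulation strategy (names and shapes here are illustrative, not part of the package):

function createToolCallAccumulator(onToolCall) {
  const calls = {};
  return function push(delta) {
    // initialize the slot on the first delta for this index
    const call = (calls[delta.index] ??= { id: delta.id, name: delta.name, args: "", done: false });
    if (call.done) return; // mirror of hasFinished: ignore trailing deltas
    call.args += delta.argsTextDelta ?? "";
    try {
      JSON.parse(call.args); // stand-in for isParsableJson
      call.done = true;
      onToolCall({ id: call.id, name: call.name, args: call.args });
    } catch {
      // arguments still incomplete; wait for more deltas
    }
  };
}

// usage:
const push = createToolCallAccumulator((call) => console.log("complete:", call));
push({ index: 0, id: "call_1", name: "search", argsTextDelta: '{"q":' });
push({ index: 0, argsTextDelta: '"hello"}' }); // logs the completed call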
+ var openaiTokenUsageSchema = z.object({
+ prompt_tokens: z.number().nullish(),
+ completion_tokens: z.number().nullish(),
+ prompt_tokens_details: z.object({
+ cached_tokens: z.number().nullish()
+ }).nullish(),
+ completion_tokens_details: z.object({
+ reasoning_tokens: z.number().nullish(),
+ accepted_prediction_tokens: z.number().nullish(),
+ rejected_prediction_tokens: z.number().nullish()
+ }).nullish()
+ }).nullish();
+ var openaiChatResponseSchema = z.object({
+ id: z.string().nullish(),
+ created: z.number().nullish(),
+ model: z.string().nullish(),
+ choices: z.array(
+ z.object({
+ message: z.object({
+ role: z.literal("assistant").nullish(),
+ content: z.string().nullish(),
+ function_call: z.object({
+ arguments: z.string(),
+ name: z.string()
+ }).nullish(),
+ tool_calls: z.array(
+ z.object({
+ id: z.string().nullish(),
+ type: z.literal("function"),
+ function: z.object({
+ name: z.string(),
+ arguments: z.string()
+ })
+ })
+ ).nullish()
+ }),
+ index: z.number(),
+ logprobs: z.object({
+ content: z.array(
+ z.object({
+ token: z.string(),
+ logprob: z.number(),
+ top_logprobs: z.array(
+ z.object({
+ token: z.string(),
+ logprob: z.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullish(),
+ finish_reason: z.string().nullish()
+ })
+ ),
+ usage: openaiTokenUsageSchema
+ });
+ var openaiChatChunkSchema = z.union([
+ z.object({
+ id: z.string().nullish(),
+ created: z.number().nullish(),
+ model: z.string().nullish(),
+ choices: z.array(
+ z.object({
+ delta: z.object({
+ role: z.enum(["assistant"]).nullish(),
+ content: z.string().nullish(),
+ function_call: z.object({
+ name: z.string().optional(),
+ arguments: z.string().optional()
+ }).nullish(),
+ tool_calls: z.array(
+ z.object({
+ index: z.number(),
+ id: z.string().nullish(),
+ type: z.literal("function").optional(),
+ function: z.object({
+ name: z.string().nullish(),
+ arguments: z.string().nullish()
+ })
+ })
+ ).nullish()
+ }).nullish(),
+ logprobs: z.object({
+ content: z.array(
+ z.object({
+ token: z.string(),
+ logprob: z.number(),
+ top_logprobs: z.array(
+ z.object({
+ token: z.string(),
+ logprob: z.number()
+ })
+ )
+ })
+ ).nullable()
+ }).nullish(),
+ finish_reason: z.string().nullable().optional(),
+ index: z.number()
+ })
+ ),
+ usage: openaiTokenUsageSchema
+ }),
+ openaiErrorDataSchema
+ ]);
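openaiChatChunkSchema is a union with openaiErrorDataSchema because OpenAI-compatible endpoints can emit an error payload mid-stream; parsing must accept both shapes rather than abort. A hedged illustration with simplified stand-in schemas (not the package's actual definitions):

import { z } from 'zod';

const chunkOrError = z.union([
  z.object({ choices: z.array(z.object({ delta: z.object({ content: z.string().nullish() }).nullish() })) }),
  z.object({ error: z.object({ message: z.string() }) }),
]);

console.log(chunkOrError.safeParse({ choices: [{ delta: { content: "hi" } }] }).success); // true
console.log(chunkOrError.safeParse({ error: { message: "rate limited" } }).success);      // true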
+ function isReasoningModel(modelId) {
+ return modelId.startsWith("o");
+ }
+ function isAudioModel(modelId) {
+ return modelId.startsWith("gpt-4o-audio-preview");
+ }
+ function getSystemMessageMode(modelId) {
+ var _a15, _b;
+ if (!isReasoningModel(modelId)) {
+ return "system";
+ }
+ return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
+ }
+ var reasoningModels = {
+ "o1-mini": {
+ systemMessageMode: "remove"
+ },
+ "o1-mini-2024-09-12": {
+ systemMessageMode: "remove"
+ },
+ "o1-preview": {
+ systemMessageMode: "remove"
+ },
+ "o1-preview-2024-09-12": {
+ systemMessageMode: "remove"
+ },
+ "o3-mini": {
+ systemMessageMode: "developer"
+ },
+ "o3-mini-2025-01-31": {
+ systemMessageMode: "developer"
+ }
+ };
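Taken together, the two functions above route system prompts per model family. A few illustrative results, assuming both functions are in scope (model ids other than the table entries are examples):

import assert from 'node:assert';

assert.equal(getSystemMessageMode('gpt-4o'), 'system');     // not a reasoning model
assert.equal(getSystemMessageMode('o1-mini'), 'remove');    // explicit table entry
assert.equal(getSystemMessageMode('o3-mini'), 'developer'); // explicit table entry
assert.equal(getSystemMessageMode('o1'), 'developer');      // fallback for other "o*" ids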
+ function convertToOpenAICompletionPrompt({
+ prompt,
+ inputFormat,
+ user = "user",
+ assistant = "assistant"
+ }) {
+ if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
+ return { prompt: prompt[0].content[0].text };
+ }
+ let text = "";
+ if (prompt[0].role === "system") {
+ text += `${prompt[0].content}
+
+ `;
+ prompt = prompt.slice(1);
+ }
+ for (const { role, content } of prompt) {
+ switch (role) {
+ case "system": {
+ throw new InvalidPromptError({
+ message: "Unexpected system message in prompt: ${content}",
+ prompt
+ });
+ }
+ case "user": {
+ const userMessage = content.map((part) => {
+ switch (part.type) {
+ case "text": {
+ return part.text;
+ }
+ case "image": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "images"
+ });
+ }
+ }
+ }).join("");
+ text += `${user}:
+ ${userMessage}
+
+ `;
+ break;
+ }
+ case "assistant": {
+ const assistantMessage = content.map((part) => {
+ switch (part.type) {
+ case "text": {
+ return part.text;
+ }
+ case "tool-call": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "tool-call messages"
+ });
+ }
+ }
+ }).join("");
+ text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+ break;
+ }
+ case "tool": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "tool messages"
+ });
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ text += `${assistant}:
+ `;
+ return {
+ prompt: text,
+ stopSequences: [`
+ ${user}:`]
+ };
+ }
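A worked example of the conversion above (the input messages are hypothetical): a chat prompt is flattened into a single role-labeled completion prompt, with a stop sequence on the next user turn.

const { prompt, stopSequences } = convertToOpenAICompletionPrompt({
  inputFormat: 'messages',
  prompt: [
    { role: 'system', content: 'Be terse.' },
    { role: 'user', content: [{ type: 'text', text: 'Hi' }] },
  ],
});
// prompt        === 'Be terse.\n\nuser:\nHi\n\nassistant:\n'
// stopSequences === ['\nuser:']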
+ function mapOpenAICompletionLogProbs(logprobs) {
+ return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
+ token,
+ logprob: logprobs.token_logprobs[index],
+ topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
+ ([token2, logprob]) => ({
+ token: token2,
+ logprob
+ })
+ ) : []
+ }));
+ }
+ var OpenAICompletionLanguageModel = class {
+ constructor(modelId, settings, config) {
+ this.specificationVersion = "v1";
+ this.defaultObjectGenerationMode = void 0;
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ mode,
+ inputFormat,
+ prompt,
+ maxTokens,
+ temperature,
+ topP,
+ topK,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences: userStopSequences,
+ responseFormat,
+ seed
+ }) {
+ var _a15;
+ const type = mode.type;
+ const warnings = [];
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if (responseFormat != null && responseFormat.type !== "text") {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "responseFormat",
+ details: "JSON response format is not supported."
+ });
+ }
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
+ const baseArgs = {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: this.settings.echo,
+ logit_bias: this.settings.logitBias,
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+ suffix: this.settings.suffix,
+ user: this.settings.user,
+ // standardized settings:
+ max_tokens: maxTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ };
+ switch (type) {
+ case "regular": {
+ if ((_a15 = mode.tools) == null ? void 0 : _a15.length) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "tools"
+ });
+ }
+ if (mode.toolChoice) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "toolChoice"
+ });
+ }
+ return { args: baseArgs, warnings };
+ }
+ case "object-json": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "object-json mode"
+ });
+ }
+ case "object-tool": {
+ throw new UnsupportedFunctionalityError({
+ functionality: "object-tool mode"
+ });
+ }
+ default: {
+ const _exhaustiveCheck = type;
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ async doGenerate(options) {
+ const { args, warnings } = this.getArgs(options);
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await postJsonToApi({
+ url: this.config.url({
+ path: "/completions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body: args,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ openaiCompletionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const { prompt: rawPrompt, ...rawSettings } = args;
+ const choice = response.choices[0];
+ return {
+ text: choice.text,
+ usage: {
+ promptTokens: response.usage.prompt_tokens,
+ completionTokens: response.usage.completion_tokens
+ },
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
+ logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
+ rawCall: { rawPrompt, rawSettings },
+ rawResponse: { headers: responseHeaders, body: rawResponse },
+ response: getResponseMetadata(response),
+ warnings,
+ request: { body: JSON.stringify(args) }
+ };
+ }
+ async doStream(options) {
+ const { args, warnings } = this.getArgs(options);
+ const body = {
+ ...args,
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ };
+ const { responseHeaders, value: response } = await postJsonToApi({
+ url: this.config.url({
+ path: "/completions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createEventSourceResponseHandler(
+ openaiCompletionChunkSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const { prompt: rawPrompt, ...rawSettings } = args;
+ let finishReason = "unknown";
+ let usage = {
+ promptTokens: Number.NaN,
+ completionTokens: Number.NaN
+ };
+ let logprobs;
+ let isFirstChunk = true;
+ return {
+ stream: response.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ if (!chunk.success) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ if ("error" in value) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: value.error });
+ return;
+ }
+ if (isFirstChunk) {
+ isFirstChunk = false;
+ controller.enqueue({
+ type: "response-metadata",
+ ...getResponseMetadata(value)
+ });
+ }
+ if (value.usage != null) {
+ usage = {
+ promptTokens: value.usage.prompt_tokens,
+ completionTokens: value.usage.completion_tokens
+ };
+ }
+ const choice = value.choices[0];
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
+ }
+ if ((choice == null ? void 0 : choice.text) != null) {
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: choice.text
+ });
+ }
+ const mappedLogprobs = mapOpenAICompletionLogProbs(
+ choice == null ? void 0 : choice.logprobs
+ );
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
+ if (logprobs === void 0) logprobs = [];
+ logprobs.push(...mappedLogprobs);
+ }
+ },
+ flush(controller) {
+ controller.enqueue({
+ type: "finish",
+ finishReason,
+ logprobs,
+ usage
+ });
+ }
+ })
+ ),
+ rawCall: { rawPrompt, rawSettings },
+ rawResponse: { headers: responseHeaders },
+ warnings,
+ request: { body: JSON.stringify(body) }
+ };
+ }
+ };
+ var openaiCompletionResponseSchema = z.object({
+ id: z.string().nullish(),
+ created: z.number().nullish(),
+ model: z.string().nullish(),
+ choices: z.array(
+ z.object({
+ text: z.string(),
+ finish_reason: z.string(),
+ logprobs: z.object({
+ tokens: z.array(z.string()),
+ token_logprobs: z.array(z.number()),
+ top_logprobs: z.array(z.record(z.string(), z.number())).nullable()
+ }).nullish()
+ })
+ ),
+ usage: z.object({
+ prompt_tokens: z.number(),
+ completion_tokens: z.number()
+ })
+ });
+ var openaiCompletionChunkSchema = z.union([
+ z.object({
+ id: z.string().nullish(),
+ created: z.number().nullish(),
+ model: z.string().nullish(),
+ choices: z.array(
+ z.object({
+ text: z.string(),
+ finish_reason: z.string().nullish(),
+ index: z.number(),
+ logprobs: z.object({
+ tokens: z.array(z.string()),
+ token_logprobs: z.array(z.number()),
+ top_logprobs: z.array(z.record(z.string(), z.number())).nullable()
+ }).nullish()
+ })
+ ),
+ usage: z.object({
+ prompt_tokens: z.number(),
+ completion_tokens: z.number()
+ }).nullish()
+ }),
+ openaiErrorDataSchema
+ ]);
+ var OpenAIEmbeddingModel = class {
+ constructor(modelId, settings, config) {
+ this.specificationVersion = "v1";
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ get maxEmbeddingsPerCall() {
+ var _a15;
+ return (_a15 = this.settings.maxEmbeddingsPerCall) != null ? _a15 : 2048;
+ }
+ get supportsParallelCalls() {
+ var _a15;
+ return (_a15 = this.settings.supportsParallelCalls) != null ? _a15 : true;
+ }
+ async doEmbed({
+ values,
+ headers,
+ abortSignal
+ }) {
+ if (values.length > this.maxEmbeddingsPerCall) {
+ throw new TooManyEmbeddingValuesForCallError({
+ provider: this.provider,
+ modelId: this.modelId,
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+ values
+ });
+ }
+ const { responseHeaders, value: response } = await postJsonToApi({
+ url: this.config.url({
+ path: "/embeddings",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), headers),
+ body: {
+ model: this.modelId,
+ input: values,
+ encoding_format: "float",
+ dimensions: this.settings.dimensions,
+ user: this.settings.user
+ },
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ openaiTextEmbeddingResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ embeddings: response.data.map((item) => item.embedding),
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
+ rawResponse: { headers: responseHeaders }
+ };
+ }
+ };
+ var openaiTextEmbeddingResponseSchema = z.object({
+ data: z.array(z.object({ embedding: z.array(z.number()) })),
+ usage: z.object({ prompt_tokens: z.number() }).nullish()
+ });
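doEmbed above rejects batches larger than maxEmbeddingsPerCall (default 2048) with TooManyEmbeddingValuesForCallError, so callers are expected to chunk their inputs first. A minimal sketch of that chunking (the helper name is hypothetical):

function chunkValues(values, size = 2048) {
  const batches = [];
  for (let i = 0; i < values.length; i += size) {
    batches.push(values.slice(i, i + size));
  }
  return batches;
}

// usage: 5000 inputs become batches of 2048, 2048 and 904
console.log(chunkValues(Array.from({ length: 5000 }, (_, i) => `doc ${i}`)).map((b) => b.length));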
+ var modelMaxImagesPerCall = {
+ "dall-e-3": 1,
+ "dall-e-2": 10
+ };
+ var OpenAIImageModel = class {
+ constructor(modelId, settings, config) {
+ this.modelId = modelId;
+ this.settings = settings;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get maxImagesPerCall() {
+ var _a15, _b;
+ return (_b = (_a15 = this.settings.maxImagesPerCall) != null ? _a15 : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async doGenerate({
+ prompt,
+ n,
+ size,
+ aspectRatio,
+ seed,
+ providerOptions,
+ headers,
+ abortSignal
+ }) {
+ var _a15, _b, _c, _d;
+ const warnings = [];
+ if (aspectRatio != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "aspectRatio",
+ details: "This model does not support aspect ratio. Use `size` instead."
+ });
+ }
+ if (seed != null) {
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
+ }
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
+ const { value: response, responseHeaders } = await postJsonToApi({
+ url: this.config.url({
+ path: "/images/generations",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), headers),
+ body: {
+ model: this.modelId,
+ prompt,
+ n,
+ size,
+ ...(_d = providerOptions.openai) != null ? _d : {},
+ response_format: "b64_json"
+ },
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ openaiImageResponseSchema
+ ),
+ abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ images: response.data.map((item) => item.b64_json),
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders
+ }
+ };
+ }
+ };
+ var openaiImageResponseSchema = z.object({
+ data: z.array(z.object({ b64_json: z.string() }))
+ });
+ var openAIProviderOptionsSchema = z.object({
+ include: z.array(z.string()).nullish(),
+ language: z.string().nullish(),
+ prompt: z.string().nullish(),
+ temperature: z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: z.array(z.enum(["word", "segment"])).nullish().default(["segment"])
+ });
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ var _a15, _b, _c, _d, _e;
+ const warnings = [];
+ const openAIOptions = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: openAIProviderOptionsSchema
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: (_a15 = openAIOptions.include) != null ? _a15 : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ };
+ for (const key in transcriptionModelOptions) {
+ const value = transcriptionModelOptions[key];
+ if (value !== void 0) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a15, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postFormDataToApi({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var openaiTranscriptionResponseSchema = z.object({
+ text: z.string(),
+ language: z.string().nullish(),
+ duration: z.number().nullish(),
+ words: z.array(
+ z.object({
+ word: z.string(),
+ start: z.number(),
+ end: z.number()
+ })
+ ).nullish()
+ });
+ function convertToOpenAIResponsesMessages({
+ prompt,
+ systemMessageMode
+ }) {
+ const messages = [];
+ const warnings = [];
+ for (const { role, content } of prompt) {
+ switch (role) {
+ case "system": {
+ switch (systemMessageMode) {
+ case "system": {
+ messages.push({ role: "system", content });
+ break;
+ }
+ case "developer": {
+ messages.push({ role: "developer", content });
+ break;
+ }
+ case "remove": {
+ warnings.push({
+ type: "other",
+ message: "system messages are removed for this model"
+ });
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = systemMessageMode;
+ throw new Error(
+ `Unsupported system message mode: ${_exhaustiveCheck}`
+ );
+ }
+ }
+ break;
+ }
+ case "user": {
+ messages.push({
+ role: "user",
+ content: content.map((part, index) => {
+ var _a15, _b, _c, _d;
+ switch (part.type) {
+ case "text": {
+ return { type: "input_text", text: part.text };
+ }
+ case "image": {
+ return {
+ type: "input_image",
+ image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
+ // OpenAI specific extension: image detail
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
+ };
+ }
+ case "file": {
+ if (part.data instanceof URL) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "File URLs in user messages"
+ });
+ }
+ switch (part.mimeType) {
+ case "application/pdf": {
+ return {
+ type: "input_file",
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ }
+ default: {
+ throw new UnsupportedFunctionalityError({
+ functionality: "Only PDF files are supported in user messages"
+ });
+ }
+ }
+ }
+ }
+ })
+ });
+ break;
+ }
+ case "assistant": {
+ for (const part of content) {
+ switch (part.type) {
+ case "text": {
+ messages.push({
+ role: "assistant",
+ content: [{ type: "output_text", text: part.text }]
+ });
+ break;
+ }
+ case "tool-call": {
+ messages.push({
+ type: "function_call",
+ call_id: part.toolCallId,
+ name: part.toolName,
+ arguments: JSON.stringify(part.args)
+ });
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case "tool": {
+ for (const part of content) {
+ messages.push({
+ type: "function_call_output",
+ call_id: part.toolCallId,
+ output: JSON.stringify(part.result)
+ });
+ }
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ return { messages, warnings };
+ }
+ function mapOpenAIResponseFinishReason({
+ finishReason,
+ hasToolCalls
+ }) {
+ switch (finishReason) {
+ case void 0:
+ case null:
+ return hasToolCalls ? "tool-calls" : "stop";
+ case "max_output_tokens":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ default:
+ return hasToolCalls ? "tool-calls" : "unknown";
+ }
+ }
+ function prepareResponsesTools({
+ mode,
+ strict
+ }) {
+ var _a15;
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
+ const toolWarnings = [];
+ if (tools == null) {
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
+ }
+ const toolChoice = mode.toolChoice;
+ const openaiTools2 = [];
+ for (const tool of tools) {
+ switch (tool.type) {
+ case "function":
+ openaiTools2.push({
+ type: "function",
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.parameters,
+ strict: strict ? true : void 0
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.web_search_preview":
+ openaiTools2.push({
+ type: "web_search_preview",
+ search_context_size: tool.args.searchContextSize,
+ user_location: tool.args.userLocation
+ });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ }
+ if (toolChoice == null) {
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+ }
+ const type = toolChoice.type;
+ switch (type) {
+ case "auto":
+ case "none":
+ case "required":
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
+ case "tool": {
+ if (toolChoice.toolName === "web_search_preview") {
+ return {
+ tools: openaiTools2,
+ tool_choice: {
+ type: "web_search_preview"
+ },
+ toolWarnings
+ };
+ }
+ return {
+ tools: openaiTools2,
+ tool_choice: {
+ type: "function",
+ name: toolChoice.toolName
+ },
+ toolWarnings
+ };
+ }
+ default: {
+ const _exhaustiveCheck = type;
+ throw new UnsupportedFunctionalityError({
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ });
+ }
+ }
+ }
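An illustrative call (the tool definition is hypothetical) showing what prepareResponsesTools above produces for a plain function tool:

const { tools, tool_choice } = prepareResponsesTools({
  strict: true,
  mode: {
    type: 'regular',
    toolChoice: { type: 'auto' },
    tools: [{
      type: 'function',
      name: 'getWeather',
      description: 'Look up current weather',
      parameters: { type: 'object', properties: { city: { type: 'string' } } },
    }],
  },
});
// tools       -> [{ type: 'function', name: 'getWeather', description: ..., parameters: ..., strict: true }]
// tool_choice -> 'auto'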
+ var OpenAIResponsesLanguageModel = class {
+ constructor(modelId, config) {
+ this.specificationVersion = "v1";
+ this.defaultObjectGenerationMode = "json";
+ this.supportsStructuredOutputs = true;
+ this.modelId = modelId;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ mode,
+ maxTokens,
+ temperature,
+ stopSequences,
+ topP,
+ topK,
+ presencePenalty,
+ frequencyPenalty,
+ seed,
+ prompt,
+ providerMetadata,
+ responseFormat
+ }) {
+ var _a15, _b, _c;
+ const warnings = [];
+ const modelConfig = getResponsesModelConfig(this.modelId);
+ const type = mode.type;
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if (seed != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "seed"
+ });
+ }
+ if (presencePenalty != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "presencePenalty"
+ });
+ }
+ if (frequencyPenalty != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "frequencyPenalty"
+ });
+ }
+ if (stopSequences != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "stopSequences"
+ });
+ }
+ const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ prompt,
+ systemMessageMode: modelConfig.systemMessageMode
+ });
+ warnings.push(...messageWarnings);
+ const openaiOptions = parseProviderOptions({
+ provider: "openai",
+ providerOptions: providerMetadata,
+ schema: openaiResponsesProviderOptionsSchema
+ });
+ const isStrict = (_a15 = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a15 : true;
+ const baseArgs = {
+ model: this.modelId,
+ input: messages,
+ temperature,
+ top_p: topP,
+ max_output_tokens: maxTokens,
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+ text: {
+ format: responseFormat.schema != null ? {
+ type: "json_schema",
+ strict: isStrict,
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ description: responseFormat.description,
+ schema: responseFormat.schema
+ } : { type: "json_object" }
+ }
+ },
+ // provider options:
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ // model-specific settings:
+ ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ },
+ ...modelConfig.requiredAutoTruncation && {
+ truncation: "auto"
+ }
+ };
+ if (modelConfig.isReasoningModel) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for reasoning models"
+ });
+ }
+ if (baseArgs.top_p != null) {
+ baseArgs.top_p = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported for reasoning models"
+ });
+ }
+ }
+ switch (type) {
+ case "regular": {
+ const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
+ mode,
+ strict: isStrict
+ // TODO support provider options on tools
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools,
+ tool_choice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
+ }
+ case "object-json": {
+ return {
+ args: {
+ ...baseArgs,
+ text: {
+ format: mode.schema != null ? {
+ type: "json_schema",
+ strict: isStrict,
+ name: (_c = mode.name) != null ? _c : "response",
+ description: mode.description,
+ schema: mode.schema
+ } : { type: "json_object" }
+ }
+ },
+ warnings
+ };
+ }
+ case "object-tool": {
+ return {
+ args: {
+ ...baseArgs,
+ tool_choice: { type: "function", name: mode.tool.name },
+ tools: [
+ {
+ type: "function",
+ name: mode.tool.name,
+ description: mode.tool.description,
+ parameters: mode.tool.parameters,
+ strict: isStrict
+ }
+ ]
+ },
+ warnings
+ };
+ }
+ default: {
+ const _exhaustiveCheck = type;
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ async doGenerate(options) {
+ var _a15, _b, _c, _d, _e;
+ const { args: body, warnings } = this.getArgs(options);
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await postJsonToApi({
+ url: this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ z.object({
+ id: z.string(),
+ created_at: z.number(),
+ model: z.string(),
+ output: z.array(
+ z.discriminatedUnion("type", [
+ z.object({
+ type: z.literal("message"),
+ role: z.literal("assistant"),
+ content: z.array(
+ z.object({
+ type: z.literal("output_text"),
+ text: z.string(),
+ annotations: z.array(
+ z.object({
+ type: z.literal("url_citation"),
+ start_index: z.number(),
+ end_index: z.number(),
+ url: z.string(),
+ title: z.string()
+ })
+ )
+ })
+ )
+ }),
+ z.object({
+ type: z.literal("function_call"),
+ call_id: z.string(),
+ name: z.string(),
+ arguments: z.string()
+ }),
+ z.object({
+ type: z.literal("web_search_call")
+ }),
+ z.object({
+ type: z.literal("computer_call")
+ }),
+ z.object({
+ type: z.literal("reasoning")
+ })
+ ])
+ ),
+ incomplete_details: z.object({ reason: z.string() }).nullable(),
+ usage: usageSchema
+ })
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
+ const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
+ toolCallType: "function",
+ toolCallId: output.call_id,
+ toolName: output.name,
+ args: output.arguments
+ }));
+ return {
+ text: outputTextElements.map((content) => content.text).join("\n"),
+ sources: outputTextElements.flatMap(
+ (content) => content.annotations.map((annotation) => {
+ var _a22, _b2, _c2;
+ return {
+ sourceType: "url",
+ id: (_c2 = (_b2 = (_a22 = this.config).generateId) == null ? void 0 : _b2.call(_a22)) != null ? _c2 : generateId(),
+ url: annotation.url,
+ title: annotation.title
+ };
+ })
+ ),
+ finishReason: mapOpenAIResponseFinishReason({
+ finishReason: (_a15 = response.incomplete_details) == null ? void 0 : _a15.reason,
+ hasToolCalls: toolCalls.length > 0
+ }),
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+ usage: {
+ promptTokens: response.usage.input_tokens,
+ completionTokens: response.usage.output_tokens
+ },
+ rawCall: {
+ rawPrompt: void 0,
+ rawSettings: {}
+ },
+ rawResponse: {
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ request: {
+ body: JSON.stringify(body)
+ },
+ response: {
+ id: response.id,
+ timestamp: new Date(response.created_at * 1e3),
+ modelId: response.model
+ },
+ providerMetadata: {
+ openai: {
+ responseId: response.id,
+ cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
+ reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ }
+ },
+ warnings
+ };
+ }
+ async doStream(options) {
+ const { args: body, warnings } = this.getArgs(options);
+ const { responseHeaders, value: response } = await postJsonToApi({
+ url: this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body: {
+ ...body,
+ stream: true
+ },
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createEventSourceResponseHandler(
+ openaiResponsesChunkSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const self = this;
+ let finishReason = "unknown";
+ let promptTokens = NaN;
+ let completionTokens = NaN;
+ let cachedPromptTokens = null;
+ let reasoningTokens = null;
+ let responseId = null;
+ const ongoingToolCalls = {};
+ let hasToolCalls = false;
+ return {
+ stream: response.pipeThrough(
+ new TransformStream({
+ transform(chunk, controller) {
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
+ if (!chunk.success) {
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ if (isResponseOutputItemAddedChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: value.item.name,
+ toolCallId: value.item.call_id
+ };
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: value.item.call_id,
+ toolName: value.item.name,
+ argsTextDelta: value.item.arguments
+ });
+ }
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+ const toolCall = ongoingToolCalls[value.output_index];
+ if (toolCall != null) {
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ argsTextDelta: value.delta
+ });
+ }
+ } else if (isResponseCreatedChunk(value)) {
+ responseId = value.response.id;
+ controller.enqueue({
+ type: "response-metadata",
+ id: value.response.id,
+ timestamp: new Date(value.response.created_at * 1e3),
+ modelId: value.response.model
+ });
+ } else if (isTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "text-delta",
+ textDelta: value.delta
+ });
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: value.item.call_id,
+ toolName: value.item.name,
+ args: value.item.arguments
+ });
+ } else if (isResponseFinishedChunk(value)) {
+ finishReason = mapOpenAIResponseFinishReason({
+ finishReason: (_a15 = value.response.incomplete_details) == null ? void 0 : _a15.reason,
+ hasToolCalls
+ });
+ promptTokens = value.response.usage.input_tokens;
+ completionTokens = value.response.usage.output_tokens;
+ cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
+ reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ } else if (isResponseAnnotationAddedChunk(value)) {
+ controller.enqueue({
+ type: "source",
+ source: {
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId(),
+ url: value.annotation.url,
+ title: value.annotation.title
+ }
+ });
+ }
+ },
+ flush(controller) {
+ controller.enqueue({
+ type: "finish",
+ finishReason,
+ usage: { promptTokens, completionTokens },
+ ...(cachedPromptTokens != null || reasoningTokens != null) && {
+ providerMetadata: {
+ openai: {
+ responseId,
+ cachedPromptTokens,
+ reasoningTokens
+ }
+ }
+ }
+ });
+ }
+ })
+ ),
+ rawCall: {
+ rawPrompt: void 0,
+ rawSettings: {}
+ },
+ rawResponse: { headers: responseHeaders },
+ request: { body: JSON.stringify(body) },
+ warnings
+ };
+ }
+ };
+ var usageSchema = z.object({
+ input_tokens: z.number(),
+ input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
+ output_tokens: z.number(),
+ output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = z.object({
+ type: z.literal("response.output_text.delta"),
+ delta: z.string()
+ });
+ var responseFinishedChunkSchema = z.object({
+ type: z.enum(["response.completed", "response.incomplete"]),
+ response: z.object({
+ incomplete_details: z.object({ reason: z.string() }).nullish(),
+ usage: usageSchema
+ })
+ });
+ var responseCreatedChunkSchema = z.object({
+ type: z.literal("response.created"),
+ response: z.object({
+ id: z.string(),
+ created_at: z.number(),
+ model: z.string()
+ })
+ });
+ var responseOutputItemDoneSchema = z.object({
+ type: z.literal("response.output_item.done"),
+ output_index: z.number(),
+ item: z.discriminatedUnion("type", [
+ z.object({
+ type: z.literal("message")
+ }),
+ z.object({
+ type: z.literal("function_call"),
+ id: z.string(),
+ call_id: z.string(),
+ name: z.string(),
+ arguments: z.string(),
+ status: z.literal("completed")
+ })
+ ])
+ });
+ var responseFunctionCallArgumentsDeltaSchema = z.object({
+ type: z.literal("response.function_call_arguments.delta"),
+ item_id: z.string(),
+ output_index: z.number(),
+ delta: z.string()
+ });
+ var responseOutputItemAddedSchema = z.object({
+ type: z.literal("response.output_item.added"),
+ output_index: z.number(),
+ item: z.discriminatedUnion("type", [
+ z.object({
+ type: z.literal("message")
+ }),
+ z.object({
+ type: z.literal("function_call"),
+ id: z.string(),
+ call_id: z.string(),
+ name: z.string(),
+ arguments: z.string()
+ })
+ ])
+ });
+ var responseAnnotationAddedSchema = z.object({
+ type: z.literal("response.output_text.annotation.added"),
+ annotation: z.object({
+ type: z.literal("url_citation"),
+ url: z.string(),
+ title: z.string()
+ })
+ });
+ var openaiResponsesChunkSchema = z.union([
+ textDeltaChunkSchema,
+ responseFinishedChunkSchema,
+ responseCreatedChunkSchema,
+ responseOutputItemDoneSchema,
+ responseFunctionCallArgumentsDeltaSchema,
+ responseOutputItemAddedSchema,
+ responseAnnotationAddedSchema,
+ z.object({ type: z.string() }).passthrough()
+ // fallback for unknown chunks
+ ]);
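The final passthrough member above means unknown Responses-stream event types still parse instead of failing the whole stream; only payloads without a string `type` are rejected. An illustration, assuming the schema is in scope:

console.log(openaiResponsesChunkSchema.safeParse({ type: 'response.in_progress' }).success); // true
console.log(openaiResponsesChunkSchema.safeParse({ delta: 'missing type field' }).success);  // false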
+ function isTextDeltaChunk(chunk) {
+ return chunk.type === "response.output_text.delta";
+ }
+ function isResponseOutputItemDoneChunk(chunk) {
+ return chunk.type === "response.output_item.done";
+ }
+ function isResponseFinishedChunk(chunk) {
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
+ }
+ function isResponseCreatedChunk(chunk) {
+ return chunk.type === "response.created";
+ }
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
+ return chunk.type === "response.function_call_arguments.delta";
+ }
+ function isResponseOutputItemAddedChunk(chunk) {
+ return chunk.type === "response.output_item.added";
+ }
+ function isResponseAnnotationAddedChunk(chunk) {
+ return chunk.type === "response.output_text.annotation.added";
+ }
+ function getResponsesModelConfig(modelId) {
+ if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+ return {
+ isReasoningModel: true,
+ systemMessageMode: "remove",
+ requiredAutoTruncation: false
+ };
+ }
+ return {
+ isReasoningModel: true,
+ systemMessageMode: "developer",
+ requiredAutoTruncation: false
+ };
+ }
+ return {
+ isReasoningModel: false,
+ systemMessageMode: "system",
+ requiredAutoTruncation: false
+ };
+ }
+ var openaiResponsesProviderOptionsSchema = z.object({
+ metadata: z.any().nullish(),
+ parallelToolCalls: z.boolean().nullish(),
+ previousResponseId: z.string().nullish(),
+ store: z.boolean().nullish(),
+ user: z.string().nullish(),
+ reasoningEffort: z.string().nullish(),
+ strictSchemas: z.boolean().nullish(),
+ instructions: z.string().nullish()
+ });
+ var WebSearchPreviewParameters = z.object({});
+ function webSearchPreviewTool({
+ searchContextSize,
+ userLocation
+ } = {}) {
+ return {
+ type: "provider-defined",
+ id: "openai.web_search_preview",
+ args: {
+ searchContextSize,
+ userLocation
+ },
+ parameters: WebSearchPreviewParameters
+ };
+ }
+ var openaiTools = {
+ webSearchPreview: webSearchPreviewTool
+ };
+ var OpenAIProviderOptionsSchema = z.object({
+ instructions: z.string().nullish(),
+ speed: z.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ text,
+ voice = "alloy",
+ outputFormat = "mp3",
+ speed,
+ instructions,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = parseProviderOptions({
+ provider: "openai",
+ providerOptions,
+ schema: OpenAIProviderOptionsSchema
+ });
+ const requestBody = {
+ model: this.modelId,
+ input: text,
+ voice,
+ response_format: "mp3",
+ speed,
+ instructions
+ };
+ if (outputFormat) {
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+ requestBody.response_format = outputFormat;
+ } else {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "outputFormat",
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+ });
+ }
+ }
+ if (openAIOptions) {
+ const speechModelOptions = {};
+ for (const key in speechModelOptions) {
+ const value = speechModelOptions[key];
+ if (value !== void 0) {
+ requestBody[key] = value;
+ }
+ }
+ }
+ return {
+ requestBody,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a15, _b, _c;
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
+ const { requestBody, warnings } = this.getArgs(options);
+ const {
+ value: audio,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postJsonToApi({
+ url: this.config.url({
+ path: "/audio/speech",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders(this.config.headers(), options.headers),
+ body: requestBody,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createBinaryResponseHandler(),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ audio,
+ warnings,
+ request: {
+ body: JSON.stringify(requestBody)
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ function createOpenAI(options = {}) {
3564
+ var _a15, _b, _c;
3565
+ const baseURL = (_a15 = withoutTrailingSlash(options.baseURL)) != null ? _a15 : "https://api.openai.com/v1";
3566
+ const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
3567
+ const providerName = (_c = options.name) != null ? _c : "openai";
3568
+ const getHeaders = () => ({
3569
+ Authorization: `Bearer ${loadApiKey({
3570
+ apiKey: options.apiKey,
3571
+ environmentVariableName: "OPENAI_API_KEY",
3572
+ description: "OpenAI"
3573
+ })}`,
3574
+ "OpenAI-Organization": options.organization,
3575
+ "OpenAI-Project": options.project,
3576
+ ...options.headers
3577
+ });
3578
+ const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
3579
+ provider: `${providerName}.chat`,
3580
+ url: ({ path }) => `${baseURL}${path}`,
3581
+ headers: getHeaders,
3582
+ compatibility,
3583
+ fetch: options.fetch
3584
+ });
3585
+ const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
3586
+ provider: `${providerName}.completion`,
3587
+ url: ({ path }) => `${baseURL}${path}`,
3588
+ headers: getHeaders,
3589
+ compatibility,
3590
+ fetch: options.fetch
3591
+ });
3592
+ const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
3593
+ provider: `${providerName}.embedding`,
3594
+ url: ({ path }) => `${baseURL}${path}`,
3595
+ headers: getHeaders,
3596
+ fetch: options.fetch
3597
+ });
3598
+ const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
3599
+ provider: `${providerName}.image`,
3600
+ url: ({ path }) => `${baseURL}${path}`,
3601
+ headers: getHeaders,
3602
+ fetch: options.fetch
3603
+ });
3604
+ const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
3605
+ provider: `${providerName}.transcription`,
3606
+ url: ({ path }) => `${baseURL}${path}`,
3607
+ headers: getHeaders,
3608
+ fetch: options.fetch
3609
+ });
3610
+ const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
3611
+ provider: `${providerName}.speech`,
3612
+ url: ({ path }) => `${baseURL}${path}`,
3613
+ headers: getHeaders,
3614
+ fetch: options.fetch
3615
+ });
3616
+ const createLanguageModel = (modelId, settings) => {
3617
+ if (new.target) {
3618
+ throw new Error(
3619
+ "The OpenAI model function cannot be called with the new keyword."
3620
+ );
3621
+ }
3622
+ if (modelId === "gpt-3.5-turbo-instruct") {
3623
+ return createCompletionModel(
3624
+ modelId,
3625
+ settings
3626
+ );
3627
+ }
3628
+ return createChatModel(modelId, settings);
3629
+ };
3630
+ const createResponsesModel = (modelId) => {
3631
+ return new OpenAIResponsesLanguageModel(modelId, {
3632
+ provider: `${providerName}.responses`,
3633
+ url: ({ path }) => `${baseURL}${path}`,
3634
+ headers: getHeaders,
3635
+ fetch: options.fetch
3636
+ });
3637
+ };
3638
+ const provider = function(modelId, settings) {
3639
+ return createLanguageModel(modelId, settings);
3640
+ };
3641
+ provider.languageModel = createLanguageModel;
3642
+ provider.chat = createChatModel;
3643
+ provider.completion = createCompletionModel;
3644
+ provider.responses = createResponsesModel;
3645
+ provider.embedding = createEmbeddingModel;
3646
+ provider.textEmbedding = createEmbeddingModel;
3647
+ provider.textEmbeddingModel = createEmbeddingModel;
3648
+ provider.image = createImageModel;
3649
+ provider.imageModel = createImageModel;
3650
+ provider.transcription = createTranscriptionModel;
3651
+ provider.transcriptionModel = createTranscriptionModel;
3652
+ provider.speech = createSpeechModel;
3653
+ provider.speechModel = createSpeechModel;
3654
+ provider.tools = openaiTools;
3655
+ return provider;
3656
+ }
3657
+ createOpenAI({
3658
+ compatibility: "strict"
3659
+ // strict for OpenAI API
3660
+ });
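
The factory returns a callable provider whose named methods map model ids onto the concrete model classes above; a short sketch of the dispatch (identifiers are illustrative):

    const myProvider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
    const chat = myProvider("gpt-4o");                    // routed to createChatModel
    const legacy = myProvider("gpt-3.5-turbo-instruct");  // special-cased to createCompletionModel
    const embedder = myProvider.textEmbeddingModel("text-embedding-3-small");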
+ 
+ // src/document/extractors/types.ts
+ var STRIP_REGEX = /(\r\n|\n|\r)/gm;
+ var openai2 = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
+ var baseLLM = openai2("gpt-4o");
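
The extractors defined below all default to this module-level baseLLM (gpt-4o) when constructed without options; any ai-sdk model whose doGenerate matches this v1 shape can be passed instead. A minimal sketch (the model id is illustrative):

    const summarizer = new SummaryExtractor({ llm: openai2("gpt-4o-mini") });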
+ 
+ // src/document/extractors/title.ts
+ var TitleExtractor = class extends BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * Can work for mixture of text and non-text nodes
+    * @type {boolean}
+    * @default false
+    */
+   isTextNodeOnly = false;
+   /**
+    * Number of nodes to extract titles from.
+    * @type {number}
+    * @default 5
+    */
+   nodes = 5;
+   /**
+    * The prompt template to use for the title extractor.
+    * @type {string}
+    */
+   nodeTemplate;
+   /**
+    * The prompt template to merge titles with.
+    * @type {string}
+    */
+   combineTemplate;
+   /**
+    * Constructor for the TitleExtractor class.
+    * @param {MastraLanguageModel} llm MastraLanguageModel instance.
+    * @param {number} nodes Number of nodes to extract titles from.
+    * @param {TitleExtractorPrompt} nodeTemplate The prompt template to use for the title extractor.
+    * @param {string} combineTemplate The prompt template to merge titles with.
+    */
+   constructor(options) {
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.nodes = options?.nodes ?? 5;
+     this.nodeTemplate = options?.nodeTemplate ? new PromptTemplate({
+       templateVars: ["context"],
+       template: options.nodeTemplate
+     }) : defaultTitleExtractorPromptTemplate;
+     this.combineTemplate = options?.combineTemplate ? new PromptTemplate({
+       templateVars: ["context"],
+       template: options.combineTemplate
+     }) : defaultTitleCombinePromptTemplate;
+   }
+   /**
+    * Extract titles from a list of nodes.
+    * @param {BaseNode[]} nodes Nodes to extract titles from.
+    * @returns {Promise<BaseNode<ExtractTitle>[]>} Titles extracted from the nodes.
+    */
+   async extract(nodes) {
+     const results = new Array(nodes.length);
+     const nodesToExtractTitle = [];
+     const nodeIndexes = [];
+     nodes.forEach((node, idx) => {
+       const text = node.getContent(this.metadataMode);
+       if (!text || text.trim() === "") {
+         results[idx] = { documentTitle: "" };
+       } else {
+         nodesToExtractTitle.push(node);
+         nodeIndexes.push(idx);
+       }
+     });
+     if (nodesToExtractTitle.length) {
+       const filteredNodes = this.filterNodes(nodesToExtractTitle);
+       if (filteredNodes.length) {
+         const nodesByDocument = this.separateNodesByDocument(filteredNodes);
+         const titlesByDocument = await this.extractTitles(nodesByDocument);
+         filteredNodes.forEach((node, i) => {
+           const nodeIndex = nodeIndexes[i];
+           const groupKey = node.sourceNode?.nodeId ?? node.id_;
+           if (typeof nodeIndex === "number") {
+             results[nodeIndex] = {
+               documentTitle: titlesByDocument[groupKey] ?? ""
+             };
+           }
+         });
+       }
+     }
+     return results;
+   }
+   filterNodes(nodes) {
+     return nodes.filter((node) => {
+       if (this.isTextNodeOnly && !(node instanceof TextNode)) {
+         return false;
+       }
+       return true;
+     });
+   }
+   separateNodesByDocument(nodes) {
+     const nodesByDocument = {};
+     for (const node of nodes) {
+       const groupKey = node.sourceNode?.nodeId ?? node.id_;
+       nodesByDocument[groupKey] = nodesByDocument[groupKey] || [];
+       nodesByDocument[groupKey].push(node);
+     }
+     return nodesByDocument;
+   }
+   async extractTitles(nodesByDocument) {
+     const titlesByDocument = {};
+     for (const [key, nodes] of Object.entries(nodesByDocument)) {
+       const titleCandidates = await this.getTitlesCandidates(nodes);
+       const combinedTitles = titleCandidates.join(", ");
+       const completion = await this.llm.doGenerate({
+         inputFormat: "messages",
+         mode: { type: "regular" },
+         prompt: [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "text",
+                 text: this.combineTemplate.format({
+                   context: combinedTitles
+                 })
+               }
+             ]
+           }
+         ]
+       });
+       let title = "";
+       if (typeof completion.text === "string") {
+         title = completion.text.trim();
+       } else {
+         console.warn("Title extraction LLM output was not a string:", completion.text);
+       }
+       titlesByDocument[key] = title;
+     }
+     return titlesByDocument;
+   }
+   async getTitlesCandidates(nodes) {
+     const titleJobs = nodes.map(async (node) => {
+       const completion = await this.llm.doGenerate({
+         inputFormat: "messages",
+         mode: { type: "regular" },
+         prompt: [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "text",
+                 text: this.nodeTemplate.format({
+                   context: node.getContent(MetadataMode.ALL)
+                 })
+               }
+             ]
+           }
+         ]
+       });
+       if (typeof completion.text === "string") {
+         return completion.text.trim();
+       } else {
+         console.warn("Title candidate extraction LLM output was not a string:", completion.text);
+         return "";
+       }
+     });
+     return await Promise.all(titleJobs);
+   }
+ };
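
A minimal sketch of the title extractor in isolation (TextNode comes from llamaindex; the input text is a placeholder):

    import { TextNode } from "llamaindex";
    const titler = new TitleExtractor({ nodes: 3 });
    const metadata = await titler.extract([new TextNode({ text: "Retrieval-augmented generation..." })]);
    // => [{ documentTitle: "..." }] — one entry per input node; empty nodes yield ""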
+ var SummaryExtractor = class extends BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * List of summaries to extract: 'self', 'prev', 'next'
+    * @type {string[]}
+    */
+   summaries;
+   /**
+    * The prompt template to use for the summary extractor.
+    * @type {string}
+    */
+   promptTemplate;
+   selfSummary;
+   prevSummary;
+   nextSummary;
+   constructor(options) {
+     const summaries = options?.summaries ?? ["self"];
+     if (summaries && !summaries.some((s) => ["self", "prev", "next"].includes(s)))
+       throw new Error("Summaries must be one of 'self', 'prev', 'next'");
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.summaries = summaries;
+     this.promptTemplate = options?.promptTemplate ? new PromptTemplate({
+       templateVars: ["context"],
+       template: options.promptTemplate
+     }) : defaultSummaryPrompt;
+     this.selfSummary = summaries?.includes("self") ?? false;
+     this.prevSummary = summaries?.includes("prev") ?? false;
+     this.nextSummary = summaries?.includes("next") ?? false;
+   }
+   /**
+    * Extract summary from a node.
+    * @param {BaseNode} node Node to extract summary from.
+    * @returns {Promise<string>} Summary extracted from the node.
+    */
+   async generateNodeSummary(node) {
+     const text = node.getContent(this.metadataMode);
+     if (!text || text.trim() === "") {
+       return "";
+     }
+     if (this.isTextNodeOnly && !(node instanceof TextNode)) {
+       return "";
+     }
+     const context = node.getContent(this.metadataMode);
+     const prompt = this.promptTemplate.format({
+       context
+     });
+     const result = await this.llm.doGenerate({
+       inputFormat: "messages",
+       mode: { type: "regular" },
+       prompt: [
+         {
+           role: "user",
+           content: [{ type: "text", text: prompt }]
+         }
+       ]
+     });
+     let summary = "";
+     if (typeof result.text === "string") {
+       summary = result.text.trim();
+     } else {
+       console.warn("Summary extraction LLM output was not a string:", result.text);
+     }
+     return summary.replace(STRIP_REGEX, "");
+   }
+   /**
+    * Extract summaries from a list of nodes.
+    * @param {BaseNode[]} nodes Nodes to extract summaries from.
+    * @returns {Promise<ExtractSummary[]>} Summaries extracted from the nodes.
+    */
+   async extract(nodes) {
+     if (!nodes.every((n) => n instanceof TextNode)) throw new Error("Only `TextNode` is allowed for `Summary` extractor");
+     const nodeSummaries = await Promise.all(nodes.map((node) => this.generateNodeSummary(node)));
+     const metadataList = nodes.map(() => ({}));
+     for (let i = 0; i < nodes.length; i++) {
+       if (i > 0 && this.prevSummary && nodeSummaries[i - 1]) {
+         metadataList[i]["prevSectionSummary"] = nodeSummaries[i - 1];
+       }
+       if (i < nodes.length - 1 && this.nextSummary && nodeSummaries[i + 1]) {
+         metadataList[i]["nextSectionSummary"] = nodeSummaries[i + 1];
+       }
+       if (this.selfSummary && nodeSummaries[i]) {
+         metadataList[i]["sectionSummary"] = nodeSummaries[i];
+       }
+     }
+     return metadataList;
+   }
+ };
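
Which of sectionSummary, prevSectionSummary, and nextSectionSummary land on each chunk follows directly from the summaries option; a short sketch (placeholder text, TextNode from llamaindex as above):

    const summarizer = new SummaryExtractor({ summaries: ["self", "prev", "next"] });
    const metadata = await summarizer.extract([
      new TextNode({ text: "Chapter 1 ..." }),
      new TextNode({ text: "Chapter 2 ..." })
    ]);
    // metadata[0]: sectionSummary + nextSectionSummary
    // metadata[1]: sectionSummary + prevSectionSummary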
+ var QuestionsAnsweredExtractor = class extends BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * Number of questions to generate.
+    * @type {number}
+    * @default 5
+    */
+   questions = 5;
+   /**
+    * The prompt template to use for the question extractor.
+    * @type {string}
+    */
+   promptTemplate;
+   /**
+    * Whether to use metadata for embeddings only
+    * @type {boolean}
+    * @default false
+    */
+   embeddingOnly = false;
+   /**
+    * Constructor for the QuestionsAnsweredExtractor class.
+    * @param {MastraLanguageModel} llm MastraLanguageModel instance.
+    * @param {number} questions Number of questions to generate.
+    * @param {QuestionExtractPrompt['template']} promptTemplate Optional custom prompt template (should include {context}).
+    * @param {boolean} embeddingOnly Whether to use metadata for embeddings only.
+    */
+   constructor(options) {
+     if (options?.questions && options.questions < 1) throw new Error("Questions must be greater than 0");
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.questions = options?.questions ?? 5;
+     this.promptTemplate = options?.promptTemplate ? new PromptTemplate({
+       templateVars: ["numQuestions", "context"],
+       template: options.promptTemplate
+     }).partialFormat({
+       numQuestions: "5"
+     }) : defaultQuestionExtractPrompt;
+     this.embeddingOnly = options?.embeddingOnly ?? false;
+   }
+   /**
+    * Extract answered questions from a node.
+    * @param {BaseNode} node Node to extract questions from.
+    * @returns {Promise<Array<ExtractQuestion> | Array<{}>>} Questions extracted from the node.
+    */
+   async extractQuestionsFromNode(node) {
+     const text = node.getContent(this.metadataMode);
+     if (!text || text.trim() === "") {
+       return { questionsThisExcerptCanAnswer: "" };
+     }
+     if (this.isTextNodeOnly && !(node instanceof TextNode)) {
+       return { questionsThisExcerptCanAnswer: "" };
+     }
+     const contextStr = node.getContent(this.metadataMode);
+     const prompt = this.promptTemplate.format({
+       context: contextStr,
+       numQuestions: this.questions.toString()
+     });
+     const questions = await this.llm.doGenerate({
+       inputFormat: "messages",
+       mode: { type: "regular" },
+       prompt: [
+         {
+           role: "user",
+           content: [{ type: "text", text: prompt }]
+         }
+       ]
+     });
+     let result = "";
+     try {
+       if (typeof questions.text === "string") {
+         result = questions.text.replace(STRIP_REGEX, "").trim();
+       } else {
+         console.warn("Question extraction LLM output was not a string:", questions.text);
+       }
+     } catch (err) {
+       console.warn("Question extraction failed:", err);
+     }
+     return {
+       questionsThisExcerptCanAnswer: result
+     };
+   }
+   /**
+    * Extract answered questions from a list of nodes.
+    * @param {BaseNode[]} nodes Nodes to extract questions from.
+    * @returns {Promise<Array<ExtractQuestion> | Array<{}>>} Questions extracted from the nodes.
+    */
+   async extract(nodes) {
+     const results = await Promise.all(nodes.map((node) => this.extractQuestionsFromNode(node)));
+     return results;
+   }
+ };
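
A short sketch of the questions extractor (the questions option is interpolated into the prompt as numQuestions; the output format is whatever the model returns as a single string):

    const qa = new QuestionsAnsweredExtractor({ questions: 3 });
    const [result] = await qa.extract([new TextNode({ text: "Vector stores index embeddings..." })]);
    // result => { questionsThisExcerptCanAnswer: "..." }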
+ var KeywordExtractor = class extends BaseExtractor {
+   /**
+    * MastraLanguageModel instance.
+    * @type {MastraLanguageModel}
+    */
+   llm;
+   /**
+    * Number of keywords to extract.
+    * @type {number}
+    * @default 5
+    */
+   keywords = 5;
+   /**
+    * The prompt template to use for the keyword extractor.
+    * @type {string}
+    */
+   promptTemplate;
+   /**
+    * Constructor for the KeywordExtractor class.
+    * @param {MastraLanguageModel} llm MastraLanguageModel instance.
+    * @param {number} keywords Number of keywords to extract.
+    * @param {string} [promptTemplate] Optional custom prompt template (must include {context})
+    * @throws {Error} If keywords is less than 1.
+    */
+   constructor(options) {
+     if (options?.keywords && options.keywords < 1) throw new Error("Keywords must be greater than 0");
+     super();
+     this.llm = options?.llm ?? baseLLM;
+     this.keywords = options?.keywords ?? 5;
+     this.promptTemplate = options?.promptTemplate ? new PromptTemplate({
+       templateVars: ["context", "maxKeywords"],
+       template: options.promptTemplate
+     }) : defaultKeywordExtractPrompt;
+   }
+   /**
+    * Extract keywords from a node. Returns an object with a comma-separated string of keywords, or an empty string if extraction fails.
+    * Adds error handling for malformed/empty LLM output.
+    * @param node Node to extract keywords from.
+    * @returns Keywords extracted from the node.
+    */
+   async extractKeywordsFromNodes(node) {
+     const text = node.getContent(this.metadataMode);
+     if (!text || text.trim() === "") {
+       return { excerptKeywords: "" };
+     }
+     if (this.isTextNodeOnly && !(node instanceof TextNode)) {
+       return { excerptKeywords: "" };
+     }
+     let keywords = "";
+     try {
+       const completion = await this.llm.doGenerate({
+         inputFormat: "messages",
+         mode: { type: "regular" },
+         prompt: [
+           {
+             role: "user",
+             content: [
+               {
+                 type: "text",
+                 text: this.promptTemplate.format({
+                   context: node.getContent(MetadataMode.ALL),
+                   maxKeywords: this.keywords.toString()
+                 })
+               }
+             ]
+           }
+         ]
+       });
+       if (typeof completion.text === "string") {
+         keywords = completion.text.trim();
+       } else {
+         console.warn("Keyword extraction LLM output was not a string:", completion.text);
+       }
+     } catch (err) {
+       console.warn("Keyword extraction failed:", err);
+     }
+     return { excerptKeywords: keywords };
+   }
+   /**
+    * Extract keywords from an array of nodes. Always returns an array (may be empty).
+    * @param nodes Nodes to extract keywords from.
+    * @returns Array of keyword extraction results.
+    */
+   async extract(nodes) {
+     if (!Array.isArray(nodes) || nodes.length === 0) return [];
+     const results = await Promise.all(nodes.map((node) => this.extractKeywordsFromNodes(node)));
+     return results;
+   }
+ };
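
The keyword extractor follows the same pattern, returning a comma-separated string per node (placeholder text):

    const keyworder = new KeywordExtractor({ keywords: 8 });
    const [kw] = await keyworder.extract([new TextNode({ text: "Chunking, embedding, reranking..." })]);
    // kw => { excerptKeywords: "chunking, embedding, reranking, ..." }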
  
  // src/document/types.ts
  var Language = /* @__PURE__ */ ((Language2) => {
@@ -508,7 +4612,7 @@ var HTMLSectionTransformer = class {
    headersToSplitOn;
    options;
    constructor(headersToSplitOn, options = {}) {
-     this.headersToSplitOn = Object.fromEntries(headersToSplitOn.map(([tag, name]) => [tag.toLowerCase(), name]));
+     this.headersToSplitOn = Object.fromEntries(headersToSplitOn.map(([tag, name14]) => [tag.toLowerCase(), name14]));
      this.options = options;
    }
    splitText(text) {
@@ -1102,7 +5206,7 @@ var MarkdownHeaderTransformer = class {
        continue;
      }
      let headerMatched = false;
-     for (const [sep, name] of this.headersToSplitOn) {
+     for (const [sep, name14] of this.headersToSplitOn) {
        if (strippedLine.startsWith(sep) && (strippedLine.length === sep.length || strippedLine[sep.length] === " ")) {
          headerMatched = true;
          if (currentContent.length > 0) {
@@ -1112,7 +5216,7 @@ var MarkdownHeaderTransformer = class {
          });
          currentContent = [];
        }
-       if (name !== null) {
+       if (name14 !== null) {
          const currentHeaderLevel = (sep.match(/#/g) || []).length;
          while (headerStack.length > 0 && headerStack?.[headerStack.length - 1].level >= currentHeaderLevel) {
            const poppedHeader = headerStack.pop();
@@ -1122,11 +5226,11 @@ var MarkdownHeaderTransformer = class {
        }
        const header = {
          level: currentHeaderLevel,
-         name,
+         name: name14,
          data: strippedLine.slice(sep.length).trim()
        };
        headerStack.push(header);
-       initialMetadata[name] = header.data;
+       initialMetadata[name14] = header.data;
      }
      linesWithMetadata.push({
        content: line,
@@ -1304,16 +5408,16 @@ var MDocument = class _MDocument {
      if (typeof title !== "undefined") {
        transformations.push(new TitleExtractor(typeof title === "boolean" ? {} : title));
        this.chunks = this.chunks.map(
-         (doc, i) => new Document({
+         (doc) => doc?.metadata?.docId ? new Document({
            ...doc,
            relationships: {
              [NodeRelationship.SOURCE]: {
-               nodeId: `doc-${i}`,
+               nodeId: doc.metadata.docId,
                nodeType: ObjectType.DOCUMENT,
                metadata: doc.metadata
              }
            }
-         })
+         }) : doc
        );
      }
      const pipeline = new IngestionPipeline({