@zenning/openai 1.4.4 → 1.4.6

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those versions.
@@ -1,8 +1,164 @@
+ // ../provider/dist/index.mjs
+ var marker = "vercel.ai.error";
+ var symbol = Symbol.for(marker);
+ var _a;
+ var _AISDKError = class _AISDKError2 extends Error {
+ /**
+ * Creates an AI SDK Error.
+ *
+ * @param {Object} params - The parameters for creating the error.
+ * @param {string} params.name - The name of the error.
+ * @param {string} params.message - The error message.
+ * @param {unknown} [params.cause] - The underlying cause of the error.
+ */
+ constructor({
+ name: name14,
+ message,
+ cause
+ }) {
+ super(message);
+ this[_a] = true;
+ this.name = name14;
+ this.cause = cause;
+ }
+ /**
+ * Checks if the given error is an AI SDK Error.
+ * @param {unknown} error - The error to check.
+ * @returns {boolean} True if the error is an AI SDK Error, false otherwise.
+ */
+ static isInstance(error) {
+ return _AISDKError2.hasMarker(error, marker);
+ }
+ static hasMarker(error, marker15) {
+ const markerSymbol = Symbol.for(marker15);
+ return error != null && typeof error === "object" && markerSymbol in error && typeof error[markerSymbol] === "boolean" && error[markerSymbol] === true;
+ }
+ };
+ _a = symbol;
+ var AISDKError = _AISDKError;
+ var name = "AI_APICallError";
+ var marker2 = `vercel.ai.error.${name}`;
+ var symbol2 = Symbol.for(marker2);
+ var _a2;
+ _a2 = symbol2;
+ var name2 = "AI_EmptyResponseBodyError";
+ var marker3 = `vercel.ai.error.${name2}`;
+ var symbol3 = Symbol.for(marker3);
+ var _a3;
+ _a3 = symbol3;
+ var name3 = "AI_InvalidArgumentError";
+ var marker4 = `vercel.ai.error.${name3}`;
+ var symbol4 = Symbol.for(marker4);
+ var _a4;
+ _a4 = symbol4;
+ var name4 = "AI_InvalidPromptError";
+ var marker5 = `vercel.ai.error.${name4}`;
+ var symbol5 = Symbol.for(marker5);
+ var _a5;
+ var InvalidPromptError = class extends AISDKError {
+ constructor({
+ prompt,
+ message,
+ cause
+ }) {
+ super({ name: name4, message: `Invalid prompt: ${message}`, cause });
+ this[_a5] = true;
+ this.prompt = prompt;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker5);
+ }
+ };
+ _a5 = symbol5;
+ var name5 = "AI_InvalidResponseDataError";
+ var marker6 = `vercel.ai.error.${name5}`;
+ var symbol6 = Symbol.for(marker6);
+ var _a6;
+ var InvalidResponseDataError = class extends AISDKError {
+ constructor({
+ data,
+ message = `Invalid response data: ${JSON.stringify(data)}.`
+ }) {
+ super({ name: name5, message });
+ this[_a6] = true;
+ this.data = data;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker6);
+ }
+ };
+ _a6 = symbol6;
+ var name6 = "AI_JSONParseError";
+ var marker7 = `vercel.ai.error.${name6}`;
+ var symbol7 = Symbol.for(marker7);
+ var _a7;
+ _a7 = symbol7;
+ var name7 = "AI_LoadAPIKeyError";
+ var marker8 = `vercel.ai.error.${name7}`;
+ var symbol8 = Symbol.for(marker8);
+ var _a8;
+ _a8 = symbol8;
+ var name8 = "AI_LoadSettingError";
+ var marker9 = `vercel.ai.error.${name8}`;
+ var symbol9 = Symbol.for(marker9);
+ var _a9;
+ _a9 = symbol9;
+ var name9 = "AI_NoContentGeneratedError";
+ var marker10 = `vercel.ai.error.${name9}`;
+ var symbol10 = Symbol.for(marker10);
+ var _a10;
+ _a10 = symbol10;
+ var name10 = "AI_NoSuchModelError";
+ var marker11 = `vercel.ai.error.${name10}`;
+ var symbol11 = Symbol.for(marker11);
+ var _a11;
+ _a11 = symbol11;
+ var name11 = "AI_TooManyEmbeddingValuesForCallError";
+ var marker12 = `vercel.ai.error.${name11}`;
+ var symbol12 = Symbol.for(marker12);
+ var _a12;
+ var TooManyEmbeddingValuesForCallError = class extends AISDKError {
+ constructor(options) {
+ super({
+ name: name11,
+ message: `Too many values for a single embedding call. The ${options.provider} model "${options.modelId}" can only embed up to ${options.maxEmbeddingsPerCall} values per call, but ${options.values.length} values were provided.`
+ });
+ this[_a12] = true;
+ this.provider = options.provider;
+ this.modelId = options.modelId;
+ this.maxEmbeddingsPerCall = options.maxEmbeddingsPerCall;
+ this.values = options.values;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker12);
+ }
+ };
+ _a12 = symbol12;
+ var name12 = "AI_TypeValidationError";
+ var marker13 = `vercel.ai.error.${name12}`;
+ var symbol13 = Symbol.for(marker13);
+ var _a13;
+ _a13 = symbol13;
+ var name13 = "AI_UnsupportedFunctionalityError";
+ var marker14 = `vercel.ai.error.${name13}`;
+ var symbol14 = Symbol.for(marker14);
+ var _a14;
+ var UnsupportedFunctionalityError = class extends AISDKError {
+ constructor({
+ functionality,
+ message = `'${functionality}' functionality not supported.`
+ }) {
+ super({ name: name13, message });
+ this[_a14] = true;
+ this.functionality = functionality;
+ }
+ static isInstance(error) {
+ return AISDKError.hasMarker(error, marker14);
+ }
+ };
+ _a14 = symbol14;
+
  // src/openai-chat-language-model.ts
- import {
- InvalidResponseDataError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError3
- } from "@ai-sdk/provider";
  import {
  combineHeaders,
  createEventSourceResponseHandler,
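
The hunk above is the heart of this release: the `@ai-sdk/provider` error classes are now vendored directly into the bundle, and the later hunks drop the corresponding imports. All of these classes share one technique: each stamps its instances with a `Symbol.for()` marker, so `isInstance()` keeps working even when two copies of a class end up in a dependency tree and `instanceof` would fail. A minimal sketch of that pattern, using standalone names rather than the bundle's own:

    // Sketch only: the same marker technique as the vendored classes above.
    const marker = "vercel.ai.error";
    const symbol = Symbol.for(marker); // process-wide shared symbol for this key

    class SketchError extends Error {
      constructor(message) {
        super(message);
        this[symbol] = true; // stamp the instance
      }
      static isInstance(error) {
        // Survives duplicated bundles where `instanceof` would fail.
        return error != null && typeof error === "object" && symbol in error && error[symbol] === true;
      }
    }

    console.log(SketchError.isInstance(new SketchError("x"))); // true
    console.log(SketchError.isInstance(new Error("x")));       // false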
@@ -14,9 +170,6 @@ import {
  import { z as z2 } from "zod";
 
  // src/convert-to-openai-chat-messages.ts
- import {
- UnsupportedFunctionalityError
- } from "@ai-sdk/provider";
  import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
  function convertToOpenAIChatMessages({
  prompt,
@@ -61,7 +214,7 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a15, _b, _c, _d;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
@@ -70,7 +223,7 @@ function convertToOpenAIChatMessages({
  return {
  type: "image_url",
  image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`,
  // OpenAI specific extension: image detail
  detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
  }
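
The change in this hunk is only the `_a` → `_a15` temporary rename (a side effect of vendoring the error classes into the same scope); the data-URL logic itself is unchanged: an image part is passed through as its URL, or inlined as a base64 data URL with `image/jpeg` assumed when no MIME type is given. A sketch of that fallback in isolation (the `toImageUrl` helper name is ours, and Node's `Buffer` stands in for `convertUint8ArrayToBase64`):

    // Sketch of the data-URL fallback used above (Node.js; helper name assumed).
    function toImageUrl(image, mimeType) {
      if (image instanceof URL) return image.toString();
      const base64 = Buffer.from(image).toString("base64");
      return `data:${mimeType ?? "image/jpeg"};base64,${base64}`;
    }

    console.log(toImageUrl(new URL("https://example.com/cat.png"))); // passed through
    console.log(toImageUrl(new Uint8Array([1, 2, 3])));              // data:image/jpeg;base64,AQID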
@@ -188,8 +341,8 @@ function convertToOpenAIChatMessages({
 
  // src/map-openai-chat-logprobs.ts
  function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
+ var _a15, _b;
+ return (_b = (_a15 = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a15.map(({ token, logprob, top_logprobs }) => ({
  token,
  logprob,
  topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
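
For context, `mapOpenAIChatLogProbsOutput` flattens OpenAI's logprobs payload into `{ token, logprob, topLogprobs }` records; the diff here is again just the temporary rename. A sketch of the same mapping, written with modern optional chaining in place of the bundle's compiled null checks:

    // Sketch: the mapping above, in uncompiled form.
    function mapLogProbs(logprobs) {
      return logprobs?.content?.map(({ token, logprob, top_logprobs }) => ({
        token,
        logprob,
        topLogprobs: top_logprobs
          ? top_logprobs.map(({ token, logprob }) => ({ token, logprob }))
          : [],
      }));
    }

    console.log(mapLogProbs({ content: [{ token: "Hi", logprob: -0.1, top_logprobs: [] }] }));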
@@ -249,16 +402,13 @@ function getResponseMetadata({
  }
 
  // src/openai-prepare-tools.ts
- import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError2
- } from "@ai-sdk/provider";
  function prepareTools({
  mode,
  useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ var _a15;
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
  return { tools: void 0, tool_choice: void 0, toolWarnings };
@@ -295,7 +445,7 @@ function prepareTools({
  toolWarnings
  };
  case "required":
- throw new UnsupportedFunctionalityError2({
+ throw new UnsupportedFunctionalityError({
  functionality: "useLegacyFunctionCalling and toolChoice: required"
  });
  default:
@@ -344,7 +494,7 @@ function prepareTools({
  };
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError2({
+ throw new UnsupportedFunctionalityError({
  functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
  });
  }
@@ -360,8 +510,8 @@ var OpenAIChatLanguageModel = class {
  this.config = config;
  }
  get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
+ var _a15;
+ return (_a15 = this.settings.structuredOutputs) != null ? _a15 : isReasoningModel(this.modelId);
  }
  get defaultObjectGenerationMode() {
  if (isAudioModel(this.modelId)) {
@@ -389,7 +539,7 @@ var OpenAIChatLanguageModel = class {
  seed,
  providerMetadata
  }) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
  const type = mode.type;
  const warnings = [];
  if (topK != null) {
@@ -407,12 +557,12 @@ var OpenAIChatLanguageModel = class {
  }
  const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
  if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new UnsupportedFunctionalityError3({
+ throw new UnsupportedFunctionalityError({
  functionality: "useLegacyFunctionCalling with parallelToolCalls"
  });
  }
  if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new UnsupportedFunctionalityError3({
+ throw new UnsupportedFunctionalityError({
  functionality: "structuredOutputs with useLegacyFunctionCalling"
  });
  }
@@ -444,7 +594,7 @@ var OpenAIChatLanguageModel = class {
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
+ name: (_a15 = responseFormat.name) != null ? _a15 : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
@@ -607,7 +757,7 @@ var OpenAIChatLanguageModel = class {
  }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
@@ -629,7 +779,7 @@ var OpenAIChatLanguageModel = class {
  });
  const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
+ const completionTokenDetails = (_a15 = response.usage) == null ? void 0 : _a15.completion_tokens_details;
  const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
@@ -654,10 +804,10 @@ var OpenAIChatLanguageModel = class {
  args: choice.message.function_call.arguments
  }
  ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
+ var _a16;
  return {
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+ toolCallId: (_a16 = toolCall.id) != null ? _a16 : generateId(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  };
@@ -756,7 +906,7 @@ var OpenAIChatLanguageModel = class {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -844,7 +994,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_a15 = toolCallDelta.function) == null ? void 0 : _a15.name) == null) {
  throw new InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -911,13 +1061,13 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
+ var _a15, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
  usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
+ promptTokens: (_a15 = usage.promptTokens) != null ? _a15 : NaN,
  completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
  },
  ...providerMetadata != null ? { providerMetadata } : {}
@@ -1043,11 +1193,11 @@ function isAudioModel(modelId) {
  return modelId.startsWith("gpt-4o-audio-preview");
  }
  function getSystemMessageMode(modelId) {
- var _a, _b;
+ var _a15, _b;
  if (!isReasoningModel(modelId)) {
  return "system";
  }
- return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
+ return (_b = (_a15 = reasoningModels[modelId]) == null ? void 0 : _a15.systemMessageMode) != null ? _b : "developer";
  }
  var reasoningModels = {
  "o1-mini": {
@@ -1083,9 +1233,6 @@ var reasoningModels = {
  };
 
  // src/openai-completion-language-model.ts
- import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError5
- } from "@ai-sdk/provider";
  import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
@@ -1095,10 +1242,6 @@ import {
  import { z as z3 } from "zod";
 
  // src/convert-to-openai-completion-prompt.ts
- import {
- InvalidPromptError,
- UnsupportedFunctionalityError as UnsupportedFunctionalityError4
- } from "@ai-sdk/provider";
  function convertToOpenAICompletionPrompt({
  prompt,
  inputFormat,
@@ -1130,7 +1273,7 @@ function convertToOpenAICompletionPrompt({
  return part.text;
  }
  case "image": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError({
  functionality: "images"
  });
  }
@@ -1149,7 +1292,7 @@ ${userMessage}
  return part.text;
  }
  case "tool-call": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError({
  functionality: "tool-call messages"
  });
  }
@@ -1162,7 +1305,7 @@ ${assistantMessage}
  break;
  }
  case "tool": {
- throw new UnsupportedFunctionalityError4({
+ throw new UnsupportedFunctionalityError({
  functionality: "tool messages"
  });
  }
@@ -1221,7 +1364,7 @@ var OpenAICompletionLanguageModel = class {
  responseFormat,
  seed
  }) {
- var _a;
+ var _a15;
  const type = mode.type;
  const warnings = [];
  if (topK != null) {
@@ -1262,25 +1405,25 @@ var OpenAICompletionLanguageModel = class {
  };
  switch (type) {
  case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new UnsupportedFunctionalityError5({
+ if ((_a15 = mode.tools) == null ? void 0 : _a15.length) {
+ throw new UnsupportedFunctionalityError({
  functionality: "tools"
  });
  }
  if (mode.toolChoice) {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError({
  functionality: "toolChoice"
  });
  }
  return { args: baseArgs, warnings };
  }
  case "object-json": {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError({
  functionality: "object-json mode"
  });
  }
  case "object-tool": {
- throw new UnsupportedFunctionalityError5({
+ throw new UnsupportedFunctionalityError({
  functionality: "object-tool mode"
  });
  }
@@ -1466,9 +1609,6 @@ var openaiCompletionChunkSchema = z3.union([
  ]);
 
  // src/openai-embedding-model.ts
- import {
- TooManyEmbeddingValuesForCallError
- } from "@ai-sdk/provider";
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
@@ -1486,12 +1626,12 @@ var OpenAIEmbeddingModel = class {
  return this.config.provider;
  }
  get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
+ var _a15;
+ return (_a15 = this.settings.maxEmbeddingsPerCall) != null ? _a15 : 2048;
  }
  get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
+ var _a15;
+ return (_a15 = this.settings.supportsParallelCalls) != null ? _a15 : true;
  }
  async doEmbed({
  values,
@@ -1563,8 +1703,8 @@ var OpenAIImageModel = class {
  this.specificationVersion = "v1";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a15, _b;
+ return (_b = (_a15 = this.settings.maxImagesPerCall) != null ? _a15 : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1579,7 +1719,7 @@ var OpenAIImageModel = class {
  headers,
  abortSignal
  }) {
- var _a, _b, _c, _d;
+ var _a15, _b, _c, _d;
  const warnings = [];
  if (aspectRatio != null) {
  warnings.push({
@@ -1591,7 +1731,7 @@ var OpenAIImageModel = class {
  if (seed != null) {
  warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
  const { value: response, responseHeaders } = await postJsonToApi4({
  url: this.config.url({
  path: "/images/generations",
@@ -1717,7 +1857,7 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
+ var _a15, _b, _c, _d, _e;
  const warnings = [];
  const openAIOptions = parseProviderOptions({
  provider: "openai",
@@ -1730,7 +1870,7 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: (_a = openAIOptions.include) != null ? _a : void 0,
+ include: (_a15 = openAIOptions.include) != null ? _a15 : void 0,
  language: (_b = openAIOptions.language) != null ? _b : void 0,
  prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
  temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
@@ -1749,8 +1889,8 @@ var OpenAITranscriptionModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f;
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ var _a15, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
  const { formData, warnings } = this.getArgs(options);
  const {
  value: response,
@@ -1872,8 +2012,8 @@ var OpenAISpeechModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c;
- const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ var _a15, _b, _c;
+ const currentDate = (_c = (_b = (_a15 = this.config._internal) == null ? void 0 : _a15.currentDate) == null ? void 0 : _b.call(_a15)) != null ? _c : /* @__PURE__ */ new Date();
  const { requestBody, warnings } = this.getArgs(options);
  const {
  value: audio,
@@ -1919,9 +2059,6 @@ import {
  import { z as z12 } from "zod";
 
  // src/responses/convert-to-openai-responses-messages.ts
- import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError6
- } from "@ai-sdk/provider";
  import { convertUint8ArrayToBase64 as convertUint8ArrayToBase642 } from "@ai-sdk/provider-utils";
  function convertToOpenAIResponsesMessages({
  prompt,
@@ -1961,7 +2098,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a15, _b, _c, _d;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1969,14 +2106,14 @@ function convertToOpenAIResponsesMessages({
  case "image": {
  return {
  type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
+ image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a15 = part.mimeType) != null ? _a15 : "image/jpeg"};base64,${convertUint8ArrayToBase642(part.image)}`,
  // OpenAI specific extension: image detail
  detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
  };
  }
  case "file": {
  if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError6({
+ throw new UnsupportedFunctionalityError({
  functionality: "File URLs in user messages"
  });
  }
@@ -1989,7 +2126,7 @@ function convertToOpenAIResponsesMessages({
  };
  }
  default: {
- throw new UnsupportedFunctionalityError6({
+ throw new UnsupportedFunctionalityError({
  functionality: "Only PDF files are supported in user messages"
  });
  }
@@ -2060,11 +2197,6 @@ function mapOpenAIResponseFinishReason({
  }
  }
 
- // src/responses/openai-responses-prepare-tools.ts
- import {
- UnsupportedFunctionalityError as UnsupportedFunctionalityError7
- } from "@ai-sdk/provider";
-
  // src/tool/code-interpreter.ts
  import { z as z8 } from "zod";
  var codeInterpreterArgsSchema = z8.object({
@@ -2133,8 +2265,8 @@ function prepareResponsesTools({
  mode,
  strict
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ var _a15;
+ const tools = ((_a15 = mode.tools) == null ? void 0 : _a15.length) ? mode.tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
  return { tools: void 0, tool_choice: void 0, toolWarnings };
@@ -2220,7 +2352,7 @@ function prepareResponsesTools({
  };
  default: {
  const _exhaustiveCheck = type;
- throw new UnsupportedFunctionalityError7({
+ throw new UnsupportedFunctionalityError({
  functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
  });
  }
@@ -2253,7 +2385,7 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata,
  responseFormat
  }) {
- var _a, _b, _c;
+ var _a15, _b, _c;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  const type = mode.type;
@@ -2298,7 +2430,7 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions: providerMetadata,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const isStrict = (_a15 = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a15 : true;
  console.log("openaiOptions", JSON.stringify(openaiOptions));
  const baseArgs = {
  model: this.modelId,
@@ -2416,7 +2548,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a15, _b, _c, _d, _e, _f, _g;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
@@ -2571,36 +2703,59 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
- console.log(JSON.stringify({
+ const reasoningSummary = (_b = (_a15 = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a15.summary) != null ? _b : null;
+ const allAnnotations = outputTextElements.flatMap((content) => content.annotations);
+ console.log("\u{1F4CB} Processing annotations in doGenerate:", JSON.stringify({
  msg: "ai-sdk: content annotations",
- annotations: outputTextElements.flatMap((content) => content.annotations)
- }));
+ count: allAnnotations.length,
+ annotations: allAnnotations
+ }, null, 2));
  return {
  text: outputTextElements.map((content) => content.text).join("\n"),
  sources: outputTextElements.flatMap(
  (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2, _d2, _e2, _f2, _g2, _h, _i, _j, _k;
- if (annotation.type === "url_citation") {
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : generateId2(),
- url: annotation.url,
- title: annotation.title
- };
- } else if (annotation.type === "file_citation") {
- return {
- sourceType: "url",
- id: (_f2 = (_e2 = (_d2 = this.config).generateId) == null ? void 0 : _e2.call(_d2)) != null ? _f2 : generateId2(),
- url: `file://${annotation.file_id}`,
- title: (_h = (_g2 = annotation.quote) != null ? _g2 : annotation.filename) != null ? _h : "Document"
- };
- } else {
+ var _a16, _b2, _c2, _d2, _e2, _f2, _g2, _h, _i, _j, _k, _l;
+ console.log("\u{1F517} Processing annotation for source:", JSON.stringify(annotation, null, 2));
+ try {
+ if (annotation.type === "url_citation") {
+ const urlSource = {
+ sourceType: "url",
+ id: (_c2 = (_b2 = (_a16 = this.config).generateId) == null ? void 0 : _b2.call(_a16)) != null ? _c2 : generateId2(),
+ url: annotation.url,
+ title: annotation.title
+ };
+ console.log("\u2705 Created URL source:", JSON.stringify(urlSource, null, 2));
+ return urlSource;
+ } else if (annotation.type === "file_citation") {
+ const documentSource = {
+ sourceType: "document",
+ id: (_f2 = (_e2 = (_d2 = this.config).generateId) == null ? void 0 : _e2.call(_d2)) != null ? _f2 : generateId2(),
+ mediaType: "text/plain",
+ title: annotation.quote || annotation.filename || "Document",
+ filename: annotation.filename,
+ quote: annotation.quote
+ };
+ console.log("\u{1F4C4} Created document source:", JSON.stringify(documentSource, null, 2));
+ return documentSource;
+ } else {
+ console.log("\u26A0\uFE0F Unknown annotation type in doGenerate:", annotation.type);
+ return {
+ sourceType: "url",
+ id: (_i = (_h = (_g2 = this.config).generateId) == null ? void 0 : _h.call(_g2)) != null ? _i : generateId2(),
+ url: "",
+ title: "Unknown Source"
+ };
+ }
+ } catch (error) {
+ console.error("\u274C Error creating source in doGenerate:", {
+ annotation,
+ error: error instanceof Error ? error.message : String(error)
+ });
  return {
  sourceType: "url",
- id: (_k = (_j = (_i = this.config).generateId) == null ? void 0 : _j.call(_i)) != null ? _k : generateId2(),
+ id: (_l = (_k = (_j = this.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : generateId2(),
  url: "",
- title: "Unknown Source"
+ title: "Error Source"
  };
  }
  })
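
This hunk carries the main behavioral change of the release: in doGenerate, file_citation annotations now map to a `sourceType: "document"` source with mediaType, filename, and quote, where 1.4.4 coerced them into `file://` URL sources; unknown annotation types and mapping failures now yield placeholder URL sources instead of propagating. The shapes a consumer can expect, with invented field values for illustration:

    // Illustrative shapes only; the values are invented.
    const urlSource = {
      sourceType: "url",
      id: "src_1",
      url: "https://example.com/article",
      title: "Example article"
    };
    const documentSource = {
      sourceType: "document",
      id: "src_2",
      mediaType: "text/plain",
      title: "Quarterly report",   // quote || filename || "Document"
      filename: "report.pdf",
      quote: "Revenue grew 12%."
    };

    for (const source of [urlSource, documentSource]) {
      console.log(source.sourceType === "url" ? source.url : source.filename);
    }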
@@ -2645,24 +2800,40 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doStream(options) {
+ console.log("\u{1F680} Starting doStream with options:", JSON.stringify({
+ modelId: this.modelId,
+ hasAbortSignal: !!options.abortSignal
+ }, null, 2));
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await postJsonToApi6({
- url: this.config.url({
- path: "/responses",
- modelId: this.modelId
- }),
- headers: combineHeaders7(this.config.headers(), options.headers),
- body: {
- ...body,
- stream: true
- },
- failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: createEventSourceResponseHandler3(
- openaiResponsesChunkSchema
- ),
- abortSignal: options.abortSignal,
- fetch: this.config.fetch
- });
+ console.log("\u{1F4E4} Request body:", JSON.stringify(body, null, 2));
+ let response;
+ let responseHeaders;
+ try {
+ console.log("\u{1F4E1} Making API request...");
+ const result = await postJsonToApi6({
+ url: this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders7(this.config.headers(), options.headers),
+ body: {
+ ...body,
+ stream: true
+ },
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: createEventSourceResponseHandler3(
+ openaiResponsesChunkSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ response = result.value;
+ responseHeaders = result.responseHeaders;
+ console.log("\u2705 API request successful, starting stream processing");
+ } catch (error) {
+ console.error("\u274C API request failed:", error);
+ throw error;
+ }
  const self = this;
  let finishReason = "unknown";
  let promptTokens = NaN;
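
doStream now wraps the /responses request in a log-and-rethrow guard rather than letting `postJsonToApi6` reject unobserved. The pattern in isolation (the helper name is ours):

    // Sketch of the wrap-and-rethrow guard added above.
    async function requestWithLogging(doRequest) {
      try {
        return await doRequest();
      } catch (error) {
        console.error("API request failed:", error);
        throw error; // callers still see the original error
      }
    }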
@@ -2676,172 +2847,214 @@ var OpenAIResponsesLanguageModel = class {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
- if (!chunk.success) {
- finishReason = "error";
- controller.enqueue({ type: "error", error: chunk.error });
- return;
- }
- const value = chunk.value;
- if (isResponseOutputItemAddedChunk(value)) {
- if (value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = {
- toolName: value.item.name,
- toolCallId: value.item.call_id
- };
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- argsTextDelta: value.item.arguments
- });
- } else if (value.item.type === "web_search_call") {
- ongoingToolCalls[value.output_index] = {
- toolName: "web_search_preview",
- toolCallId: value.item.id
- };
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ try {
+ console.log("\u{1F4E6} Processing chunk:", JSON.stringify(chunk, null, 2));
+ if (!chunk.success) {
+ console.error("\u274C Chunk parsing failed:", chunk.error);
+ finishReason = "error";
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ console.log("\u{1F4E5} Chunk value type:", value.type);
+ if (isResponseOutputItemAddedChunk(value)) {
+ console.log("\u{1F4DD} Output item added:", JSON.stringify(value, null, 2));
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: value.item.name,
+ toolCallId: value.item.call_id
+ };
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: value.item.call_id,
+ toolName: value.item.name,
+ argsTextDelta: value.item.arguments
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ argsTextDelta: JSON.stringify({ action: value.item.action })
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ argsTextDelta: ""
+ });
+ } else if (value.item.type === "file_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "file_search",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: value.item.id,
+ toolName: "file_search",
+ argsTextDelta: ""
+ });
+ }
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+ console.log("\u{1F527} Function call arguments delta:", JSON.stringify(value, null, 2));
+ const toolCall = ongoingToolCalls[value.output_index];
+ if (toolCall != null) {
+ controller.enqueue({
+ type: "tool-call-delta",
+ toolCallType: "function",
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ argsTextDelta: value.delta
+ });
+ }
+ } else if (isResponseCreatedChunk(value)) {
+ console.log("\u{1F680} Response created:", JSON.stringify(value, null, 2));
+ responseId = value.response.id;
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: value.item.id,
- toolName: "web_search_preview",
- argsTextDelta: JSON.stringify({ action: value.item.action })
+ type: "response-metadata",
+ id: value.response.id,
+ timestamp: new Date(value.response.created_at * 1e3),
+ modelId: value.response.model
  });
- } else if (value.item.type === "computer_call") {
- ongoingToolCalls[value.output_index] = {
- toolName: "computer_use",
- toolCallId: value.item.id
- };
+ } else if (isTextDeltaChunk(value)) {
+ console.log("\u{1F4DD} Text delta chunk:", JSON.stringify(value, null, 2));
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: value.item.id,
- toolName: "computer_use",
- argsTextDelta: ""
+ type: "text-delta",
+ textDelta: value.delta
  });
- } else if (value.item.type === "file_search_call") {
- ongoingToolCalls[value.output_index] = {
- toolName: "file_search",
- toolCallId: value.item.id
- };
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ console.log("\u{1F9E0} Reasoning summary delta:", JSON.stringify(value, null, 2));
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: value.item.id,
- toolName: "file_search",
- argsTextDelta: ""
+ type: "reasoning",
+ textDelta: value.delta
  });
- }
- } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
- const toolCall = ongoingToolCalls[value.output_index];
- if (toolCall != null) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ console.log("\u2705 Output item done:", JSON.stringify(value, null, 2));
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: value.item.call_id,
+ toolName: value.item.name,
+ args: value.item.arguments
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ args: JSON.stringify({ action: value.item.action })
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ args: ""
+ });
+ } else if (value.item.type === "file_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: value.item.id,
+ toolName: "file_search",
+ args: JSON.stringify({
+ queries: value.item.queries,
+ results: value.item.results
+ })
+ });
+ }
+ } else if (isResponseFinishedChunk(value)) {
+ console.log("\u{1F3C1} Response finished:", JSON.stringify(value, null, 2));
+ finishReason = mapOpenAIResponseFinishReason({
+ finishReason: (_a15 = value.response.incomplete_details) == null ? void 0 : _a15.reason,
+ hasToolCalls
  });
+ promptTokens = value.response.usage.input_tokens;
+ completionTokens = value.response.usage.output_tokens;
+ cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
+ reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ } else if (isResponseAnnotationAddedChunk(value)) {
+ console.log("\u{1F50D} Processing annotation chunk:", JSON.stringify({
+ type: value.type,
+ annotation: value.annotation
+ }, null, 2));
+ try {
+ if (value.annotation.type === "url_citation") {
+ const urlSource = {
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ url: value.annotation.url,
+ title: value.annotation.title
+ };
+ console.log("\u2705 Creating URL source:", JSON.stringify(urlSource, null, 2));
+ controller.enqueue({
+ type: "source",
+ source: urlSource
+ });
+ console.log("\u2705 URL source enqueued successfully");
+ } else if (value.annotation.type === "file_citation") {
+ const documentSource = {
+ sourceType: "document",
+ id: (_k = (_j = (_i = self.config).generateId) == null ? void 0 : _j.call(_i)) != null ? _k : generateId2(),
+ mediaType: "text/plain",
+ title: value.annotation.quote || value.annotation.filename || "Document",
+ filename: value.annotation.filename,
+ quote: value.annotation.quote
+ };
+ console.log("\u{1F4C4} Creating document source:", JSON.stringify(documentSource, null, 2));
+ controller.enqueue({
+ type: "source",
+ source: documentSource
+ });
+ console.log("\u2705 Document source enqueued successfully");
+ } else {
+ console.log("\u26A0\uFE0F Unknown annotation type:", value.annotation.type);
+ }
+ } catch (error) {
+ console.error("\u274C Error processing annotation:", {
+ annotation: value.annotation,
+ error: error instanceof Error ? error.message : String(error),
+ stack: error instanceof Error ? error.stack : void 0
+ });
+ }
+ } else {
+ console.log("\u2753 Unhandled chunk type:", value.type, JSON.stringify(value, null, 2));
  }
- } else if (isResponseCreatedChunk(value)) {
- responseId = value.response.id;
- controller.enqueue({
- type: "response-metadata",
- id: value.response.id,
- timestamp: new Date(value.response.created_at * 1e3),
- modelId: value.response.model
- });
- } else if (isTextDeltaChunk(value)) {
- controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ } catch (error) {
+ console.error("\u{1F4A5} FATAL ERROR in chunk processing:", {
+ error: error instanceof Error ? error.message : String(error),
+ stack: error instanceof Error ? error.stack : void 0
  });
- } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ finishReason = "error";
  controller.enqueue({
- type: "reasoning",
- textDelta: value.delta
- });
- } else if (isResponseOutputItemDoneChunk(value)) {
- if (value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- args: value.item.arguments
- });
- } else if (value.item.type === "web_search_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.id,
- toolName: "web_search_preview",
- args: JSON.stringify({ action: value.item.action })
- });
- } else if (value.item.type === "computer_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.id,
- toolName: "computer_use",
- args: ""
- });
- } else if (value.item.type === "file_search_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.id,
- toolName: "file_search",
- args: JSON.stringify({
- queries: value.item.queries,
- results: value.item.results
- })
- });
- }
- } else if (isResponseFinishedChunk(value)) {
- finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls
+ type: "error",
+ error: error instanceof Error ? error : new Error(String(error))
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
- } else if (isResponseAnnotationAddedChunk(value)) {
- console.log(JSON.stringify({
- msg: "ai-sdk: source (stream)",
- source: value.annotation
- }));
- if (value.annotation.type === "url_citation") {
- controller.enqueue({
- type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
- url: value.annotation.url,
- title: value.annotation.title
- }
- });
- } else if (value.annotation.type === "file_citation") {
- controller.enqueue({
- type: "source",
- source: {
- sourceType: "url",
- id: (_k = (_j = (_i = self.config).generateId) == null ? void 0 : _j.call(_i)) != null ? _k : generateId2(),
- url: `file://${value.annotation.file_id}`,
- title: (_m = (_l = value.annotation.quote) != null ? _l : value.annotation.filename) != null ? _m : "Document"
- }
- });
- }
  }
  },
  flush(controller) {