@ai-sdk/openai 3.0.33 → 3.0.34

This diff shows the changes between publicly available package versions that have been released to one of the supported registries, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -279,6 +279,7 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
279
279
  item: {
280
280
  type: "message";
281
281
  id: string;
282
+ phase?: "commentary" | "final_answer" | null | undefined;
282
283
  } | {
283
284
  type: "reasoning";
284
285
  id: string;
@@ -374,6 +375,7 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
374
375
  item: {
375
376
  type: "message";
376
377
  id: string;
378
+ phase?: "commentary" | "final_answer" | null | undefined;
377
379
  } | {
378
380
  type: "reasoning";
379
381
  id: string;
@@ -634,6 +636,7 @@ type OpenaiResponsesProviderMetadata = {
634
636
  };
635
637
  type ResponsesTextProviderMetadata = {
636
638
  itemId: string;
639
+ phase?: 'commentary' | 'final_answer' | null;
637
640
  annotations?: Array<ResponsesOutputTextAnnotationProviderMetadata>;
638
641
  };
639
642
  type OpenaiResponsesTextProviderMetadata = {
@@ -279,6 +279,7 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
279
279
  item: {
280
280
  type: "message";
281
281
  id: string;
282
+ phase?: "commentary" | "final_answer" | null | undefined;
282
283
  } | {
283
284
  type: "reasoning";
284
285
  id: string;
@@ -374,6 +375,7 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
374
375
  item: {
375
376
  type: "message";
376
377
  id: string;
378
+ phase?: "commentary" | "final_answer" | null | undefined;
377
379
  } | {
378
380
  type: "reasoning";
379
381
  id: string;
@@ -634,6 +636,7 @@ type OpenaiResponsesProviderMetadata = {
634
636
  };
635
637
  type ResponsesTextProviderMetadata = {
636
638
  itemId: string;
639
+ phase?: 'commentary' | 'final_answer' | null;
637
640
  annotations?: Array<ResponsesOutputTextAnnotationProviderMetadata>;
638
641
  };
639
642
  type OpenaiResponsesTextProviderMetadata = {
@@ -1780,6 +1780,7 @@ var modelMaxImagesPerCall = {
1780
1780
  "chatgpt-image-latest": 10
1781
1781
  };
1782
1782
  var defaultResponseFormatPrefixes = [
1783
+ "chatgpt-image-",
1783
1784
  "gpt-image-1-mini",
1784
1785
  "gpt-image-1.5",
1785
1786
  "gpt-image-1"
@@ -2565,7 +2566,7 @@ async function convertToOpenAIResponsesInput({
2565
2566
  hasShellTool = false,
2566
2567
  hasApplyPatchTool = false
2567
2568
  }) {
2568
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2569
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
2569
2570
  const input = [];
2570
2571
  const warnings = [];
2571
2572
  const processedApprovalIds = /* @__PURE__ */ new Set();
@@ -2646,7 +2647,9 @@ async function convertToOpenAIResponsesInput({
2646
2647
  for (const part of content) {
2647
2648
  switch (part.type) {
2648
2649
  case "text": {
2649
- const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a[providerOptionsName]) == null ? void 0 : _b.itemId;
2650
+ const providerOpts = (_a = part.providerOptions) == null ? void 0 : _a[providerOptionsName];
2651
+ const id = providerOpts == null ? void 0 : providerOpts.itemId;
2652
+ const phase = providerOpts == null ? void 0 : providerOpts.phase;
2650
2653
  if (hasConversation && id != null) {
2651
2654
  break;
2652
2655
  }
@@ -2657,12 +2660,13 @@ async function convertToOpenAIResponsesInput({
2657
2660
  input.push({
2658
2661
  role: "assistant",
2659
2662
  content: [{ type: "output_text", text: part.text }],
2660
- id
2663
+ id,
2664
+ ...phase != null && { phase }
2661
2665
  });
2662
2666
  break;
2663
2667
  }
2664
2668
  case "tool-call": {
2665
- const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c[providerOptionsName]) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e[providerOptionsName]) == null ? void 0 : _f.itemId;
2669
+ const id = (_f = (_c = (_b = part.providerOptions) == null ? void 0 : _b[providerOptionsName]) == null ? void 0 : _c.itemId) != null ? _f : (_e = (_d = part.providerMetadata) == null ? void 0 : _d[providerOptionsName]) == null ? void 0 : _e.itemId;
2666
2670
  if (hasConversation && id != null) {
2667
2671
  break;
2668
2672
  }
@@ -2773,7 +2777,7 @@ async function convertToOpenAIResponsesInput({
2773
2777
  break;
2774
2778
  }
2775
2779
  if (store) {
2776
- const itemId = (_j = (_i = (_h = part.providerOptions) == null ? void 0 : _h[providerOptionsName]) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
2780
+ const itemId = (_i = (_h = (_g = part.providerOptions) == null ? void 0 : _g[providerOptionsName]) == null ? void 0 : _h.itemId) != null ? _i : part.toolCallId;
2777
2781
  input.push({ type: "item_reference", id: itemId });
2778
2782
  } else {
2779
2783
  warnings.push({
@@ -2867,7 +2871,7 @@ async function convertToOpenAIResponsesInput({
2867
2871
  }
2868
2872
  const output = part.output;
2869
2873
  if (output.type === "execution-denied") {
2870
- const approvalId = (_l = (_k = output.providerOptions) == null ? void 0 : _k.openai) == null ? void 0 : _l.approvalId;
2874
+ const approvalId = (_k = (_j = output.providerOptions) == null ? void 0 : _j.openai) == null ? void 0 : _k.approvalId;
2871
2875
  if (approvalId) {
2872
2876
  continue;
2873
2877
  }
@@ -2926,7 +2930,7 @@ async function convertToOpenAIResponsesInput({
2926
2930
  contentValue = output.value;
2927
2931
  break;
2928
2932
  case "execution-denied":
2929
- contentValue = (_m = output.reason) != null ? _m : "Tool execution denied.";
2933
+ contentValue = (_l = output.reason) != null ? _l : "Tool execution denied.";
2930
2934
  break;
2931
2935
  case "json":
2932
2936
  case "error-json":
@@ -3059,7 +3063,8 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
3059
3063
  item: import_v416.z.discriminatedUnion("type", [
3060
3064
  import_v416.z.object({
3061
3065
  type: import_v416.z.literal("message"),
3062
- id: import_v416.z.string()
3066
+ id: import_v416.z.string(),
3067
+ phase: import_v416.z.enum(["commentary", "final_answer"]).nullish()
3063
3068
  }),
3064
3069
  import_v416.z.object({
3065
3070
  type: import_v416.z.literal("reasoning"),
@@ -3176,7 +3181,8 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
3176
3181
  item: import_v416.z.discriminatedUnion("type", [
3177
3182
  import_v416.z.object({
3178
3183
  type: import_v416.z.literal("message"),
3179
- id: import_v416.z.string()
3184
+ id: import_v416.z.string(),
3185
+ phase: import_v416.z.enum(["commentary", "final_answer"]).nullish()
3180
3186
  }),
3181
3187
  import_v416.z.object({
3182
3188
  type: import_v416.z.literal("reasoning"),
@@ -3488,6 +3494,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils23.lazySchema)(
3488
3494
  type: import_v416.z.literal("message"),
3489
3495
  role: import_v416.z.literal("assistant"),
3490
3496
  id: import_v416.z.string(),
3497
+ phase: import_v416.z.enum(["commentary", "final_answer"]).nullish(),
3491
3498
  content: import_v416.z.array(
3492
3499
  import_v416.z.object({
3493
3500
  type: import_v416.z.literal("output_text"),
@@ -4912,6 +4919,7 @@ var OpenAIResponsesLanguageModel = class {
4912
4919
  }
4913
4920
  const providerMetadata2 = {
4914
4921
  itemId: part.id,
4922
+ ...part.phase != null && { phase: part.phase },
4915
4923
  ...contentPart.annotations.length > 0 && {
4916
4924
  annotations: contentPart.annotations
4917
4925
  }
@@ -5226,6 +5234,7 @@ var OpenAIResponsesLanguageModel = class {
5226
5234
  let responseId = null;
5227
5235
  const ongoingToolCalls = {};
5228
5236
  const ongoingAnnotations = [];
5237
+ let activeMessagePhase;
5229
5238
  let hasFunctionCall = false;
5230
5239
  const activeReasoning = {};
5231
5240
  let serviceTier;
@@ -5236,7 +5245,7 @@ var OpenAIResponsesLanguageModel = class {
5236
5245
  controller.enqueue({ type: "stream-start", warnings });
5237
5246
  },
5238
5247
  transform(chunk, controller) {
5239
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D;
5248
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F;
5240
5249
  if (options.includeRawChunks) {
5241
5250
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
5242
5251
  }
@@ -5377,12 +5386,16 @@ var OpenAIResponsesLanguageModel = class {
5377
5386
  } else if (value.item.type === "shell_call_output") {
5378
5387
  } else if (value.item.type === "message") {
5379
5388
  ongoingAnnotations.splice(0, ongoingAnnotations.length);
5389
+ activeMessagePhase = (_a = value.item.phase) != null ? _a : void 0;
5380
5390
  controller.enqueue({
5381
5391
  type: "text-start",
5382
5392
  id: value.item.id,
5383
5393
  providerMetadata: {
5384
5394
  [providerOptionsName]: {
5385
- itemId: value.item.id
5395
+ itemId: value.item.id,
5396
+ ...value.item.phase != null && {
5397
+ phase: value.item.phase
5398
+ }
5386
5399
  }
5387
5400
  }
5388
5401
  });
@@ -5397,19 +5410,22 @@ var OpenAIResponsesLanguageModel = class {
5397
5410
  providerMetadata: {
5398
5411
  [providerOptionsName]: {
5399
5412
  itemId: value.item.id,
5400
- reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
5413
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
5401
5414
  }
5402
5415
  }
5403
5416
  });
5404
5417
  }
5405
5418
  } else if (isResponseOutputItemDoneChunk(value)) {
5406
5419
  if (value.item.type === "message") {
5420
+ const phase = (_c = value.item.phase) != null ? _c : activeMessagePhase;
5421
+ activeMessagePhase = void 0;
5407
5422
  controller.enqueue({
5408
5423
  type: "text-end",
5409
5424
  id: value.item.id,
5410
5425
  providerMetadata: {
5411
5426
  [providerOptionsName]: {
5412
5427
  itemId: value.item.id,
5428
+ ...phase != null && { phase },
5413
5429
  ...ongoingAnnotations.length > 0 && {
5414
5430
  annotations: ongoingAnnotations
5415
5431
  }
@@ -5474,13 +5490,13 @@ var OpenAIResponsesLanguageModel = class {
5474
5490
  toolName: toolNameMapping.toCustomToolName("file_search"),
5475
5491
  result: {
5476
5492
  queries: value.item.queries,
5477
- results: (_c = (_b = value.item.results) == null ? void 0 : _b.map((result) => ({
5493
+ results: (_e = (_d = value.item.results) == null ? void 0 : _d.map((result) => ({
5478
5494
  attributes: result.attributes,
5479
5495
  fileId: result.file_id,
5480
5496
  filename: result.filename,
5481
5497
  score: result.score,
5482
5498
  text: result.text
5483
- }))) != null ? _c : null
5499
+ }))) != null ? _e : null
5484
5500
  }
5485
5501
  });
5486
5502
  } else if (value.item.type === "code_interpreter_call") {
@@ -5504,10 +5520,10 @@ var OpenAIResponsesLanguageModel = class {
5504
5520
  });
5505
5521
  } else if (value.item.type === "mcp_call") {
5506
5522
  ongoingToolCalls[value.output_index] = void 0;
5507
- const approvalRequestId = (_d = value.item.approval_request_id) != null ? _d : void 0;
5508
- const aliasedToolCallId = approvalRequestId != null ? (_f = (_e = approvalRequestIdToDummyToolCallIdFromStream.get(
5523
+ const approvalRequestId = (_f = value.item.approval_request_id) != null ? _f : void 0;
5524
+ const aliasedToolCallId = approvalRequestId != null ? (_h = (_g = approvalRequestIdToDummyToolCallIdFromStream.get(
5509
5525
  approvalRequestId
5510
- )) != null ? _e : approvalRequestIdToDummyToolCallIdFromPrompt[approvalRequestId]) != null ? _f : value.item.id : value.item.id;
5526
+ )) != null ? _g : approvalRequestIdToDummyToolCallIdFromPrompt[approvalRequestId]) != null ? _h : value.item.id : value.item.id;
5511
5527
  const toolName = `mcp.${value.item.name}`;
5512
5528
  controller.enqueue({
5513
5529
  type: "tool-call",
@@ -5577,8 +5593,8 @@ var OpenAIResponsesLanguageModel = class {
5577
5593
  ongoingToolCalls[value.output_index] = void 0;
5578
5594
  } else if (value.item.type === "mcp_approval_request") {
5579
5595
  ongoingToolCalls[value.output_index] = void 0;
5580
- const dummyToolCallId = (_i = (_h = (_g = self.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils32.generateId)();
5581
- const approvalRequestId = (_j = value.item.approval_request_id) != null ? _j : value.item.id;
5596
+ const dummyToolCallId = (_k = (_j = (_i = self.config).generateId) == null ? void 0 : _j.call(_i)) != null ? _k : (0, import_provider_utils32.generateId)();
5597
+ const approvalRequestId = (_l = value.item.approval_request_id) != null ? _l : value.item.id;
5582
5598
  approvalRequestIdToDummyToolCallIdFromStream.set(
5583
5599
  approvalRequestId,
5584
5600
  dummyToolCallId
@@ -5667,7 +5683,7 @@ var OpenAIResponsesLanguageModel = class {
5667
5683
  providerMetadata: {
5668
5684
  [providerOptionsName]: {
5669
5685
  itemId: value.item.id,
5670
- reasoningEncryptedContent: (_k = value.item.encrypted_content) != null ? _k : null
5686
+ reasoningEncryptedContent: (_m = value.item.encrypted_content) != null ? _m : null
5671
5687
  }
5672
5688
  }
5673
5689
  });
@@ -5771,7 +5787,7 @@ var OpenAIResponsesLanguageModel = class {
5771
5787
  id: value.item_id,
5772
5788
  delta: value.delta
5773
5789
  });
5774
- if (((_m = (_l = options.providerOptions) == null ? void 0 : _l[providerOptionsName]) == null ? void 0 : _m.logprobs) && value.logprobs) {
5790
+ if (((_o = (_n = options.providerOptions) == null ? void 0 : _n[providerOptionsName]) == null ? void 0 : _o.logprobs) && value.logprobs) {
5775
5791
  logprobs.push(value.logprobs);
5776
5792
  }
5777
5793
  } else if (value.type === "response.reasoning_summary_part.added") {
@@ -5800,7 +5816,7 @@ var OpenAIResponsesLanguageModel = class {
5800
5816
  providerMetadata: {
5801
5817
  [providerOptionsName]: {
5802
5818
  itemId: value.item_id,
5803
- reasoningEncryptedContent: (_o = (_n = activeReasoning[value.item_id]) == null ? void 0 : _n.encryptedContent) != null ? _o : null
5819
+ reasoningEncryptedContent: (_q = (_p = activeReasoning[value.item_id]) == null ? void 0 : _p.encryptedContent) != null ? _q : null
5804
5820
  }
5805
5821
  }
5806
5822
  });
@@ -5834,10 +5850,10 @@ var OpenAIResponsesLanguageModel = class {
5834
5850
  } else if (isResponseFinishedChunk(value)) {
5835
5851
  finishReason = {
5836
5852
  unified: mapOpenAIResponseFinishReason({
5837
- finishReason: (_p = value.response.incomplete_details) == null ? void 0 : _p.reason,
5853
+ finishReason: (_r = value.response.incomplete_details) == null ? void 0 : _r.reason,
5838
5854
  hasFunctionCall
5839
5855
  }),
5840
- raw: (_r = (_q = value.response.incomplete_details) == null ? void 0 : _q.reason) != null ? _r : void 0
5856
+ raw: (_t = (_s = value.response.incomplete_details) == null ? void 0 : _s.reason) != null ? _t : void 0
5841
5857
  };
5842
5858
  usage = value.response.usage;
5843
5859
  if (typeof value.response.service_tier === "string") {
@@ -5849,7 +5865,7 @@ var OpenAIResponsesLanguageModel = class {
5849
5865
  controller.enqueue({
5850
5866
  type: "source",
5851
5867
  sourceType: "url",
5852
- id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : (0, import_provider_utils32.generateId)(),
5868
+ id: (_w = (_v = (_u = self.config).generateId) == null ? void 0 : _v.call(_u)) != null ? _w : (0, import_provider_utils32.generateId)(),
5853
5869
  url: value.annotation.url,
5854
5870
  title: value.annotation.title
5855
5871
  });
@@ -5857,7 +5873,7 @@ var OpenAIResponsesLanguageModel = class {
5857
5873
  controller.enqueue({
5858
5874
  type: "source",
5859
5875
  sourceType: "document",
5860
- id: (_x = (_w = (_v = self.config).generateId) == null ? void 0 : _w.call(_v)) != null ? _x : (0, import_provider_utils32.generateId)(),
5876
+ id: (_z = (_y = (_x = self.config).generateId) == null ? void 0 : _y.call(_x)) != null ? _z : (0, import_provider_utils32.generateId)(),
5861
5877
  mediaType: "text/plain",
5862
5878
  title: value.annotation.filename,
5863
5879
  filename: value.annotation.filename,
@@ -5873,7 +5889,7 @@ var OpenAIResponsesLanguageModel = class {
5873
5889
  controller.enqueue({
5874
5890
  type: "source",
5875
5891
  sourceType: "document",
5876
- id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : (0, import_provider_utils32.generateId)(),
5892
+ id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : (0, import_provider_utils32.generateId)(),
5877
5893
  mediaType: "text/plain",
5878
5894
  title: value.annotation.filename,
5879
5895
  filename: value.annotation.filename,
@@ -5889,7 +5905,7 @@ var OpenAIResponsesLanguageModel = class {
5889
5905
  controller.enqueue({
5890
5906
  type: "source",
5891
5907
  sourceType: "document",
5892
- id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : (0, import_provider_utils32.generateId)(),
5908
+ id: (_F = (_E = (_D = self.config).generateId) == null ? void 0 : _E.call(_D)) != null ? _F : (0, import_provider_utils32.generateId)(),
5893
5909
  mediaType: "application/octet-stream",
5894
5910
  title: value.annotation.file_id,
5895
5911
  filename: value.annotation.file_id,