@ai-sdk/openai 3.0.0-beta.50 → 3.0.0-beta.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -31,6 +31,15 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/openai-is-reasoning-model.ts
+function isReasoningModel(modelId) {
+  if (modelId.startsWith("gpt-3")) return false;
+  if (modelId.startsWith("gpt-4")) return false;
+  if (modelId.startsWith("chatgpt-4o")) return false;
+  if (modelId.startsWith("gpt-5-chat")) return false;
+  return true;
+}
+
 // src/chat/convert-to-openai-chat-messages.ts
 import {
   UnsupportedFunctionalityError
@@ -1036,9 +1045,6 @@ var OpenAIChatLanguageModel = class {
     };
   }
 };
-function isReasoningModel(modelId) {
-  return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
-}
 function supportsFlexProcessing(modelId) {
   return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
 }
@@ -1046,32 +1052,8 @@ function supportsPriorityProcessing(modelId) {
   return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
-  var _a, _b;
-  if (!isReasoningModel(modelId)) {
-    return "system";
-  }
-  return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
+  return isReasoningModel(modelId) ? "developer" : "system";
 }
-var reasoningModels = {
-  o3: {
-    systemMessageMode: "developer"
-  },
-  "o3-2025-04-16": {
-    systemMessageMode: "developer"
-  },
-  "o3-mini": {
-    systemMessageMode: "developer"
-  },
-  "o3-mini-2025-01-31": {
-    systemMessageMode: "developer"
-  },
-  "o4-mini": {
-    systemMessageMode: "developer"
-  },
-  "o4-mini-2025-04-16": {
-    systemMessageMode: "developer"
-  }
-};
 
 // src/completion/openai-completion-language-model.ts
 import {
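The hunks above consolidate reasoning-model detection into a single exclusion-based helper: the removed per-model `reasoningModels` table and the old prefix allowlist (`o*`, `gpt-5*`) give way to `isReasoningModel`, which treats every model ID as a reasoning model unless it matches a known non-reasoning prefix. A minimal sketch of the observable behavior (any model ID not named in the diff is illustrative):

```js
// Exclusion-based classification, as introduced in src/openai-is-reasoning-model.ts.
function isReasoningModel(modelId) {
  if (modelId.startsWith("gpt-3")) return false;
  if (modelId.startsWith("gpt-4")) return false;
  if (modelId.startsWith("chatgpt-4o")) return false;
  if (modelId.startsWith("gpt-5-chat")) return false;
  return true;
}

isReasoningModel("gpt-4o-mini"); // false -> system messages keep the "system" role
isReasoningModel("o3-mini");     // true  -> system messages use the "developer" role
isReasoningModel("gpt-5-chat");  // false (explicit carve-out)
isReasoningModel("o99-future");  // true  -- unknown IDs now default to reasoning (hypothetical ID)
```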
@@ -2718,6 +2700,20 @@ var openaiResponsesChunkSchema = lazySchema12(
           start_index: z14.number().nullish(),
           end_index: z14.number().nullish(),
           quote: z14.string().nullish()
+        }),
+        z14.object({
+          type: z14.literal("container_file_citation"),
+          container_id: z14.string(),
+          file_id: z14.string(),
+          filename: z14.string().nullish(),
+          start_index: z14.number().nullish(),
+          end_index: z14.number().nullish(),
+          index: z14.number().nullish()
+        }),
+        z14.object({
+          type: z14.literal("file_path"),
+          file_id: z14.string(),
+          index: z14.number().nullish()
         })
       ])
     }),
@@ -2803,7 +2799,18 @@ var openaiResponsesResponseSchema = lazySchema12(
           quote: z14.string().nullish()
         }),
         z14.object({
-          type: z14.literal("container_file_citation")
+          type: z14.literal("container_file_citation"),
+          container_id: z14.string(),
+          file_id: z14.string(),
+          filename: z14.string().nullish(),
+          start_index: z14.number().nullish(),
+          end_index: z14.number().nullish(),
+          index: z14.number().nullish()
+        }),
+        z14.object({
+          type: z14.literal("file_path"),
+          file_id: z14.string(),
+          index: z14.number().nullish()
         })
       ])
     )
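Both the chunk schema and the response schema now accept the same two additional annotation shapes. For reference, hypothetical payloads the extended unions would parse (IDs and offsets are made up for illustration):

```js
// Hypothetical annotation payloads accepted by the extended unions.
const containerFileCitation = {
  type: "container_file_citation",
  container_id: "cntr_123",
  file_id: "cfile_456",
  filename: "report.txt",
  start_index: 10,
  end_index: 42,
  index: 0
};

const filePath = {
  type: "file_path",
  file_id: "cfile_456",
  index: 1
};
```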
@@ -3641,7 +3648,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
     const {
       args: body,
       warnings,
@@ -3739,13 +3746,17 @@ var OpenAIResponsesLanguageModel = class {
           if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
            logprobs.push(contentPart.logprobs);
           }
+          const providerMetadata2 = {
+            itemId: part.id,
+            ...contentPart.annotations.length > 0 && {
+              annotations: contentPart.annotations
+            }
+          };
           content.push({
             type: "text",
             text: contentPart.text,
             providerMetadata: {
-              openai: {
-                itemId: part.id
-              }
+              openai: providerMetadata2
             }
           });
           for (const annotation of contentPart.annotations) {
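With this change, a generated text part carries its raw annotations in provider metadata whenever the annotation array is non-empty. A sketch of the resulting shape (values hypothetical):

```js
// Shape of a text content part after this change (values hypothetical).
const textPart = {
  type: "text",
  text: "See the report for details.",
  providerMetadata: {
    openai: {
      itemId: "msg_123",
      // `annotations` only appears when the array is non-empty:
      annotations: [{ type: "file_path", file_id: "cfile_456", index: 1 }]
    }
  }
};
```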
@@ -3773,6 +3784,37 @@ var OpenAIResponsesLanguageModel = class {
                 }
               } : {}
             });
+          } else if (annotation.type === "container_file_citation") {
+            content.push({
+              type: "source",
+              sourceType: "document",
+              id: (_o = (_n = (_m = this.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : generateId2(),
+              mediaType: "text/plain",
+              title: (_q = (_p = annotation.filename) != null ? _p : annotation.file_id) != null ? _q : "Document",
+              filename: (_r = annotation.filename) != null ? _r : annotation.file_id,
+              providerMetadata: {
+                openai: {
+                  fileId: annotation.file_id,
+                  containerId: annotation.container_id,
+                  ...annotation.index != null ? { index: annotation.index } : {}
+                }
+              }
+            });
+          } else if (annotation.type === "file_path") {
+            content.push({
+              type: "source",
+              sourceType: "document",
+              id: (_u = (_t = (_s = this.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : generateId2(),
+              mediaType: "application/octet-stream",
+              title: annotation.file_id,
+              filename: annotation.file_id,
+              providerMetadata: {
+                openai: {
+                  fileId: annotation.file_id,
+                  ...annotation.index != null ? { index: annotation.index } : {}
+                }
+              }
+            });
           }
         }
       }
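Beyond the raw metadata, each `container_file_citation` and `file_path` annotation is now also mapped to a `source` content part with `sourceType: "document"`. A hedged consumer-side sketch, assuming the surrounding AI SDK surfaces these parts on `result.sources`:

```js
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

// Sketch: reading the document sources produced from the new annotations.
const result = await generateText({
  model: openai.responses("gpt-5"), // model ID illustrative
  prompt: "Summarize the uploaded report.",
});

for (const source of result.sources) {
  if (source.sourceType === "document") {
    console.log(source.title, source.filename, source.mediaType);
    const meta = source.providerMetadata?.openai;
    console.log(meta?.fileId, meta?.containerId); // containerId only for container citations
  }
}
```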
@@ -3842,13 +3884,13 @@ var OpenAIResponsesLanguageModel = class {
             toolName: "file_search",
             result: {
               queries: part.queries,
-              results: (_n = (_m = part.results) == null ? void 0 : _m.map((result) => ({
+              results: (_w = (_v = part.results) == null ? void 0 : _v.map((result) => ({
                 attributes: result.attributes,
                 fileId: result.file_id,
                 filename: result.filename,
                 score: result.score,
                 text: result.text
-              }))) != null ? _n : null
+              }))) != null ? _w : null
             }
           });
           break;
@@ -3888,15 +3930,15 @@ var OpenAIResponsesLanguageModel = class {
     return {
       content,
      finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_o = response.incomplete_details) == null ? void 0 : _o.reason,
+        finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
         hasFunctionCall
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
-        cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+        reasoningTokens: (_z = (_y = response.usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
+        cachedInputTokens: (_B = (_A = response.usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
       },
       request: { body },
       response: {
@@ -3954,7 +3996,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -4359,6 +4401,37 @@ var OpenAIResponsesLanguageModel = class {
                 }
               } : {}
             });
+          } else if (value.annotation.type === "container_file_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "document",
+              id: (_y = (_x = (_w = self.config).generateId) == null ? void 0 : _x.call(_w)) != null ? _y : generateId2(),
+              mediaType: "text/plain",
+              title: (_A = (_z = value.annotation.filename) != null ? _z : value.annotation.file_id) != null ? _A : "Document",
+              filename: (_B = value.annotation.filename) != null ? _B : value.annotation.file_id,
+              providerMetadata: {
+                openai: {
+                  fileId: value.annotation.file_id,
+                  containerId: value.annotation.container_id,
+                  ...value.annotation.index != null ? { index: value.annotation.index } : {}
+                }
+              }
+            });
+          } else if (value.annotation.type === "file_path") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "document",
+              id: (_E = (_D = (_C = self.config).generateId) == null ? void 0 : _D.call(_C)) != null ? _E : generateId2(),
+              mediaType: "application/octet-stream",
+              title: value.annotation.file_id,
+              filename: value.annotation.file_id,
+              providerMetadata: {
+                openai: {
+                  fileId: value.annotation.file_id,
+                  ...value.annotation.index != null ? { index: value.annotation.index } : {}
+                }
+              }
+            });
           }
         } else if (isErrorChunk(value)) {
           controller.enqueue({ type: "error", error: value });
@@ -4426,27 +4499,13 @@ function isErrorChunk(chunk) {
 function getResponsesModelConfig(modelId) {
   const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
   const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-  const defaults = {
-    systemMessageMode: "system",
-    supportsFlexProcessing: supportsFlexProcessing2,
-    supportsPriorityProcessing: supportsPriorityProcessing2
-  };
-  if (modelId.startsWith("gpt-5-chat")) {
-    return {
-      ...defaults,
-      isReasoningModel: false
-    };
-  }
-  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
-    return {
-      ...defaults,
-      isReasoningModel: true,
-      systemMessageMode: "developer"
-    };
-  }
+  const isReasoningModel2 = isReasoningModel(modelId);
+  const systemMessageMode = isReasoningModel2 ? "developer" : "system";
   return {
-    ...defaults,
-    isReasoningModel: false
+    systemMessageMode,
+    supportsFlexProcessing: supportsFlexProcessing2,
+    supportsPriorityProcessing: supportsPriorityProcessing2,
+    isReasoningModel: isReasoningModel2
   };
 }
 function mapWebSearchOutput(action) {
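`getResponsesModelConfig` now derives both `isReasoningModel` and `systemMessageMode` from the shared helper instead of its own prefix checks; the removed explicit `codex-` and `computer-use` branches are covered by the helper's default-true fallback. Illustrative outputs for a few model IDs, worked from the predicates in the diff:

```js
// Illustrative results of the consolidated getResponsesModelConfig.
getResponsesModelConfig("o4-mini");
// => { systemMessageMode: "developer", supportsFlexProcessing: true,
//      supportsPriorityProcessing: true,  isReasoningModel: true }

getResponsesModelConfig("gpt-4o");
// => { systemMessageMode: "system",    supportsFlexProcessing: false,
//      supportsPriorityProcessing: true,  isReasoningModel: false }

getResponsesModelConfig("gpt-5-chat");
// => { systemMessageMode: "system",    supportsFlexProcessing: false,
//      supportsPriorityProcessing: false, isReasoningModel: false }
```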