@ai-sdk/openai 4.0.0-beta.11 → 4.0.0-beta.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 4.0.0-beta.12
4
+
5
+ ### Patch Changes
6
+
7
+ - d9a1e9a: feat(openai): add server side compaction for openai
8
+
3
9
  ## 4.0.0-beta.11
4
10
 
5
11
  ### Patch Changes
@@ -47,18 +53,18 @@
47
53
  const toolNameMapping = createToolNameMapping({
48
54
  tools,
49
55
  providerToolNames: {
50
- 'openai.code_interpreter': 'code_interpreter',
51
- 'openai.file_search': 'file_search',
52
- 'openai.image_generation': 'image_generation',
53
- 'openai.local_shell': 'local_shell',
54
- 'openai.shell': 'shell',
55
- 'openai.web_search': 'web_search',
56
- 'openai.web_search_preview': 'web_search_preview',
57
- 'openai.mcp': 'mcp',
58
- 'openai.apply_patch': 'apply_patch',
56
+ "openai.code_interpreter": "code_interpreter",
57
+ "openai.file_search": "file_search",
58
+ "openai.image_generation": "image_generation",
59
+ "openai.local_shell": "local_shell",
60
+ "openai.shell": "shell",
61
+ "openai.web_search": "web_search",
62
+ "openai.web_search_preview": "web_search_preview",
63
+ "openai.mcp": "mcp",
64
+ "openai.apply_patch": "apply_patch",
59
65
  },
60
- resolveProviderToolName: tool =>
61
- tool.id === 'openai.custom'
66
+ resolveProviderToolName: (tool) =>
67
+ tool.id === "openai.custom"
62
68
  ? (tool.args as { name?: string }).name
63
69
  : undefined,
64
70
  });
@@ -498,13 +504,13 @@
498
504
  Before
499
505
 
500
506
  ```ts
501
- model.textEmbeddingModel('my-model-id');
507
+ model.textEmbeddingModel("my-model-id");
502
508
  ```
503
509
 
504
510
  After
505
511
 
506
512
  ```ts
507
- model.embeddingModel('my-model-id');
513
+ model.embeddingModel("my-model-id");
508
514
  ```
509
515
 
510
516
  - 60f4775: fix: remove code for unsupported o1-mini and o1-preview models
@@ -514,15 +520,15 @@
514
520
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
515
521
 
516
522
  ```ts
517
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
518
- import { generateText } from 'ai';
523
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
524
+ import { generateText } from "ai";
519
525
 
520
526
  await generateText({
521
- model: openai.chat('gpt-4o'),
522
- prompt: 'Invent a new holiday and describe its traditions.',
527
+ model: openai.chat("gpt-4o"),
528
+ prompt: "Invent a new holiday and describe its traditions.",
523
529
  providerOptions: {
524
530
  openai: {
525
- user: 'user-123',
531
+ user: "user-123",
526
532
  } satisfies OpenAIChatLanguageModelOptions,
527
533
  },
528
534
  });
@@ -923,13 +929,13 @@
923
929
  Before
924
930
 
925
931
  ```ts
926
- model.textEmbeddingModel('my-model-id');
932
+ model.textEmbeddingModel("my-model-id");
927
933
  ```
928
934
 
929
935
  After
930
936
 
931
937
  ```ts
932
- model.embeddingModel('my-model-id');
938
+ model.embeddingModel("my-model-id");
933
939
  ```
934
940
 
935
941
  - Updated dependencies [8d9e8ad]
@@ -1399,15 +1405,15 @@
1399
1405
  - 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
1400
1406
 
1401
1407
  ```ts
1402
- import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
1403
- import { generateText } from 'ai';
1408
+ import { openai, type OpenAIChatLanguageModelOptions } from "@ai-sdk/openai";
1409
+ import { generateText } from "ai";
1404
1410
 
1405
1411
  await generateText({
1406
- model: openai.chat('gpt-4o'),
1407
- prompt: 'Invent a new holiday and describe its traditions.',
1412
+ model: openai.chat("gpt-4o"),
1413
+ prompt: "Invent a new holiday and describe its traditions.",
1408
1414
  providerOptions: {
1409
1415
  openai: {
1410
- user: 'user-123',
1416
+ user: "user-123",
1411
1417
  } satisfies OpenAIChatLanguageModelOptions,
1412
1418
  },
1413
1419
  });
@@ -1703,7 +1709,7 @@
1703
1709
 
1704
1710
  ```js
1705
1711
  await generateImage({
1706
- model: luma.image('photon-flash-1', {
1712
+ model: luma.image("photon-flash-1", {
1707
1713
  maxImagesPerCall: 5,
1708
1714
  pollIntervalMillis: 500,
1709
1715
  }),
@@ -1716,7 +1722,7 @@
1716
1722
 
1717
1723
  ```js
1718
1724
  await generateImage({
1719
- model: luma.image('photon-flash-1'),
1725
+ model: luma.image("photon-flash-1"),
1720
1726
  prompt,
1721
1727
  n: 10,
1722
1728
  maxImagesPerCall: 5,
@@ -1778,10 +1784,10 @@
1778
1784
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
1779
1785
 
1780
1786
  ```js
1781
- const prompt = 'Santa Claus driving a Cadillac';
1787
+ const prompt = "Santa Claus driving a Cadillac";
1782
1788
 
1783
1789
  const { providerMetadata } = await experimental_generateImage({
1784
- model: openai.image('dall-e-3'),
1790
+ model: openai.image("dall-e-3"),
1785
1791
  prompt,
1786
1792
  });
1787
1793
 
@@ -2080,7 +2086,7 @@
2080
2086
 
2081
2087
  ```js
2082
2088
  await generateImage({
2083
- model: luma.image('photon-flash-1', {
2089
+ model: luma.image("photon-flash-1", {
2084
2090
  maxImagesPerCall: 5,
2085
2091
  pollIntervalMillis: 500,
2086
2092
  }),
@@ -2093,7 +2099,7 @@
2093
2099
 
2094
2100
  ```js
2095
2101
  await generateImage({
2096
- model: luma.image('photon-flash-1'),
2102
+ model: luma.image("photon-flash-1"),
2097
2103
  prompt,
2098
2104
  n: 10,
2099
2105
  maxImagesPerCall: 5,
@@ -2138,10 +2144,10 @@
2138
2144
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
2139
2145
 
2140
2146
  ```js
2141
- const prompt = 'Santa Claus driving a Cadillac';
2147
+ const prompt = "Santa Claus driving a Cadillac";
2142
2148
 
2143
2149
  const { providerMetadata } = await experimental_generateImage({
2144
- model: openai.image('dall-e-3'),
2150
+ model: openai.image("dall-e-3"),
2145
2151
  prompt,
2146
2152
  });
2147
2153
 
package/dist/index.d.mts CHANGED
@@ -300,6 +300,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
300
300
  action: {
301
301
  commands: string[];
302
302
  };
303
+ } | {
304
+ type: "compaction";
305
+ id: string;
306
+ encrypted_content?: string | null | undefined;
303
307
  } | {
304
308
  type: "shell_call_output";
305
309
  id: string;
@@ -483,6 +487,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
483
487
  action: {
484
488
  commands: string[];
485
489
  };
490
+ } | {
491
+ type: "compaction";
492
+ id: string;
493
+ encrypted_content: string;
486
494
  } | {
487
495
  type: "shell_call_output";
488
496
  id: string;
@@ -990,6 +998,10 @@ declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.
990
998
  user?: string | null | undefined;
991
999
  systemMessageMode?: "remove" | "system" | "developer" | undefined;
992
1000
  forceReasoning?: boolean | undefined;
1001
+ contextManagement?: {
1002
+ type: "compaction";
1003
+ compactThreshold: number;
1004
+ }[] | null | undefined;
993
1005
  }>;
994
1006
  type OpenAILanguageModelResponsesOptions = InferSchema<typeof openaiLanguageModelResponsesOptionsSchema>;
995
1007
 
@@ -1124,6 +1136,14 @@ type OpenaiResponsesReasoningProviderMetadata = {
1124
1136
  type OpenaiResponsesProviderMetadata = {
1125
1137
  openai: ResponsesProviderMetadata;
1126
1138
  };
1139
+ type ResponsesCompactionProviderMetadata = {
1140
+ type: 'compaction';
1141
+ itemId: string;
1142
+ encryptedContent?: string;
1143
+ };
1144
+ type OpenaiResponsesCompactionProviderMetadata = {
1145
+ openai: ResponsesCompactionProviderMetadata;
1146
+ };
1127
1147
  type ResponsesTextProviderMetadata = {
1128
1148
  itemId: string;
1129
1149
  phase?: 'commentary' | 'final_answer' | null;
@@ -1151,4 +1171,4 @@ type OpenaiResponsesSourceDocumentProviderMetadata = {
1151
1171
 
1152
1172
  declare const VERSION: string;
1153
1173
 
1154
- export { type OpenAILanguageModelChatOptions as OpenAIChatLanguageModelOptions, type OpenAIEmbeddingModelOptions, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, type OpenAILanguageModelResponsesOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAILanguageModelResponsesOptions as OpenAIResponsesProviderOptions, type OpenAISpeechModelOptions, type OpenAITranscriptionModelOptions, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };
1174
+ export { type OpenAILanguageModelChatOptions as OpenAIChatLanguageModelOptions, type OpenAIEmbeddingModelOptions, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, type OpenAILanguageModelResponsesOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAILanguageModelResponsesOptions as OpenAIResponsesProviderOptions, type OpenAISpeechModelOptions, type OpenAITranscriptionModelOptions, type OpenaiResponsesCompactionProviderMetadata, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };
package/dist/index.d.ts CHANGED
@@ -300,6 +300,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
300
300
  action: {
301
301
  commands: string[];
302
302
  };
303
+ } | {
304
+ type: "compaction";
305
+ id: string;
306
+ encrypted_content?: string | null | undefined;
303
307
  } | {
304
308
  type: "shell_call_output";
305
309
  id: string;
@@ -483,6 +487,10 @@ declare const openaiResponsesChunkSchema: _ai_sdk_provider_utils.LazySchema<{
483
487
  action: {
484
488
  commands: string[];
485
489
  };
490
+ } | {
491
+ type: "compaction";
492
+ id: string;
493
+ encrypted_content: string;
486
494
  } | {
487
495
  type: "shell_call_output";
488
496
  id: string;
@@ -990,6 +998,10 @@ declare const openaiLanguageModelResponsesOptionsSchema: _ai_sdk_provider_utils.
990
998
  user?: string | null | undefined;
991
999
  systemMessageMode?: "remove" | "system" | "developer" | undefined;
992
1000
  forceReasoning?: boolean | undefined;
1001
+ contextManagement?: {
1002
+ type: "compaction";
1003
+ compactThreshold: number;
1004
+ }[] | null | undefined;
993
1005
  }>;
994
1006
  type OpenAILanguageModelResponsesOptions = InferSchema<typeof openaiLanguageModelResponsesOptionsSchema>;
995
1007
 
@@ -1124,6 +1136,14 @@ type OpenaiResponsesReasoningProviderMetadata = {
1124
1136
  type OpenaiResponsesProviderMetadata = {
1125
1137
  openai: ResponsesProviderMetadata;
1126
1138
  };
1139
+ type ResponsesCompactionProviderMetadata = {
1140
+ type: 'compaction';
1141
+ itemId: string;
1142
+ encryptedContent?: string;
1143
+ };
1144
+ type OpenaiResponsesCompactionProviderMetadata = {
1145
+ openai: ResponsesCompactionProviderMetadata;
1146
+ };
1127
1147
  type ResponsesTextProviderMetadata = {
1128
1148
  itemId: string;
1129
1149
  phase?: 'commentary' | 'final_answer' | null;
@@ -1151,4 +1171,4 @@ type OpenaiResponsesSourceDocumentProviderMetadata = {
1151
1171
 
1152
1172
  declare const VERSION: string;
1153
1173
 
1154
- export { type OpenAILanguageModelChatOptions as OpenAIChatLanguageModelOptions, type OpenAIEmbeddingModelOptions, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, type OpenAILanguageModelResponsesOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAILanguageModelResponsesOptions as OpenAIResponsesProviderOptions, type OpenAISpeechModelOptions, type OpenAITranscriptionModelOptions, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };
1174
+ export { type OpenAILanguageModelChatOptions as OpenAIChatLanguageModelOptions, type OpenAIEmbeddingModelOptions, type OpenAILanguageModelChatOptions, type OpenAILanguageModelCompletionOptions, type OpenAILanguageModelResponsesOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAILanguageModelResponsesOptions as OpenAIResponsesProviderOptions, type OpenAISpeechModelOptions, type OpenAITranscriptionModelOptions, type OpenaiResponsesCompactionProviderMetadata, type OpenaiResponsesProviderMetadata, type OpenaiResponsesReasoningProviderMetadata, type OpenaiResponsesSourceDocumentProviderMetadata, type OpenaiResponsesTextProviderMetadata, VERSION, createOpenAI, openai };
package/dist/index.js CHANGED
@@ -2656,7 +2656,7 @@ async function convertToOpenAIResponsesInput({
2656
2656
  hasApplyPatchTool = false,
2657
2657
  customProviderToolNames
2658
2658
  }) {
2659
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
2659
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
2660
2660
  let input = [];
2661
2661
  const warnings = [];
2662
2662
  const processedApprovalIds = /* @__PURE__ */ new Set();
@@ -3003,6 +3003,28 @@ async function convertToOpenAIResponsesInput({
3003
3003
  }
3004
3004
  break;
3005
3005
  }
3006
+ case "custom": {
3007
+ if (part.kind === "openai-compaction") {
3008
+ const providerOpts = (_n = part.providerOptions) == null ? void 0 : _n[providerOptionsName];
3009
+ const id = providerOpts == null ? void 0 : providerOpts.itemId;
3010
+ if (hasConversation && id != null) {
3011
+ break;
3012
+ }
3013
+ if (store && id != null) {
3014
+ input.push({ type: "item_reference", id });
3015
+ break;
3016
+ }
3017
+ const encryptedContent = providerOpts == null ? void 0 : providerOpts.encryptedContent;
3018
+ if (id != null) {
3019
+ input.push({
3020
+ type: "compaction",
3021
+ id,
3022
+ encrypted_content: encryptedContent
3023
+ });
3024
+ }
3025
+ }
3026
+ break;
3027
+ }
3006
3028
  }
3007
3029
  }
3008
3030
  break;
@@ -3030,7 +3052,7 @@ async function convertToOpenAIResponsesInput({
3030
3052
  }
3031
3053
  const output = part.output;
3032
3054
  if (output.type === "execution-denied") {
3033
- const approvalId = (_o = (_n = output.providerOptions) == null ? void 0 : _n.openai) == null ? void 0 : _o.approvalId;
3055
+ const approvalId = (_p = (_o = output.providerOptions) == null ? void 0 : _o.openai) == null ? void 0 : _p.approvalId;
3034
3056
  if (approvalId) {
3035
3057
  continue;
3036
3058
  }
@@ -3104,7 +3126,7 @@ async function convertToOpenAIResponsesInput({
3104
3126
  outputValue = output.value;
3105
3127
  break;
3106
3128
  case "execution-denied":
3107
- outputValue = (_p = output.reason) != null ? _p : "Tool execution denied.";
3129
+ outputValue = (_q = output.reason) != null ? _q : "Tool execution denied.";
3108
3130
  break;
3109
3131
  case "json":
3110
3132
  case "error-json":
@@ -3158,7 +3180,7 @@ async function convertToOpenAIResponsesInput({
3158
3180
  contentValue = output.value;
3159
3181
  break;
3160
3182
  case "execution-denied":
3161
- contentValue = (_q = output.reason) != null ? _q : "Tool execution denied.";
3183
+ contentValue = (_r = output.reason) != null ? _r : "Tool execution denied.";
3162
3184
  break;
3163
3185
  case "json":
3164
3186
  case "error-json":
@@ -3410,6 +3432,11 @@ var openaiResponsesChunkSchema = (0, import_provider_utils26.lazySchema)(
3410
3432
  commands: import_v421.z.array(import_v421.z.string())
3411
3433
  })
3412
3434
  }),
3435
+ import_v421.z.object({
3436
+ type: import_v421.z.literal("compaction"),
3437
+ id: import_v421.z.string(),
3438
+ encrypted_content: import_v421.z.string().nullish()
3439
+ }),
3413
3440
  import_v421.z.object({
3414
3441
  type: import_v421.z.literal("shell_call_output"),
3415
3442
  id: import_v421.z.string(),
@@ -3633,6 +3660,11 @@ var openaiResponsesChunkSchema = (0, import_provider_utils26.lazySchema)(
3633
3660
  commands: import_v421.z.array(import_v421.z.string())
3634
3661
  })
3635
3662
  }),
3663
+ import_v421.z.object({
3664
+ type: import_v421.z.literal("compaction"),
3665
+ id: import_v421.z.string(),
3666
+ encrypted_content: import_v421.z.string()
3667
+ }),
3636
3668
  import_v421.z.object({
3637
3669
  type: import_v421.z.literal("shell_call_output"),
3638
3670
  id: import_v421.z.string(),
@@ -4030,6 +4062,11 @@ var openaiResponsesResponseSchema = (0, import_provider_utils26.lazySchema)(
4030
4062
  commands: import_v421.z.array(import_v421.z.string())
4031
4063
  })
4032
4064
  }),
4065
+ import_v421.z.object({
4066
+ type: import_v421.z.literal("compaction"),
4067
+ id: import_v421.z.string(),
4068
+ encrypted_content: import_v421.z.string()
4069
+ }),
4033
4070
  import_v421.z.object({
4034
4071
  type: import_v421.z.literal("shell_call_output"),
4035
4072
  id: import_v421.z.string(),
@@ -4292,7 +4329,16 @@ var openaiLanguageModelResponsesOptionsSchema = (0, import_provider_utils27.lazy
4292
4329
  * When enabled, the SDK applies reasoning-model parameter compatibility rules
4293
4330
  * and defaults `systemMessageMode` to `developer` unless overridden.
4294
4331
  */
4295
- forceReasoning: import_v422.z.boolean().optional()
4332
+ forceReasoning: import_v422.z.boolean().optional(),
4333
+ /**
4334
+ * Enable server-side context management (compaction).
4335
+ */
4336
+ contextManagement: import_v422.z.array(
4337
+ import_v422.z.object({
4338
+ type: import_v422.z.literal("compaction"),
4339
+ compactThreshold: import_v422.z.number()
4340
+ })
4341
+ ).nullish()
4296
4342
  })
4297
4343
  )
4298
4344
  );
@@ -4759,6 +4805,13 @@ var OpenAIResponsesLanguageModel = class {
4759
4805
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
4760
4806
  top_logprobs: topLogprobs,
4761
4807
  truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
4808
+ // context management (server-side compaction):
4809
+ ...(openaiOptions == null ? void 0 : openaiOptions.contextManagement) && {
4810
+ context_management: openaiOptions.contextManagement.map((cm) => ({
4811
+ type: cm.type,
4812
+ compact_threshold: cm.compactThreshold
4813
+ }))
4814
+ },
4762
4815
  // model-specific settings:
4763
4816
  ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
4764
4817
  reasoning: {
@@ -5283,6 +5336,20 @@ var OpenAIResponsesLanguageModel = class {
5283
5336
  });
5284
5337
  break;
5285
5338
  }
5339
+ case "compaction": {
5340
+ content.push({
5341
+ type: "custom",
5342
+ kind: "openai-compaction",
5343
+ providerMetadata: {
5344
+ [providerOptionsName]: {
5345
+ type: "compaction",
5346
+ itemId: part.id,
5347
+ encryptedContent: part.encrypted_content
5348
+ }
5349
+ }
5350
+ });
5351
+ break;
5352
+ }
5286
5353
  }
5287
5354
  }
5288
5355
  const providerMetadata = {
@@ -5912,6 +5979,18 @@ var OpenAIResponsesLanguageModel = class {
5912
5979
  });
5913
5980
  }
5914
5981
  delete activeReasoning[value.item.id];
5982
+ } else if (value.item.type === "compaction") {
5983
+ controller.enqueue({
5984
+ type: "custom",
5985
+ kind: "openai-compaction",
5986
+ providerMetadata: {
5987
+ [providerOptionsName]: {
5988
+ type: "compaction",
5989
+ itemId: value.item.id,
5990
+ encryptedContent: value.item.encrypted_content
5991
+ }
5992
+ }
5993
+ });
5915
5994
  }
5916
5995
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
5917
5996
  const toolCall = ongoingToolCalls[value.output_index];
@@ -6599,7 +6678,7 @@ var OpenAITranscriptionModel = class {
6599
6678
  };
6600
6679
 
6601
6680
  // src/version.ts
6602
- var VERSION = true ? "4.0.0-beta.11" : "0.0.0-test";
6681
+ var VERSION = true ? "4.0.0-beta.12" : "0.0.0-test";
6603
6682
 
6604
6683
  // src/openai-provider.ts
6605
6684
  function createOpenAI(options = {}) {