@ai-sdk/openai 4.0.0-beta.4 → 4.0.0-beta.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/CHANGELOG.md +399 -22
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +166 -49
  4. package/dist/index.js +2454 -1627
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +176 -53
  7. package/dist/internal/index.js +2220 -1648
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/03-openai.mdx +292 -22
  10. package/package.json +13 -14
  11. package/src/chat/convert-openai-chat-usage.ts +2 -2
  12. package/src/chat/convert-to-openai-chat-messages.ts +99 -71
  13. package/src/chat/map-openai-finish-reason.ts +2 -2
  14. package/src/chat/openai-chat-api.ts +6 -2
  15. package/src/chat/openai-chat-language-model.ts +68 -164
  16. package/src/chat/openai-chat-options.ts +10 -1
  17. package/src/chat/openai-chat-prepare-tools.ts +7 -7
  18. package/src/completion/convert-openai-completion-usage.ts +2 -2
  19. package/src/completion/convert-to-openai-completion-prompt.ts +2 -3
  20. package/src/completion/map-openai-finish-reason.ts +2 -2
  21. package/src/completion/openai-completion-api.ts +5 -2
  22. package/src/completion/openai-completion-language-model.ts +46 -30
  23. package/src/completion/openai-completion-options.ts +5 -1
  24. package/src/embedding/openai-embedding-model.ts +25 -8
  25. package/src/embedding/openai-embedding-options.ts +5 -1
  26. package/src/files/openai-files-api.ts +17 -0
  27. package/src/files/openai-files-options.ts +22 -0
  28. package/src/files/openai-files.ts +100 -0
  29. package/src/image/openai-image-model.ts +31 -15
  30. package/src/image/openai-image-options.ts +3 -0
  31. package/src/index.ts +2 -0
  32. package/src/openai-config.ts +7 -7
  33. package/src/openai-language-model-capabilities.ts +3 -2
  34. package/src/openai-provider.ts +63 -30
  35. package/src/openai-tools.ts +12 -1
  36. package/src/responses/convert-openai-responses-usage.ts +2 -2
  37. package/src/responses/convert-to-openai-responses-input.ts +244 -77
  38. package/src/responses/map-openai-responses-finish-reason.ts +2 -2
  39. package/src/responses/openai-responses-api.ts +141 -3
  40. package/src/responses/openai-responses-language-model.ts +274 -61
  41. package/src/responses/openai-responses-options.ts +29 -3
  42. package/src/responses/openai-responses-prepare-tools.ts +48 -15
  43. package/src/responses/openai-responses-provider-metadata.ts +12 -2
  44. package/src/skills/openai-skills-api.ts +31 -0
  45. package/src/skills/openai-skills.ts +83 -0
  46. package/src/speech/openai-speech-model.ts +28 -12
  47. package/src/speech/openai-speech-options.ts +5 -1
  48. package/src/tool/apply-patch.ts +33 -32
  49. package/src/tool/code-interpreter.ts +40 -41
  50. package/src/tool/custom.ts +2 -8
  51. package/src/tool/file-search.ts +3 -3
  52. package/src/tool/image-generation.ts +2 -2
  53. package/src/tool/local-shell.ts +2 -2
  54. package/src/tool/mcp.ts +3 -3
  55. package/src/tool/shell.ts +9 -4
  56. package/src/tool/tool-search.ts +98 -0
  57. package/src/tool/web-search-preview.ts +2 -2
  58. package/src/tool/web-search.ts +2 -2
  59. package/src/transcription/openai-transcription-model.ts +30 -14
  60. package/src/transcription/openai-transcription-options.ts +5 -1
  61. package/dist/index.d.mts +0 -1107
  62. package/dist/index.mjs +0 -6508
  63. package/dist/index.mjs.map +0 -1
  64. package/dist/internal/index.d.mts +0 -1137
  65. package/dist/internal/index.mjs +0 -6321
  66. package/dist/internal/index.mjs.map +0 -1
@@ -257,6 +257,11 @@ The following provider options are available:
257
257
  - **forceReasoning** _boolean_
258
258
  Force treating this model as a reasoning model. This is useful for "stealth" reasoning models (e.g. via a custom baseURL) where the model ID is not recognized by the SDK's allowlist. When enabled, the SDK applies reasoning-model parameter compatibility rules and defaults `systemMessageMode` to `developer` unless overridden.
259
259
 
260
+ - **contextManagement** _Array<object>_
261
+ Enable server-side context management (compaction). When configured, the server automatically compresses conversation context when token usage crosses a specified threshold. Each object in the array should have:
262
+ - `type`: `'compaction'`
263
+ - `compactThreshold`: _number_ — the token count at which compaction is triggered
264
+
260
265
  The OpenAI responses provider also returns provider-specific metadata:
261
266
 
262
267
  For Responses models, you can type this metadata using `OpenaiResponsesProviderMetadata`:
@@ -764,7 +769,7 @@ const result = await generateText({
764
769
  }),
765
770
  },
766
771
  prompt: 'List the files in my home directory.',
767
- stopWhen: stepCountIs(2),
772
+ stopWhen: isStepCount(2),
768
773
  });
769
774
  ```
770
775
 
@@ -922,7 +927,7 @@ const result = await generateText({
922
927
  }),
923
928
  },
924
929
  prompt: 'Use the skill to solve this problem.',
925
- stopWhen: stepCountIs(5),
930
+ stopWhen: isStepCount(5),
926
931
  });
927
932
  ```
928
933
 
@@ -937,7 +942,7 @@ enabling iterative, multi-step code editing workflows.
937
942
 
938
943
  ```ts
939
944
  import { openai } from '@ai-sdk/openai';
940
- import { generateText, stepCountIs } from 'ai';
945
+ import { generateText, isStepCount } from 'ai';
941
946
 
942
947
  const result = await generateText({
943
948
  model: openai('gpt-5.1'),
@@ -949,7 +954,7 @@ const result = await generateText({
949
954
  }),
950
955
  },
951
956
  prompt: 'Create a python file that calculates the factorial of a number',
952
- stopWhen: stepCountIs(5),
957
+ stopWhen: isStepCount(5),
953
958
  });
954
959
  ```
955
960
 
@@ -958,6 +963,145 @@ Your execute function must return:
958
963
  - **status** _'completed' | 'failed'_ - Whether the patch was applied successfully
959
964
  - **output** _string_ (optional) - Human-readable log text (e.g., results or error messages)
960
965
 
966
+ #### Tool Search
967
+
968
+ Tool search allows the model to dynamically search for and load tools into context as needed,
969
+ rather than loading all tool definitions up front. This can reduce token usage, cost, and latency
970
+ when you have many tools. Mark the tools you want to make searchable with `deferLoading: true`
971
+ in their `providerOptions`.
972
+
973
+ There are two execution modes:
974
+
975
+ - **Server-executed (hosted):** OpenAI searches across the deferred tools declared in the request and returns the loaded subset in the same response. No extra round-trip is needed.
976
+ - **Client-executed:** The model emits a `tool_search_call`, your application performs the lookup, and you return the matching tools via the `execute` callback.
977
+
978
+ ##### Server-Executed (Hosted) Tool Search
979
+
980
+ Use hosted tool search when the candidate tools are already known at request time.
981
+ Add `openai.tools.toolSearch()` with no arguments and mark your tools with `deferLoading: true`:
982
+
983
+ ```ts
984
+ import { openai } from '@ai-sdk/openai';
985
+ import { generateText, tool, isStepCount } from 'ai';
986
+ import { z } from 'zod';
987
+
988
+ const result = await generateText({
989
+ model: openai.responses('gpt-5.4'),
990
+ prompt: 'What is the weather in San Francisco?',
991
+ stopWhen: isStepCount(10),
992
+ tools: {
993
+ toolSearch: openai.tools.toolSearch(),
994
+
995
+ get_weather: tool({
996
+ description: 'Get the current weather at a specific location',
997
+ inputSchema: z.object({
998
+ location: z.string(),
999
+ unit: z.enum(['celsius', 'fahrenheit']),
1000
+ }),
1001
+ execute: async ({ location, unit }) => ({
1002
+ location,
1003
+ temperature: unit === 'celsius' ? 18 : 64,
1004
+ }),
1005
+ providerOptions: {
1006
+ openai: { deferLoading: true },
1007
+ },
1008
+ }),
1009
+
1010
+ search_files: tool({
1011
+ description: 'Search through files in the workspace',
1012
+ inputSchema: z.object({ query: z.string() }),
1013
+ execute: async ({ query }) => ({
1014
+ results: [`Found 3 files matching "${query}"`],
1015
+ }),
1016
+ providerOptions: {
1017
+ openai: { deferLoading: true },
1018
+ },
1019
+ }),
1020
+ },
1021
+ });
1022
+ ```
1023
+
1024
+ In hosted mode, the model internally searches the deferred tools, loads the relevant ones, and
1025
+ proceeds to call them — all within a single response. The `tool_search_call` and
1026
+ `tool_search_output` items appear in the response with `execution: 'server'` and `call_id: null`.
1027
+
1028
+ ##### Client-Executed Tool Search
1029
+
1030
+ Use client-executed tool search when tool discovery depends on runtime state — for example,
1031
+ tools that vary per tenant, project, or external system. Pass `execution: 'client'` along with
1032
+ a `description`, `parameters` schema, and an `execute` callback:
1033
+
1034
+ ```ts
1035
+ import { openai } from '@ai-sdk/openai';
1036
+ import { generateText, tool, isStepCount } from 'ai';
1037
+ import { z } from 'zod';
1038
+
1039
+ const result = await generateText({
1040
+ model: openai.responses('gpt-5.4'),
1041
+ prompt: 'What is the weather in San Francisco?',
1042
+ stopWhen: isStepCount(10),
1043
+ tools: {
1044
+ toolSearch: openai.tools.toolSearch({
1045
+ execution: 'client',
1046
+ description: 'Search for available tools based on what the user needs.',
1047
+ parameters: {
1048
+ type: 'object',
1049
+ properties: {
1050
+ goal: {
1051
+ type: 'string',
1052
+ description: 'What the user is trying to accomplish',
1053
+ },
1054
+ },
1055
+ required: ['goal'],
1056
+ additionalProperties: false,
1057
+ },
1058
+ execute: async ({ arguments: args }) => {
1059
+ // Your custom tool discovery logic here.
1060
+ // Return the tools that match the search goal.
1061
+ return {
1062
+ tools: [
1063
+ {
1064
+ type: 'function',
1065
+ name: 'get_weather',
1066
+ description: 'Get the current weather at a specific location',
1067
+ deferLoading: true,
1068
+ parameters: {
1069
+ type: 'object',
1070
+ properties: {
1071
+ location: { type: 'string' },
1072
+ },
1073
+ required: ['location'],
1074
+ additionalProperties: false,
1075
+ },
1076
+ },
1077
+ ],
1078
+ };
1079
+ },
1080
+ }),
1081
+
1082
+ get_weather: tool({
1083
+ description: 'Get the current weather at a specific location',
1084
+ inputSchema: z.object({ location: z.string() }),
1085
+ execute: async ({ location }) => ({
1086
+ location,
1087
+ temperature: 64,
1088
+ condition: 'Partly cloudy',
1089
+ }),
1090
+ providerOptions: {
1091
+ openai: { deferLoading: true },
1092
+ },
1093
+ }),
1094
+ },
1095
+ });
1096
+ ```
1097
+
1098
+ In client mode, the flow spans two steps:
1099
+
1100
+ 1. **Step 1:** The model emits a `tool_search_call` with `execution: 'client'` and a non-null `call_id`. The SDK calls your `execute` callback with the search arguments. Your callback returns the discovered tools.
1101
+ 2. **Step 2:** The SDK sends the `tool_search_output` (with the matching `call_id`) back to the model. The model can now call the loaded tools as normal function calls.
1102
+
1103
+ For more details, see the [OpenAI Tool Search documentation](https://platform.openai.com/docs/guides/tools-tool-search).
1104
+
961
1105
  #### Custom Tool
962
1106
 
963
1107
  The OpenAI Responses API supports
@@ -969,13 +1113,12 @@ SQL queries, code snippets, or any output that must match a specific pattern.
969
1113
 
970
1114
  ```ts
971
1115
  import { openai } from '@ai-sdk/openai';
972
- import { generateText, stepCountIs } from 'ai';
1116
+ import { generateText, isStepCount } from 'ai';
973
1117
 
974
1118
  const result = await generateText({
975
1119
  model: openai.responses('gpt-5.2-codex'),
976
1120
  tools: {
977
1121
  write_sql: openai.tools.customTool({
978
- name: 'write_sql',
979
1122
  description: 'Write a SQL SELECT query to answer the user question.',
980
1123
  format: {
981
1124
  type: 'grammar',
@@ -991,7 +1134,7 @@ const result = await generateText({
991
1134
  },
992
1135
  toolChoice: 'required',
993
1136
  prompt: 'Write a SQL query to get all users older than 25.',
994
- stopWhen: stepCountIs(3),
1137
+ stopWhen: isStepCount(3),
995
1138
  });
996
1139
  ```
997
1140
 
@@ -1005,7 +1148,6 @@ const result = streamText({
1005
1148
  model: openai.responses('gpt-5.2-codex'),
1006
1149
  tools: {
1007
1150
  write_sql: openai.tools.customTool({
1008
- name: 'write_sql',
1009
1151
  description: 'Write a SQL SELECT query to answer the user question.',
1010
1152
  format: {
1011
1153
  type: 'grammar',
@@ -1028,7 +1170,6 @@ for await (const chunk of result.fullStream) {
1028
1170
 
1029
1171
  The custom tool can be configured with:
1030
1172
 
1031
- - **name** _string_ (required) - The name of the custom tool. Used to identify the tool in tool calls.
1032
1173
  - **description** _string_ (optional) - A description of what the tool does, to help the model understand when to use it.
1033
1174
  - **format** _object_ (optional) - The output format constraint. Omit for unconstrained text output.
1034
1175
  - **type** _'grammar' | 'text'_ - The format type. Use `'grammar'` for constrained output or `'text'` for explicit unconstrained text.
@@ -1053,8 +1194,9 @@ const result = await generateText({
1053
1194
  text: 'Please describe the image.',
1054
1195
  },
1055
1196
  {
1056
- type: 'image',
1057
- image: readFileSync('./data/image.png'),
1197
+ type: 'file',
1198
+ mediaType: 'image',
1199
+ data: readFileSync('./data/image.png'),
1058
1200
  },
1059
1201
  ],
1060
1202
  },
@@ -1069,8 +1211,9 @@ You can also pass a file-id from the OpenAI Files API.
1069
1211
 
1070
1212
  ```ts
1071
1213
  {
1072
- type: 'image',
1073
- image: 'file-8EFBcWHsQxZV7YGezBC1fq'
1214
+ type: 'file',
1215
+ mediaType: 'image',
1216
+ data: 'file-8EFBcWHsQxZV7YGezBC1fq'
1074
1217
  }
1075
1218
  ```
1076
1219
 
@@ -1078,8 +1221,9 @@ You can also pass the URL of an image.
1078
1221
 
1079
1222
  ```ts
1080
1223
  {
1081
- type: 'image',
1082
- image: 'https://sample.edu/image.png',
1224
+ type: 'file',
1225
+ mediaType: 'image',
1226
+ data: 'https://sample.edu/image.png',
1083
1227
  }
1084
1228
  ```
1085
1229
 
@@ -1375,6 +1519,125 @@ for (const part of result.content) {
1375
1519
  are fields like `filename` that are directly available on the source object.
1376
1520
  </Note>
1377
1521
 
1522
+ #### Compaction
1523
+
1524
+ The OpenAI Responses API supports server-side context compaction. When enabled, the server automatically compresses conversation context when token usage crosses a configured threshold. This is useful for long-running conversations or agent loops where you want to stay within token limits without manually managing context.
1525
+
1526
+ The compaction item returned by the server is opaque and encrypted — it carries forward key prior state and reasoning into the next turn using fewer tokens. The AI SDK handles this automatically: compaction items are returned as text parts with special `providerMetadata`, and when passed back in subsequent requests they are sent as compaction input items.
1527
+
1528
+ ```ts highlight="7-11"
1529
+ import {
1530
+ openai,
1531
+ type OpenAILanguageModelResponsesOptions,
1532
+ } from '@ai-sdk/openai';
1533
+ import { generateText } from 'ai';
1534
+
1535
+ const result = await generateText({
1536
+ model: openai.responses('gpt-5.2'),
1537
+ messages: conversationHistory,
1538
+ providerOptions: {
1539
+ openai: {
1540
+ store: false,
1541
+ contextManagement: [{ type: 'compaction', compactThreshold: 50000 }],
1542
+ } satisfies OpenAILanguageModelResponsesOptions,
1543
+ },
1544
+ });
1545
+ ```
1546
+
1547
+ **Configuration:**
1548
+
1549
+ - **type** — Must be `'compaction'`
1550
+ - **compactThreshold** — The token count at which compaction is triggered. When the rendered input token count crosses this threshold, the server runs a compaction pass before continuing inference.
1551
+
1552
+ <Note>
1553
+ Server-side compaction is ZDR-friendly when you set `store: false` on your
1554
+ requests.
1555
+ </Note>
1556
+
1557
+ ##### Detecting Compaction in Streams
1558
+
1559
+ When using `streamText`, you can detect compaction by checking the `providerMetadata` on `text-start` and `text-end` events:
1560
+
1561
+ ```ts
1562
+ import {
1563
+ openai,
1564
+ type OpenAILanguageModelResponsesOptions,
1565
+ } from '@ai-sdk/openai';
1566
+ import { streamText } from 'ai';
1567
+
1568
+ const result = streamText({
1569
+ model: openai.responses('gpt-5.2'),
1570
+ messages: conversationHistory,
1571
+ providerOptions: {
1572
+ openai: {
1573
+ store: false,
1574
+ contextManagement: [{ type: 'compaction', compactThreshold: 50000 }],
1575
+ } satisfies OpenAILanguageModelResponsesOptions,
1576
+ },
1577
+ });
1578
+
1579
+ for await (const part of result.fullStream) {
1580
+ switch (part.type) {
1581
+ case 'text-start': {
1582
+ const isCompaction = part.providerMetadata?.openai?.type === 'compaction';
1583
+ if (isCompaction) {
1584
+ // ... your logic
1585
+ }
1586
+ break;
1587
+ }
1588
+ case 'text-end': {
1589
+ const isCompaction = part.providerMetadata?.openai?.type === 'compaction';
1590
+ if (isCompaction) {
1591
+ // ... your logic
1592
+ }
1593
+ break;
1594
+ }
1595
+ case 'text-delta': {
1596
+ process.stdout.write(part.text);
1597
+ break;
1598
+ }
1599
+ }
1600
+ }
1601
+ ```
1602
+
1603
+ ##### Compaction in UI Applications
1604
+
1605
+ When using `useChat` or other UI hooks, compaction items appear as text parts with `providerMetadata`. You can detect and style them differently in your UI:
1606
+
1607
+ ```tsx
1608
+ {
1609
+ message.parts.map((part, index) => {
1610
+ if (part.type === 'text') {
1611
+ const isCompaction =
1612
+ (part.providerMetadata?.openai as { type?: string } | undefined)
1613
+ ?.type === 'compaction';
1614
+
1615
+ if (isCompaction) {
1616
+ return (
1617
+ <div
1618
+ key={index}
1619
+ className="bg-yellow-100 border-l-4 border-yellow-500 p-2"
1620
+ >
1621
+ <span className="font-bold">[Context Compacted]</span>
1622
+ <p className="text-sm text-yellow-700">
1623
+ The server compressed the conversation context to reduce token
1624
+ usage.
1625
+ </p>
1626
+ </div>
1627
+ );
1628
+ }
1629
+ return <div key={index}>{part.text}</div>;
1630
+ }
1631
+ });
1632
+ }
1633
+ ```
1634
+
1635
+ The metadata includes the following fields:
1636
+
1637
+ - **type** — Always `'compaction'`
1638
+ - **itemId** _string_ — The ID of the compaction item in the Responses API
1639
+ - **encryptedContent** _string_ (optional) — The encrypted compaction state. This is automatically sent back to the API when the message is included in subsequent requests.
1640
+
1378
1641
  ### Chat Models
1379
1642
 
1380
1643
  You can create models that call the [OpenAI chat API](https://platform.openai.com/docs/api-reference/chat) using the `.chat()` factory method.
@@ -1671,8 +1934,9 @@ const result = await generateText({
1671
1934
  text: 'Please describe the image.',
1672
1935
  },
1673
1936
  {
1674
- type: 'image',
1675
- image: readFileSync('./data/image.png'),
1937
+ type: 'file',
1938
+ mediaType: 'image',
1939
+ data: readFileSync('./data/image.png'),
1676
1940
  },
1677
1941
  ],
1678
1942
  },
@@ -1687,8 +1951,9 @@ You can also pass the URL of an image.
1687
1951
 
1688
1952
  ```ts
1689
1953
  {
1690
- type: 'image',
1691
- image: 'https://sample.edu/image.png',
1954
+ type: 'file',
1955
+ mediaType: 'image',
1956
+ data: 'https://sample.edu/image.png',
1692
1957
  }
1693
1958
  ```
1694
1959
 
@@ -1805,9 +2070,9 @@ const result = await generateText({
1805
2070
  content: [
1806
2071
  { type: 'text', text: 'Describe the image in detail.' },
1807
2072
  {
1808
- type: 'image',
1809
- image:
1810
- 'https://github.com/vercel/ai/blob/main/examples/ai-functions/data/comic-cat.png?raw=true',
2073
+ type: 'file',
2074
+ mediaType: 'image',
2075
+ data: 'https://github.com/vercel/ai/blob/main/examples/ai-functions/data/comic-cat.png?raw=true',
1811
2076
 
1812
2077
  // OpenAI specific options - image detail:
1813
2078
  providerOptions: {
@@ -2041,6 +2306,11 @@ The following optional provider options are available for OpenAI completion mode
2041
2306
 
2042
2307
  | Model | Image Input | Audio Input | Object Generation | Tool Usage |
2043
2308
  | --------------------- | ------------------- | ------------------- | ------------------- | ------------------- |
2309
+ | `gpt-5.4-pro` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2310
+ | `gpt-5.4` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2311
+ | `gpt-5.4-mini` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2312
+ | `gpt-5.4-nano` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2313
+ | `gpt-5.3-chat-latest` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2044
2314
  | `gpt-5.2-pro` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2045
2315
  | `gpt-5.2-chat-latest` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
2046
2316
  | `gpt-5.2` | <Check size={18} /> | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> |
package/package.json CHANGED
@@ -1,10 +1,10 @@
1
1
  {
2
2
  "name": "@ai-sdk/openai",
3
- "version": "4.0.0-beta.4",
3
+ "version": "4.0.0-beta.41",
4
+ "type": "module",
4
5
  "license": "Apache-2.0",
5
6
  "sideEffects": false,
6
7
  "main": "./dist/index.js",
7
- "module": "./dist/index.mjs",
8
8
  "types": "./dist/index.d.ts",
9
9
  "files": [
10
10
  "dist/**/*",
@@ -25,26 +25,25 @@
25
25
  "./package.json": "./package.json",
26
26
  ".": {
27
27
  "types": "./dist/index.d.ts",
28
- "import": "./dist/index.mjs",
29
- "require": "./dist/index.js"
28
+ "import": "./dist/index.js",
29
+ "default": "./dist/index.js"
30
30
  },
31
31
  "./internal": {
32
32
  "types": "./dist/internal/index.d.ts",
33
- "import": "./dist/internal/index.mjs",
34
- "module": "./dist/internal/index.mjs",
35
- "require": "./dist/internal/index.js"
33
+ "import": "./dist/internal/index.js",
34
+ "default": "./dist/internal/index.js"
36
35
  }
37
36
  },
38
37
  "dependencies": {
39
- "@ai-sdk/provider-utils": "5.0.0-beta.1",
40
- "@ai-sdk/provider": "4.0.0-beta.0"
38
+ "@ai-sdk/provider": "4.0.0-beta.14",
39
+ "@ai-sdk/provider-utils": "5.0.0-beta.29"
41
40
  },
42
41
  "devDependencies": {
43
42
  "@types/node": "20.17.24",
44
43
  "tsup": "^8",
45
44
  "typescript": "5.8.3",
46
45
  "zod": "3.25.76",
47
- "@ai-sdk/test-server": "2.0.0-beta.0",
46
+ "@ai-sdk/test-server": "2.0.0-beta.3",
48
47
  "@vercel/ai-tsconfig": "0.0.0"
49
48
  },
50
49
  "peerDependencies": {
@@ -54,12 +53,14 @@
54
53
  "node": ">=18"
55
54
  },
56
55
  "publishConfig": {
57
- "access": "public"
56
+ "access": "public",
57
+ "provenance": true
58
58
  },
59
59
  "homepage": "https://ai-sdk.dev/docs",
60
60
  "repository": {
61
61
  "type": "git",
62
- "url": "git+https://github.com/vercel/ai.git"
62
+ "url": "https://github.com/vercel/ai",
63
+ "directory": "packages/openai"
63
64
  },
64
65
  "bugs": {
65
66
  "url": "https://github.com/vercel/ai/issues"
@@ -71,9 +72,7 @@
71
72
  "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
72
73
  "build:watch": "pnpm clean && tsup --watch",
73
74
  "clean": "del-cli dist docs *.tsbuildinfo",
74
- "lint": "eslint \"./**/*.ts*\"",
75
75
  "type-check": "tsc --build",
76
- "prettier-check": "prettier --check \"./**/*.ts*\"",
77
76
  "test": "pnpm test:node && pnpm test:edge",
78
77
  "test:update": "pnpm test:node -u",
79
78
  "test:watch": "vitest --config vitest.node.config.js",
@@ -1,4 +1,4 @@
1
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
1
+ import type { LanguageModelV4Usage } from '@ai-sdk/provider';
2
2
 
3
3
  export type OpenAIChatUsage = {
4
4
  prompt_tokens?: number | null;
@@ -16,7 +16,7 @@ export type OpenAIChatUsage = {
16
16
 
17
17
  export function convertOpenAIChatUsage(
18
18
  usage: OpenAIChatUsage | undefined | null,
19
- ): LanguageModelV3Usage {
19
+ ): LanguageModelV4Usage {
20
20
  if (usage == null) {
21
21
  return {
22
22
  inputTokens: {