@ai-sdk/provider 2.0.0-canary.9 → 2.1.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,316 @@
1
1
  # @ai-sdk/provider
2
2
 
3
+ ## 2.1.0-beta.0
4
+
5
+ ### Minor Changes
6
+
7
+ - 78928cb: release: start 5.1 beta
8
+
9
+ ## 2.0.0
10
+
11
+ ### Major Changes
12
+
13
+ - 742b7be: feat: forward id, streaming start, streaming end of content blocks
14
+ - 7cddb72: refactoring (provider): collapse provider defined tools into single definition
15
+ - ccce59b: feat (provider): support changing provider, model, supportedUrls in middleware
16
+ - e2b9e4b: feat (provider): add name for provider defined tools for future validation
17
+ - 95857aa: chore: restructure language model supported urls
18
+ - 6f6bb89: chore (provider): cleanup request and rawRequest (language model v2)
19
+ - d1a1aa1: chore (provider): merge rawRequest into request (language model v2)
20
+ - 63f9e9b: chore (provider,ai): tools have input/output instead of args,result
21
+ - d5f588f: AI SDK 5
22
+ - b6b43c7: chore: move warnings into stream-start part (spec)
23
+ - 411e483: chore (provider): refactor usage (language model v2)
24
+ - abf9a79: chore: rename mimeType to mediaType
25
+ - 14c9410: chore: refactor file towards source pattern (spec)
26
+ - e86be6f: chore: remove logprobs
27
+ - 0d06df6: chore (ai): remove v1 providers
28
+ - d9c98f4: chore: refactor reasoning parts (spec)
29
+ - a3f768e: chore: restructure reasoning support
30
+ - 7435eb5: feat: upgrade speech models to v2 specification
31
+ - 0054544: chore: refactor source parts (spec)
32
+ - 9e9c809: chore: refactor tool call and tool call delta parts (spec)
33
+ - 32831c6: chore: refactor text parts (spec)
34
+ - 6dc848c: chore (provider): remove image parts
35
+ - d0f9495: chore: refactor file parts (spec)
36
+ - 7979f7f: feat (provider): support reasoning tokens, cached input tokens, total token in usage information
37
+ - 44f4aba: feat: upgrade transcription models to v2 specification
38
+ - 7ea4132: chore: remove object generation mode
39
+ - 023ba40: feat (provider): support arbitrary media types in tool results
40
+ - e030615: chore (provider): remove prompt type from language model v2 spec
41
+ - 5e57fae: refactoring (provider): restructure tool result output
42
+ - c57e248: chore (provider): remove mode
43
+ - 3795467: chore: return content array from doGenerate (spec)
44
+ - 1766ede: chore: rename maxTokens to maxOutputTokens
45
+ - 33f4a6a: chore (provider): rename providerMetadata inputs to providerOptions
46
+
47
+ ### Patch Changes
48
+
49
+ - dc714f3: release alpha.4
50
+ - b5da06a: update to LanguageModelV2ProviderDefinedClientTool to add server side tool later on
51
+ - 48d257a: release alpha.15
52
+ - 0d2c085: chore (provider): tweak provider definition
53
+ - 9222aeb: release alpha.8
54
+ - e2aceaf: feat: add raw chunk support
55
+ - 7b3ae3f: chore (provider): change getSupportedUrls to supportedUrls (language model v2)
56
+ - a166433: feat: add transcription with experimental_transcribe
57
+ - 26735b5: chore(embedding-model): add v2 interface
58
+ - 443d8ec: feat(embedding-model-v2): add response body field
59
+ - a8c8bd5: feat(embed-many): respect supportsParallelCalls & concurrency
60
+ - 9bf7291: chore(providers/openai): enable structuredOutputs by default & switch to provider option
61
+ - 2e13791: feat(anthropic): add server-side web search support
62
+ - 472524a: spec (ai): add provider options to tools
63
+ - dd3ff01: chore: add language setting to speechv2
64
+ - 9301f86: refactor (image-model): rename `ImageModelV1` to `ImageModelV2`
65
+ - 0a87932: core (ai): change transcription model mimeType to mediaType
66
+ - c4a2fec: chore (provider): extract shared provider options and metadata (spec)
67
+ - 79457bd: chore (provider): extract LanguageModelV2File
68
+ - 8aa9e20: feat: add speech with experimental_generateSpeech
69
+ - 4617fab: chore(embedding-models): remove remaining settings
70
+ - cb68df0: feat: add transcription and speech model support to provider registry
71
+ - ad80501: chore (provider): allow both binary and base64 file content (spec)
72
+
73
+ Before
74
+
75
+ ```ts
76
+ import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils';
77
+
78
+ // Had to manually convert binary data to base64
79
+ const fileData = new Uint8Array([0, 1, 2, 3]);
80
+ const filePart = {
81
+ type: 'file',
82
+ mediaType: 'application/pdf',
83
+ data: convertUint8ArrayToBase64(fileData), // Required conversion
84
+ };
85
+ ```
86
+
87
+ After
88
+
89
+ ```ts
90
+ // Can use binary data directly
91
+ const fileData = new Uint8Array([0, 1, 2, 3]);
92
+ const filePart = {
93
+ type: 'file',
94
+ mediaType: 'application/pdf',
95
+ data: fileData, // Direct Uint8Array support
96
+ };
97
+ ```
98
+
99
+ - 68ecf2f: release alpha.13
100
+ - 6b98118: release alpha.3
101
+ - 3f2f00c: feat: `ImageModelV2#maxImagesPerCall` can be set to a function that returns a `number` or `undefined`, optionally as a promise
102
+
103
+ pull request: https://github.com/vercel/ai/pull/6343
104
+
105
+ - 9bd5ab5: feat (provider): add providerMetadata to ImageModelV2 interface (#5977)
106
+
107
+ The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
108
+
109
+ ```js
110
+ const prompt = 'Santa Claus driving a Cadillac';
111
+
112
+ const { providerMetadata } = await experimental_generateImage({
113
+ model: openai.image('dall-e-3'),
114
+ prompt,
115
+ });
116
+
117
+ const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt;
118
+
119
+ console.log({
120
+ prompt,
121
+ revisedPrompt,
122
+ });
123
+ ```
124
+
125
+ - 5c56081: release alpha.7
126
+ - fd65bc6: chore(embedding-model-v2): rename rawResponse to response
127
+ - 26535e0: release alpha.2
128
+ - 393138b: feat(embedding-model-v2): add providerOptions
129
+ - 7182d14: Remove `Experimental_LanguageModelV2Middleware` type
130
+ - c1e6647: release alpha.11
131
+ - 811dff3: release alpha.9
132
+ - f10304b: feat(tool-calling): don't require the user to have to pass parameters
133
+ - 27deb4d: feat (provider/gateway): Add providerMetadata to embeddings response
134
+ - c4df419: release alpha.10
135
+
136
+ ## 2.0.0-beta.2
137
+
138
+ ### Patch Changes
139
+
140
+ - 27deb4d: feat (provider/gateway): Add providerMetadata to embeddings response
141
+
142
+ ## 2.0.0-beta.1
143
+
144
+ ### Major Changes
145
+
146
+ - 742b7be: feat: forward id, streaming start, streaming end of content blocks
147
+ - 7cddb72: refactoring (provider): collapse provider defined tools into single definition
148
+ - ccce59b: feat (provider): support changing provider, model, supportedUrls in middleware
149
+ - e2b9e4b: feat (provider): add name for provider defined tools for future validation
150
+ - 0d06df6: chore (ai): remove v1 providers
151
+ - 7435eb5: feat: upgrade speech models to v2 specification
152
+ - 44f4aba: feat: upgrade transcription models to v2 specification
153
+ - 023ba40: feat (provider): support arbitrary media types in tool results
154
+ - 5e57fae: refactoring (provider): restructure tool result output
155
+
156
+ ### Patch Changes
157
+
158
+ - 472524a: spec (ai): add provider options to tools
159
+ - dd3ff01: chore: add language setting to speechv2
160
+ - cb68df0: feat: add transcription and speech model support to provider registry
161
+
162
+ ## 2.0.0-alpha.15
163
+
164
+ ### Patch Changes
165
+
166
+ - 48d257a: release alpha.15
167
+
168
+ ## 2.0.0-alpha.14
169
+
170
+ ### Major Changes
171
+
172
+ - 63f9e9b: chore (provider,ai): tools have input/output instead of args,result
173
+
174
+ ### Patch Changes
175
+
176
+ - b5da06a: update to LanguageModelV2ProviderDefinedClientTool to add server side tool later on
177
+ - 2e13791: feat(anthropic): add server-side web search support
178
+
179
+ ## 2.0.0-alpha.13
180
+
181
+ ### Patch Changes
182
+
183
+ - 68ecf2f: release alpha.13
184
+
185
+ ## 2.0.0-alpha.12
186
+
187
+ ### Patch Changes
188
+
189
+ - e2aceaf: feat: add raw chunk support
190
+
191
+ ## 2.0.0-alpha.11
192
+
193
+ ### Patch Changes
194
+
195
+ - c1e6647: release alpha.11
196
+
197
+ ## 2.0.0-alpha.10
198
+
199
+ ### Patch Changes
200
+
201
+ - c4df419: release alpha.10
202
+
203
+ ## 2.0.0-alpha.9
204
+
205
+ ### Patch Changes
206
+
207
+ - 811dff3: release alpha.9
208
+
209
+ ## 2.0.0-alpha.8
210
+
211
+ ### Patch Changes
212
+
213
+ - 9222aeb: release alpha.8
214
+
215
+ ## 2.0.0-alpha.7
216
+
217
+ ### Patch Changes
218
+
219
+ - 5c56081: release alpha.7
220
+
221
+ ## 2.0.0-alpha.6
222
+
223
+ ### Patch Changes
224
+
225
+ - 0d2c085: chore (provider): tweak provider definition
226
+
227
+ ## 2.0.0-alpha.4
228
+
229
+ ### Patch Changes
230
+
231
+ - dc714f3: release alpha.4
232
+
233
+ ## 2.0.0-alpha.3
234
+
235
+ ### Patch Changes
236
+
237
+ - 6b98118: release alpha.3
238
+
239
+ ## 2.0.0-alpha.2
240
+
241
+ ### Patch Changes
242
+
243
+ - 26535e0: release alpha.2
244
+
245
+ ## 2.0.0-alpha.1
246
+
247
+ ### Patch Changes
248
+
249
+ - 3f2f00c: feat: `ImageModelV2#maxImagesPerCall` can be set to a function that returns a `number` or `undefined`, optionally as a promise
250
+
251
+ pull request: https://github.com/vercel/ai/pull/6343
252
+
253
+ ## 2.0.0-canary.14
254
+
255
+ ### Major Changes
256
+
257
+ - 7979f7f: feat (provider): support reasoning tokens, cached input tokens, total token in usage information
258
+
259
+ ### Patch Changes
260
+
261
+ - a8c8bd5: feat(embed-many): respect supportsParallelCalls & concurrency
262
+
263
+ ## 2.0.0-canary.13
264
+
265
+ ### Patch Changes
266
+
267
+ - 9bd5ab5: feat (provider): add providerMetadata to ImageModelV2 interface (#5977)
268
+
269
+ The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
270
+
271
+ ```js
272
+ const prompt = 'Santa Claus driving a Cadillac';
273
+
274
+ const { providerMetadata } = await experimental_generateImage({
275
+ model: openai.image('dall-e-3'),
276
+ prompt,
277
+ });
278
+
279
+ const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt;
280
+
281
+ console.log({
282
+ prompt,
283
+ revisedPrompt,
284
+ });
285
+ ```
286
+
287
+ ## 2.0.0-canary.12
288
+
289
+ ### Patch Changes
290
+
291
+ - 7b3ae3f: chore (provider): change getSupportedUrls to supportedUrls (language model v2)
292
+
293
+ ## 2.0.0-canary.11
294
+
295
+ ### Major Changes
296
+
297
+ - e030615: chore (provider): remove prompt type from language model v2 spec
298
+
299
+ ### Patch Changes
300
+
301
+ - 9bf7291: chore(providers/openai): enable structuredOutputs by default & switch to provider option
302
+ - 4617fab: chore(embedding-models): remove remaining settings
303
+
304
+ ## 2.0.0-canary.10
305
+
306
+ ### Major Changes
307
+
308
+ - a3f768e: chore: restructure reasoning support
309
+
310
+ ### Patch Changes
311
+
312
+ - 9301f86: refactor (image-model): rename `ImageModelV1` to `ImageModelV2`
313
+
3
314
  ## 2.0.0-canary.9
4
315
 
5
316
  ### Major Changes