ai 5.0.157 → 5.0.158

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
1
1
  # ai
2
2
 
3
+ ## 5.0.158
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [946ef88]
8
+ - @ai-sdk/gateway@2.0.62
9
+
3
10
  ## 5.0.157
4
11
 
5
12
  ### Patch Changes
@@ -573,15 +580,15 @@
573
580
  This change replaces
574
581
 
575
582
  ```ts
576
- import { experimental_createMCPClient } from 'ai';
577
- import { Experimental_StdioMCPTransport } from 'ai/mcp-stdio';
583
+ import { experimental_createMCPClient } from "ai";
584
+ import { Experimental_StdioMCPTransport } from "ai/mcp-stdio";
578
585
  ```
579
586
 
580
587
  with
581
588
 
582
589
  ```ts
583
- import { experimental_createMCPClient } from '@ai-sdk/mcp';
584
- import { Experimental_StdioMCPTransport } from '@ai-sdk/mcp/mcp-stdio';
590
+ import { experimental_createMCPClient } from "@ai-sdk/mcp";
591
+ import { Experimental_StdioMCPTransport } from "@ai-sdk/mcp/mcp-stdio";
585
592
  ```
586
593
 
587
594
  ## 5.0.78
@@ -1204,7 +1211,7 @@
1204
1211
 
1205
1212
  ```js
1206
1213
  await generateImage({
1207
- model: luma.image('photon-flash-1', {
1214
+ model: luma.image("photon-flash-1", {
1208
1215
  maxImagesPerCall: 5,
1209
1216
  pollIntervalMillis: 500,
1210
1217
  }),
@@ -1217,7 +1224,7 @@
1217
1224
 
1218
1225
  ```js
1219
1226
  await generateImage({
1220
- model: luma.image('photon-flash-1'),
1227
+ model: luma.image("photon-flash-1"),
1221
1228
  prompt,
1222
1229
  n: 10,
1223
1230
  maxImagesPerCall: 5,
@@ -1417,10 +1424,10 @@
1417
1424
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
1418
1425
 
1419
1426
  ```js
1420
- const prompt = 'Santa Claus driving a Cadillac';
1427
+ const prompt = "Santa Claus driving a Cadillac";
1421
1428
 
1422
1429
  const { providerMetadata } = await experimental_generateImage({
1423
- model: openai.image('dall-e-3'),
1430
+ model: openai.image("dall-e-3"),
1424
1431
  prompt,
1425
1432
  });
1426
1433
 
@@ -2242,7 +2249,7 @@
2242
2249
 
2243
2250
  ```js
2244
2251
  await generateImage({
2245
- model: luma.image('photon-flash-1', {
2252
+ model: luma.image("photon-flash-1", {
2246
2253
  maxImagesPerCall: 5,
2247
2254
  pollIntervalMillis: 500,
2248
2255
  }),
@@ -2255,7 +2262,7 @@
2255
2262
 
2256
2263
  ```js
2257
2264
  await generateImage({
2258
- model: luma.image('photon-flash-1'),
2265
+ model: luma.image("photon-flash-1"),
2259
2266
  prompt,
2260
2267
  n: 10,
2261
2268
  maxImagesPerCall: 5,
@@ -2356,10 +2363,10 @@
2356
2363
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
2357
2364
 
2358
2365
  ```js
2359
- const prompt = 'Santa Claus driving a Cadillac';
2366
+ const prompt = "Santa Claus driving a Cadillac";
2360
2367
 
2361
2368
  const { providerMetadata } = await experimental_generateImage({
2362
- model: openai.image('dall-e-3'),
2369
+ model: openai.image("dall-e-3"),
2363
2370
  prompt,
2364
2371
  });
2365
2372
 
package/dist/index.js CHANGED
@@ -779,7 +779,7 @@ var import_provider_utils2 = require("@ai-sdk/provider-utils");
779
779
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
780
780
 
781
781
  // src/version.ts
782
- var VERSION = true ? "5.0.157" : "0.0.0-test";
782
+ var VERSION = true ? "5.0.158" : "0.0.0-test";
783
783
 
784
784
  // src/util/download/download.ts
785
785
  var download = async ({