ai 5.0.157 → 5.0.159

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
1
1
  # ai
2
2
 
3
+ ## 5.0.159
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [5e55f43]
8
+ - @ai-sdk/gateway@2.0.63
9
+
10
+ ## 5.0.158
11
+
12
+ ### Patch Changes
13
+
14
+ - Updated dependencies [946ef88]
15
+ - @ai-sdk/gateway@2.0.62
16
+
3
17
  ## 5.0.157
4
18
 
5
19
  ### Patch Changes
@@ -573,15 +587,15 @@
573
587
  This change replaces
574
588
 
575
589
  ```ts
576
- import { experimental_createMCPClient } from 'ai';
577
- import { Experimental_StdioMCPTransport } from 'ai/mcp-stdio';
590
+ import { experimental_createMCPClient } from "ai";
591
+ import { Experimental_StdioMCPTransport } from "ai/mcp-stdio";
578
592
  ```
579
593
 
580
594
  with
581
595
 
582
596
  ```ts
583
- import { experimental_createMCPClient } from '@ai-sdk/mcp';
584
- import { Experimental_StdioMCPTransport } from '@ai-sdk/mcp/mcp-stdio';
597
+ import { experimental_createMCPClient } from "@ai-sdk/mcp";
598
+ import { Experimental_StdioMCPTransport } from "@ai-sdk/mcp/mcp-stdio";
585
599
  ```
586
600
 
587
601
  ## 5.0.78
@@ -1204,7 +1218,7 @@
1204
1218
 
1205
1219
  ```js
1206
1220
  await generateImage({
1207
- model: luma.image('photon-flash-1', {
1221
+ model: luma.image("photon-flash-1", {
1208
1222
  maxImagesPerCall: 5,
1209
1223
  pollIntervalMillis: 500,
1210
1224
  }),
@@ -1217,7 +1231,7 @@
1217
1231
 
1218
1232
  ```js
1219
1233
  await generateImage({
1220
- model: luma.image('photon-flash-1'),
1234
+ model: luma.image("photon-flash-1"),
1221
1235
  prompt,
1222
1236
  n: 10,
1223
1237
  maxImagesPerCall: 5,
@@ -1417,10 +1431,10 @@
1417
1431
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
1418
1432
 
1419
1433
  ```js
1420
- const prompt = 'Santa Claus driving a Cadillac';
1434
+ const prompt = "Santa Claus driving a Cadillac";
1421
1435
 
1422
1436
  const { providerMetadata } = await experimental_generateImage({
1423
- model: openai.image('dall-e-3'),
1437
+ model: openai.image("dall-e-3"),
1424
1438
  prompt,
1425
1439
  });
1426
1440
 
@@ -2242,7 +2256,7 @@
2242
2256
 
2243
2257
  ```js
2244
2258
  await generateImage({
2245
- model: luma.image('photon-flash-1', {
2259
+ model: luma.image("photon-flash-1", {
2246
2260
  maxImagesPerCall: 5,
2247
2261
  pollIntervalMillis: 500,
2248
2262
  }),
@@ -2255,7 +2269,7 @@
2255
2269
 
2256
2270
  ```js
2257
2271
  await generateImage({
2258
- model: luma.image('photon-flash-1'),
2272
+ model: luma.image("photon-flash-1"),
2259
2273
  prompt,
2260
2274
  n: 10,
2261
2275
  maxImagesPerCall: 5,
@@ -2356,10 +2370,10 @@
2356
2370
  The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
2357
2371
 
2358
2372
  ```js
2359
- const prompt = 'Santa Claus driving a Cadillac';
2373
+ const prompt = "Santa Claus driving a Cadillac";
2360
2374
 
2361
2375
  const { providerMetadata } = await experimental_generateImage({
2362
- model: openai.image('dall-e-3'),
2376
+ model: openai.image("dall-e-3"),
2363
2377
  prompt,
2364
2378
  });
2365
2379
 
package/dist/index.js CHANGED
@@ -779,7 +779,7 @@ var import_provider_utils2 = require("@ai-sdk/provider-utils");
779
779
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
780
780
 
781
781
  // src/version.ts
782
- var VERSION = true ? "5.0.157" : "0.0.0-test";
782
+ var VERSION = true ? "5.0.159" : "0.0.0-test";
783
783
 
784
784
  // src/util/download/download.ts
785
785
  var download = async ({