@ai-sdk/provider 1.1.0 → 2.0.0-canary.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1242,6 +1242,857 @@ represents no support for object generation.
1242
1242
  */
1243
1243
  type LanguageModelV1ObjectGenerationMode = 'json' | 'tool' | undefined;
1244
1244
 
1245
+ /**
1246
+ A tool has a name, a description, and a set of parameters.
1247
+
1248
+ Note: this is **not** the user-facing tool definition. The AI SDK methods will
1249
+ map the user-facing tool definitions to this format.
1250
+ */
1251
+ type LanguageModelV2FunctionTool = {
1252
+ /**
1253
+ The type of the tool (always 'function').
1254
+ */
1255
+ type: 'function';
1256
+ /**
1257
+ The name of the tool. Unique within this model call.
1258
+ */
1259
+ name: string;
1260
+ /**
1261
+ A description of the tool. The language model uses this to understand the
1262
+ tool's purpose and to provide better completion suggestions.
1263
+ */
1264
+ description?: string;
1265
+ /**
1266
+ The parameters that the tool expects. The language model uses this to
1267
+ understand the tool's input requirements and to provide matching suggestions.
1268
+ */
1269
+ parameters: JSONSchema7;
1270
+ };
1271
+
1272
+ /**
1273
+ * Additional provider-specific options.
1274
+ * Options are additional input to the provider.
1275
+ * They are passed through to the provider from the AI SDK
1276
+ * and enable provider-specific functionality
1277
+ * that can be fully encapsulated in the provider.
1278
+ *
1279
+ * This enables us to quickly ship provider-specific functionality
1280
+ * without affecting the core AI SDK.
1281
+ *
1282
+ * The outer record is keyed by the provider name, and the inner
1283
+ * record is keyed by the provider-specific metadata key.
1284
+ *
1285
+ * ```ts
1286
+ * {
1287
+ * "anthropic": {
1288
+ * "cacheControl": { "type": "ephemeral" }
1289
+ * }
1290
+ * }
1291
+ * ```
1292
+ */
1293
+ type LanguageModelV2ProviderOptions = Record<string, Record<string, JSONValue>>;
1294
+
1295
+ /**
1296
+ A prompt is a list of messages.
1297
+
1298
+ Note: Not all models and prompt formats support multi-modal inputs and
1299
+ tool calls. The validation happens at runtime.
1300
+
1301
+ Note: This is not a user-facing prompt. The AI SDK methods will map the
1302
+ user-facing prompt types such as chat or instruction prompts to this format.
1303
+ */
1304
+ type LanguageModelV2Prompt = Array<LanguageModelV2Message>;
1305
+ type LanguageModelV2Message = ({
1306
+ role: 'system';
1307
+ content: string;
1308
+ } | {
1309
+ role: 'user';
1310
+ content: Array<LanguageModelV2TextPart | LanguageModelV2ImagePart | LanguageModelV2FilePart>;
1311
+ } | {
1312
+ role: 'assistant';
1313
+ content: Array<LanguageModelV2TextPart | LanguageModelV2FilePart | LanguageModelV2ReasoningPart | LanguageModelV2RedactedReasoningPart | LanguageModelV2ToolCallPart>;
1314
+ } | {
1315
+ role: 'tool';
1316
+ content: Array<LanguageModelV2ToolResultPart>;
1317
+ }) & {
1318
+ /**
1319
+ * Additional provider-specific options. They are passed through
1320
+ * to the provider from the AI SDK and enable provider-specific
1321
+ * functionality that can be fully encapsulated in the provider.
1322
+ */
1323
+ providerOptions?: LanguageModelV2ProviderOptions;
1324
+ };
1325
+ /**
1326
+ Text content part of a prompt. It contains a string of text.
1327
+ */
1328
+ interface LanguageModelV2TextPart {
1329
+ type: 'text';
1330
+ /**
1331
+ The text content.
1332
+ */
1333
+ text: string;
1334
+ /**
1335
+ * Additional provider-specific options. They are passed through
1336
+ * to the provider from the AI SDK and enable provider-specific
1337
+ * functionality that can be fully encapsulated in the provider.
1338
+ */
1339
+ providerOptions?: LanguageModelV2ProviderOptions;
1340
+ }
1341
+ /**
1342
+ Reasoning content part of a prompt. It contains a string of reasoning text.
1343
+ */
1344
+ interface LanguageModelV2ReasoningPart {
1345
+ type: 'reasoning';
1346
+ /**
1347
+ The reasoning text.
1348
+ */
1349
+ text: string;
1350
+ /**
1351
+ An optional signature for verifying that the reasoning originated from the model.
1352
+ */
1353
+ signature?: string;
1354
+ /**
1355
+ * Additional provider-specific options. They are passed through
1356
+ * to the provider from the AI SDK and enable provider-specific
1357
+ * functionality that can be fully encapsulated in the provider.
1358
+ */
1359
+ providerOptions?: LanguageModelV2ProviderOptions;
1360
+ }
1361
+ /**
1362
+ Redacted reasoning content part of a prompt.
1363
+ */
1364
+ interface LanguageModelV2RedactedReasoningPart {
1365
+ type: 'redacted-reasoning';
1366
+ /**
1367
+ Redacted reasoning data.
1368
+ */
1369
+ data: string;
1370
+ /**
1371
+ * Additional provider-specific options. They are passed through
1372
+ * to the provider from the AI SDK and enable provider-specific
1373
+ * functionality that can be fully encapsulated in the provider.
1374
+ */
1375
+ providerOptions?: LanguageModelV2ProviderOptions;
1376
+ }
1377
+ /**
1378
+ Image content part of a prompt. It contains an image.
1379
+ */
1380
+ interface LanguageModelV2ImagePart {
1381
+ type: 'image';
1382
+ /**
1383
+ Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
1384
+ */
1385
+ image: Uint8Array | URL;
1386
+ /**
1387
+ Optional mime type of the image.
1388
+ */
1389
+ mimeType?: string;
1390
+ /**
1391
+ * Additional provider-specific options. They are passed through
1392
+ * to the provider from the AI SDK and enable provider-specific
1393
+ * functionality that can be fully encapsulated in the provider.
1394
+ */
1395
+ providerOptions?: LanguageModelV2ProviderOptions;
1396
+ }
1397
+ /**
1398
+ File content part of a prompt. It contains a file.
1399
+ */
1400
+ interface LanguageModelV2FilePart {
1401
+ type: 'file';
1402
+ /**
1403
+ * Optional filename of the file.
1404
+ */
1405
+ filename?: string;
1406
+ /**
1407
+ File data as base64 encoded string or as a URL.
1408
+ */
1409
+ data: string | URL;
1410
+ /**
1411
+ Mime type of the file.
1412
+ */
1413
+ mimeType: string;
1414
+ /**
1415
+ * Additional provider-specific options. They are passed through
1416
+ * to the provider from the AI SDK and enable provider-specific
1417
+ * functionality that can be fully encapsulated in the provider.
1418
+ */
1419
+ providerOptions?: LanguageModelV2ProviderOptions;
1420
+ }
1421
+ /**
1422
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
1423
+ */
1424
+ interface LanguageModelV2ToolCallPart {
1425
+ type: 'tool-call';
1426
+ /**
1427
+ ID of the tool call. This ID is used to match the tool call with the tool result.
1428
+ */
1429
+ toolCallId: string;
1430
+ /**
1431
+ Name of the tool that is being called.
1432
+ */
1433
+ toolName: string;
1434
+ /**
1435
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
1436
+ */
1437
+ args: unknown;
1438
+ /**
1439
+ * Additional provider-specific options. They are passed through
1440
+ * to the provider from the AI SDK and enable provider-specific
1441
+ * functionality that can be fully encapsulated in the provider.
1442
+ */
1443
+ providerOptions?: LanguageModelV2ProviderOptions;
1444
+ }
1445
+ /**
1446
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
1447
+ */
1448
+ interface LanguageModelV2ToolResultPart {
1449
+ type: 'tool-result';
1450
+ /**
1451
+ ID of the tool call that this result is associated with.
1452
+ */
1453
+ toolCallId: string;
1454
+ /**
1455
+ Name of the tool that generated this result.
1456
+ */
1457
+ toolName: string;
1458
+ /**
1459
+ Result of the tool call. This is a JSON-serializable object.
1460
+ */
1461
+ result: unknown;
1462
+ /**
1463
+ Optional flag if the result is an error or an error message.
1464
+ */
1465
+ isError?: boolean;
1466
+ /**
1467
+ Tool results as an array of parts. This enables advanced tool results including images.
1468
+ When this is used, the `result` field should be ignored (if the provider supports content).
1469
+ */
1470
+ content?: Array<{
1471
+ type: 'text';
1472
+ /**
1473
+ Text content.
1474
+ */
1475
+ text: string;
1476
+ } | {
1477
+ type: 'image';
1478
+ /**
1479
+ base-64 encoded image data
1480
+ */
1481
+ data: string;
1482
+ /**
1483
+ Mime type of the image.
1484
+ */
1485
+ mimeType?: string;
1486
+ }>;
1487
+ /**
1488
+ * Additional provider-specific options. They are passed through
1489
+ * to the provider from the AI SDK and enable provider-specific
1490
+ * functionality that can be fully encapsulated in the provider.
1491
+ */
1492
+ providerOptions?: LanguageModelV2ProviderOptions;
1493
+ }
1494
+
1495
+ /**
1496
+ The configuration of a tool that is defined by the provider.
1497
+ */
1498
+ type LanguageModelV2ProviderDefinedTool = {
1499
+ /**
1500
+ The type of the tool (always 'provider-defined').
1501
+ */
1502
+ type: 'provider-defined';
1503
+ /**
1504
+ The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
1505
+ */
1506
+ id: `${string}.${string}`;
1507
+ /**
1508
+ The name of the tool. Unique within this model call.
1509
+ */
1510
+ name: string;
1511
+ /**
1512
+ The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
1513
+ */
1514
+ args: Record<string, unknown>;
1515
+ };
1516
+
1517
+ type LanguageModelV2ToolChoice = {
1518
+ type: 'auto';
1519
+ } | {
1520
+ type: 'none';
1521
+ } | {
1522
+ type: 'required';
1523
+ } | {
1524
+ type: 'tool';
1525
+ toolName: string;
1526
+ };
1527
+
1528
+ type LanguageModelV2CallOptions = {
1529
+ /**
1530
+ Whether the user provided the input as messages or as
1531
+ a prompt. This can help guide non-chat models in the
1532
+ expansion, because different expansions can be needed for
1533
+ chat/non-chat use cases.
1534
+ */
1535
+ inputFormat: 'messages' | 'prompt';
1536
+ /**
1537
+ A language mode prompt is a standardized prompt type.
1538
+
1539
+ Note: This is **not** the user-facing prompt. The AI SDK methods will map the
1540
+ user-facing prompt types such as chat or instruction prompts to this format.
1541
+ That approach allows us to evolve the user facing prompts without breaking
1542
+ the language model interface.
1543
+ */
1544
+ prompt: LanguageModelV2Prompt;
1545
+ /**
1546
+ Maximum number of tokens to generate.
1547
+ */
1548
+ maxTokens?: number;
1549
+ /**
1550
+ Temperature setting.
1551
+
1552
+ It is recommended to set either `temperature` or `topP`, but not both.
1553
+ */
1554
+ temperature?: number;
1555
+ /**
1556
+ Stop sequences.
1557
+ If set, the model will stop generating text when one of the stop sequences is generated.
1558
+ Providers may have limits on the number of stop sequences.
1559
+ */
1560
+ stopSequences?: string[];
1561
+ /**
1562
+ Nucleus sampling.
1563
+
1564
+ It is recommended to set either `temperature` or `topP`, but not both.
1565
+ */
1566
+ topP?: number;
1567
+ /**
1568
+ Only sample from the top K options for each subsequent token.
1569
+
1570
+ Used to remove "long tail" low probability responses.
1571
+ Recommended for advanced use cases only. You usually only need to use temperature.
1572
+ */
1573
+ topK?: number;
1574
+ /**
1575
+ Presence penalty setting. It affects the likelihood of the model to
1576
+ repeat information that is already in the prompt.
1577
+ */
1578
+ presencePenalty?: number;
1579
+ /**
1580
+ Frequency penalty setting. It affects the likelihood of the model
1581
+ to repeatedly use the same words or phrases.
1582
+ */
1583
+ frequencyPenalty?: number;
1584
+ /**
1585
+ Response format. The output can either be text or JSON. Default is text.
1586
+
1587
+ If JSON is selected, a schema can optionally be provided to guide the LLM.
1588
+ */
1589
+ responseFormat?: {
1590
+ type: 'text';
1591
+ } | {
1592
+ type: 'json';
1593
+ /**
1594
+ * JSON schema that the generated output should conform to.
1595
+ */
1596
+ schema?: JSONSchema7;
1597
+ /**
1598
+ * Name of output that should be generated. Used by some providers for additional LLM guidance.
1599
+ */
1600
+ name?: string;
1601
+ /**
1602
+ * Description of the output that should be generated. Used by some providers for additional LLM guidance.
1603
+ */
1604
+ description?: string;
1605
+ };
1606
+ /**
1607
+ The seed (integer) to use for random sampling. If set and supported
1608
+ by the model, calls will generate deterministic results.
1609
+ */
1610
+ seed?: number;
1611
+ /**
1612
+ The tools that are available for the model.
1613
+ */
1614
+ tools?: Array<LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool>;
1615
+ /**
1616
+ Specifies how the tool should be selected. Defaults to 'auto'.
1617
+ */
1618
+ toolChoice?: LanguageModelV2ToolChoice;
1619
+ /**
1620
+ Abort signal for cancelling the operation.
1621
+ */
1622
+ abortSignal?: AbortSignal;
1623
+ /**
1624
+ Additional HTTP headers to be sent with the request.
1625
+ Only applicable for HTTP-based providers.
1626
+ */
1627
+ headers?: Record<string, string | undefined>;
1628
+ /**
1629
+ * Additional provider-specific options. They are passed through
1630
+ * to the provider from the AI SDK and enable provider-specific
1631
+ * functionality that can be fully encapsulated in the provider.
1632
+ */
1633
+ providerOptions?: LanguageModelV2ProviderOptions;
1634
+ };
1635
+
1636
+ /**
1637
+ Warning from the model provider for this call. The call will proceed, but e.g.
1638
+ some settings might not be supported, which can lead to suboptimal results.
1639
+ */
1640
+ type LanguageModelV2CallWarning = {
1641
+ type: 'unsupported-setting';
1642
+ setting: Omit<keyof LanguageModelV2CallOptions, 'prompt'>;
1643
+ details?: string;
1644
+ } | {
1645
+ type: 'unsupported-tool';
1646
+ tool: LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool;
1647
+ details?: string;
1648
+ } | {
1649
+ type: 'other';
1650
+ message: string;
1651
+ };
1652
+
1653
+ /**
1654
+ Reason why a language model finished generating a response.
1655
+
1656
+ Can be one of the following:
1657
+ - `stop`: model generated stop sequence
1658
+ - `length`: model generated maximum number of tokens
1659
+ - `content-filter`: content filter violation stopped the model
1660
+ - `tool-calls`: model triggered tool calls
1661
+ - `error`: model stopped because of an error
1662
+ - `other`: model stopped for other reasons
1663
+ - `unknown`: the model has not transmitted a finish reason
1664
+ */
1665
+ type LanguageModelV2FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown';
1666
+
1667
+ type LanguageModelV2FunctionToolCall = {
1668
+ toolCallType: 'function';
1669
+ toolCallId: string;
1670
+ toolName: string;
1671
+ /**
1672
+ Stringified JSON object with the tool call arguments. Must match the
1673
+ parameters schema of the tool.
1674
+ */
1675
+ args: string;
1676
+ };
1677
+
1678
+ /**
1679
+ Log probabilities for each token and its top log probabilities.
1680
+ */
1681
+ type LanguageModelV2LogProbs = Array<{
1682
+ token: string;
1683
+ logprob: number;
1684
+ topLogprobs: Array<{
1685
+ token: string;
1686
+ logprob: number;
1687
+ }>;
1688
+ }>;
1689
+
1690
+ /**
1691
+ * Additional provider-specific metadata.
1692
+ * Metadata are additional outputs from the provider.
1693
+ * They are passed through to the provider from the AI SDK
1694
+ * and enable provider-specific functionality
1695
+ * that can be fully encapsulated in the provider.
1696
+ *
1697
+ * This enables us to quickly ship provider-specific functionality
1698
+ * without affecting the core AI SDK.
1699
+ *
1700
+ * The outer record is keyed by the provider name, and the inner
1701
+ * record is keyed by the provider-specific metadata key.
1702
+ *
1703
+ * ```ts
1704
+ * {
1705
+ * "anthropic": {
1706
+ * "cacheControl": { "type": "ephemeral" }
1707
+ * }
1708
+ * }
1709
+ * ```
1710
+ */
1711
+ type LanguageModelV2ProviderMetadata = Record<string, Record<string, JSONValue>>;
1712
+
1713
+ /**
1714
+ * A source that has been used as input to generate the response.
1715
+ */
1716
+ type LanguageModelV2Source = {
1717
+ /**
1718
+ * A URL source. This is returned by web search RAG models.
1719
+ */
1720
+ sourceType: 'url';
1721
+ /**
1722
+ * The ID of the source.
1723
+ */
1724
+ id: string;
1725
+ /**
1726
+ * The URL of the source.
1727
+ */
1728
+ url: string;
1729
+ /**
1730
+ * The title of the source.
1731
+ */
1732
+ title?: string;
1733
+ /**
1734
+ * Additional provider metadata for the source.
1735
+ */
1736
+ providerMetadata?: LanguageModelV2ProviderMetadata;
1737
+ };
1738
+
1739
+ /**
1740
+ Specification for a language model that implements the language model interface version 2.
1741
+ */
1742
+ type LanguageModelV2 = {
1743
+ /**
1744
+ The language model must specify which language model interface
1745
+ version it implements. This will allow us to evolve the language
1746
+ model interface and retain backwards compatibility. The different
1747
+ implementation versions can be handled as a discriminated union
1748
+ on our side.
1749
+ */
1750
+ readonly specificationVersion: 'v2';
1751
+ /**
1752
+ Name of the provider for logging purposes.
1753
+ */
1754
+ readonly provider: string;
1755
+ /**
1756
+ Provider-specific model ID for logging purposes.
1757
+ */
1758
+ readonly modelId: string;
1759
+ /**
1760
+ Default object generation mode that should be used with this model when
1761
+ no mode is specified. Should be the mode with the best results for this
1762
+ model. `undefined` can be returned if object generation is not supported.
1763
+
1764
+ This is needed to generate the best objects possible w/o requiring the
1765
+ user to explicitly specify the object generation mode.
1766
+ */
1767
+ readonly defaultObjectGenerationMode: LanguageModelV2ObjectGenerationMode;
1768
+ /**
1769
+ Flag whether this model supports image URLs. Default is `true`.
1770
+
1771
+ When the flag is set to `false`, the AI SDK will download the image and
1772
+ pass the image data to the model.
1773
+ */
1774
+ readonly supportsImageUrls?: boolean;
1775
+ /**
1776
+ Flag whether this model supports grammar-guided generation,
1777
+ i.e. follows JSON schemas for object generation
1778
+ when the response format is set to 'json' or
1779
+ when the `object-json` mode is used.
1780
+
1781
+ This means that the model guarantees that the generated JSON
1782
+ will be a valid JSON object AND that the object will match the
1783
+ JSON schema.
1784
+
1785
+ Please note that `generateObject` and `streamObject` will work
1786
+ regardless of this flag, but might send different prompts and
1787
+ use further optimizations if this flag is set to `true`.
1788
+
1789
+ Defaults to `false`.
1790
+ */
1791
+ readonly supportsStructuredOutputs?: boolean;
1792
+ /**
1793
+ Checks if the model supports the given URL for file parts natively.
1794
+ If the model does not support the URL,
1795
+ the AI SDK will download the file and pass the file data to the model.
1796
+
1797
+ When undefined, the AI SDK will download the file.
1798
+ */
1799
+ supportsUrl?(url: URL): boolean;
1800
+ /**
1801
+ Generates a language model output (non-streaming).
1802
+
1803
+ Naming: "do" prefix to prevent accidental direct usage of the method
1804
+ by the user.
1805
+ */
1806
+ doGenerate(options: LanguageModelV2CallOptions): PromiseLike<{
1807
+ /**
1808
+ Text that the model has generated.
1809
+ Can be undefined if the model did not generate any text.
1810
+ */
1811
+ text?: string;
1812
+ /**
1813
+ Reasoning that the model has generated.
1814
+ Can be undefined if the model does not support reasoning.
1815
+ */
1816
+ reasoning?: string | Array<{
1817
+ type: 'text';
1818
+ text: string;
1819
+ /**
1820
+ An optional signature for verifying that the reasoning originated from the model.
1821
+ */
1822
+ signature?: string;
1823
+ } | {
1824
+ type: 'redacted';
1825
+ data: string;
1826
+ }>;
1827
+ /**
1828
+ Generated files as base64 encoded strings or binary data.
1829
+ The files should be returned without any unnecessary conversion.
1830
+ If the API returns base64 encoded strings, the files should be returned
1831
+ as base64 encoded strings. If the API returns binary data, the files should
1832
+ be returned as binary data.
1833
+ */
1834
+ files?: Array<{
1835
+ data: string | Uint8Array;
1836
+ mimeType: string;
1837
+ }>;
1838
+ /**
1839
+ Tool calls that the model has generated.
1840
+ Can be undefined if the model did not generate any tool calls.
1841
+ */
1842
+ toolCalls?: Array<LanguageModelV2FunctionToolCall>;
1843
+ /**
1844
+ Finish reason.
1845
+ */
1846
+ finishReason: LanguageModelV2FinishReason;
1847
+ /**
1848
+ Usage information.
1849
+ */
1850
+ usage: {
1851
+ promptTokens: number;
1852
+ completionTokens: number;
1853
+ };
1854
+ /**
1855
+ Raw prompt and setting information for observability provider integration.
1856
+ */
1857
+ rawCall: {
1858
+ /**
1859
+ Raw prompt after expansion and conversion to the format that the
1860
+ provider uses to send the information to their API.
1861
+ */
1862
+ rawPrompt: unknown;
1863
+ /**
1864
+ Raw settings that are used for the API call. Includes provider-specific
1865
+ settings.
1866
+ */
1867
+ rawSettings: Record<string, unknown>;
1868
+ };
1869
+ /**
1870
+ Optional response information for telemetry and debugging purposes.
1871
+ */
1872
+ rawResponse?: {
1873
+ /**
1874
+ Response headers.
1875
+ */
1876
+ headers?: Record<string, string>;
1877
+ /**
1878
+ Response body.
1879
+ */
1880
+ body?: unknown;
1881
+ };
1882
+ /**
1883
+ Optional request information for telemetry and debugging purposes.
1884
+ */
1885
+ request?: {
1886
+ /**
1887
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
1888
+ Non-HTTP(s) providers should not set this.
1889
+ */
1890
+ body?: string;
1891
+ };
1892
+ /**
1893
+ Optional response information for telemetry and debugging purposes.
1894
+ */
1895
+ response?: {
1896
+ /**
1897
+ ID for the generated response, if the provider sends one.
1898
+ */
1899
+ id?: string;
1900
+ /**
1901
+ Timestamp for the start of the generated response, if the provider sends one.
1902
+ */
1903
+ timestamp?: Date;
1904
+ /**
1905
+ The ID of the response model that was used to generate the response, if the provider sends one.
1906
+ */
1907
+ modelId?: string;
1908
+ };
1909
+ warnings?: LanguageModelV2CallWarning[];
1910
+ /**
1911
+ Additional provider-specific metadata. They are passed through
1912
+ from the provider to the AI SDK and enable provider-specific
1913
+ results that can be fully encapsulated in the provider.
1914
+ */
1915
+ providerMetadata?: LanguageModelV2ProviderMetadata;
1916
+ /**
1917
+ Sources that have been used as input to generate the response.
1918
+ */
1919
+ sources?: LanguageModelV2Source[];
1920
+ /**
1921
+ Logprobs for the completion.
1922
+ `undefined` if the model does not support logprobs or if it was not enabled
1923
+
1924
+ @deprecated will be changed into a provider-specific extension in v2
1925
+ */
1926
+ logprobs?: LanguageModelV2LogProbs;
1927
+ }>;
1928
+ /**
1929
+ Generates a language model output (streaming).
1930
+
1931
+ Naming: "do" prefix to prevent accidental direct usage of the method
1932
+ by the user.
1933
+ *
1934
+ @return A stream of higher-level language model output parts.
1935
+ */
1936
+ doStream(options: LanguageModelV2CallOptions): PromiseLike<{
1937
+ stream: ReadableStream<LanguageModelV2StreamPart>;
1938
+ /**
1939
+ Raw prompt and setting information for observability provider integration.
1940
+ */
1941
+ rawCall: {
1942
+ /**
1943
+ Raw prompt after expansion and conversion to the format that the
1944
+ provider uses to send the information to their API.
1945
+ */
1946
+ rawPrompt: unknown;
1947
+ /**
1948
+ Raw settings that are used for the API call. Includes provider-specific
1949
+ settings.
1950
+ */
1951
+ rawSettings: Record<string, unknown>;
1952
+ };
1953
+ /**
1954
+ Optional raw response data.
1955
+ */
1956
+ rawResponse?: {
1957
+ /**
1958
+ Response headers.
1959
+ */
1960
+ headers?: Record<string, string>;
1961
+ };
1962
+ /**
1963
+ Optional request information for telemetry and debugging purposes.
1964
+ */
1965
+ request?: {
1966
+ /**
1967
+ Raw request HTTP body that was sent to the provider API as a string (JSON should be stringified).
1968
+ Non-HTTP(s) providers should not set this.
1969
+ */
1970
+ body?: string;
1971
+ };
1972
+ /**
1973
+ Warnings for the call, e.g. unsupported settings.
1974
+ */
1975
+ warnings?: Array<LanguageModelV2CallWarning>;
1976
+ }>;
1977
+ };
1978
+ type LanguageModelV2StreamPart = {
1979
+ type: 'text-delta';
1980
+ textDelta: string;
1981
+ } | {
1982
+ type: 'reasoning';
1983
+ textDelta: string;
1984
+ } | {
1985
+ type: 'reasoning-signature';
1986
+ signature: string;
1987
+ } | {
1988
+ type: 'redacted-reasoning';
1989
+ data: string;
1990
+ } | {
1991
+ type: 'source';
1992
+ source: LanguageModelV2Source;
1993
+ } | {
1994
+ type: 'file';
1995
+ mimeType: string;
1996
+ /**
1997
+ Generated file data as base64 encoded strings or binary data.
1998
+ The file data should be returned without any unnecessary conversion.
1999
+ If the API returns base64 encoded strings, the file data should be returned
2000
+ as base64 encoded strings. If the API returns binary data, the file data should
2001
+ be returned as binary data.
2002
+ */
2003
+ data: string | Uint8Array;
2004
+ } | ({
2005
+ type: 'tool-call';
2006
+ } & LanguageModelV2FunctionToolCall) | {
2007
+ type: 'tool-call-delta';
2008
+ toolCallType: 'function';
2009
+ toolCallId: string;
2010
+ toolName: string;
2011
+ argsTextDelta: string;
2012
+ } | {
2013
+ type: 'response-metadata';
2014
+ id?: string;
2015
+ timestamp?: Date;
2016
+ modelId?: string;
2017
+ } | {
2018
+ type: 'finish';
2019
+ finishReason: LanguageModelV2FinishReason;
2020
+ providerMetadata?: LanguageModelV2ProviderMetadata;
2021
+ usage: {
2022
+ promptTokens: number;
2023
+ completionTokens: number;
2024
+ };
2025
+ logprobs?: LanguageModelV2LogProbs;
2026
+ } | {
2027
+ type: 'error';
2028
+ error: unknown;
2029
+ };
2030
+ /**
2031
+ The object generation modes available for use with a model. `undefined`
2032
+ represents no support for object generation.
2033
+ */
2034
+ type LanguageModelV2ObjectGenerationMode = 'json' | 'tool' | undefined;
2035
+
2036
+ /**
2037
+ * Experimental middleware for LanguageModelV2.
2038
+ * This type defines the structure for middleware that can be used to modify
2039
+ * the behavior of LanguageModelV2 operations.
2040
+ */
2041
+ type LanguageModelV2Middleware = {
2042
+ /**
2043
+ * Middleware specification version. Use `v2` for the current version.
2044
+ */
2045
+ middlewareVersion?: 'v2' | undefined;
2046
+ /**
2047
+ * Transforms the parameters before they are passed to the language model.
2048
+ * @param options - Object containing the type of operation and the parameters.
2049
+ * @param options.type - The type of operation ('generate' or 'stream').
2050
+ * @param options.params - The original parameters for the language model call.
2051
+ * @returns A promise that resolves to the transformed parameters.
2052
+ */
2053
+ transformParams?: (options: {
2054
+ type: 'generate' | 'stream';
2055
+ params: LanguageModelV2CallOptions;
2056
+ }) => PromiseLike<LanguageModelV2CallOptions>;
2057
+ /**
2058
+ * Wraps the generate operation of the language model.
2059
+ * @param options - Object containing the generate function, parameters, and model.
2060
+ * @param options.doGenerate - The original generate function.
2061
+ * @param options.doStream - The original stream function.
2062
+ * @param options.params - The parameters for the generate call. If the
2063
+ * `transformParams` middleware is used, this will be the transformed parameters.
2064
+ * @param options.model - The language model instance.
2065
+ * @returns A promise that resolves to the result of the generate operation.
2066
+ */
2067
+ wrapGenerate?: (options: {
2068
+ doGenerate: () => ReturnType<LanguageModelV2['doGenerate']>;
2069
+ doStream: () => ReturnType<LanguageModelV2['doStream']>;
2070
+ params: LanguageModelV2CallOptions;
2071
+ model: LanguageModelV2;
2072
+ }) => Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
2073
+ /**
2074
+ * Wraps the stream operation of the language model.
2075
+ *
2076
+ * @param options - Object containing the stream function, parameters, and model.
2077
+ * @param options.doGenerate - The original generate function.
2078
+ * @param options.doStream - The original stream function.
2079
+ * @param options.params - The parameters for the stream call. If the
2080
+ * `transformParams` middleware is used, this will be the transformed parameters.
2081
+ * @param options.model - The language model instance.
2082
+ * @returns A promise that resolves to the result of the stream operation.
2083
+ */
2084
+ wrapStream?: (options: {
2085
+ doGenerate: () => ReturnType<LanguageModelV2['doGenerate']>;
2086
+ doStream: () => ReturnType<LanguageModelV2['doStream']>;
2087
+ params: LanguageModelV2CallOptions;
2088
+ model: LanguageModelV2;
2089
+ }) => PromiseLike<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
2090
+ };
2091
+ /**
2092
+ * @deprecated Use `LanguageModelV2Middleware` instead.
2093
+ */
2094
+ type Experimental_LanguageModelV2Middleware = LanguageModelV2Middleware;
2095
+
1245
2096
  /**
1246
2097
  * Provider for language, text embedding, and image generation models.
1247
2098
  */
@@ -1279,4 +2130,41 @@ interface ProviderV1 {
1279
2130
  readonly imageModel?: (modelId: string) => ImageModelV1;
1280
2131
  }
1281
2132
 
1282
- export { AISDKError, APICallError, type EmbeddingModelV1, type EmbeddingModelV1Embedding, EmptyResponseBodyError, type ImageModelV1, type ImageModelV1CallOptions, type ImageModelV1CallWarning, InvalidArgumentError, InvalidPromptError, InvalidResponseDataError, type JSONArray, type JSONObject, JSONParseError, type JSONValue, type LanguageModelV1, type LanguageModelV1CallOptions, type LanguageModelV1CallWarning, type LanguageModelV1FilePart, type LanguageModelV1FinishReason, type LanguageModelV1FunctionTool, type LanguageModelV1FunctionToolCall, type LanguageModelV1ImagePart, type LanguageModelV1LogProbs, type LanguageModelV1Message, type LanguageModelV1ObjectGenerationMode, type LanguageModelV1Prompt, type LanguageModelV1ProviderDefinedTool, type LanguageModelV1ProviderMetadata, type LanguageModelV1ReasoningPart, type LanguageModelV1RedactedReasoningPart, type LanguageModelV1Source, type LanguageModelV1StreamPart, type LanguageModelV1TextPart, type LanguageModelV1ToolCallPart, type LanguageModelV1ToolChoice, type LanguageModelV1ToolResultPart, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, type ProviderV1, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError, getErrorMessage, isJSONArray, isJSONObject, isJSONValue };
2133
+ /**
2134
+ * Provider for language, text embedding, and image generation models.
2135
+ */
2136
+ interface ProviderV2 {
2137
+ /**
2138
+ Returns the language model with the given id.
2139
+ The model id is then passed to the provider function to get the model.
2140
+
2141
+ @param {string} modelId - The id of the model to return.
2142
+
2143
+ @returns {LanguageModel} The language model associated with the id
2144
+
2145
+ @throws {NoSuchModelError} If no such model exists.
2146
+ */
2147
+ languageModel(modelId: string): LanguageModelV2;
2148
+ /**
2149
+ Returns the text embedding model with the given id.
2150
+ The model id is then passed to the provider function to get the model.
2151
+
2152
+ @param {string} modelId - The id of the model to return.
2153
+
2154
+ @returns {EmbeddingModel} The text embedding model associated with the id
2155
+
2156
+ @throws {NoSuchModelError} If no such model exists.
2157
+ */
2158
+ textEmbeddingModel(modelId: string): EmbeddingModelV1<string>;
2159
+ /**
2160
+ Returns the image model with the given id.
2161
+ The model id is then passed to the provider function to get the model.
2162
+
2163
+ @param {string} modelId - The id of the model to return.
2164
+
2165
+ @returns {ImageModel} The image model associated with the id
2166
+ */
2167
+ readonly imageModel: (modelId: string) => ImageModelV1;
2168
+ }
2169
+
2170
+ export { AISDKError, APICallError, type EmbeddingModelV1, type EmbeddingModelV1Embedding, EmptyResponseBodyError, type Experimental_LanguageModelV2Middleware, type ImageModelV1, type ImageModelV1CallOptions, type ImageModelV1CallWarning, InvalidArgumentError, InvalidPromptError, InvalidResponseDataError, type JSONArray, type JSONObject, JSONParseError, type JSONValue, type LanguageModelV1, type LanguageModelV1CallOptions, type LanguageModelV1CallWarning, type LanguageModelV1FilePart, type LanguageModelV1FinishReason, type LanguageModelV1FunctionTool, type LanguageModelV1FunctionToolCall, type LanguageModelV1ImagePart, type LanguageModelV1LogProbs, type LanguageModelV1Message, type LanguageModelV1ObjectGenerationMode, type LanguageModelV1Prompt, type LanguageModelV1ProviderDefinedTool, type LanguageModelV1ProviderMetadata, type LanguageModelV1ReasoningPart, type LanguageModelV1RedactedReasoningPart, type LanguageModelV1Source, type LanguageModelV1StreamPart, type LanguageModelV1TextPart, type LanguageModelV1ToolCallPart, type LanguageModelV1ToolChoice, type LanguageModelV1ToolResultPart, type LanguageModelV2, type LanguageModelV2CallOptions, type LanguageModelV2CallWarning, type LanguageModelV2FilePart, type LanguageModelV2FinishReason, type LanguageModelV2FunctionTool, type LanguageModelV2FunctionToolCall, type LanguageModelV2ImagePart, type LanguageModelV2LogProbs, type LanguageModelV2Message, type LanguageModelV2Middleware, type LanguageModelV2ObjectGenerationMode, type LanguageModelV2Prompt, type LanguageModelV2ProviderDefinedTool, type LanguageModelV2ProviderMetadata, type LanguageModelV2ProviderOptions, type LanguageModelV2ReasoningPart, type LanguageModelV2RedactedReasoningPart, type LanguageModelV2Source, type LanguageModelV2StreamPart, type LanguageModelV2TextPart, type LanguageModelV2ToolCallPart, type LanguageModelV2ToolChoice, type LanguageModelV2ToolResultPart, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, type 
ProviderV1, type ProviderV2, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError, getErrorMessage, isJSONArray, isJSONObject, isJSONValue };