ai 3.3.20 → 3.3.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +49 -21
- package/dist/index.d.ts +49 -21
- package/dist/index.js +401 -210
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +401 -208
- package/dist/index.mjs.map +1 -1
- package/package.json +8 -8
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 # ai
 
+## 3.3.21
+
+### Patch Changes
+
+- aa2dc58: feat (ai/core): add maxToolRoundtrips to streamText
+- Updated dependencies [aa2dc58]
+  - @ai-sdk/ui-utils@0.0.40
+  - @ai-sdk/react@0.0.53
+  - @ai-sdk/solid@0.0.43
+  - @ai-sdk/svelte@0.0.45
+  - @ai-sdk/vue@0.0.45
+
 ## 3.3.20
 
 ### Patch Changes
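
The one feature in this release is the new `maxToolRoundtrips` option on `streamText` (see the type changes below). A minimal usage sketch, assuming an OpenAI provider from `@ai-sdk/openai` and a made-up `weather` tool; neither the provider nor the tool is part of this diff:

```ts
import { streamText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// Sketch only: model, tool, and prompt are illustrative placeholders.
const result = await streamText({
  model: openai('gpt-4o-mini'),
  // Tools need an `execute` function for automatic roundtrips to trigger.
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperatureC: 21 }),
    }),
  },
  // Allow up to 2 automatic follow-up LLM calls once all tool calls of the
  // last assistant message have results. Default is 0 (feature disabled).
  maxToolRoundtrips: 2,
  prompt: 'What is the weather in Berlin?',
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```
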
package/dist/index.d.mts
CHANGED
@@ -1,5 +1,5 @@
 import { Schema, DeepPartial, ToolInvocation, Attachment, JSONValue as JSONValue$1, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
-export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema,
+export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
 import { AttributeValue } from '@opentelemetry/api';
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
@@ -1319,47 +1319,54 @@ By default, it's set to 0, which will disable the feature.
 */
 experimental_telemetry?: TelemetrySettings;
 }): Promise<GenerateTextResult<TOOLS>>;
-/**
- * @deprecated Use `generateText` instead.
- */
-declare const experimental_generateText: typeof generateText;
 
 /**
 A result object for accessing different stream types and additional information.
 */
 interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 /**
-
+Warnings from the model provider (e.g. unsupported settings) for the first roundtrip.
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-
+The total token usage of the generated response.
+When there are multiple roundtrips, the usage is the sum of all roundtrip usages.
+
+Resolved when the response is finished.
 */
 readonly usage: Promise<CompletionTokenUsage$1>;
 /**
-
+The reason why the generation finished. Taken from the last roundtrip.
+
+Resolved when the response is finished.
 */
 readonly finishReason: Promise<FinishReason>;
 /**
-Additional provider-specific metadata
-from the provider to the AI SDK and
-results that can be fully encapsulated in the provider.
+Additional provider-specific metadata from the last roundtrip.
+Metadata is passed through from the provider to the AI SDK and
+enables provider-specific results that can be fully encapsulated in the provider.
 */
 readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
 /**
-
+The full text that has been generated by the last roundtrip.
+
+Resolved when the response is finished.
 */
 readonly text: Promise<string>;
 /**
-
+The tool calls that have been executed in the last roundtrip.
+
+Resolved when the response is finished.
 */
 readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
 /**
-
+The tool results that have been generated in the last roundtrip.
+
+Resolved when the all tool executions are finished.
 */
 readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
 /**
-
+Optional raw response data.
 */
 readonly rawResponse?: {
 /**
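
The reworked TSDoc above pins down the multi-roundtrip semantics of `StreamTextResult`: `usage` is summed across roundtrips, while `text`, `finishReason`, `toolCalls`, and `toolResults` refer to the last roundtrip. A sketch of reading those promises, assuming `result` comes from a `streamText` call like the one shown after the changelog:

```ts
// Drain the stream first; the promises below resolve once the response
// (and, for toolResults, all tool executions) has finished.
for await (const _ of result.textStream) {
  // no-op: consuming the stream drives the roundtrips
}

const usage = await result.usage;               // summed over all roundtrips
const finishReason = await result.finishReason; // taken from the last roundtrip
const text = await result.text;                 // text of the last roundtrip
const toolResults = await result.toolResults;   // tool results of the last roundtrip

console.log({ usage, finishReason, text, toolResults });
```
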
@@ -1485,6 +1492,16 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 } | ({
 type: 'tool-result';
 } & ToToolResult<TOOLS>) | {
+type: 'roundtrip-finish';
+finishReason: FinishReason;
+logprobs?: LogProbs;
+usage: {
+promptTokens: number;
+completionTokens: number;
+totalTokens: number;
+};
+experimental_providerMetadata?: ProviderMetadata;
+} | {
 type: 'finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
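
`TextStreamPart` gains a `roundtrip-finish` variant carrying the finish reason, usage, and optional logprobs/provider metadata for a completed roundtrip, alongside the existing `finish` part for the whole stream. A sketch of handling it while iterating `result.fullStream`, again assuming the `result` from the earlier sketch:

```ts
for await (const part of result.fullStream) {
  switch (part.type) {
    case 'text-delta':
      process.stdout.write(part.textDelta);
      break;
    case 'tool-call':
      console.log('tool call:', part.toolName, part.args);
      break;
    case 'tool-result':
      console.log('tool result:', part.result);
      break;
    case 'roundtrip-finish':
      // Per-roundtrip bookkeeping added in 3.3.21: finishReason, usage,
      // optional logprobs and provider metadata for the roundtrip that ended.
      console.log('roundtrip done:', part.finishReason, part.usage);
      break;
    case 'finish':
      console.log('stream done:', part.finishReason, part.usage);
      break;
  }
}
```
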
@@ -1536,6 +1553,8 @@ If set and supported by the model, calls will generate deterministic results.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 
+@param maxToolRoundtrips - Maximal number of automatic roundtrips for tool calls.
+
 @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
 @param onFinish - Callback that is called when the LLM response and all request tool executions
 (for tools that have an `execute` function) are finished.
@@ -1543,7 +1562,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1557,6 +1576,19 @@ The tool choice strategy. Default: 'auto'.
 */
 toolChoice?: CoreToolChoice<TOOLS>;
 /**
+Maximal number of automatic roundtrips for tool calls.
+
+An automatic tool call roundtrip is another LLM call with the
+tool call results when all tool calls of the last assistant
+message have results.
+
+A maximum number is required to prevent infinite loops in the
+case of misconfigured tools.
+
+By default, it's set to 0, which will disable the feature.
+*/
+maxToolRoundtrips?: number;
+/**
 Optional telemetry configuration (experimental).
 */
 experimental_telemetry?: TelemetrySettings;
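
The TSDoc above states the roundtrip condition: another LLM call happens only once every tool call of the last assistant message has a result, and results are produced automatically only for tools that define an `execute` function (per the `onFinish` docs in this file). A hedged contrast with purely illustrative tools:

```ts
import { tool } from 'ai';
import { z } from 'zod';

// With execute: the SDK can produce the tool result itself, so an automatic
// roundtrip can follow (up to maxToolRoundtrips).
const serverTool = tool({
  description: 'Look up a stock price',
  parameters: z.object({ symbol: z.string() }),
  execute: async ({ symbol }) => ({ symbol, price: 123.45 }),
});

// Without execute: the tool call is surfaced to the caller instead; no result
// is available on the server side, so no automatic roundtrip is triggered.
const clientTool = tool({
  description: 'Ask the user to confirm an action',
  parameters: z.object({ message: z.string() }),
});
```
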
@@ -1618,10 +1650,6 @@ Callback that is called when the LLM response and all request tool executions
 readonly experimental_providerMetadata: ProviderMetadata | undefined;
 }) => Promise<void> | void;
 }): Promise<StreamTextResult<TOOLS>>;
-/**
- * @deprecated Use `streamText` instead.
- */
-declare const experimental_streamText: typeof streamText;
 
 /**
 * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
@@ -2735,4 +2763,4 @@ declare const generateId: (size?: number) => string;
 */
 declare const nanoid: (size?: number) => string;
 
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MessageConversionError, MistralStream, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, Provider, ProviderMetadata, ReplicateStream, RetryError, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject,
+export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MessageConversionError, MistralStream, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, Provider, ProviderMetadata, ReplicateStream, RetryError, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject, experimental_streamObject, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { Schema, DeepPartial, ToolInvocation, Attachment, JSONValue as JSONValue$1, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
-export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema,
+export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
 import { AttributeValue } from '@opentelemetry/api';
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
@@ -1319,47 +1319,54 @@ By default, it's set to 0, which will disable the feature.
 */
 experimental_telemetry?: TelemetrySettings;
 }): Promise<GenerateTextResult<TOOLS>>;
-/**
- * @deprecated Use `generateText` instead.
- */
-declare const experimental_generateText: typeof generateText;
 
 /**
 A result object for accessing different stream types and additional information.
 */
 interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 /**
-
+Warnings from the model provider (e.g. unsupported settings) for the first roundtrip.
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-
+The total token usage of the generated response.
+When there are multiple roundtrips, the usage is the sum of all roundtrip usages.
+
+Resolved when the response is finished.
 */
 readonly usage: Promise<CompletionTokenUsage$1>;
 /**
-
+The reason why the generation finished. Taken from the last roundtrip.
+
+Resolved when the response is finished.
 */
 readonly finishReason: Promise<FinishReason>;
 /**
-Additional provider-specific metadata
-from the provider to the AI SDK and
-results that can be fully encapsulated in the provider.
+Additional provider-specific metadata from the last roundtrip.
+Metadata is passed through from the provider to the AI SDK and
+enables provider-specific results that can be fully encapsulated in the provider.
 */
 readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
 /**
-
+The full text that has been generated by the last roundtrip.
+
+Resolved when the response is finished.
 */
 readonly text: Promise<string>;
 /**
-
+The tool calls that have been executed in the last roundtrip.
+
+Resolved when the response is finished.
 */
 readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
 /**
-
+The tool results that have been generated in the last roundtrip.
+
+Resolved when the all tool executions are finished.
 */
 readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
 /**
-
+Optional raw response data.
 */
 readonly rawResponse?: {
 /**
@@ -1485,6 +1492,16 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 } | ({
 type: 'tool-result';
 } & ToToolResult<TOOLS>) | {
+type: 'roundtrip-finish';
+finishReason: FinishReason;
+logprobs?: LogProbs;
+usage: {
+promptTokens: number;
+completionTokens: number;
+totalTokens: number;
+};
+experimental_providerMetadata?: ProviderMetadata;
+} | {
 type: 'finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
@@ -1536,6 +1553,8 @@ If set and supported by the model, calls will generate deterministic results.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
 
+@param maxToolRoundtrips - Maximal number of automatic roundtrips for tool calls.
+
 @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
 @param onFinish - Callback that is called when the LLM response and all request tool executions
 (for tools that have an `execute` function) are finished.
@@ -1543,7 +1562,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1557,6 +1576,19 @@ The tool choice strategy. Default: 'auto'.
 */
 toolChoice?: CoreToolChoice<TOOLS>;
 /**
+Maximal number of automatic roundtrips for tool calls.
+
+An automatic tool call roundtrip is another LLM call with the
+tool call results when all tool calls of the last assistant
+message have results.
+
+A maximum number is required to prevent infinite loops in the
+case of misconfigured tools.
+
+By default, it's set to 0, which will disable the feature.
+*/
+maxToolRoundtrips?: number;
+/**
 Optional telemetry configuration (experimental).
 */
 experimental_telemetry?: TelemetrySettings;
@@ -1618,10 +1650,6 @@ Callback that is called when the LLM response and all request tool executions
 readonly experimental_providerMetadata: ProviderMetadata | undefined;
 }) => Promise<void> | void;
 }): Promise<StreamTextResult<TOOLS>>;
-/**
- * @deprecated Use `streamText` instead.
- */
-declare const experimental_streamText: typeof streamText;
 
 /**
 * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
@@ -2735,4 +2763,4 @@ declare const generateId: (size?: number) => string;
 */
 declare const nanoid: (size?: number) => string;
 
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MessageConversionError, MistralStream, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, Provider, ProviderMetadata, ReplicateStream, RetryError, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject,
+export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MessageConversionError, MistralStream, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, Provider, ProviderMetadata, ReplicateStream, RetryError, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject, experimental_streamObject, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };