ai 4.0.6 → 4.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +19 -20
- package/dist/index.d.ts +19 -20
- package/dist/index.js +12 -10
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +12 -10
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 # ai
 
+## 4.0.8
+
+### Patch Changes
+
+- a803d76: feat (ai/core): pass toolCallId option into tool execute function
+
+## 4.0.7
+
+### Patch Changes
+
+- 5b4f07b: fix (ai/core): change default error message for data streams to "An error occurred."
+
 ## 4.0.6
 
 ### Patch Changes
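Both patches are small: 4.0.8 threads a `toolCallId` into the options object that tool `execute` callbacks receive, and 4.0.7 changes the default masked error message written to data streams. As a minimal sketch of the first change (the weather tool itself is hypothetical; only the option shape `{ toolCallId, messages, abortSignal }` comes from the typings diffed below):

```ts
import { tool } from 'ai';
import { z } from 'zod';

// Hypothetical tool; the execute options now carry the toolCallId of the
// call being executed, alongside the existing messages and abortSignal.
const weatherTool = tool({
  description: 'Get the weather for a city',
  parameters: z.object({ city: z.string() }),
  execute: async ({ city }, { toolCallId }) => {
    // toolCallId can be used to correlate this tool's output with
    // stream data or logs for this specific call.
    console.log(`tool call ${toolCallId}: looking up weather for ${city}`);
    return { city, temperature: 21 };
  },
});
```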
package/dist/index.d.mts
CHANGED
@@ -1178,6 +1178,21 @@ Callback that is called when the LLM response and the final object validation ar
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+interface ToolExecutionOptions {
+    /**
+     * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
+     */
+    toolCallId: string;
+    /**
+     * Messages that were sent to the language model to initiate the response that contained the tool call.
+     * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+     */
+    messages: CoreMessage[];
+    /**
+     * An optional abort signal that indicates that the overall operation should be aborted.
+     */
+    abortSignal?: AbortSignal;
+}
 /**
 A tool contains the description and the schema of the input that the tool expects.
 This enables the language model to generate the input.
@@ -1202,17 +1217,7 @@ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
 @args is the input of the tool call.
 @options.abortSignal is a signal that can be used to abort the tool call.
      */
-    execute?: (args: inferParameters<PARAMETERS>, options: {
-        /**
-         * Messages that were sent to the language model to initiate the response that contained the tool call.
-         * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
-         */
-        messages: CoreMessage[];
-        /**
-         * An optional abort signal that indicates that the overall operation should be aborted.
-         */
-        abortSignal?: AbortSignal;
-    }) => PromiseLike<RESULT>;
+    execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 } & ({
     /**
 Function tool.
@@ -1240,15 +1245,9 @@ The arguments for configuring the tool. Must match the expected arguments define
 Helper function for inferring the execute args of a tool.
  */
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
-    execute: (args: inferParameters<PARAMETERS>, options: {
-        messages: CoreMessage[];
-        abortSignal?: AbortSignal;
-    }) => PromiseLike<RESULT>;
+    execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 }): CoreTool<PARAMETERS, RESULT> & {
-    execute: (args: inferParameters<PARAMETERS>, options: {
-        messages: CoreMessage[];
-        abortSignal?: AbortSignal;
-    }) => PromiseLike<RESULT>;
+    execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 };
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
     execute?: undefined;
@@ -2289,4 +2288,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
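Because `ToolExecutionOptions` now appears in the public export list above, an execute implementation written outside of a `tool()` call can be typed against it instead of re-declaring the inline options object. A sketch (the search function and its argument shape are made up for illustration):

```ts
import type { ToolExecutionOptions } from 'ai';

// Hypothetical standalone execute implementation typed against the newly
// exported interface rather than an inline { messages; abortSignal } type.
async function searchExecute(
  args: { query: string },
  { toolCallId, messages, abortSignal }: ToolExecutionOptions,
): Promise<string[]> {
  if (abortSignal?.aborted) {
    return [];
  }
  // messages holds the CoreMessage[] that prompted this tool call
  // (without the system prompt or the assistant tool-call message).
  return [`"${args.query}" (call ${toolCallId}, ${messages.length} messages)`];
}
```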
package/dist/index.d.ts
CHANGED
@@ -1178,6 +1178,21 @@ Callback that is called when the LLM response and the final object validation ar
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+interface ToolExecutionOptions {
+    /**
+     * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
+     */
+    toolCallId: string;
+    /**
+     * Messages that were sent to the language model to initiate the response that contained the tool call.
+     * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+     */
+    messages: CoreMessage[];
+    /**
+     * An optional abort signal that indicates that the overall operation should be aborted.
+     */
+    abortSignal?: AbortSignal;
+}
 /**
 A tool contains the description and the schema of the input that the tool expects.
 This enables the language model to generate the input.
@@ -1202,17 +1217,7 @@ type CoreTool<PARAMETERS extends Parameters = any, RESULT = any> = {
 @args is the input of the tool call.
 @options.abortSignal is a signal that can be used to abort the tool call.
      */
-    execute?: (args: inferParameters<PARAMETERS>, options: {
-        /**
-         * Messages that were sent to the language model to initiate the response that contained the tool call.
-         * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
-         */
-        messages: CoreMessage[];
-        /**
-         * An optional abort signal that indicates that the overall operation should be aborted.
-         */
-        abortSignal?: AbortSignal;
-    }) => PromiseLike<RESULT>;
+    execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 } & ({
     /**
 Function tool.
@@ -1240,15 +1245,9 @@ The arguments for configuring the tool. Must match the expected arguments define
 Helper function for inferring the execute args of a tool.
  */
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
-    execute: (args: inferParameters<PARAMETERS>, options: {
-        messages: CoreMessage[];
-        abortSignal?: AbortSignal;
-    }) => PromiseLike<RESULT>;
+    execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 }): CoreTool<PARAMETERS, RESULT> & {
-    execute: (args: inferParameters<PARAMETERS>, options: {
-        messages: CoreMessage[];
-        abortSignal?: AbortSignal;
-    }) => PromiseLike<RESULT>;
+    execute: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
 };
 declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
     execute?: undefined;
@@ -2289,4 +2288,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
package/dist/index.js
CHANGED
@@ -3564,8 +3564,8 @@ async function executeTools({
   abortSignal
 }) {
   const toolResults = await Promise.all(
-    toolCalls.map(async (toolCall) => {
-      const tool2 = tools[toolCall.toolName];
+    toolCalls.map(async ({ toolCallId, toolName, args }) => {
+      const tool2 = tools[toolName];
       if ((tool2 == null ? void 0 : tool2.execute) == null) {
         return void 0;
       }
@@ -3578,16 +3578,17 @@ async function executeTools({
               operationId: "ai.toolCall",
               telemetry
             }),
-            "ai.toolCall.name": toolCall.toolName,
-            "ai.toolCall.id": toolCall.toolCallId,
+            "ai.toolCall.name": toolName,
+            "ai.toolCall.id": toolCallId,
             "ai.toolCall.args": {
-              output: () => JSON.stringify(toolCall.args)
+              output: () => JSON.stringify(args)
             }
           }
         }),
         tracer,
         fn: async (span) => {
-          const result2 = await tool2.execute(toolCall.args, {
+          const result2 = await tool2.execute(args, {
+            toolCallId,
            messages,
            abortSignal
          });
@@ -3608,9 +3609,9 @@ async function executeTools({
        }
      });
      return {
-       toolCallId: toolCall.toolCallId,
-       toolName: toolCall.toolName,
-       args: toolCall.args,
+       toolCallId,
+       toolName,
+       args,
        result
      };
    })
@@ -3833,6 +3834,7 @@ function runToolsTransformation({
            }),
            tracer,
            fn: async (span) => tool2.execute(toolCall.args, {
+             toolCallId: toolCall.toolCallId,
              messages,
              abortSignal
            }).then(
@@ -4558,7 +4560,7 @@ var DefaultStreamTextResult = class {
    });
  }
  toDataStreamInternal({
-   getErrorMessage: getErrorMessage3 = () => "",
+   getErrorMessage: getErrorMessage3 = () => "An error occurred.",
    // mask error messages for safety by default
    sendUsage = true
  } = {}) {
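The last hunk is the 4.0.7 change: errors on a data stream are now masked as "An error occurred." instead of an empty string. Callers who want to forward error details can still supply their own `getErrorMessage`; a sketch assuming the public `toDataStreamResponse` option and a placeholder OpenAI provider and model (neither is part of this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Route handler sketch: override the masked default error message when
// converting the text stream into a data stream response.
export async function POST(req: Request) {
  const { prompt } = await req.json();

  const result = streamText({
    model: openai('gpt-4o-mini'), // placeholder model id
    prompt,
  });

  return result.toDataStreamResponse({
    // Without this, stream errors surface to clients as "An error occurred."
    getErrorMessage: error =>
      error instanceof Error ? error.message : String(error),
  });
}
```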