ai 4.3.9 → 4.3.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +23 -1
- package/dist/index.d.ts +23 -1
- package/dist/index.js +42 -21
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +42 -21
- package/dist/index.mjs.map +1 -1
- package/mcp-stdio/create-child-process.test.ts +6 -5
- package/mcp-stdio/create-child-process.ts +2 -2
- package/mcp-stdio/dist/index.js +3 -3
- package/mcp-stdio/dist/index.js.map +1 -1
- package/mcp-stdio/dist/index.mjs +3 -3
- package/mcp-stdio/dist/index.mjs.map +1 -1
- package/mcp-stdio/mcp-stdio-transport.test.ts +1 -14
- package/mcp-stdio/mcp-stdio-transport.ts +2 -2
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -4106,6 +4106,7 @@ async function generateText({
   experimental_providerMetadata,
   providerOptions = experimental_providerMetadata,
   experimental_activeTools: activeTools,
+  experimental_prepareStep: prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
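
This hunk threads a new `experimental_prepareStep` option out of the `generateText` arguments. A hedged usage sketch, based only on the call and fallback sites visible later in this diff (the callback receives `{ model, steps, maxSteps, stepNumber }` and may return `model`, `toolChoice`, and/or `experimental_activeTools`); `defaultModel` and the tool names are placeholders:

```ts
import { generateText } from "ai";

const { text } = await generateText({
  model: defaultModel, // placeholder model instance
  tools: { search: searchTool, calculator: calculatorTool }, // placeholder tools
  maxSteps: 5,
  // awaited before every step, so it may be async
  experimental_prepareStep: async ({ model, steps, maxSteps, stepNumber }) => {
    if (stepNumber === 0) {
      return {
        toolChoice: "required", // force a tool call on the first step
        experimental_activeTools: ["search"], // narrow the visible tool set
      };
    }
    // returning undefined keeps the outer model, toolChoice, and activeTools
  },
  prompt: "...",
});
```
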
@@ -4148,6 +4149,9 @@ async function generateText({
         telemetry
       }),
       ...baseTelemetryAttributes,
+      // model:
+      "ai.model.provider": model.provider,
+      "ai.model.id": model.modelId,
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
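
The outer `ai.generateText` telemetry span now records the model identity directly. A minimal sketch of surfacing it, assuming an OpenTelemetry tracer is registered and telemetry is enabled through the SDK's pre-existing `experimental_telemetry` option (that option is not part of this diff):

```ts
const { text } = await generateText({
  model,
  prompt: "...",
  experimental_telemetry: { isEnabled: true },
});
// the ai.generateText span now carries two additional attributes:
//   "ai.model.provider" = model.provider
//   "ai.model.id"       = model.modelId
```
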
@@ -4157,11 +4161,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
-      const mode = {
-        type: "regular",
-        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
-      };
+      var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
       const callSettings = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
@@ -4184,16 +4184,33 @@ async function generateText({
         ...initialPrompt.messages,
         ...responseMessages
       ];
+      const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+        model,
+        steps,
+        maxSteps,
+        stepNumber: stepCount
+      }));
+      const stepToolChoice = (_a18 = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _a18 : toolChoice;
+      const stepActiveTools = (_b = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _b : activeTools;
+      const stepModel = (_c = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _c : model;
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: {
           type: promptFormat,
           system: initialPrompt.system,
           messages: stepInputMessages
         },
-        modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
+        modelSupportsImageUrls: stepModel.supportsImageUrls,
+        modelSupportsUrl: (_d = stepModel.supportsUrl) == null ? void 0 : _d.bind(stepModel)
         // support 'this' context
       });
+      const mode = {
+        type: "regular",
+        ...prepareToolsAndToolChoice({
+          tools,
+          toolChoice: stepToolChoice,
+          activeTools: stepActiveTools
+        })
+      };
       currentModelResponse = await retry(
         () => recordSpan({
           name: "ai.generateText.doGenerate",
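
De-sugared, the minified fallback chain in this hunk is just optional calls with nullish coalescing: per-step overrides fall back to the outer settings, and `mode` (moved here from the function prologue) is rebuilt on every step from the resolved values. A readability sketch, not code from the package:

```ts
// equivalent of the minified code above (sketch)
const prepareStepResult = await prepareStep?.({
  model,
  steps,
  maxSteps,
  stepNumber: stepCount,
});
const stepToolChoice = prepareStepResult?.toolChoice ?? toolChoice;
const stepActiveTools = prepareStepResult?.experimental_activeTools ?? activeTools;
const stepModel = prepareStepResult?.model ?? model;

// the per-step mode is now built from the per-step overrides
const mode = {
  type: "regular",
  ...prepareToolsAndToolChoice({
    tools,
    toolChoice: stepToolChoice,
    activeTools: stepActiveTools,
  }),
};
```
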
@@ -4205,6 +4222,10 @@ async function generateText({
             telemetry
           }),
           ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": stepModel.provider,
+          "ai.model.id": stepModel.modelId,
+          // prompt:
           "ai.prompt.format": { input: () => promptFormat },
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
@@ -4220,8 +4241,8 @@ async function generateText({
             input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
           },
           // standardized gen-ai llm span attributes:
-          "gen_ai.system": model.provider,
-          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": stepModel.provider,
+          "gen_ai.request.model": stepModel.modelId,
           "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
           "gen_ai.request.max_tokens": settings.maxTokens,
           "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -4234,7 +4255,7 @@ async function generateText({
         tracer,
         fn: async (span2) => {
           var _a19, _b2, _c2, _d2, _e2, _f2;
-          const result = await model.doGenerate({
+          const result = await stepModel.doGenerate({
             mode,
             ...callSettings,
             inputFormat: promptFormat,
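
Because the provider call is now issued on `stepModel` rather than the outer `model`, a `prepareStep` that returns a different model reroutes that step entirely, including the per-step telemetry above and the `response.modelId` fallback in the next hunk. A hedged sketch with placeholder model instances:

```ts
// sketch: route follow-up steps to a cheaper model (placeholder instances)
const result = await generateText({
  model: largeModel, // serves step 0
  maxSteps: 3,
  experimental_prepareStep: ({ stepNumber }) =>
    stepNumber > 0 ? { model: smallModel } : undefined,
  prompt: "...",
});
```
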
@@ -4247,7 +4268,7 @@ async function generateText({
           const responseData = {
             id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
             timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-            modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
+            modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId
           };
           span2.setAttributes(
             selectTelemetryAttributes({
@@ -4279,7 +4300,7 @@ async function generateText({
         })
       );
       currentToolCalls = await Promise.all(
-        ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+        ((_e = currentModelResponse.toolCalls) != null ? _e : []).map(
           (toolCall) => parseToolCall({
             toolCall,
             tools,
@@ -4314,7 +4335,7 @@ async function generateText({
           nextStepType = "tool-result";
         }
       }
-      const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+      const originalText = (_f = currentModelResponse.text) != null ? _f : "";
       const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
       text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
       const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -4322,7 +4343,7 @@ async function generateText({
       currentReasoningDetails = asReasoningDetails(
         currentModelResponse.reasoning
       );
-      sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+      sources.push(...(_g = currentModelResponse.sources) != null ? _g : []);
       if (stepType === "continue") {
         const lastMessage = responseMessages[responseMessages.length - 1];
         if (typeof lastMessage.content === "string") {
@@ -4354,18 +4375,18 @@ async function generateText({
         reasoning: asReasoningText(currentReasoningDetails),
         reasoningDetails: currentReasoningDetails,
         files: asFiles(currentModelResponse.files),
-        sources: (_e = currentModelResponse.sources) != null ? _e : [],
+        sources: (_h = currentModelResponse.sources) != null ? _h : [],
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
         finishReason: currentModelResponse.finishReason,
         usage: currentUsage,
         warnings: currentModelResponse.warnings,
         logprobs: currentModelResponse.logprobs,
-        request: (_f = currentModelResponse.request) != null ? _f : {},
+        request: (_i = currentModelResponse.request) != null ? _i : {},
         response: {
           ...currentModelResponse.response,
-          headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
-          body: (_h = currentModelResponse.rawResponse) == null ? void 0 : _h.body,
+          headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
+          body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
           // deep clone msgs to avoid mutating past messages in multi-step:
           messages: structuredClone(responseMessages)
         },
@@ -4417,11 +4438,11 @@ async function generateText({
       finishReason: currentModelResponse.finishReason,
       usage,
       warnings: currentModelResponse.warnings,
-      request: (_i = currentModelResponse.request) != null ? _i : {},
+      request: (_l = currentModelResponse.request) != null ? _l : {},
       response: {
         ...currentModelResponse.response,
-        headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
-        body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
+        headers: (_m = currentModelResponse.rawResponse) == null ? void 0 : _m.headers,
+        body: (_n = currentModelResponse.rawResponse) == null ? void 0 : _n.body,
         messages: responseMessages
       },
       logprobs: currentModelResponse.logprobs,