ai 4.0.18 → 4.0.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +225 -153
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +225 -153
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # ai
 
+## 4.0.20
+
+### Patch Changes
+
+- da9d240: fix (ai/core): suppress errors caused by writing to closed stream
+- 6f1bfde: fix (ai/core): invoke streamText tool call repair when tool cannot be found
+
+## 4.0.19
+
+### Patch Changes
+
+- c3a6065: fix (ai/core): apply transform before callbacks and resolvables
+
 ## 4.0.18
 
 ### Patch Changes
package/dist/index.d.mts
CHANGED
@@ -1978,7 +1978,9 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
     finishReason: FinishReason;
     logprobs?: LogProbs;
     usage: LanguageModelUsage;
+    request: LanguageModelRequestMetadata;
     response: LanguageModelResponseMetadata;
+    warnings: CallWarning[] | undefined;
     experimental_providerMetadata?: ProviderMetadata;
     isContinued: boolean;
 } | {
package/dist/index.d.ts
CHANGED
@@ -1978,7 +1978,9 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
     finishReason: FinishReason;
     logprobs?: LogProbs;
     usage: LanguageModelUsage;
+    request: LanguageModelRequestMetadata;
     response: LanguageModelResponseMetadata;
+    warnings: CallWarning[] | undefined;
     experimental_providerMetadata?: ProviderMetadata;
     isContinued: boolean;
 } | {
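
Both declaration files add `request` and `warnings` to the `step-finish` variant of `TextStreamPart`, so stream consumers can read per-step request metadata and provider warnings. A minimal sketch of consuming them from `fullStream`; the provider import, model id, and prompt are placeholders, not part of this diff:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Write a haiku about version diffs.',
});

for await (const part of result.fullStream) {
  if (part.type === 'step-finish') {
    // Fields added to the step-finish part in this release:
    console.log('request metadata:', part.request);
    console.log('warnings:', part.warnings);
  }
}
```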
package/dist/index.js
CHANGED
@@ -95,15 +95,19 @@ function createDataStream({
       controller = controllerArg;
     }
   });
+  function safeEnqueue(data) {
+    try {
+      controller.enqueue(data);
+    } catch (error) {
+    }
+  }
   try {
     const result = execute({
       writeData(data) {
-        controller.enqueue((0, import_ui_utils.formatDataStreamPart)("data", [data]));
+        safeEnqueue((0, import_ui_utils.formatDataStreamPart)("data", [data]));
       },
       writeMessageAnnotation(annotation) {
-        controller.enqueue(
-          (0, import_ui_utils.formatDataStreamPart)("message_annotations", [annotation])
-        );
+        safeEnqueue((0, import_ui_utils.formatDataStreamPart)("message_annotations", [annotation]));
       },
       merge(streamArg) {
         ongoingStreamPromises.push(
@@ -113,10 +117,10 @@ function createDataStream({
             const { done, value } = await reader.read();
             if (done)
               break;
-            controller.enqueue(value);
+            safeEnqueue(value);
           }
         })().catch((error) => {
-          controller.enqueue((0, import_ui_utils.formatDataStreamPart)("error", onError(error)));
+          safeEnqueue((0, import_ui_utils.formatDataStreamPart)("error", onError(error)));
         })
         );
       },
@@ -125,12 +129,12 @@ function createDataStream({
     if (result) {
       ongoingStreamPromises.push(
         result.catch((error) => {
-          controller.enqueue((0, import_ui_utils.formatDataStreamPart)("error", onError(error)));
+          safeEnqueue((0, import_ui_utils.formatDataStreamPart)("error", onError(error)));
         })
       );
     }
   } catch (error) {
-    controller.enqueue((0, import_ui_utils.formatDataStreamPart)("error", onError(error)));
+    safeEnqueue((0, import_ui_utils.formatDataStreamPart)("error", onError(error)));
   }
   const waitForStreams = new Promise(async (resolve) => {
     while (ongoingStreamPromises.length > 0) {
@@ -139,7 +143,10 @@ function createDataStream({
     resolve();
   });
   waitForStreams.finally(() => {
-    controller.close();
+    try {
+      controller.close();
+    } catch (error) {
+    }
   });
   return stream;
 }
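
These four hunks are the da9d240 fix: every write into the data stream, and the final `controller.close()`, now goes through a guard that swallows the error thrown when the underlying `ReadableStream` controller is already closed. A rough sketch of the situation this tolerates, with illustrative timing and data (not taken from the package):

```ts
import { createDataStream } from 'ai';

const stream = createDataStream({
  execute: async (dataStream) => {
    dataStream.writeData('first chunk');
    // Simulate work that finishes after the consumer has gone away.
    await new Promise((resolve) => setTimeout(resolve, 100));
    // Before this patch the enqueue on a closed controller could throw;
    // with safeEnqueue the late write is silently dropped.
    dataStream.writeData('late chunk');
  },
});

const reader = stream.getReader();
await reader.read();   // consume one chunk
await reader.cancel(); // then abandon the stream before execute() finishes
```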
@@ -1804,6 +1811,13 @@ function calculateLanguageModelUsage({
     totalTokens: promptTokens + completionTokens
   };
 }
+function addLanguageModelUsage(usage1, usage2) {
+  return {
+    promptTokens: usage1.promptTokens + usage2.promptTokens,
+    completionTokens: usage1.completionTokens + usage2.completionTokens,
+    totalTokens: usage1.totalTokens + usage2.totalTokens
+  };
+}
 
 // core/generate-object/inject-json-instruction.ts
 var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
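
`addLanguageModelUsage` sums two usage records field by field; it replaces the ad-hoc `+=` accumulation in `generateText` and `streamText` below. An equivalent standalone sketch (the helper itself is internal and not exported from `ai`):

```ts
type Usage = { promptTokens: number; completionTokens: number; totalTokens: number };

const addUsage = (a: Usage, b: Usage): Usage => ({
  promptTokens: a.promptTokens + b.promptTokens,
  completionTokens: a.completionTokens + b.completionTokens,
  totalTokens: a.totalTokens + b.totalTokens,
});

// Two steps of a multi-step call accumulate into a single total:
const step1: Usage = { promptTokens: 120, completionTokens: 40, totalTokens: 160 };
const step2: Usage = { promptTokens: 180, completionTokens: 60, totalTokens: 240 };
console.log(addUsage(step1, step2));
// -> { promptTokens: 300, completionTokens: 100, totalTokens: 400 }
```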
@@ -3622,7 +3636,7 @@ async function generateText({
       const responseMessages = [];
       let text2 = "";
       const steps = [];
-      const usage = {
+      let usage = {
         completionTokens: 0,
         promptTokens: 0,
         totalTokens: 0
@@ -3749,9 +3763,7 @@ async function generateText({
         const currentUsage = calculateLanguageModelUsage(
           currentModelResponse.usage
         );
-        usage.completionTokens += currentUsage.completionTokens;
-        usage.promptTokens += currentUsage.promptTokens;
-        usage.totalTokens += currentUsage.totalTokens;
+        usage = addLanguageModelUsage(usage, currentUsage);
         let nextStepType = "done";
         if (++stepCount < maxSteps) {
           if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
@@ -4162,25 +4174,6 @@ function runToolsTransformation({
           break;
         }
         case "tool-call": {
-          const toolName = chunk.toolName;
-          if (tools == null) {
-            toolResultsStreamController.enqueue({
-              type: "error",
-              error: new NoSuchToolError({ toolName: chunk.toolName })
-            });
-            break;
-          }
-          const tool2 = tools[toolName];
-          if (tool2 == null) {
-            toolResultsStreamController.enqueue({
-              type: "error",
-              error: new NoSuchToolError({
-                toolName: chunk.toolName,
-                availableTools: Object.keys(tools)
-              })
-            });
-            break;
-          }
           try {
             const toolCall = await parseToolCall({
               toolCall: chunk,
@@ -4190,6 +4183,7 @@ function runToolsTransformation({
               messages
             });
             controller.enqueue(toolCall);
+            const tool2 = tools[toolCall.toolName];
             if (tool2.execute != null) {
               const toolExecutionId = (0, import_ui_utils7.generateId)();
               outstandingToolResults.add(toolExecutionId);
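
In `runToolsTransformation` the unknown-tool checks no longer short-circuit before `parseToolCall`; the tool lookup now happens on the parsed call, so the 6f1bfde fix lets `streamText`'s tool-call-repair hook run when the model asks for a tool that does not exist. A hedged sketch of using that hook, assuming the option is exposed as `experimental_repairToolCall` with the argument shape used by the 4.x docs; the model, tool, and repair strategy are illustrative:

```ts
import { streamText, tool, NoSuchToolError } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const result = streamText({
  model: openai('gpt-4o-mini'),
  tools: {
    getWeather: tool({
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => `Sunny in ${city}`,
    }),
  },
  // Called when a tool call cannot be processed, including (with this fix)
  // when the requested tool name cannot be found.
  experimental_repairToolCall: async ({ toolCall, error }) => {
    if (NoSuchToolError.isInstance(error)) {
      // Naive repair for the sketch: route the call to the only tool we have.
      return { ...toolCall, toolName: 'getWeather' };
    }
    return null; // let other errors propagate
  },
  prompt: 'What is the weather in Berlin?',
});
```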
@@ -4409,10 +4403,168 @@ var DefaultStreamTextResult = class {
         message: "maxSteps must be at least 1"
       });
     }
+    let recordedStepText = "";
+    let recordedContinuationText = "";
+    let recordedFullText = "";
+    let recordedRequest = void 0;
+    const recordedResponse = {
+      id: generateId3(),
+      timestamp: currentDate(),
+      modelId: model.modelId,
+      messages: []
+    };
+    let recordedToolCalls = [];
+    let recordedToolResults = [];
+    let recordedFinishReason = void 0;
+    let recordedUsage = void 0;
+    let recordedProviderMetadata = void 0;
+    let stepType = "initial";
+    const recordedSteps = [];
+    let rootSpan;
+    const eventProcessor = new TransformStream({
+      async transform(chunk, controller) {
+        controller.enqueue(chunk);
+        if (chunk.type === "text-delta" || chunk.type === "tool-call" || chunk.type === "tool-result" || chunk.type === "tool-call-streaming-start" || chunk.type === "tool-call-delta") {
+          await (onChunk == null ? void 0 : onChunk({ chunk }));
+        }
+        if (chunk.type === "text-delta") {
+          recordedStepText += chunk.textDelta;
+          recordedContinuationText += chunk.textDelta;
+          recordedFullText += chunk.textDelta;
+        }
+        if (chunk.type === "tool-call") {
+          recordedToolCalls.push(chunk);
+        }
+        if (chunk.type === "tool-result") {
+          recordedToolResults.push(chunk);
+        }
+        if (chunk.type === "step-finish") {
+          const stepMessages = toResponseMessages({
+            text: recordedContinuationText,
+            tools: tools != null ? tools : {},
+            toolCalls: recordedToolCalls,
+            toolResults: recordedToolResults
+          });
+          const currentStep = recordedSteps.length;
+          let nextStepType = "done";
+          if (currentStep + 1 < maxSteps) {
+            if (continueSteps && chunk.finishReason === "length" && // only use continue when there are no tool calls:
+            recordedToolCalls.length === 0) {
+              nextStepType = "continue";
+            } else if (
+              // there are tool calls:
+              recordedToolCalls.length > 0 && // all current tool calls have results:
+              recordedToolResults.length === recordedToolCalls.length
+            ) {
+              nextStepType = "tool-result";
+            }
+          }
+          const currentStepResult = {
+            stepType,
+            text: recordedStepText,
+            toolCalls: recordedToolCalls,
+            toolResults: recordedToolResults,
+            finishReason: chunk.finishReason,
+            usage: chunk.usage,
+            warnings: chunk.warnings,
+            logprobs: chunk.logprobs,
+            request: chunk.request,
+            response: {
+              ...chunk.response,
+              messages: [...recordedResponse.messages, ...stepMessages]
+            },
+            experimental_providerMetadata: chunk.experimental_providerMetadata,
+            isContinued: chunk.isContinued
+          };
+          await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+          recordedSteps.push(currentStepResult);
+          recordedToolCalls = [];
+          recordedToolResults = [];
+          recordedStepText = "";
+          recordedRequest = chunk.request;
+          if (nextStepType !== "done") {
+            stepType = nextStepType;
+          }
+          if (nextStepType !== "continue") {
+            recordedResponse.messages.push(...stepMessages);
+            recordedContinuationText = "";
+          }
+        }
+        if (chunk.type === "finish") {
+          recordedResponse.id = chunk.response.id;
+          recordedResponse.timestamp = chunk.response.timestamp;
+          recordedResponse.modelId = chunk.response.modelId;
+          recordedResponse.headers = chunk.response.headers;
+          recordedUsage = chunk.usage;
+          recordedFinishReason = chunk.finishReason;
+          recordedProviderMetadata = chunk.experimental_providerMetadata;
+        }
+      },
+      async flush(controller) {
+        var _a13;
+        try {
+          const lastStep = recordedSteps[recordedSteps.length - 1];
+          if (lastStep) {
+            self.warningsPromise.resolve(lastStep.warnings);
+            self.requestPromise.resolve(lastStep.request);
+            self.responsePromise.resolve(lastStep.response);
+            self.toolCallsPromise.resolve(lastStep.toolCalls);
+            self.toolResultsPromise.resolve(lastStep.toolResults);
+            self.providerMetadataPromise.resolve(
+              lastStep.experimental_providerMetadata
+            );
+          }
+          const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
+          const usage = recordedUsage != null ? recordedUsage : {
+            completionTokens: NaN,
+            promptTokens: NaN,
+            totalTokens: NaN
+          };
+          self.finishReasonPromise.resolve(finishReason);
+          self.usagePromise.resolve(usage);
+          self.textPromise.resolve(recordedFullText);
+          self.stepsPromise.resolve(recordedSteps);
+          await (onFinish == null ? void 0 : onFinish({
+            finishReason,
+            logprobs: void 0,
+            usage,
+            text: recordedFullText,
+            toolCalls: lastStep.toolCalls,
+            toolResults: lastStep.toolResults,
+            request: (_a13 = lastStep.request) != null ? _a13 : {},
+            response: lastStep.response,
+            warnings: lastStep.warnings,
+            experimental_providerMetadata: lastStep.experimental_providerMetadata,
+            steps: recordedSteps
+          }));
+          rootSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.response.finishReason": finishReason,
+                "ai.response.text": { output: () => recordedFullText },
+                "ai.response.toolCalls": {
+                  output: () => {
+                    var _a14;
+                    return ((_a14 = lastStep.toolCalls) == null ? void 0 : _a14.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
+                  }
+                },
+                "ai.usage.promptTokens": usage.promptTokens,
+                "ai.usage.completionTokens": usage.completionTokens
+              }
+            })
+          );
+        } catch (error) {
+          controller.error(error);
+        } finally {
+          rootSpan.end();
+        }
+      }
+    });
     const stitchableStream = createStitchableStream();
     this.addStream = stitchableStream.addStream;
     this.closeStream = stitchableStream.close;
-    this.baseStream = transform ? stitchableStream.stream.pipeThrough(transform) : stitchableStream.stream;
+    this.baseStream = (transform ? stitchableStream.stream.pipeThrough(transform) : stitchableStream.stream).pipeThrough(eventProcessor);
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
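
The new `eventProcessor` transform records text, tool calls, step results, and final metadata from the stream and now drives `onChunk`, `onStepFinish`, `onFinish`, and the resolvable promises from there. Because it is piped after the optional `transform`, callbacks and resolved values observe the transformed stream, which is the c3a6065 change from 4.0.19. A small usage sketch built only on options and results that appear in this diff (model and prompt are placeholders):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  maxSteps: 3,
  prompt: 'Answer the question, using tools when needed.',
  onStepFinish(step) {
    // Mirrors the recorded currentStepResult built in the transform above.
    console.log(step.stepType, step.finishReason, step.usage);
  },
  onFinish({ text, steps, usage }) {
    console.log(`finished after ${steps.length} step(s)`, usage);
  },
});

// Resolvables are settled in the flush() handler once the stream completes.
for await (const part of result.textStream) {
  void part; // drain the stream
}
console.log(await result.text);
console.log((await result.steps).length);
```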
@@ -4444,13 +4596,13 @@ var DefaultStreamTextResult = class {
       }),
       tracer,
       endWhenDone: false,
-      fn: async (rootSpan) => {
-      [1 removed line: content not shown in this diff]
+      fn: async (rootSpanArg) => {
+        rootSpan = rootSpanArg;
       async function streamStep({
         currentStep,
         responseMessages,
         usage,
-        stepType,
+        stepType: stepType2,
         previousStepText,
         hasLeadingWhitespace
       }) {
@@ -4556,7 +4708,7 @@ var DefaultStreamTextResult = class {
         let stepProviderMetadata;
         let stepFirstChunk = true;
         let stepText = "";
-        let fullStepText = stepType === "continue" ? previousStepText : "";
+        let fullStepText = stepType2 === "continue" ? previousStepText : "";
         let stepLogProbs;
         let stepResponse = {
           id: generateId3(),
@@ -4576,7 +4728,6 @@ var DefaultStreamTextResult = class {
               fullStepText += chunk.textDelta;
               chunkTextPublished = true;
               hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
-              await (onChunk == null ? void 0 : onChunk({ chunk }));
             }
             self.addStream(
               transformedStream.pipeThrough(
@@ -4625,13 +4776,11 @@ var DefaultStreamTextResult = class {
                   case "tool-call": {
                     controller.enqueue(chunk);
                     stepToolCalls.push(chunk);
-                    await (onChunk == null ? void 0 : onChunk({ chunk }));
                     break;
                   }
                   case "tool-result": {
                     controller.enqueue(chunk);
                     stepToolResults.push(chunk);
-                    await (onChunk == null ? void 0 : onChunk({ chunk }));
                     break;
                   }
                   case "response-metadata": {
@@ -4658,7 +4807,6 @@ var DefaultStreamTextResult = class {
                   case "tool-call-streaming-start":
                   case "tool-call-delta": {
                     controller.enqueue(chunk);
-                    await (onChunk == null ? void 0 : onChunk({ chunk }));
                     break;
                   }
                   case "error": {
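
The `onChunk` invocations scattered across the `text-delta`, `tool-call`, `tool-result`, and `tool-call-delta` cases are removed because the `eventProcessor` above now reports every chunk exactly once. Consumer code keeps the same shape; a sketch with placeholder model and prompt:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Stream a short answer.',
  onChunk({ chunk }) {
    // Invoked once per chunk from the event processor, not per switch case.
    if (chunk.type === 'text-delta') {
      process.stdout.write(chunk.textDelta);
    }
  },
});

for await (const part of result.textStream) {
  void part; // draining the stream drives onChunk
}
```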
@@ -4689,7 +4837,7 @@ var DefaultStreamTextResult = class {
                 }
               }
               if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
-              stepType === "continue" && !chunkTextPublished)) {
+              stepType2 === "continue" && !chunkTextPublished)) {
                 await publishTextChunk({
                   controller,
                   chunk: {
@@ -4733,69 +4881,16 @@ var DefaultStreamTextResult = class {
                 usage: stepUsage,
                 experimental_providerMetadata: stepProviderMetadata,
                 logprobs: stepLogProbs,
-                response: {
-                  ...stepResponse
-                },
-                isContinued: nextStepType === "continue"
-              });
-              if (stepType === "continue") {
-                const lastMessage = responseMessages[responseMessages.length - 1];
-                if (typeof lastMessage.content === "string") {
-                  lastMessage.content += stepText;
-                } else {
-                  lastMessage.content.push({
-                    text: stepText,
-                    type: "text"
-                  });
-                }
-              } else {
-                responseMessages.push(
-                  ...toResponseMessages({
-                    text: stepText,
-                    tools: tools != null ? tools : {},
-                    toolCalls: stepToolCalls,
-                    toolResults: stepToolResults
-                  })
-                );
-              }
-              const currentStepResult = {
-                stepType,
-                text: stepText,
-                toolCalls: stepToolCalls,
-                toolResults: stepToolResults,
-                finishReason: stepFinishReason,
-                usage: stepUsage,
-                warnings,
-                logprobs: stepLogProbs,
                 request: stepRequest,
                 response: {
                   ...stepResponse,
-                  headers: rawResponse == null ? void 0 : rawResponse.headers,
-                  // deep clone msgs to avoid mutating past messages in multi-step:
-                  messages: JSON.parse(JSON.stringify(responseMessages))
+                  headers: rawResponse == null ? void 0 : rawResponse.headers
                 },
-                experimental_providerMetadata: stepProviderMetadata,
+                warnings,
                 isContinued: nextStepType === "continue"
-              };
-              stepResults.push(currentStepResult);
-              await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
-              const combinedUsage = {
-                promptTokens: usage.promptTokens + stepUsage.promptTokens,
-                completionTokens: usage.completionTokens + stepUsage.completionTokens,
-                totalTokens: usage.totalTokens + stepUsage.totalTokens
-              };
-              if (nextStepType !== "done") {
-                await streamStep({
-                  currentStep: currentStep + 1,
-                  responseMessages,
-                  usage: combinedUsage,
-                  stepType: nextStepType,
-                  previousStepText: fullStepText,
-                  hasLeadingWhitespace: hasWhitespaceSuffix
-                });
-                return;
-              }
-              try {
+              });
+              const combinedUsage = addLanguageModelUsage(usage, stepUsage);
+              if (nextStepType === "done") {
                 controller.enqueue({
                   type: "finish",
                   finishReason: stepFinishReason,
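
The remaining hunks fold the old post-step bookkeeping into an explicit branch: when the next step type is not `done`, the step's text is merged into (or appended to) `responseMessages` and `streamStep` recurses with the combined usage; only the final step emits the `finish` event and closes the stream. A sketch of the multi-step behavior this implements, assuming `experimental_continueSteps` is the public switch behind the `continueSteps` flag seen here (model and prompt are placeholders):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o-mini'),
  maxSteps: 5,
  experimental_continueSteps: true,
  prompt: 'Write a long essay about stream processing.',
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}

// Each recursion of streamStep is reported as one step; continued steps are
// merged into the previous assistant message rather than starting a new one.
const steps = await result.steps;
console.log(steps.map((s) => `${s.stepType}:${s.finishReason}`).join(' -> '));
```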
@@ -4803,63 +4898,40 @@ var DefaultStreamTextResult = class {
                   experimental_providerMetadata: stepProviderMetadata,
                   logprobs: stepLogProbs,
                   response: {
-                    ...stepResponse
+                    ...stepResponse,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
                   }
                 });
                 self.closeStream();
-                [24 removed lines: content not shown in this diff]
-                  messages: responseMessages
-                });
-                self.stepsPromise.resolve(stepResults);
-                self.warningsPromise.resolve(warnings != null ? warnings : []);
-                await (onFinish == null ? void 0 : onFinish({
-                  finishReason: stepFinishReason,
-                  logprobs: stepLogProbs,
+              } else {
+                if (stepType2 === "continue") {
+                  const lastMessage = responseMessages[responseMessages.length - 1];
+                  if (typeof lastMessage.content === "string") {
+                    lastMessage.content += stepText;
+                  } else {
+                    lastMessage.content.push({
+                      text: stepText,
+                      type: "text"
+                    });
+                  }
+                } else {
+                  responseMessages.push(
+                    ...toResponseMessages({
+                      text: stepText,
+                      tools: tools != null ? tools : {},
+                      toolCalls: stepToolCalls,
+                      toolResults: stepToolResults
+                    })
+                  );
+                }
+                await streamStep({
+                  currentStep: currentStep + 1,
+                  responseMessages,
                   usage: combinedUsage,
-                  [4 removed lines: content not shown in this diff]
-                  // optional as well. Therefore we need to cast the toolResults to any.
-                  // The type exposed to the users will be correctly inferred.
-                  toolResults: stepToolResults,
-                  request: stepRequest,
-                  response: {
-                    ...stepResponse,
-                    headers: rawResponse == null ? void 0 : rawResponse.headers,
-                    messages: responseMessages
-                  },
-                  warnings,
-                  experimental_providerMetadata: stepProviderMetadata,
-                  steps: stepResults
-                }));
-              } catch (error) {
-                controller.error(error);
-              } finally {
-                rootSpan.end();
+                  stepType: nextStepType,
+                  previousStepText: fullStepText,
+                  hasLeadingWhitespace: hasWhitespaceSuffix
+                });
               }
             }
           })
|