ai 5.0.0-canary.8 → 5.0.0-canary.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +172 -37
- package/dist/index.d.ts +172 -37
- package/dist/index.js +255 -152
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +247 -142
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -4093,6 +4093,17 @@ function validateObjectGenerationInput({
   }
 }
 
+// core/generate-text/extract-content-text.ts
+function extractContentText(content) {
+  const parts = content.filter(
+    (content2) => content2.type === "text"
+  );
+  if (parts.length === 0) {
+    return void 0;
+  }
+  return parts.map((content2) => content2.text).join("");
+}
+
 // core/generate-object/generate-object.ts
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject({
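This hunk adds `extractContentText`, reflecting the move to a unified `content` part array on generate results: text is now derived by joining all `text` parts instead of being read from a dedicated `text` field. A minimal standalone TypeScript sketch of the same logic (the `ContentPart` union here is illustrative, not the SDK's exact type):

    // Illustrative part union; the real one also includes reasoning, file, and source members.
    type ContentPart =
      | { type: "text"; text: string }
      | { type: "tool-call"; toolCallId: string; toolName: string; args: string };

    // Joins all text parts, or returns undefined when there are none (mirrors the diff).
    function extractContentText(content: ContentPart[]): string | undefined {
      const parts = content.filter(
        (part): part is Extract<ContentPart, { type: "text" }> => part.type === "text"
      );
      return parts.length === 0 ? undefined : parts.map((part) => part.text).join("");
    }

    // ["Hello, ", tool call, "world"] yields "Hello, world".
    console.log(
      extractContentText([
        { type: "text", text: "Hello, " },
        { type: "tool-call", toolCallId: "1", toolName: "search", args: "{}" },
        { type: "text", text: "world" },
      ])
    );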
@@ -4166,7 +4177,7 @@ async function generateObject({
     }),
     tracer,
     fn: async (span) => {
-      var _a17, _b, _c, _d
+      var _a17, _b, _c, _d;
       if (mode === "auto" || mode == null) {
         mode = model.defaultObjectGenerationMode;
       }
@@ -4228,7 +4239,7 @@ async function generateObject({
           }),
           tracer,
           fn: async (span2) => {
-            var _a18, _b2, _c2, _d2,
+            var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
             const result2 = await model.doGenerate({
               responseFormat: {
                 type: "json",
@@ -4246,11 +4257,12 @@ async function generateObject({
             const responseData = {
               id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
               timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (_f = (
+              modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
               headers: (_g = result2.response) == null ? void 0 : _g.headers,
               body: (_h = result2.response) == null ? void 0 : _h.body
             };
-
+            const text2 = extractContentText(result2.content);
+            if (text2 === void 0) {
               throw new NoObjectGeneratedError({
                 message: "No object generated: the model did not return a response.",
                 response: responseData,
@@ -4263,10 +4275,7 @@ async function generateObject({
                 telemetry,
                 attributes: {
                   "ai.response.finishReason": result2.finishReason,
-                  "ai.response.object": { output: () => {
-                    var _a19;
-                    return (_a19 = result2.text) == null ? void 0 : _a19.text;
-                  } },
+                  "ai.response.object": { output: () => text2 },
                   "ai.response.id": responseData.id,
                   "ai.response.model": responseData.modelId,
                   "ai.response.timestamp": responseData.timestamp.toISOString(),
@@ -4282,17 +4291,17 @@ async function generateObject({
               }
             })
           );
-            return { ...result2, objectText:
+            return { ...result2, objectText: text2, responseData };
           }
         })
       );
-      result =
+      result = generateResult.objectText;
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
       logprobs = generateResult.logprobs;
       resultProviderMetadata = generateResult.providerMetadata;
-      request = (
+      request = (_b = generateResult.request) != null ? _b : {};
       response = generateResult.responseData;
       break;
     }
@@ -4304,7 +4313,7 @@ async function generateObject({
     const promptMessages = await convertToLanguageModelPrompt({
       prompt: standardizedPrompt,
       modelSupportsImageUrls: model.supportsImageUrls,
-      modelSupportsUrl: (
+      modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
       // support 'this' context,
     });
     const inputFormat = standardizedPrompt.type;
@@ -4339,7 +4348,7 @@ async function generateObject({
           }),
           tracer,
           fn: async (span2) => {
-            var _a18, _b2, _c2, _d2,
+            var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
             const result2 = await model.doGenerate({
               tools: [
                 {
@@ -4357,13 +4366,16 @@ async function generateObject({
               abortSignal,
               headers
             });
-            const
+            const firstToolCall = result2.content.find(
+              (content) => content.type === "tool-call"
+            );
+            const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
             const responseData = {
-              id: (
-              timestamp: (
-              modelId: (
-              headers: (
-              body: (
+              id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
+              timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+              modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+              headers: (_g = result2.response) == null ? void 0 : _g.headers,
+              body: (_h = result2.response) == null ? void 0 : _h.body
             };
             if (objectText === void 0) {
               throw new NoObjectGeneratedError({
@@ -4404,7 +4416,7 @@ async function generateObject({
       warnings = generateResult.warnings;
       logprobs = generateResult.logprobs;
       resultProviderMetadata = generateResult.providerMetadata;
-      request = (
+      request = (_d = generateResult.request) != null ? _d : {};
       response = generateResult.responseData;
       break;
     }
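In tool mode, `generateObject` now pulls the object's JSON text from the first `tool-call` part of the content array rather than from a separate tool-calls field. A hedged sketch of that lookup, reusing the illustrative part union from above:

    type Part =
      | { type: "text"; text: string }
      | { type: "tool-call"; toolCallId: string; toolName: string; args: string };

    // The object text is the args string of the first tool call, if any (mirrors the diff).
    function objectTextFromContent(content: Part[]): string | undefined {
      const firstToolCall = content.find(
        (part): part is Extract<Part, { type: "tool-call" }> => part.type === "tool-call"
      );
      return firstToolCall?.args;
    }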
@@ -4900,7 +4912,7 @@ var DefaultStreamObjectResult = class {
       }
     }
     const {
-      result: { stream,
+      result: { stream, response, request },
       doStreamSpan,
       startTimestampMs
     } = await retry(
@@ -4942,6 +4954,7 @@ var DefaultStreamObjectResult = class {
       })
     );
     self.requestPromise.resolve(request != null ? request : {});
+    let warnings;
     let usage;
     let finishReason;
     let providerMetadata;
@@ -4962,6 +4975,10 @@ var DefaultStreamObjectResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           var _a18, _b2, _c;
+          if (typeof chunk === "object" && chunk.type === "stream-start") {
+            warnings = chunk.warnings;
+            return;
+          }
           if (isFirstChunk) {
             const msToFirstChunk = now2() - startTimestampMs;
             isFirstChunk = false;
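Streams in this release open with a `stream-start` chunk carrying the call warnings, which `streamObject` (and, later in this diff, `streamText`) captures before any content arrives. A sketch of a consumer under that assumption (the chunk union is illustrative, not the SDK's exact type):

    type StreamChunk =
      | { type: "stream-start"; warnings: unknown[] }
      | { type: "text"; text: string }
      | { type: "finish" };

    function collectWarnings(chunks: Iterable<StreamChunk>): unknown[] | undefined {
      let warnings: unknown[] | undefined;
      for (const chunk of chunks) {
        if (chunk.type === "stream-start") {
          warnings = chunk.warnings; // arrives before any content chunk
          continue;
        }
        // ...handle text/finish chunks here
      }
      return warnings;
    }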
@@ -5485,7 +5502,7 @@ async function doParseToolCall({
   };
 }
 
-// core/generate-text/reasoning
+// core/generate-text/reasoning.ts
 function asReasoningText(reasoning) {
   const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
   return reasoningText.length > 0 ? reasoningText : void 0;
@@ -5636,7 +5653,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a18, _b, _c, _d
+      var _a18, _b, _c, _d;
       const toolsAndToolChoice = {
         ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
       };
@@ -5711,7 +5728,7 @@ async function generateText({
           }),
           tracer,
           fn: async (span2) => {
-            var _a19, _b2, _c2, _d2,
+            var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
             const result = await model.doGenerate({
               ...callSettings,
               ...toolsAndToolChoice,
@@ -5725,9 +5742,9 @@ async function generateText({
             const responseData = {
               id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
               timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (
-              headers: (
-              body: (
+              modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+              headers: (_g = result.response) == null ? void 0 : _g.headers,
+              body: (_h = result.response) == null ? void 0 : _h.body
             };
             span2.setAttributes(
               selectTelemetryAttributes({
@@ -5735,22 +5752,12 @@ async function generateText({
                 attributes: {
                   "ai.response.finishReason": result.finishReason,
                   "ai.response.text": {
-                    output: () => {
-                      var _a20;
-                      return (_a20 = result.text) == null ? void 0 : _a20.text;
-                    }
+                    output: () => extractContentText(result.content)
                   },
                   "ai.response.toolCalls": {
                     output: () => {
-
-                      return JSON.stringify(
-                        (_a20 = result.toolCalls) == null ? void 0 : _a20.map((toolCall) => ({
-                          toolCallType: toolCall.toolCallType,
-                          toolCallId: toolCall.toolCallId,
-                          toolName: toolCall.toolName,
-                          args: toolCall.args
-                        }))
-                      );
+                      const toolCalls = asToolCalls(result.content);
+                      return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
                     }
                   },
                   "ai.response.id": responseData.id,
@@ -5773,7 +5780,9 @@ async function generateText({
             })
           );
           currentToolCalls = await Promise.all(
-
+            currentModelResponse.content.filter(
+              (part) => part.type === "tool-call"
+            ).map(
             (toolCall) => parseToolCall({
               toolCall,
               tools,
@@ -5808,15 +5817,19 @@ async function generateText({
               nextStepType = "tool-result";
             }
           }
-          const originalText = (
+          const originalText = (_b = extractContentText(currentModelResponse.content)) != null ? _b : "";
           const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
           text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
           const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
           text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
           currentReasoningDetails = asReasoningDetails(
-            currentModelResponse.
+            currentModelResponse.content
+          );
+          sources.push(
+            ...currentModelResponse.content.filter(
+              (part) => part.type === "source"
+            )
           );
-          sources.push(...(_e = currentModelResponse.sources) != null ? _e : []);
           if (stepType === "continue") {
             const lastMessage = responseMessages[responseMessages.length - 1];
             if (typeof lastMessage.content === "string") {
@@ -5831,8 +5844,8 @@ async function generateText({
           responseMessages.push(
             ...toResponseMessages({
               text: text2,
-              files: asFiles(currentModelResponse.
-              reasoning: asReasoningDetails(currentModelResponse.
+              files: asFiles(currentModelResponse.content),
+              reasoning: asReasoningDetails(currentModelResponse.content),
               tools: tools != null ? tools : {},
               toolCalls: currentToolCalls,
               toolResults: currentToolResults,
@@ -5844,18 +5857,19 @@ async function generateText({
           const currentStepResult = {
             stepType,
             text: stepText,
-
-            reasoning:
-
-
-
+            reasoningText: asReasoningText(currentReasoningDetails),
+            reasoning: currentReasoningDetails,
+            files: asFiles(currentModelResponse.content),
+            sources: currentModelResponse.content.filter(
+              (part) => part.type === "source"
+            ),
             toolCalls: currentToolCalls,
             toolResults: currentToolResults,
             finishReason: currentModelResponse.finishReason,
             usage: currentUsage,
             warnings: currentModelResponse.warnings,
             logprobs: currentModelResponse.logprobs,
-            request: (
+            request: (_c = currentModelResponse.request) != null ? _c : {},
             response: {
               ...currentModelResponse.response,
               // deep clone msgs to avoid mutating past messages in multi-step:
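Step results are reshaped here: the joined reasoning string moves to `reasoningText`, `reasoning` now holds the structured detail parts, and `files`/`sources` are projected from the content array. A hedged sketch of reading a step under the new names (field types simplified, not the SDK's exact interface):

    interface StepResultSketch {
      text: string;
      reasoningText: string | undefined; // formerly `reasoning`
      reasoning: Array<{ type: string; text?: string }>; // formerly `reasoningDetails`
      sources: Array<{ type: "source" }>;
    }

    function logStep(step: StepResultSketch): void {
      console.log("text:", step.text);
      console.log("reasoning:", step.reasoningText ?? "(none)");
      console.log("parts:", step.reasoning.length, "sources:", step.sources.length);
    }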
@@ -5874,22 +5888,12 @@ async function generateText({
             attributes: {
               "ai.response.finishReason": currentModelResponse.finishReason,
               "ai.response.text": {
-                output: () => {
-                  var _a19;
-                  return (_a19 = currentModelResponse.text) == null ? void 0 : _a19.text;
-                }
+                output: () => extractContentText(currentModelResponse.content)
               },
               "ai.response.toolCalls": {
                 output: () => {
-
-                  return JSON.stringify(
-                    (_a19 = currentModelResponse.toolCalls) == null ? void 0 : _a19.map((toolCall) => ({
-                      toolCallType: toolCall.toolCallType,
-                      toolCallId: toolCall.toolCallId,
-                      toolName: toolCall.toolName,
-                      args: toolCall.args
-                    }))
-                  );
+                  const toolCalls = asToolCalls(currentModelResponse.content);
+                  return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
                 }
               },
               // TODO rename telemetry attributes to inputTokens and outputTokens
@@ -5900,7 +5904,7 @@ async function generateText({
       );
       return new DefaultGenerateTextResult({
         text: text2,
-        files: asFiles(currentModelResponse.
+        files: asFiles(currentModelResponse.content),
         reasoning: asReasoningText(currentReasoningDetails),
         reasoningDetails: currentReasoningDetails,
         sources,
@@ -5922,7 +5926,7 @@ async function generateText({
         finishReason: currentModelResponse.finishReason,
         usage,
         warnings: currentModelResponse.warnings,
-        request: (
+        request: (_d = currentModelResponse.request) != null ? _d : {},
         response: {
           ...currentModelResponse.response,
           messages: responseMessages
@@ -6013,8 +6017,8 @@ var DefaultGenerateTextResult = class {
   constructor(options) {
     this.text = options.text;
     this.files = options.files;
-    this.reasoning = options.reasoning;
-    this.reasoningDetails = options.reasoningDetails;
+    this.reasoningText = options.reasoning;
+    this.reasoning = options.reasoningDetails;
     this.toolCalls = options.toolCalls;
     this.toolResults = options.toolResults;
     this.finishReason = options.finishReason;
@@ -6032,8 +6036,9 @@ var DefaultGenerateTextResult = class {
     return this.outputResolver();
   }
 };
-function asReasoningDetails(reasoning) {
-
+function asReasoningDetails(content) {
+  const reasoning = content.filter((part) => part.type === "reasoning");
+  if (reasoning.length === 0) {
     return [];
   }
   const result = [];
@@ -6059,9 +6064,22 @@ function asReasoningDetails(reasoning) {
   }
   return result;
 }
-function asFiles(
-
-
+function asFiles(content) {
+  return content.filter((part) => part.type === "file").map((part) => new DefaultGeneratedFile(part));
+}
+function asToolCalls(content) {
+  const parts = content.filter(
+    (part) => part.type === "tool-call"
+  );
+  if (parts.length === 0) {
+    return void 0;
+  }
+  return parts.map((toolCall) => ({
+    toolCallType: toolCall.toolCallType,
+    toolCallId: toolCall.toolCallId,
+    toolName: toolCall.toolName,
+    args: toolCall.args
+  }));
 }
 
 // core/generate-text/output.ts
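The new `asFiles`/`asToolCalls` helpers project typed parts out of the content array; `asToolCalls` feeds the `ai.response.toolCalls` telemetry attribute above. An equivalent standalone sketch (the part union is illustrative):

    type ToolCallPart = {
      type: "tool-call";
      toolCallType: string;
      toolCallId: string;
      toolName: string;
      args: string;
    };
    type Part = ToolCallPart | { type: "text"; text: string } | { type: "file" };

    // undefined when there are no tool calls, else the telemetry projection (mirrors the diff).
    function asToolCalls(content: Part[]) {
      const parts = content.filter((part): part is ToolCallPart => part.type === "tool-call");
      if (parts.length === 0) {
        return undefined;
      }
      return parts.map(({ toolCallType, toolCallId, toolName, args }) => ({
        toolCallType,
        toolCallId,
        toolName,
        args,
      }));
    }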
@@ -6278,7 +6296,9 @@ function smoothStream({
 }
 
 // core/generate-text/stream-text.ts
-import {
+import {
+  AISDKError as AISDKError18
+} from "@ai-sdk/provider";
 import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
 
 // util/as-array.ts
@@ -6427,6 +6447,7 @@ function runToolsTransformation({
     async transform(chunk, controller) {
       const chunkType = chunk.type;
       switch (chunkType) {
+        case "stream-start":
         case "text":
         case "reasoning":
         case "source":
@@ -6864,8 +6885,8 @@ var DefaultStreamTextResult = class {
           const currentStepResult = {
             stepType,
             text: recordedStepText,
-
-
+            reasoningText: asReasoningText(stepReasoning),
+            reasoning: stepReasoning,
             files: stepFiles,
             sources: recordedStepSources,
             toolCalls: recordedToolCalls,
@@ -6921,8 +6942,8 @@ var DefaultStreamTextResult = class {
         self.toolCallsPromise.resolve(lastStep.toolCalls);
         self.toolResultsPromise.resolve(lastStep.toolResults);
         self.providerMetadataPromise.resolve(lastStep.providerMetadata);
-        self.reasoningPromise.resolve(lastStep.reasoning);
-        self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
+        self.reasoningPromise.resolve(lastStep.reasoningText);
+        self.reasoningDetailsPromise.resolve(lastStep.reasoning);
         const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
         const usage = recordedUsage != null ? recordedUsage : {
           completionTokens: NaN,
@@ -6940,8 +6961,8 @@ var DefaultStreamTextResult = class {
           logprobs: void 0,
           usage,
           text: recordedFullText,
+          reasoningText: lastStep.reasoningText,
           reasoning: lastStep.reasoning,
-          reasoningDetails: lastStep.reasoningDetails,
           files: lastStep.files,
           sources: lastStep.sources,
           toolCalls: lastStep.toolCalls,
@@ -7057,7 +7078,7 @@ var DefaultStreamTextResult = class {
           ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
         };
         const {
-          result: { stream: stream2,
+          result: { stream: stream2, response, request },
          doStreamSpan,
          startTimestampMs
         } = await retry(
@@ -7134,6 +7155,7 @@ var DefaultStreamTextResult = class {
         const stepRequest = request != null ? request : {};
         const stepToolCalls = [];
         const stepToolResults = [];
+        let warnings;
         const stepReasoning2 = [];
         const stepFiles2 = [];
         let activeReasoningText2 = void 0;
@@ -7172,6 +7194,10 @@ var DefaultStreamTextResult = class {
           new TransformStream({
             async transform(chunk, controller) {
               var _a19, _b, _c;
+              if (chunk.type === "stream-start") {
+                warnings = chunk.warnings;
+                return;
+              }
               if (stepFirstChunk) {
                 const msToFirstChunk = now2() - startTimestampMs;
                 stepFirstChunk = false;
@@ -7464,10 +7490,10 @@ var DefaultStreamTextResult = class {
   get text() {
     return this.textPromise.value;
   }
-  get reasoning() {
+  get reasoningText() {
     return this.reasoningPromise.value;
   }
-  get reasoningDetails() {
+  get reasoning() {
     return this.reasoningDetailsPromise.value;
   }
   get sources() {
|
|
7800
7826
|
}
|
7801
7827
|
};
|
7802
7828
|
|
7803
|
-
// errors/no-
|
7829
|
+
// errors/no-speech-generated-error.ts
|
7804
7830
|
import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
|
7805
|
-
var
|
7831
|
+
var NoSpeechGeneratedError = class extends AISDKError19 {
|
7832
|
+
constructor(options) {
|
7833
|
+
super({
|
7834
|
+
name: "AI_NoSpeechGeneratedError",
|
7835
|
+
message: "No speech audio generated."
|
7836
|
+
});
|
7837
|
+
this.responses = options.responses;
|
7838
|
+
}
|
7839
|
+
};
|
7840
|
+
|
7841
|
+
// core/generate-speech/generated-audio-file.ts
|
7842
|
+
var DefaultGeneratedAudioFile = class extends DefaultGeneratedFile {
|
7843
|
+
constructor({
|
7844
|
+
data,
|
7845
|
+
mediaType
|
7846
|
+
}) {
|
7847
|
+
super({ data, mediaType });
|
7848
|
+
let format = "mp3";
|
7849
|
+
if (mediaType) {
|
7850
|
+
const mimeTypeParts = mediaType.split("/");
|
7851
|
+
if (mimeTypeParts.length === 2) {
|
7852
|
+
if (mediaType !== "audio/mpeg") {
|
7853
|
+
format = mimeTypeParts[1];
|
7854
|
+
}
|
7855
|
+
}
|
7856
|
+
}
|
7857
|
+
if (!format) {
|
7858
|
+
throw new Error(
|
7859
|
+
"Audio format must be provided or determinable from mimeType"
|
7860
|
+
);
|
7861
|
+
}
|
7862
|
+
this.format = format;
|
7863
|
+
}
|
7864
|
+
};
|
7865
|
+
|
7866
|
+
// core/generate-speech/generate-speech.ts
|
7867
|
+
async function generateSpeech({
|
7868
|
+
model,
|
7869
|
+
text: text2,
|
7870
|
+
voice,
|
7871
|
+
outputFormat,
|
7872
|
+
instructions,
|
7873
|
+
speed,
|
7874
|
+
providerOptions = {},
|
7875
|
+
maxRetries: maxRetriesArg,
|
7876
|
+
abortSignal,
|
7877
|
+
headers
|
7878
|
+
}) {
|
7879
|
+
var _a17;
|
7880
|
+
const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
|
7881
|
+
const result = await retry(
|
7882
|
+
() => model.doGenerate({
|
7883
|
+
text: text2,
|
7884
|
+
voice,
|
7885
|
+
outputFormat,
|
7886
|
+
instructions,
|
7887
|
+
speed,
|
7888
|
+
abortSignal,
|
7889
|
+
headers,
|
7890
|
+
providerOptions
|
7891
|
+
})
|
7892
|
+
);
|
7893
|
+
if (!result.audio || result.audio.length === 0) {
|
7894
|
+
throw new NoSpeechGeneratedError({ responses: [result.response] });
|
7895
|
+
}
|
7896
|
+
return new DefaultSpeechResult({
|
7897
|
+
audio: new DefaultGeneratedAudioFile({
|
7898
|
+
data: result.audio,
|
7899
|
+
mediaType: (_a17 = detectMediaType({
|
7900
|
+
data: result.audio,
|
7901
|
+
signatures: audioMediaTypeSignatures
|
7902
|
+
})) != null ? _a17 : "audio/mp3"
|
7903
|
+
}),
|
7904
|
+
warnings: result.warnings,
|
7905
|
+
responses: [result.response],
|
7906
|
+
providerMetadata: result.providerMetadata
|
7907
|
+
});
|
7908
|
+
}
|
7909
|
+
var DefaultSpeechResult = class {
|
7910
|
+
constructor(options) {
|
7911
|
+
var _a17;
|
7912
|
+
this.audio = options.audio;
|
7913
|
+
this.warnings = options.warnings;
|
7914
|
+
this.responses = options.responses;
|
7915
|
+
this.providerMetadata = (_a17 = options.providerMetadata) != null ? _a17 : {};
|
7916
|
+
}
|
7917
|
+
};
|
7918
|
+
|
7919
|
+
// errors/no-transcript-generated-error.ts
|
7920
|
+
import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
|
7921
|
+
var NoTranscriptGeneratedError = class extends AISDKError20 {
|
7806
7922
|
constructor(options) {
|
7807
7923
|
super({
|
7808
7924
|
name: "AI_NoTranscriptGeneratedError",
|
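This hunk introduces experimental speech generation: `generateSpeech` (exported below as `experimental_generateSpeech`), a `NoSpeechGeneratedError`, and an audio file wrapper whose `format` defaults to "mp3" when the media type is `audio/mpeg` or undetectable. A hedged usage sketch; the speech model value and the "alloy" voice are placeholders, not confirmed provider APIs:

    import { experimental_generateSpeech as generateSpeech } from "ai";

    // A speech model instance; left abstract here.
    declare const speechModel: Parameters<typeof generateSpeech>[0]["model"];

    const { audio, warnings } = await generateSpeech({
      model: speechModel,
      text: "Hello from the AI SDK.",
      voice: "alloy", // provider-specific; illustrative only
    });
    // Throws NoSpeechGeneratedError if the model returned no audio.
    console.log(audio.format, warnings);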
@@ -7949,37 +8065,41 @@ function extractReasoningMiddleware({
   return {
     middlewareVersion: "v2",
     wrapGenerate: async ({ doGenerate }) => {
-      const {
-
-
-
-
-
-
-
-
-
-
-
-
-
-      const
-
-
-
-
+      const { content, ...rest } = await doGenerate();
+      const transformedContent = [];
+      for (const part of content) {
+        if (part.type !== "text") {
+          transformedContent.push(part);
+          continue;
+        }
+        const text2 = startWithReasoning ? openingTag + part.text : part.text;
+        const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
+        const matches = Array.from(text2.matchAll(regexp));
+        if (!matches.length) {
+          transformedContent.push(part);
+          continue;
+        }
+        const reasoningText = matches.map((match) => match[1]).join(separator);
+        let textWithoutReasoning = text2;
+        for (let i = matches.length - 1; i >= 0; i--) {
+          const match = matches[i];
+          const beforeMatch = textWithoutReasoning.slice(0, match.index);
+          const afterMatch = textWithoutReasoning.slice(
+            match.index + match[0].length
+          );
+          textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+        }
+        transformedContent.push({
+          type: "reasoning",
+          reasoningType: "text",
+          text: reasoningText
+        });
+        transformedContent.push({
+          type: "text",
+          text: textWithoutReasoning
+        });
       }
-      return {
-        ...rest,
-        text: { type: "text", text: textWithoutReasoning },
-        reasoning: reasoningText.length > 0 ? [
-          {
-            type: "reasoning",
-            reasoningType: "text",
-            text: reasoningText
-          }
-        ] : void 0
-      };
+      return { content: transformedContent, ...rest };
     },
     wrapStream: async ({ doStream }) => {
       const { stream, ...rest } = await doStream();
@@ -8054,26 +8174,13 @@ function simulateStreamingMiddleware() {
       const result = await doGenerate();
       const simulatedStream = new ReadableStream({
         start(controller) {
+          controller.enqueue({
+            type: "stream-start",
+            warnings: result.warnings
+          });
           controller.enqueue({ type: "response-metadata", ...result.response });
-
-
-              controller.enqueue(reasoningPart);
-            }
-          }
-          if (result.text) {
-            controller.enqueue(result.text);
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call-delta",
-                toolCallType: "function",
-                toolCallId: toolCall.toolCallId,
-                toolName: toolCall.toolName,
-                argsTextDelta: toolCall.args
-              });
-              controller.enqueue(toolCall);
-            }
+          for (const part of result.content) {
+            controller.enqueue(part);
           }
           controller.enqueue({
             type: "finish",
@@ -8088,8 +8195,7 @@ function simulateStreamingMiddleware() {
       return {
         stream: simulatedStream,
         request: result.request,
-        response: result.response,
-        warnings: result.warnings
+        response: result.response
       };
     }
   };
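`simulateStreamingMiddleware` now opens its synthetic stream with the `stream-start` chunk (moving warnings out of the stream result object) and simply replays each content part. A usage sketch with `wrapLanguageModel`, whose `experimental_` alias is dropped later in this diff:

    import { simulateStreamingMiddleware, wrapLanguageModel } from "ai";

    // A non-streaming model instance; left abstract here.
    declare const model: Parameters<typeof wrapLanguageModel>[0]["model"];

    const streamingModel = wrapLanguageModel({
      model,
      middleware: simulateStreamingMiddleware(),
    });
    // Per this diff, the simulated stream begins with
    // { type: "stream-start", warnings } and then replays each content part.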
@@ -8146,7 +8252,6 @@ var doWrap = ({
     }
   };
 };
-var experimental_wrapLanguageModel = wrapLanguageModel;
 
 // core/prompt/append-client-message.ts
 function appendClientMessage({
@@ -8160,7 +8265,7 @@ function appendClientMessage({
 }
 
 // core/prompt/append-response-messages.ts
-import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
+import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
 function appendResponseMessages({
   messages,
   responseMessages,
@@ -8243,7 +8348,7 @@ function appendResponseMessages({
             break;
           case "file":
             if (part.data instanceof URL) {
-              throw new AISDKError20({
+              throw new AISDKError21({
                 name: "InvalidAssistantFileData",
                 message: "File data cannot be a URL"
               });
@@ -8377,7 +8482,7 @@ function customProvider({
 var experimental_customProvider = customProvider;
 
 // core/registry/no-such-provider-error.ts
-import { AISDKError as AISDKError21, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+import { AISDKError as AISDKError22, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
 var name16 = "AI_NoSuchProviderError";
 var marker16 = `vercel.ai.error.${name16}`;
 var symbol16 = Symbol.for(marker16);
@@ -8396,7 +8501,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return AISDKError21.hasMarker(error, marker16);
+    return AISDKError22.hasMarker(error, marker16);
   }
 };
 _a16 = symbol16;
@@ -9374,8 +9479,8 @@ export {
   experimental_createProviderRegistry,
   experimental_customProvider,
   generateImage as experimental_generateImage,
+  generateSpeech as experimental_generateSpeech,
   transcribe as experimental_transcribe,
-  experimental_wrapLanguageModel,
   extractMaxToolInvocationStep,
   extractReasoningMiddleware,
   fillMessageParts,