ai 5.0.0-beta.33 → 5.0.0-beta.34
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +77 -6
- package/dist/index.d.ts +77 -6
- package/dist/index.js +200 -136
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +161 -101
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -1
- package/dist/internal/index.d.ts +2 -1
- package/dist/internal/index.js +34 -16
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +34 -16
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.js CHANGED
@@ -56,7 +56,7 @@ __export(src_exports, {
   UI_MESSAGE_STREAM_HEADERS: () => UI_MESSAGE_STREAM_HEADERS,
   UnsupportedFunctionalityError: () => import_provider18.UnsupportedFunctionalityError,
   UnsupportedModelVersionError: () => UnsupportedModelVersionError,
-  asSchema: () =>
+  asSchema: () => import_provider_utils28.asSchema,
   assistantModelMessageSchema: () => assistantModelMessageSchema,
   callCompletionApi: () => callCompletionApi,
   consumeStream: () => consumeStream,
@@ -69,14 +69,14 @@ __export(src_exports, {
   coreToolMessageSchema: () => coreToolMessageSchema,
   coreUserMessageSchema: () => coreUserMessageSchema,
   cosineSimilarity: () => cosineSimilarity,
-  createIdGenerator: () =>
+  createIdGenerator: () => import_provider_utils28.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   createTextStreamResponse: () => createTextStreamResponse,
   createUIMessageStream: () => createUIMessageStream,
   createUIMessageStreamResponse: () => createUIMessageStreamResponse,
   customProvider: () => customProvider,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
-  dynamicTool: () =>
+  dynamicTool: () => import_provider_utils28.dynamicTool,
   embed: () => embed,
   embedMany: () => embedMany,
   experimental_createMCPClient: () => createMCPClient,
@@ -86,7 +86,7 @@ __export(src_exports, {
   experimental_generateSpeech: () => generateSpeech,
   experimental_transcribe: () => transcribe,
   extractReasoningMiddleware: () => extractReasoningMiddleware,
-  generateId: () =>
+  generateId: () => import_provider_utils28.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getTextFromDataUrl: () => getTextFromDataUrl,
@@ -94,7 +94,7 @@ __export(src_exports, {
   hasToolCall: () => hasToolCall,
   isDeepEqualData: () => isDeepEqualData,
   isToolUIPart: () => isToolUIPart,
-  jsonSchema: () =>
+  jsonSchema: () => import_provider_utils28.jsonSchema,
   lastAssistantMessageIsCompleteWithToolCalls: () => lastAssistantMessageIsCompleteWithToolCalls,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
@@ -108,15 +108,15 @@ __export(src_exports, {
   streamObject: () => streamObject,
   streamText: () => streamText,
   systemModelMessageSchema: () => systemModelMessageSchema,
-  tool: () =>
+  tool: () => import_provider_utils28.tool,
   toolModelMessageSchema: () => toolModelMessageSchema,
   userModelMessageSchema: () => userModelMessageSchema,
   wrapLanguageModel: () => wrapLanguageModel,
   wrapProvider: () => wrapProvider,
-  zodSchema: () =>
+  zodSchema: () => import_provider_utils28.zodSchema
 });
 module.exports = __toCommonJS(src_exports);
-var
+var import_provider_utils28 = require("@ai-sdk/provider-utils");

 // src/generate-text/generate-text.ts
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
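
The export-map hunks above renumber the shared `@ai-sdk/provider-utils` binding to `import_provider_utils28` and re-export its helpers (`asSchema`, `createIdGenerator`, `dynamicTool`, `generateId`, `jsonSchema`, `tool`, `zodSchema`) directly from the `ai` entry point. A minimal consumption sketch in TypeScript; the Zod schema and tool body are illustrative, not from this package:

```ts
import { createIdGenerator, generateId, tool } from "ai";
import { z } from "zod";

// Same helper the diff wires up for "aiobj" IDs, here with a custom prefix.
const createRunId = createIdGenerator({ prefix: "run", size: 24 });

// `tool` is the provider-utils helper re-exported through "ai".
const weatherTool = tool({
  description: "Get the weather for a city",
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }) => `Sunny in ${city}`,
});

console.log(createRunId(), generateId(), weatherTool.description);
```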
@@ -199,45 +199,51 @@ var RetryError = class extends import_provider3.AISDKError {
 _a3 = symbol3;

 // src/util/retry-with-exponential-backoff.ts
-function
+function getRetryDelayInMs({
+  error,
+  exponentialBackoffDelay
+}) {
   const headers = error.responseHeaders;
   if (!headers)
     return exponentialBackoffDelay;
-  let
+  let ms;
   const retryAfterMs = headers["retry-after-ms"];
   if (retryAfterMs) {
     const timeoutMs = parseFloat(retryAfterMs);
     if (!Number.isNaN(timeoutMs)) {
-
+      ms = timeoutMs;
     }
   }
   const retryAfter = headers["retry-after"];
-  if (retryAfter &&
+  if (retryAfter && ms === void 0) {
     const timeoutSeconds = parseFloat(retryAfter);
     if (!Number.isNaN(timeoutSeconds)) {
-
+      ms = timeoutSeconds * 1e3;
     } else {
-
+      ms = Date.parse(retryAfter) - Date.now();
     }
   }
-  if (
-  return
+  if (ms != null && !Number.isNaN(ms) && 0 <= ms && (ms < 60 * 1e3 || ms < exponentialBackoffDelay)) {
+    return ms;
   }
   return exponentialBackoffDelay;
 }
 var retryWithExponentialBackoffRespectingRetryHeaders = ({
   maxRetries = 2,
   initialDelayInMs = 2e3,
-  backoffFactor = 2
+  backoffFactor = 2,
+  abortSignal
 } = {}) => async (f) => _retryWithExponentialBackoff(f, {
   maxRetries,
   delayInMs: initialDelayInMs,
-  backoffFactor
+  backoffFactor,
+  abortSignal
 });
 async function _retryWithExponentialBackoff(f, {
   maxRetries,
   delayInMs,
-  backoffFactor
+  backoffFactor,
+  abortSignal
 }, errors = []) {
   try {
     return await f();
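
Reading the new `getRetryDelayInMs` as a standalone sketch clarifies the policy: a `retry-after-ms` header (milliseconds) wins, then `retry-after` (seconds or an HTTP date), and the header value is only honored when it is non-negative and either under one minute or shorter than the exponential backoff that would apply anyway. This TypeScript mirror of the compiled hunk takes headers directly instead of an `APICallError`:

```ts
function getRetryDelayInMs(
  headers: Record<string, string> | undefined,
  exponentialBackoffDelay: number,
): number {
  if (!headers) return exponentialBackoffDelay;
  let ms: number | undefined;

  // Millisecond-precision header takes priority.
  const retryAfterMs = headers["retry-after-ms"];
  if (retryAfterMs) {
    const timeoutMs = parseFloat(retryAfterMs);
    if (!Number.isNaN(timeoutMs)) ms = timeoutMs;
  }

  // Fallback: retry-after as seconds, or as an HTTP date.
  const retryAfter = headers["retry-after"];
  if (retryAfter && ms === undefined) {
    const timeoutSeconds = parseFloat(retryAfter);
    ms = !Number.isNaN(timeoutSeconds)
      ? timeoutSeconds * 1000
      : Date.parse(retryAfter) - Date.now();
  }

  // Honor the header only when sane: non-negative and either under 60s
  // or shorter than the backoff delay that would be used anyway.
  if (ms != null && !Number.isNaN(ms) && ms >= 0 && (ms < 60_000 || ms < exponentialBackoffDelay)) {
    return ms;
  }
  return exponentialBackoffDelay;
}

console.log(getRetryDelayInMs({ "retry-after-ms": "1500" }, 2000)); // 1500
console.log(getRetryDelayInMs({ "retry-after": "120" }, 2000)); // 2000 (120s is ignored)
```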
@@ -259,11 +265,21 @@ async function _retryWithExponentialBackoff(f, {
     });
   }
   if (error instanceof Error && import_provider4.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
-
-
+    await (0, import_provider_utils.delay)(
+      getRetryDelayInMs({
+        error,
+        exponentialBackoffDelay: delayInMs
+      }),
+      { abortSignal }
+    );
     return _retryWithExponentialBackoff(
       f,
-      {
+      {
+        maxRetries,
+        delayInMs: backoffFactor * delayInMs,
+        backoffFactor,
+        abortSignal
+      },
       newErrors
     );
   }
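
The backoff wait itself now goes through `delay(ms, { abortSignal })`, so aborting should interrupt the pending timer instead of letting a cancelled call sit out the remaining delay (the rejection-on-abort behavior is an assumption about provider-utils, inferred from this hunk). A small sketch of that call form, assuming an ES module with top-level await:

```ts
import { delay } from "@ai-sdk/provider-utils";

const controller = new AbortController();
setTimeout(() => controller.abort(), 100);

try {
  // Same two-argument form the retry loop uses above.
  await delay(2000, { abortSignal: controller.signal });
} catch {
  console.log("backoff wait was interrupted by abort");
}
```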
@@ -280,7 +296,8 @@ async function _retryWithExponentialBackoff(f, {

 // src/util/prepare-retries.ts
 function prepareRetries({
-  maxRetries
+  maxRetries,
+  abortSignal
 }) {
   if (maxRetries != null) {
     if (!Number.isInteger(maxRetries)) {
@@ -302,7 +319,8 @@ function prepareRetries({
   return {
     maxRetries: maxRetriesResult,
     retry: retryWithExponentialBackoffRespectingRetryHeaders({
-      maxRetries: maxRetriesResult
+      maxRetries: maxRetriesResult,
+      abortSignal
     })
   };
 }
@@ -2029,7 +2047,10 @@ async function generateText({
 }) {
   const model = resolveLanguageModel(modelArg);
   const stopConditions = asArray(stopWhen);
-  const { maxRetries, retry } = prepareRetries({
+  const { maxRetries, retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
@@ -4513,7 +4534,8 @@ var DefaultStreamTextResult = class {
     }
     this.baseStream = stream.pipeThrough(createOutputTransformStream(output)).pipeThrough(eventProcessor);
     const { maxRetries, retry } = prepareRetries({
-      maxRetries: maxRetriesArg
+      maxRetries: maxRetriesArg,
+      abortSignal
     });
     const tracer = getTracer(telemetry);
     const callSettings = prepareCallSettings(settings);
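
Because `generateText` and `streamText` now pass `abortSignal` into `prepareRetries`, cancelling a call also cancels any in-flight retry wait. A hedged usage sketch; the `@ai-sdk/openai` provider and model id are illustrative:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const controller = new AbortController();
setTimeout(() => controller.abort(), 5_000);

const { text } = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "Write a haiku about retries.",
  maxRetries: 2,
  // Before this version, the signal did not interrupt the delay between retries.
  abortSignal: controller.signal,
});
```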
@@ -5389,7 +5411,10 @@ async function embed({
       modelId: model.modelId
     });
   }
-  const { maxRetries, retry } = prepareRetries({
+  const { maxRetries, retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
@@ -5519,7 +5544,10 @@ async function embedMany({
       modelId: model.modelId
     });
   }
-  const { maxRetries, retry } = prepareRetries({
+  const { maxRetries, retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
@@ -5756,7 +5784,10 @@ async function generateImage({
       modelId: model.modelId
     });
   }
-  const { retry } = prepareRetries({
+  const { retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const maxImagesPerCallWithDefault = (_a16 = maxImagesPerCall != null ? maxImagesPerCall : await invokeModelMaxImagesPerCall(model)) != null ? _a16 : 1;
   const callCount = Math.ceil(n / maxImagesPerCallWithDefault);
   const callImageCounts = Array.from({ length: callCount }, (_, i) => {
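
`embed`, `embedMany`, and the image generation path get the same abort-aware retry treatment. Sketch for `embedMany`; the provider, method name, and model id are illustrative assumptions:

```ts
import { embedMany } from "ai";
import { openai } from "@ai-sdk/openai";

const controller = new AbortController();

const { embeddings } = await embedMany({
  model: openai.textEmbeddingModel("text-embedding-3-small"),
  values: ["sunny day at the beach", "rainy afternoon in the city"],
  maxRetries: 2,
  abortSignal: controller.signal,
});
```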
@@ -5844,8 +5875,7 @@ async function invokeModelMaxImagesPerCall(model) {
 }

 // src/generate-object/generate-object.ts
-var
-var import_provider_utils15 = require("@ai-sdk/provider-utils");
+var import_provider_utils16 = require("@ai-sdk/provider-utils");

 // src/generate-object/output-strategy.ts
 var import_provider23 = require("@ai-sdk/provider");
@@ -6223,8 +6253,65 @@ function validateObjectGenerationInput({
   }
 }

+// src/generate-object/parse-and-validate-object-result.ts
+var import_provider24 = require("@ai-sdk/provider");
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
+async function parseAndValidateObjectResult(result, outputStrategy, context) {
+  const parseResult = await (0, import_provider_utils15.safeParseJSON)({ text: result });
+  if (!parseResult.success) {
+    throw new NoObjectGeneratedError({
+      message: "No object generated: could not parse the response.",
+      cause: parseResult.error,
+      text: result,
+      response: context.response,
+      usage: context.usage,
+      finishReason: context.finishReason
+    });
+  }
+  const validationResult = await outputStrategy.validateFinalResult(
+    parseResult.value,
+    {
+      text: result,
+      response: context.response,
+      usage: context.usage
+    }
+  );
+  if (!validationResult.success) {
+    throw new NoObjectGeneratedError({
+      message: "No object generated: response did not match schema.",
+      cause: validationResult.error,
+      text: result,
+      response: context.response,
+      usage: context.usage,
+      finishReason: context.finishReason
+    });
+  }
+  return validationResult.value;
+}
+async function parseAndValidateObjectResultWithRepair(result, outputStrategy, repairText, context) {
+  try {
+    return await parseAndValidateObjectResult(result, outputStrategy, context);
+  } catch (error) {
+    if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider24.JSONParseError.isInstance(error.cause) || import_provider24.TypeValidationError.isInstance(error.cause))) {
+      const repairedText = await repairText({
+        text: result,
+        error: error.cause
+      });
+      if (repairedText === null) {
+        throw error;
+      }
+      return await parseAndValidateObjectResult(
+        repairedText,
+        outputStrategy,
+        context
+      );
+    }
+    throw error;
+  }
+}

 // src/generate-object/generate-object.ts
-var originalGenerateId3 = (0,
+var originalGenerateId3 = (0, import_provider_utils16.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
   const {
     model: modelArg,
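
The parse/validate/repair flow for object generation is extracted into its own module. `parseAndValidateObjectResultWithRepair` gives the `experimental_repairText` hook one chance to fix the raw model text after a `JSONParseError` or `TypeValidationError`; returning `null` rethrows the original error. Usage sketch for `generateObject`; the provider, model id, and repair heuristic are made-up examples:

```ts
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const { object } = await generateObject({
  model: openai("gpt-4o-mini"),
  schema: z.object({ name: z.string(), age: z.number() }),
  prompt: "Generate a fictional person.",
  experimental_repairText: async ({ text, error }) => {
    // Example repair: drop trailing commas, then let the SDK reparse.
    const repaired = text.replace(/,\s*([}\]])/g, "$1");
    return repaired === text ? null : repaired; // null keeps the original error
  },
});
```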
@@ -6258,7 +6345,10 @@ async function generateObject(options) {
     schemaDescription,
     enumValues
   });
-  const { maxRetries, retry } = prepareRetries({
+  const { maxRetries, retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const outputStrategy = getOutputStrategy({
     output,
     schema: inputSchema,
@@ -6404,55 +6494,16 @@ async function generateObject(options) {
       resultProviderMetadata = generateResult.providerMetadata;
       request = (_a16 = generateResult.request) != null ? _a16 : {};
       response = generateResult.responseData;
-
-
-
-
-
-
-
-
-          usage,
-          finishReason
-        });
+      const object2 = await parseAndValidateObjectResultWithRepair(
+        result,
+        outputStrategy,
+        repairText,
+        {
+          response,
+          usage,
+          finishReason
         }
-
-          parseResult.value,
-          {
-            text: result2,
-            response,
-            usage
-          }
-        );
-        if (!validationResult.success) {
-          throw new NoObjectGeneratedError({
-            message: "No object generated: response did not match schema.",
-            cause: validationResult.error,
-            text: result2,
-            response,
-            usage,
-            finishReason
-          });
-        }
-        return validationResult.value;
-      }
-      let object2;
-      try {
-        object2 = await processResult(result);
-      } catch (error) {
-        if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider24.JSONParseError.isInstance(error.cause) || import_provider24.TypeValidationError.isInstance(error.cause))) {
-          const repairedText = await repairText({
-            text: result,
-            error: error.cause
-          });
-          if (repairedText === null) {
-            throw error;
-          }
-          object2 = await processResult(repairedText);
-        } else {
-          throw error;
-        }
-      }
+      );
       span.setAttributes(
         selectTelemetryAttributes({
           telemetry,
@@ -6507,7 +6558,7 @@ var DefaultGenerateObjectResult = class {
 };

 // src/generate-object/stream-object.ts
-var
+var import_provider_utils18 = require("@ai-sdk/provider-utils");

 // src/util/cosine-similarity.ts
 function cosineSimilarity(vector1, vector2) {
@@ -6617,7 +6668,7 @@ var SerialJobExecutor = class {
 };

 // src/util/simulate-readable-stream.ts
-var
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
 function simulateReadableStream({
   chunks,
   initialDelayInMs = 0,
@@ -6625,7 +6676,7 @@ function simulateReadableStream({
   _internal
 }) {
   var _a16;
-  const delay2 = (_a16 = _internal == null ? void 0 : _internal.delay) != null ? _a16 :
+  const delay2 = (_a16 = _internal == null ? void 0 : _internal.delay) != null ? _a16 : import_provider_utils17.delay;
   let index = 0;
   return new ReadableStream({
     async pull(controller) {
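
`simulateReadableStream` keeps its public shape and only swaps its default delay implementation for the provider-utils one. For reference, a usage sketch (Node 18+ can iterate web `ReadableStream`s directly; the option names follow the documented API):

```ts
import { simulateReadableStream } from "ai";

const stream = simulateReadableStream({
  chunks: ["Hello", ", ", "world!"],
  initialDelayInMs: 0,
  chunkDelayInMs: 10,
});

for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```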
@@ -6640,7 +6691,7 @@ function simulateReadableStream({
 }

 // src/generate-object/stream-object.ts
-var originalGenerateId4 = (0,
+var originalGenerateId4 = (0, import_provider_utils18.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject(options) {
   const {
     model,
@@ -6651,6 +6702,7 @@ function streamObject(options) {
     maxRetries,
     abortSignal,
     headers,
+    experimental_repairText: repairText,
     experimental_telemetry: telemetry,
     providerOptions,
     onError = ({ error }) => {
@@ -6696,6 +6748,7 @@ function streamObject(options) {
     schemaName,
     schemaDescription,
     providerOptions,
+    repairText,
     onError,
     onFinish,
     generateId: generateId3,
@@ -6718,6 +6771,7 @@ var DefaultStreamObjectResult = class {
     schemaName,
     schemaDescription,
     providerOptions,
+    repairText,
     onError,
     onFinish,
     generateId: generateId3,
@@ -6733,7 +6787,8 @@ var DefaultStreamObjectResult = class {
     this._finishReason = new DelayedPromise();
     const model = resolveLanguageModel(modelArg);
     const { maxRetries, retry } = prepareRetries({
-      maxRetries: maxRetriesArg
+      maxRetries: maxRetriesArg,
+      abortSignal
     });
     const callSettings = prepareCallSettings(settings);
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
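
The headline change in this hunk group: `streamObject` now accepts `experimental_repairText` and threads it (as `repairText`) into `DefaultStreamObjectResult`, where the fully accumulated text is parsed and validated. Hedged sketch; provider, model id, and repair heuristic are illustrative:

```ts
import { streamObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const result = streamObject({
  model: openai("gpt-4o-mini"),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: "Suggest a blog post.",
  // New in this version: the same hook generateObject already had.
  experimental_repairText: async ({ text }) =>
    text.replace(/,\s*([}\]])/g, "$1"), // e.g. drop trailing commas
});

for await (const partial of result.partialObjectStream) {
  console.log(partial);
}
console.log(await result.object); // final, repaired and schema-validated object
```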
@@ -6951,27 +7006,21 @@ var DefaultStreamObjectResult = class {
             headers: response == null ? void 0 : response.headers
           });
           self._finishReason.resolve(finishReason != null ? finishReason : "unknown");
-
-
-
-
-
-
-
-
-
-
+          try {
+            object2 = await parseAndValidateObjectResultWithRepair(
+              accumulatedText,
+              outputStrategy,
+              repairText,
+              {
+                response: fullResponse,
+                usage,
+                finishReason
+              }
+            );
             self._object.resolve(object2);
-          }
-          error =
-
-            cause: validationResult.error,
-            text: accumulatedText,
-            response: fullResponse,
-            usage,
-            finishReason
-          });
-          self._object.reject(error);
+          } catch (e) {
+            error = e;
+            self._object.reject(e);
           }
           break;
         }
@@ -7214,7 +7263,10 @@ async function generateSpeech({
       modelId: model.modelId
     });
   }
-  const { retry } = prepareRetries({
+  const { retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const result = await retry(
     () => model.doGenerate({
       text: text2,
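
`experimental_generateSpeech` (and `experimental_transcribe` further down) forward the caller's signal into their retry loops as well. Sketch with an assumed OpenAI speech model; the provider method and model id are illustrative:

```ts
import { experimental_generateSpeech as generateSpeech } from "ai";
import { openai } from "@ai-sdk/openai";

const controller = new AbortController();

const { audio } = await generateSpeech({
  model: openai.speech("tts-1"),
  text: "Retries now stop waiting when you abort.",
  abortSignal: controller.signal,
});
```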
@@ -7260,7 +7312,7 @@ __export(output_exports, {
   object: () => object,
   text: () => text
 });
-var
+var import_provider_utils19 = require("@ai-sdk/provider-utils");
 var text = () => ({
   type: "text",
   responseFormat: { type: "text" },
@@ -7274,7 +7326,7 @@ var text = () => ({
 var object = ({
   schema: inputSchema
 }) => {
-  const schema = (0,
+  const schema = (0, import_provider_utils19.asSchema)(inputSchema);
   return {
     type: "object",
     responseFormat: {
@@ -7300,7 +7352,7 @@ var object = ({
       }
     },
     async parseOutput({ text: text2 }, context) {
-      const parseResult = await (0,
+      const parseResult = await (0, import_provider_utils19.safeParseJSON)({ text: text2 });
       if (!parseResult.success) {
         throw new NoObjectGeneratedError({
           message: "No object generated: could not parse the response.",
@@ -7311,7 +7363,7 @@ var object = ({
           finishReason: context.finishReason
         });
       }
-      const validationResult = await (0,
+      const validationResult = await (0, import_provider_utils19.safeValidateTypes)({
         value: parseResult.value,
         schema
       });
@@ -7331,7 +7383,7 @@ var object = ({
 };

 // src/generate-text/smooth-stream.ts
-var
+var import_provider_utils20 = require("@ai-sdk/provider-utils");
 var import_provider26 = require("@ai-sdk/provider");
 var CHUNKING_REGEXPS = {
   word: /\S+\s+/m,
@@ -7340,7 +7392,7 @@ var CHUNKING_REGEXPS = {
 function smoothStream({
   delayInMs = 10,
   chunking = "word",
-  _internal: { delay: delay2 =
+  _internal: { delay: delay2 = import_provider_utils20.delay } = {}
 } = {}) {
   let detectChunk;
   if (typeof chunking === "function") {
@@ -7924,10 +7976,10 @@ var DefaultProviderRegistry = class {
 };

 // src/tool/mcp/mcp-client.ts
-var
+var import_provider_utils22 = require("@ai-sdk/provider-utils");

 // src/tool/mcp/mcp-sse-transport.ts
-var
+var import_provider_utils21 = require("@ai-sdk/provider-utils");

 // src/tool/mcp/json-rpc-message.ts
 var import_v49 = require("zod/v4");
@@ -8099,7 +8151,7 @@ var SseMCPTransport = class {
         (_b = this.onerror) == null ? void 0 : _b.call(this, error);
         return reject(error);
       }
-      const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new
+      const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough(new import_provider_utils21.EventSourceParserStream());
       const reader = stream.getReader();
       const processEvents = async () => {
         var _a17, _b2, _c2;
@@ -8429,15 +8481,15 @@ var DefaultMCPClient = class {
       (_a17 = options == null ? void 0 : options.abortSignal) == null ? void 0 : _a17.throwIfAborted();
       return self.callTool({ name: name16, args, options });
     };
-    const toolWithExecute = schemas === "automatic" ? (0,
+    const toolWithExecute = schemas === "automatic" ? (0, import_provider_utils22.dynamicTool)({
       description,
-      inputSchema: (0,
+      inputSchema: (0, import_provider_utils22.jsonSchema)({
         ...inputSchema,
         properties: (_a16 = inputSchema.properties) != null ? _a16 : {},
         additionalProperties: false
       }),
       execute
-    }) : (0,
+    }) : (0, import_provider_utils22.tool)({
       description,
       inputSchema: schemas[name16].inputSchema,
       execute
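
In the MCP client, tools discovered with `schemas: "automatic"` are now built with the provider-utils `dynamicTool` plus a `jsonSchema` input wrapper, while explicitly typed schemas still go through `tool()`. Usage sketch; the server URL is hypothetical:

```ts
import { experimental_createMCPClient as createMCPClient } from "ai";

const client = await createMCPClient({
  transport: { type: "sse", url: "https://example.com/mcp" },
});

// schemas defaults to "automatic": tool inputs are wrapped as JSON schema
// and the resulting tools are dynamic (untyped inputs/outputs).
const tools = await client.tools();

// ... pass `tools` to generateText/streamText, then:
await client.close();
```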
@@ -8514,7 +8566,10 @@ async function transcribe({
       modelId: model.modelId
     });
   }
-  const { retry } = prepareRetries({
+  const { retry } = prepareRetries({
+    maxRetries: maxRetriesArg,
+    abortSignal
+  });
   const audioData = audio instanceof URL ? (await download({ url: audio })).data : convertDataContentToUint8Array(audio);
   const result = await retry(
     () => {
@@ -8558,7 +8613,7 @@ var DefaultTranscriptionResult = class {
 };

 // src/ui/call-completion-api.ts
-var
+var import_provider_utils23 = require("@ai-sdk/provider-utils");

 // src/ui/process-text-stream.ts
 async function processTextStream({
@@ -8636,7 +8691,7 @@ async function callCompletionApi({
       }
       case "data": {
         await consumeStream({
-          stream: (0,
+          stream: (0, import_provider_utils23.parseJsonEventStream)({
             stream: response.body,
             schema: uiMessageChunkSchema
           }).pipeThrough(
@@ -8688,7 +8743,7 @@ async function callCompletionApi({
 }

 // src/ui/chat.ts
-var
+var import_provider_utils26 = require("@ai-sdk/provider-utils");

 // src/ui/convert-file-list-to-file-ui-parts.ts
 async function convertFileListToFileUIParts(files) {
@@ -8721,10 +8776,10 @@ async function convertFileListToFileUIParts(files) {
 }

 // src/ui/default-chat-transport.ts
-var
+var import_provider_utils25 = require("@ai-sdk/provider-utils");

 // src/ui/http-chat-transport.ts
-var
+var import_provider_utils24 = require("@ai-sdk/provider-utils");
 var HttpChatTransport = class {
   constructor({
     api = "/api/chat",
@@ -8748,9 +8803,9 @@ var HttpChatTransport = class {
     ...options
   }) {
     var _a16, _b, _c, _d, _e;
-    const resolvedBody = await (0,
-    const resolvedHeaders = await (0,
-    const resolvedCredentials = await (0,
+    const resolvedBody = await (0, import_provider_utils24.resolve)(this.body);
+    const resolvedHeaders = await (0, import_provider_utils24.resolve)(this.headers);
+    const resolvedCredentials = await (0, import_provider_utils24.resolve)(this.credentials);
     const preparedRequest = await ((_a16 = this.prepareSendMessagesRequest) == null ? void 0 : _a16.call(this, {
       api: this.api,
       id: options.chatId,
@@ -8796,9 +8851,9 @@ var HttpChatTransport = class {
   }
   async reconnectToStream(options) {
     var _a16, _b, _c, _d, _e;
-    const resolvedBody = await (0,
-    const resolvedHeaders = await (0,
-    const resolvedCredentials = await (0,
+    const resolvedBody = await (0, import_provider_utils24.resolve)(this.body);
+    const resolvedHeaders = await (0, import_provider_utils24.resolve)(this.headers);
+    const resolvedCredentials = await (0, import_provider_utils24.resolve)(this.credentials);
     const preparedRequest = await ((_a16 = this.prepareReconnectToStreamRequest) == null ? void 0 : _a16.call(this, {
       api: this.api,
       id: options.chatId,
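
`HttpChatTransport` resolves `body`, `headers`, and `credentials` through provider-utils' `resolve()`, so each may be a plain value or a (possibly async) function. Sketch; the token getter is hypothetical:

```ts
import { DefaultChatTransport } from "ai";

// Hypothetical auth helper, standing in for a real token source.
async function getToken(): Promise<string> {
  return "demo-token";
}

const transport = new DefaultChatTransport({
  api: "/api/chat",
  // Re-resolved on each request because it is a function, not a static object.
  headers: async () => ({ Authorization: `Bearer ${await getToken()}` }),
});
```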
@@ -8837,7 +8892,7 @@ var DefaultChatTransport = class extends HttpChatTransport {
     super(options);
   }
   processResponseStream(stream) {
-    return (0,
+    return (0, import_provider_utils25.parseJsonEventStream)({
       stream,
       schema: uiMessageChunkSchema
     }).pipeThrough(
@@ -8856,7 +8911,7 @@ var DefaultChatTransport = class extends HttpChatTransport {
 // src/ui/chat.ts
 var AbstractChat = class {
   constructor({
-    generateId: generateId3 =
+    generateId: generateId3 = import_provider_utils26.generateId,
     id = generateId3(),
     transport = new DefaultChatTransport(),
     messageMetadataSchema,
@@ -8960,6 +9015,15 @@ var AbstractChat = class {
     this.resumeStream = async (options = {}) => {
       await this.makeRequest({ trigger: "resume-stream", ...options });
     };
+    /**
+     * Clear the error state and set the status to ready if the chat is in an error state.
+     */
+    this.clearError = () => {
+      if (this.status === "error") {
+        this.state.error = void 0;
+        this.setStatus({ status: "ready" });
+      }
+    };
     this.addToolResult = async ({
       tool: tool3,
       toolCallId,
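
`AbstractChat` gains `clearError()`, which resets a chat in the `"error"` state back to `"ready"` and drops the stored error. Framework bindings built on this class can surface it directly; a hedged sketch against the shape shown above:

```ts
// Assumed shape of a chat instance built on AbstractChat.
declare const chat: {
  status: "submitted" | "streaming" | "ready" | "error";
  error: Error | undefined;
  clearError: () => void;
};

if (chat.status === "error") {
  console.error(chat.error);
  chat.clearError(); // error cleared, status flips back to "ready"
}
```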
@@ -9395,13 +9459,13 @@ var TextStreamChatTransport = class extends HttpChatTransport {
 };

 // src/ui-message-stream/create-ui-message-stream.ts
-var
+var import_provider_utils27 = require("@ai-sdk/provider-utils");
 function createUIMessageStream({
   execute,
-  onError =
+  onError = import_provider_utils27.getErrorMessage,
   originalMessages,
   onFinish,
-  generateId: generateId3 =
+  generateId: generateId3 = import_provider_utils27.generateId
 }) {
   let controller;
   const ongoingStreamPromises = [];