ai 5.0.0-canary.20 → 5.0.0-canary.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +51 -0
- package/dist/index.d.mts +467 -512
- package/dist/index.d.ts +467 -512
- package/dist/index.js +989 -1411
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +947 -1368
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -13
- package/dist/internal/index.d.ts +7 -13
- package/dist/internal/index.js +126 -126
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +119 -119
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.js
CHANGED
@@ -50,8 +50,7 @@ __export(src_exports, {
   TypeValidationError: () => import_provider16.TypeValidationError,
   UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
   appendClientMessage: () => appendClientMessage,
-
-  asSchema: () => import_provider_utils24.asSchema,
+  asSchema: () => import_provider_utils25.asSchema,
   assistantModelMessageSchema: () => assistantModelMessageSchema,
   callChatApi: () => callChatApi,
   callCompletionApi: () => callCompletionApi,
@@ -66,7 +65,7 @@ __export(src_exports, {
   cosineSimilarity: () => cosineSimilarity,
   createDataStream: () => createDataStream,
   createDataStreamResponse: () => createDataStreamResponse,
-  createIdGenerator: () =>
+  createIdGenerator: () => import_provider_utils25.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   createTextStreamResponse: () => createTextStreamResponse,
   customProvider: () => customProvider,
@@ -81,19 +80,18 @@ __export(src_exports, {
   experimental_transcribe: () => transcribe,
   extractMaxToolInvocationStep: () => extractMaxToolInvocationStep,
   extractReasoningMiddleware: () => extractReasoningMiddleware,
-  generateId: () =>
+  generateId: () => import_provider_utils25.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getTextFromDataUrl: () => getTextFromDataUrl,
   getToolInvocations: () => getToolInvocations,
   isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData: () => isDeepEqualData,
-  jsonSchema: () =>
+  jsonSchema: () => import_provider_utils25.jsonSchema,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
   pipeDataStreamToResponse: () => pipeDataStreamToResponse,
   pipeTextStreamToResponse: () => pipeTextStreamToResponse,
-  processDataStream: () => processDataStream,
   processTextStream: () => processTextStream,
   shouldResubmitMessages: () => shouldResubmitMessages,
   simulateReadableStream: () => simulateReadableStream,
@@ -109,7 +107,7 @@ __export(src_exports, {
   wrapLanguageModel: () => wrapLanguageModel
 });
 module.exports = __toCommonJS(src_exports);
-var
+var import_provider_utils25 = require("@ai-sdk/provider-utils");
 
 // src/data-stream/create-data-stream.ts
 function createDataStream({
@@ -277,237 +275,6 @@ function pipeDataStreamToResponse({
   });
 }
 
-// src/data-stream/process-data-stream.ts
-var import_provider_utils = require("@ai-sdk/provider-utils");
-
-// src/util/async-iterable-stream.ts
-function createAsyncIterableStream(source) {
-  const stream = source.pipeThrough(new TransformStream());
-  stream[Symbol.asyncIterator] = () => {
-    const reader = stream.getReader();
-    return {
-      async next() {
-        const { done, value } = await reader.read();
-        return done ? { done: true, value: void 0 } : { done: false, value };
-      }
-    };
-  };
-  return stream;
-}
-
-// src/data-stream/data-stream-parts.ts
-var import_zod = require("zod");
-var languageModelUsageSchema = import_zod.z.object({
-  inputTokens: import_zod.z.number().optional(),
-  outputTokens: import_zod.z.number().optional(),
-  totalTokens: import_zod.z.number().optional(),
-  reasoningTokens: import_zod.z.number().optional(),
-  cachedInputTokens: import_zod.z.number().optional()
-});
-var finishReasonSchema = import_zod.z.enum([
-  "stop",
-  "length",
-  "tool-calls",
-  "content-filter",
-  "other",
-  "error",
-  "unknown"
-]);
-var toolCallSchema = import_zod.z.object({
-  toolCallId: import_zod.z.string(),
-  toolName: import_zod.z.string(),
-  args: import_zod.z.unknown()
-});
-var toolResultValueSchema = import_zod.z.object({
-  toolCallId: import_zod.z.string(),
-  result: import_zod.z.unknown(),
-  providerMetadata: import_zod.z.any().optional()
-});
-var sourceSchema = import_zod.z.object({
-  type: import_zod.z.literal("source"),
-  sourceType: import_zod.z.literal("url"),
-  id: import_zod.z.string(),
-  url: import_zod.z.string(),
-  title: import_zod.z.string().optional(),
-  providerMetadata: import_zod.z.any().optional()
-  // Use z.any() for generic metadata
-});
-var dataStreamPartSchema = import_zod.z.discriminatedUnion("type", [
-  import_zod.z.object({
-    type: import_zod.z.literal("text"),
-    value: import_zod.z.string()
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("data"),
-    value: import_zod.z.array(import_zod.z.any())
-    // TODO json validation
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("error"),
-    value: import_zod.z.string()
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("message-annotations"),
-    value: import_zod.z.array(import_zod.z.any())
-    // TODO json validation
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("tool-call"),
-    value: toolCallSchema
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("tool-result"),
-    value: toolResultValueSchema
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("tool-call-streaming-start"),
-    value: import_zod.z.object({ toolCallId: import_zod.z.string(), toolName: import_zod.z.string() })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("tool-call-delta"),
-    value: import_zod.z.object({ toolCallId: import_zod.z.string(), argsTextDelta: import_zod.z.string() })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("finish-message"),
-    value: import_zod.z.object({
-      finishReason: finishReasonSchema,
-      // TODO v5 remove usage from finish event (only on step-finish)
-      usage: languageModelUsageSchema.optional()
-    })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("finish-step"),
-    value: import_zod.z.object({
-      isContinued: import_zod.z.boolean(),
-      finishReason: finishReasonSchema,
-      usage: languageModelUsageSchema.optional()
-    })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("start-step"),
-    value: import_zod.z.object({
-      messageId: import_zod.z.string()
-    })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("reasoning"),
-    value: import_zod.z.object({
-      text: import_zod.z.string(),
-      providerMetadata: import_zod.z.record(import_zod.z.any()).optional()
-    })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("source"),
-    value: sourceSchema
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("file"),
-    value: import_zod.z.object({
-      url: import_zod.z.string(),
-      mediaType: import_zod.z.string()
-    })
-  }),
-  import_zod.z.object({
-    type: import_zod.z.literal("reasoning-part-finish"),
-    value: import_zod.z.null()
-  })
-]);
-
-// src/data-stream/process-data-stream.ts
-async function processDataStream({
-  stream,
-  onTextPart,
-  onReasoningPart,
-  onReasoningPartFinish,
-  onSourcePart,
-  onFilePart,
-  onDataPart,
-  onErrorPart,
-  onToolCallStreamingStartPart,
-  onToolCallDeltaPart,
-  onToolCallPart,
-  onToolResultPart,
-  onMessageAnnotationsPart,
-  onFinishMessagePart,
-  onFinishStepPart,
-  onStartStepPart
-}) {
-  const streamParts = createAsyncIterableStream(
-    stream.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils.createEventSourceParserStream)()).pipeThrough(
-      new TransformStream({
-        async transform({ data }, controller) {
-          if (data === "[DONE]") {
-            return;
-          }
-          controller.enqueue(
-            await (0, import_provider_utils.safeParseJSON)({
-              text: data,
-              schema: dataStreamPartSchema
-            })
-          );
-        }
-      })
-    )
-  );
-  for await (const parseResult of streamParts) {
-    if (!parseResult.success) {
-      throw new Error("Failed to parse data stream part");
-    }
-    const { type, value } = parseResult.value;
-    switch (type) {
-      case "text":
-        await (onTextPart == null ? void 0 : onTextPart(value));
-        break;
-      case "reasoning":
-        await (onReasoningPart == null ? void 0 : onReasoningPart(value));
-        break;
-      case "reasoning-part-finish":
-        await (onReasoningPartFinish == null ? void 0 : onReasoningPartFinish(value));
-        break;
-      case "file":
-        await (onFilePart == null ? void 0 : onFilePart(value));
-        break;
-      case "source":
-        await (onSourcePart == null ? void 0 : onSourcePart(value));
-        break;
-      case "data":
-        await (onDataPart == null ? void 0 : onDataPart(value));
-        break;
-      case "error":
-        await (onErrorPart == null ? void 0 : onErrorPart(value));
-        break;
-      case "message-annotations":
-        await (onMessageAnnotationsPart == null ? void 0 : onMessageAnnotationsPart(value));
-        break;
-      case "tool-call-streaming-start":
-        await (onToolCallStreamingStartPart == null ? void 0 : onToolCallStreamingStartPart(value));
-        break;
-      case "tool-call-delta":
-        await (onToolCallDeltaPart == null ? void 0 : onToolCallDeltaPart(value));
-        break;
-      case "tool-call":
-        await (onToolCallPart == null ? void 0 : onToolCallPart(value));
-        break;
-      case "tool-result":
-        await (onToolResultPart == null ? void 0 : onToolResultPart(value));
-        break;
-      case "finish-message":
-        await (onFinishMessagePart == null ? void 0 : onFinishMessagePart(value));
-        break;
-      case "finish-step":
-        await (onFinishStepPart == null ? void 0 : onFinishStepPart(value));
-        break;
-      case "start-step":
-        await (onStartStepPart == null ? void 0 : onStartStepPart(value));
-        break;
-      default: {
-        const exhaustiveCheck = type;
-        throw new Error(`Unknown stream part type: ${exhaustiveCheck}`);
-      }
-    }
-  }
-}
-
 // src/error/index.ts
 var import_provider16 = require("@ai-sdk/provider");
 
@@ -906,267 +673,167 @@ function appendClientMessage({
   ];
 }
 
-// src/ui/
-var
-
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
-// src/ui/get-tool-invocations.ts
-function getToolInvocations(message) {
-  return message.parts.filter(
-    (part) => part.type === "tool-invocation"
-  ).map((part) => part.toolInvocation);
-}
+// src/ui/call-chat-api.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
 
-//
-var
-var
-
+// src/data-stream/data-stream-parts.ts
+var import_zod = require("zod");
+var toolCallSchema = import_zod.z.object({
+  toolCallId: import_zod.z.string(),
+  toolName: import_zod.z.string(),
+  args: import_zod.z.unknown()
+});
+var toolResultValueSchema = import_zod.z.object({
+  toolCallId: import_zod.z.string(),
+  result: import_zod.z.unknown(),
+  providerMetadata: import_zod.z.any().optional()
+});
+var sourceSchema = import_zod.z.object({
+  type: import_zod.z.literal("source"),
+  sourceType: import_zod.z.literal("url"),
+  id: import_zod.z.string(),
+  url: import_zod.z.string(),
+  title: import_zod.z.string().optional(),
+  providerMetadata: import_zod.z.any().optional()
+  // Use z.any() for generic metadata
+});
+var dataStreamPartSchema = import_zod.z.discriminatedUnion("type", [
+  import_zod.z.object({
+    type: import_zod.z.literal("text"),
+    value: import_zod.z.string()
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("error"),
+    value: import_zod.z.string()
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("tool-call"),
+    value: toolCallSchema
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("tool-result"),
+    value: toolResultValueSchema
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("tool-call-streaming-start"),
+    value: import_zod.z.object({ toolCallId: import_zod.z.string(), toolName: import_zod.z.string() })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("tool-call-delta"),
+    value: import_zod.z.object({ toolCallId: import_zod.z.string(), argsTextDelta: import_zod.z.string() })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("reasoning"),
+    value: import_zod.z.object({
+      text: import_zod.z.string(),
+      providerMetadata: import_zod.z.record(import_zod.z.any()).optional()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("source"),
+    value: sourceSchema
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("file"),
+    value: import_zod.z.object({
+      url: import_zod.z.string(),
+      mediaType: import_zod.z.string()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("metadata"),
+    value: import_zod.z.object({
+      metadata: import_zod.z.unknown()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("start-step"),
+    value: import_zod.z.object({
+      metadata: import_zod.z.unknown()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("finish-step"),
+    value: import_zod.z.object({
+      metadata: import_zod.z.unknown()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("start"),
+    value: import_zod.z.object({
+      messageId: import_zod.z.string().optional(),
+      metadata: import_zod.z.unknown()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("finish"),
+    value: import_zod.z.object({
+      metadata: import_zod.z.unknown()
+    })
+  }),
+  import_zod.z.object({
+    type: import_zod.z.literal("reasoning-part-finish"),
+    value: import_zod.z.null()
+  })
+]);
 
-//
-function
+// src/util/consume-stream.ts
+async function consumeStream({
+  stream,
+  onError
+}) {
+  const reader = stream.getReader();
   try {
-
-
-
-
-  }
+    while (true) {
+      const { done } = await reader.read();
+      if (done)
+        break;
+    }
   } catch (error) {
-
-
-
-  };
+    onError == null ? void 0 : onError(error);
+  } finally {
+    reader.releaseLock();
   }
 }
 
-//
-var
-
-
-
-
-
-  (value) => {
-    var _a17, _b;
-    return (_b = (_a17 = globalThis.Buffer) == null ? void 0 : _a17.isBuffer(value)) != null ? _b : false;
-  },
-  { message: "Must be a Buffer" }
-  )
-]);
-function convertToLanguageModelV2DataContent(content) {
-  if (content instanceof Uint8Array) {
-    return { data: content, mediaType: void 0 };
-  }
-  if (content instanceof ArrayBuffer) {
-    return { data: new Uint8Array(content), mediaType: void 0 };
-  }
-  if (typeof content === "string") {
-    try {
-      content = new URL(content);
-    } catch (error) {
-    }
-  }
-  if (content instanceof URL && content.protocol === "data:") {
-    const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
-      content.toString()
-    );
-    if (dataUrlMediaType == null || base64Content == null) {
-      throw new import_provider17.AISDKError({
-        name: "InvalidDataContentError",
-        message: `Invalid data URL format in content ${content.toString()}`
-      });
-    }
-    return { data: base64Content, mediaType: dataUrlMediaType };
-  }
-  return { data: content, mediaType: void 0 };
-}
-function convertDataContentToBase64String(content) {
-  if (typeof content === "string") {
-    return content;
-  }
-  if (content instanceof ArrayBuffer) {
-    return (0, import_provider_utils2.convertUint8ArrayToBase64)(new Uint8Array(content));
-  }
-  return (0, import_provider_utils2.convertUint8ArrayToBase64)(content);
-}
-function convertDataContentToUint8Array(content) {
-  if (content instanceof Uint8Array) {
-    return content;
+// src/ui/process-chat-response.ts
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
+
+// src/util/merge-objects.ts
+function mergeObjects(base, overrides) {
+  if (base === void 0 && overrides === void 0) {
+    return void 0;
   }
-  if (
-
-    return (0, import_provider_utils2.convertBase64ToUint8Array)(content);
-  } catch (error) {
-    throw new InvalidDataContentError({
-      message: "Invalid data content. Content string is not a base64-encoded media.",
-      content,
-      cause: error
-    });
+  if (base === void 0) {
+    return overrides;
   }
-  if (
-    return
+  if (overrides === void 0) {
+    return base;
   }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      case "assistant": {
-        let getToolInvocationsForStep2 = function(step) {
-          return (typeof message.content === "string" ? [] : message.content.filter((part) => part.type === "tool-call")).map((call) => ({
-            state: "call",
-            step,
-            args: call.args,
-            toolCallId: call.toolCallId,
-            toolName: call.toolName
-          }));
-        };
-        var getToolInvocationsForStep = getToolInvocationsForStep2;
-        const parts = [{ type: "step-start" }];
-        let textContent = "";
-        let reasoningTextContent = void 0;
-        if (typeof message.content === "string") {
-          textContent = message.content;
-          parts.push({
-            type: "text",
-            text: message.content
-          });
-        } else {
-          let reasoningPart = void 0;
-          for (const part of message.content) {
-            switch (part.type) {
-              case "text": {
-                reasoningPart = void 0;
-                textContent += part.text;
-                parts.push({
-                  type: "text",
-                  text: part.text
-                });
-                break;
-              }
-              case "reasoning": {
-                if (reasoningPart == null) {
-                  reasoningPart = {
-                    type: "reasoning",
-                    text: ""
-                  };
-                  parts.push(reasoningPart);
-                }
-                reasoningTextContent = (reasoningTextContent != null ? reasoningTextContent : "") + part.text;
-                reasoningPart.text += part.text;
-                reasoningPart.providerMetadata = part.providerOptions;
-                break;
-              }
-              case "tool-call":
-                break;
-              case "file":
-                if (part.data instanceof URL) {
-                  throw new import_provider18.AISDKError({
-                    name: "InvalidAssistantFileData",
-                    message: "File data cannot be a URL"
-                  });
-                }
-                parts.push({
-                  type: "file",
-                  mediaType: part.mediaType,
-                  url: `data:${part.mediaType};base64,${convertDataContentToBase64String(part.data)}`
-                });
-                break;
-            }
-          }
-        }
-        if (isLastMessageAssistant) {
-          const maxStep = extractMaxToolInvocationStep(
-            getToolInvocations(lastMessage)
-          );
-          (_a17 = lastMessage.parts) != null ? _a17 : lastMessage.parts = [];
-          lastMessage.parts.push(...parts);
-          getToolInvocationsForStep2(maxStep === void 0 ? 0 : maxStep + 1).map((call) => ({
-            type: "tool-invocation",
-            toolInvocation: call
-          })).forEach((part) => {
-            lastMessage.parts.push(part);
-          });
-        } else {
-          clonedMessages.push({
-            role: "assistant",
-            id: message.id,
-            createdAt: currentDate(),
-            // generate a createdAt date for the message, will be overridden by the client
-            parts: [
-              ...parts,
-              ...getToolInvocationsForStep2(0).map((call) => ({
-                type: "tool-invocation",
-                toolInvocation: call
-              }))
-            ]
-          });
-        }
-        break;
-      }
-      case "tool": {
-        if (lastMessage.role !== "assistant") {
-          throw new Error(
-            `Tool result must follow an assistant message: ${lastMessage.role}`
-          );
-        }
-        (_b = lastMessage.parts) != null ? _b : lastMessage.parts = [];
-        for (const contentPart of message.content) {
-          const toolCall = getToolInvocations(lastMessage).find(
-            (call) => call.toolCallId === contentPart.toolCallId
-          );
-          const toolCallPart = lastMessage.parts.find(
-            (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === contentPart.toolCallId
-          );
-          if (!toolCall) {
-            throw new Error("Tool call not found in previous message");
-          }
-          toolCall.state = "result";
-          const toolResult = toolCall;
-          toolResult.result = contentPart.result;
-          if (toolCallPart) {
-            toolCallPart.toolInvocation = toolResult;
-          } else {
-            lastMessage.parts.push({
-              type: "tool-invocation",
-              toolInvocation: toolResult
-            });
-          }
-        }
-        break;
-      }
-      default: {
-        const _exhaustiveCheck = role;
-        throw new Error(`Unsupported message role: ${_exhaustiveCheck}`);
+  const result = { ...base };
+  for (const key in overrides) {
+    if (Object.prototype.hasOwnProperty.call(overrides, key)) {
+      const overridesValue = overrides[key];
+      if (overridesValue === void 0)
+        continue;
+      const baseValue = key in base ? base[key] : void 0;
+      const isSourceObject = overridesValue !== null && typeof overridesValue === "object" && !Array.isArray(overridesValue) && !(overridesValue instanceof Date) && !(overridesValue instanceof RegExp);
+      const isTargetObject = baseValue !== null && baseValue !== void 0 && typeof baseValue === "object" && !Array.isArray(baseValue) && !(baseValue instanceof Date) && !(baseValue instanceof RegExp);
+      if (isSourceObject && isTargetObject) {
+        result[key] = mergeObjects(
+          baseValue,
+          overridesValue
+        );
+      } else {
+        result[key] = overridesValue;
       }
     }
   }
-  return
+  return result;
 }
 
-// src/ui/process-chat-response.ts
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-
 // src/util/parse-partial-json.ts
-var
+var import_provider_utils = require("@ai-sdk/provider-utils");
 
 // src/util/fix-json.ts
 function fixJson(input) {
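The hunk above replaces the old data-content helpers at this position with two new utilities, `mergeObjects` (used later for message metadata) and `consumeStream`. A minimal sketch of the merge behaviour implied by the added code, with made-up values:

// Hypothetical inputs; mirrors the rules added above: undefined override values are
// skipped, plain objects merge recursively, everything else is overwritten.
const base = { metadata: { model: "m", usage: { inputTokens: 1 } } };
const overrides = { metadata: { usage: { outputTokens: 2 }, finishReason: "stop" } };
mergeObjects(base, overrides);
// => { metadata: { model: "m", usage: { inputTokens: 1, outputTokens: 2 }, finishReason: "stop" } }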
@@ -1491,33 +1158,48 @@ async function parsePartialJson(jsonText) {
   if (jsonText === void 0) {
     return { value: void 0, state: "undefined-input" };
   }
-  let result = await (0,
+  let result = await (0, import_provider_utils.safeParseJSON)({ text: jsonText });
   if (result.success) {
     return { value: result.value, state: "successful-parse" };
   }
-  result = await (0,
+  result = await (0, import_provider_utils.safeParseJSON)({ text: fixJson(jsonText) });
   if (result.success) {
     return { value: result.value, state: "repaired-parse" };
   }
   return { value: void 0, state: "failed-parse" };
 }
 
+// src/ui/extract-max-tool-invocation-step.ts
+function extractMaxToolInvocationStep(toolInvocations) {
+  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
+    var _a17;
+    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
+  }, 0);
+}
+
+// src/ui/get-tool-invocations.ts
+function getToolInvocations(message) {
+  return message.parts.filter(
+    (part) => part.type === "tool-invocation"
+  ).map((part) => part.toolInvocation);
+}
+
 // src/ui/process-chat-response.ts
-
+function processChatResponse({
   stream,
-
+  onUpdate,
   onToolCall,
   onFinish,
-
-
-
+  lastMessage,
+  newMessageId,
+  messageMetadataSchema
 }) {
   var _a17;
-  const
-  let step =
-  const message =
-    id:
-
+  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+  let step = isContinuation ? 1 + ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) : 0;
+  const message = isContinuation ? structuredClone(lastMessage) : {
+    id: newMessageId,
+    metadata: {},
     role: "assistant",
     parts: []
   };
@@ -1536,198 +1218,214 @@ async function processChatResponse({
     });
   }
   }
-  const data = [];
-  let messageAnnotations = replaceLastMessage ? lastMessage == null ? void 0 : lastMessage.annotations : void 0;
   const partialToolCalls = {};
-
-
-
-
-
-
-
-    const copiedData = [...data];
-    if (messageAnnotations == null ? void 0 : messageAnnotations.length) {
-      message.annotations = messageAnnotations;
-    }
-    const copiedMessage = {
-      // deep copy the message to ensure that deep changes (msg attachments) are updated
-      // with SolidJS. SolidJS uses referential integration of sub-objects to detect changes.
-      ...structuredClone(message),
-      // add a revision id to ensure that the message is updated with SWR. SWR uses a
-      // hashing approach by default to detect changes, but it only works for shallow
-      // changes. This is why we need to add a revision id to ensure that the message
-      // is updated with SWR (without it, the changes get stuck in SWR and are not
-      // forwarded to rendering):
-      revisionId: generateId3()
-    };
-    update({
-      message: copiedMessage,
-      data: copiedData,
-      replaceLastMessage
-    });
-  }
-  await processDataStream({
-    stream,
-    onTextPart(value) {
-      if (currentTextPart == null) {
-        currentTextPart = {
-          type: "text",
-          text: value
-        };
-        message.parts.push(currentTextPart);
-      } else {
-        currentTextPart.text += value;
-      }
-      execUpdate();
-    },
-    onReasoningPart(value) {
-      if (currentReasoningPart == null) {
-        currentReasoningPart = {
-          type: "reasoning",
-          text: value.text,
-          providerMetadata: value.providerMetadata
-        };
-        message.parts.push(currentReasoningPart);
-      } else {
-        currentReasoningPart.text += value.text;
-        currentReasoningPart.providerMetadata = value.providerMetadata;
-      }
-      execUpdate();
-    },
-    onReasoningPartFinish(value) {
-      if (currentReasoningPart != null) {
-        currentReasoningPart = void 0;
-      }
-    },
-    onFilePart(value) {
-      message.parts.push({
-        type: "file",
-        mediaType: value.mediaType,
-        url: value.url
-      });
-      execUpdate();
-    },
-    onSourcePart(value) {
-      message.parts.push({
-        type: "source",
-        source: value
-      });
-      execUpdate();
-    },
-    onToolCallStreamingStartPart(value) {
-      const toolInvocations = getToolInvocations(message);
-      partialToolCalls[value.toolCallId] = {
-        text: "",
-        step,
-        toolName: value.toolName,
-        index: toolInvocations.length
-      };
-      updateToolInvocationPart(value.toolCallId, {
-        state: "partial-call",
-        step,
-        toolCallId: value.toolCallId,
-        toolName: value.toolName,
-        args: void 0
-      });
-      execUpdate();
-    },
-    async onToolCallDeltaPart(value) {
-      const partialToolCall = partialToolCalls[value.toolCallId];
-      partialToolCall.text += value.argsTextDelta;
-      const { value: partialArgs } = await parsePartialJson(
-        partialToolCall.text
-      );
-      updateToolInvocationPart(value.toolCallId, {
-        state: "partial-call",
-        step: partialToolCall.step,
-        toolCallId: value.toolCallId,
-        toolName: partialToolCall.toolName,
-        args: partialArgs
-      });
-      execUpdate();
-    },
-    async onToolCallPart(value) {
-      updateToolInvocationPart(value.toolCallId, {
-        state: "call",
-        step,
-        ...value
-      });
-      execUpdate();
-      if (onToolCall) {
-        const result = await onToolCall({
-          toolCall: value
+  async function updateMessageMetadata(metadata) {
+    if (metadata != null) {
+      const mergedMetadata = message.metadata != null ? mergeObjects(message.metadata, metadata) : metadata;
+      if (messageMetadataSchema != null) {
+        await (0, import_provider_utils2.validateTypes)({
+          value: mergedMetadata,
+          schema: messageMetadataSchema
         });
-
-
-
-
-
-
-
-
+      }
+      message.metadata = mergedMetadata;
+    }
+  }
+  return stream.pipeThrough(
+    new TransformStream({
+      async transform(chunk, controller) {
+        const { type, value } = chunk;
+        switch (type) {
+          case "text": {
+            if (currentTextPart == null) {
+              currentTextPart = {
+                type: "text",
+                text: value
+              };
+              message.parts.push(currentTextPart);
+            } else {
+              currentTextPart.text += value;
+            }
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "reasoning": {
+            if (currentReasoningPart == null) {
+              currentReasoningPart = {
+                type: "reasoning",
+                text: value.text,
+                providerMetadata: value.providerMetadata
+              };
+              message.parts.push(currentReasoningPart);
+            } else {
+              currentReasoningPart.text += value.text;
+              currentReasoningPart.providerMetadata = value.providerMetadata;
+            }
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "reasoning-part-finish": {
+            if (currentReasoningPart != null) {
+              currentReasoningPart = void 0;
+            }
+            break;
+          }
+          case "file": {
+            message.parts.push({
+              type: "file",
+              mediaType: value.mediaType,
+              url: value.url
+            });
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "source": {
+            message.parts.push({
+              type: "source",
+              source: value
+            });
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "tool-call-streaming-start": {
+            const toolInvocations = getToolInvocations(message);
+            partialToolCalls[value.toolCallId] = {
+              text: "",
+              step,
+              toolName: value.toolName,
+              index: toolInvocations.length
+            };
+            updateToolInvocationPart(value.toolCallId, {
+              state: "partial-call",
+              step,
+              toolCallId: value.toolCallId,
+              toolName: value.toolName,
+              args: void 0
+            });
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "tool-call-delta": {
+            const partialToolCall = partialToolCalls[value.toolCallId];
+            partialToolCall.text += value.argsTextDelta;
+            const { value: partialArgs } = await parsePartialJson(
+              partialToolCall.text
+            );
+            updateToolInvocationPart(value.toolCallId, {
+              state: "partial-call",
+              step: partialToolCall.step,
+              toolCallId: value.toolCallId,
+              toolName: partialToolCall.toolName,
+              args: partialArgs
+            });
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "tool-call": {
+            const call = { args: value.args, ...value };
+            updateToolInvocationPart(value.toolCallId, {
+              state: "call",
+              step,
+              ...call
+            });
+            onUpdate == null ? void 0 : onUpdate({ message });
+            if (onToolCall) {
+              const result = await onToolCall({
+                toolCall: call
+              });
+              if (result != null) {
+                updateToolInvocationPart(value.toolCallId, {
+                  state: "result",
+                  step,
+                  ...call,
+                  result
+                });
+                onUpdate == null ? void 0 : onUpdate({ message });
+              }
+            }
+            break;
+          }
+          case "tool-result": {
+            const toolInvocations = getToolInvocations(message);
+            if (toolInvocations == null) {
+              throw new Error("tool_result must be preceded by a tool_call");
+            }
+            const toolInvocationIndex = toolInvocations.findIndex(
+              (invocation) => invocation.toolCallId === value.toolCallId
+            );
+            if (toolInvocationIndex === -1) {
+              throw new Error(
+                "tool_result must be preceded by a tool_call with the same toolCallId"
+              );
+            }
+            const result = { result: value.result, ...value };
+            updateToolInvocationPart(value.toolCallId, {
+              ...toolInvocations[toolInvocationIndex],
+              state: "result",
+              ...result
+            });
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "start-step": {
+            message.parts.push({ type: "step-start" });
+            await updateMessageMetadata(value.metadata);
+            onUpdate == null ? void 0 : onUpdate({ message });
+            break;
+          }
+          case "finish-step": {
+            step += 1;
+            currentTextPart = void 0;
+            currentReasoningPart = void 0;
+            await updateMessageMetadata(value.metadata);
+            if (value.metadata != null) {
+              onUpdate == null ? void 0 : onUpdate({ message });
+            }
+            break;
+          }
+          case "start": {
+            if (value.messageId != null) {
+              message.id = value.messageId;
+            }
+            await updateMessageMetadata(value.metadata);
+            if (value.messageId != null || value.metadata != null) {
+              onUpdate == null ? void 0 : onUpdate({ message });
+            }
+            break;
+          }
+          case "finish": {
+            await updateMessageMetadata(value.metadata);
+            if (value.metadata != null) {
+              onUpdate == null ? void 0 : onUpdate({ message });
+            }
+            break;
+          }
+          case "metadata": {
+            await updateMessageMetadata(value.metadata);
+            if (value.metadata != null) {
+              onUpdate == null ? void 0 : onUpdate({ message });
+            }
+            break;
+          }
+          case "error": {
+            throw new Error(value);
+          }
+          default: {
+            const _exhaustiveCheck = type;
+            throw new Error(`Unhandled stream part: ${_exhaustiveCheck}`);
+          }
         }
+        controller.enqueue(chunk);
+      },
+      flush() {
+        onFinish == null ? void 0 : onFinish({ message });
       }
-    }
-
-      const toolInvocations = getToolInvocations(message);
-      if (toolInvocations == null) {
-        throw new Error("tool_result must be preceded by a tool_call");
-      }
-      const toolInvocationIndex = toolInvocations.findIndex(
-        (invocation) => invocation.toolCallId === value.toolCallId
-      );
-      if (toolInvocationIndex === -1) {
-        throw new Error(
-          "tool_result must be preceded by a tool_call with the same toolCallId"
-        );
-      }
-      updateToolInvocationPart(value.toolCallId, {
-        ...toolInvocations[toolInvocationIndex],
-        state: "result",
-        ...value
-      });
-      execUpdate();
-    },
-    onDataPart(value) {
-      data.push(...value);
-      execUpdate();
-    },
-    onMessageAnnotationsPart(value) {
-      if (messageAnnotations == null) {
-        messageAnnotations = [...value];
-      } else {
-        messageAnnotations.push(...value);
-      }
-      execUpdate();
-    },
-    onFinishStepPart(value) {
-      step += 1;
-      currentTextPart = value.isContinued ? currentTextPart : void 0;
-      currentReasoningPart = void 0;
-    },
-    onStartStepPart(value) {
-      if (!replaceLastMessage) {
-        message.id = value.messageId;
-      }
-      message.parts.push({ type: "step-start" });
-      execUpdate();
-    },
-    onFinishMessagePart(value) {
-      finishReason = value.finishReason;
-      if (value.usage != null) {
-        usage = value.usage;
-      }
-    },
-    onErrorPart(error) {
-      throw new Error(error);
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message, finishReason, usage });
+    })
+  );
 }
 
 // src/ui/process-chat-text-response.ts
-var
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 
 // src/ui/process-text-stream.ts
 async function processTextStream({
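`processChatResponse` is an internal helper, but its new shape is visible above: instead of driving `processDataStream` callbacks, it now returns the incoming part stream piped through a `TransformStream` that folds parts into a UI message and reports progress via `onUpdate`/`onFinish`. A rough consumption sketch under that assumption (`parsedParts`, `render`, and `save` are hypothetical):

const uiStream = processChatResponse({
  stream: parsedParts,                         // ReadableStream of parsed data stream parts
  onUpdate: ({ message }) => render(message),  // called as each part is applied to the message
  onToolCall: async ({ toolCall }) => undefined,
  onFinish: ({ message }) => save(message),
  lastMessage: undefined,
  newMessageId: "msg_0"
});
await consumeStream({ stream: uiStream, onError: (error) => { throw error; } });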
@@ -1749,13 +1447,11 @@ async function processChatTextResponse({
   stream,
   update,
   onFinish,
-
-  generateId: generateId3 = import_provider_utils5.generateId
+  generateId: generateId3 = import_provider_utils3.generateId
 }) {
   const textPart = { type: "text", text: "" };
   const resultMessage = {
     id: generateId3(),
-    createdAt: getCurrentDate(),
     role: "assistant",
     parts: [textPart]
   };
@@ -1763,21 +1459,10 @@ async function processChatTextResponse({
     stream,
     onTextPart: (chunk) => {
       textPart.text += chunk;
-      update({
-        message: { ...resultMessage },
-        data: [],
-        replaceLastMessage: false
-      });
+      update({ message: { ...resultMessage } });
     }
   });
-  onFinish == null ? void 0 : onFinish(resultMessage
-    usage: {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    },
-    finishReason: "unknown"
-  });
+  onFinish == null ? void 0 : onFinish({ message: resultMessage });
 }
 
 // src/ui/call-chat-api.ts
@@ -1796,8 +1481,8 @@ async function callChatApi({
   generateId: generateId3,
   fetch: fetch2 = getOriginalFetch(),
   lastMessage,
-
-
+  requestType = "generate",
+  messageMetadataSchema
 }) {
   var _a17, _b, _c;
   const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.id}`, {
@@ -1835,24 +1520,49 @@ async function callChatApi({
         stream: response.body,
         update: onUpdate,
         onFinish,
-        generateId: generateId3
-        getCurrentDate
+        generateId: generateId3
       });
       return;
     }
     case "data": {
-      await
-        stream:
-
-
-
-
-
-
-
-
-
+      await consumeStream({
+        stream: processChatResponse({
+          stream: (0, import_provider_utils4.parseJsonEventStream)({
+            stream: response.body,
+            schema: dataStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part, controller) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                controller.enqueue(part.value);
+              }
+            })
+          ),
+          onUpdate({ message }) {
+            const copiedMessage = {
+              // deep copy the message to ensure that deep changes (msg attachments) are updated
+              // with SolidJS. SolidJS uses referential integration of sub-objects to detect changes.
+              ...structuredClone(message),
+              // add a revision id to ensure that the message is updated with SWR. SWR uses a
+              // hashing approach by default to detect changes, but it only works for shallow
+              // changes. This is why we need to add a revision id to ensure that the message
+              // is updated with SWR (without it, the changes get stuck in SWR and are not
+              // forwarded to rendering):
+              revisionId: generateId3()
+            };
+            onUpdate({ message: copiedMessage });
+          },
+          lastMessage,
+          onToolCall,
+          onFinish,
+          newMessageId: generateId3(),
+          messageMetadataSchema
+        }),
+        onError: (error) => {
+          throw error;
+        }
       });
       return;
     }
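The "data" branch above now follows one pattern in both the chat and completion paths: parse the SSE body with `parseJsonEventStream`, unwrap each safe-parse result (throwing on failure), and drain the result with `consumeStream`. A stripped-down sketch of that pattern, assuming a `response` whose body is such an event stream:

const parts = parseJsonEventStream({ stream: response.body, schema: dataStreamPartSchema })
  .pipeThrough(new TransformStream({
    transform(part, controller) {
      if (!part.success) throw part.error;   // surface JSON/schema errors immediately
      controller.enqueue(part.value);
    }
  }));
await consumeStream({ stream: parts, onError: (error) => { throw error; } });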
@@ -1864,6 +1574,7 @@ async function callChatApi({
 }
 
 // src/ui/call-completion-api.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
 var getOriginalFetch2 = () => fetch;
 async function callCompletionApi({
   api,
@@ -1879,7 +1590,6 @@ async function callCompletionApi({
   onResponse,
   onFinish,
   onError,
-  onData,
   fetch: fetch2 = getOriginalFetch2()
 }) {
   var _a17;
@@ -1932,17 +1642,28 @@ async function callCompletionApi({
         break;
       }
       case "data": {
-        await
-          stream:
-
-
-
-
-
-
-
-
+        await consumeStream({
+          stream: (0, import_provider_utils5.parseJsonEventStream)({
+            stream: response.body,
+            schema: dataStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const { type, value } = part.value;
+                if (type === "text") {
+                  result += value;
+                  setCompletion(result);
+                } else if (type === "error") {
+                  throw new Error(value);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
           }
         });
         break;
@@ -2307,7 +2028,7 @@ function simulateReadableStream({
 }
 
 // src/util/retry-with-exponential-backoff.ts
-var
+var import_provider17 = require("@ai-sdk/provider");
 var import_provider_utils7 = require("@ai-sdk/provider-utils");
 var retryWithExponentialBackoff = ({
   maxRetries = 2,
@@ -2342,7 +2063,7 @@ async function _retryWithExponentialBackoff(f, {
       errors: newErrors
     });
   }
-  if (error instanceof Error &&
+  if (error instanceof Error && import_provider17.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
     await (0, import_provider_utils7.delay)(delayInMs);
     return _retryWithExponentialBackoff(
       f,
@@ -3073,6 +2794,7 @@ async function generateImage({
   model,
   prompt,
   n = 1,
+  maxImagesPerCall,
   size,
   aspectRatio,
   seed,
@@ -3083,14 +2805,14 @@ async function generateImage({
 }) {
   var _a17, _b;
   const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
-  const
-  const callCount = Math.ceil(n /
+  const maxImagesPerCallWithDefault = (_a17 = maxImagesPerCall != null ? maxImagesPerCall : model.maxImagesPerCall) != null ? _a17 : 1;
+  const callCount = Math.ceil(n / maxImagesPerCallWithDefault);
   const callImageCounts = Array.from({ length: callCount }, (_, i) => {
     if (i < callCount - 1) {
-      return
+      return maxImagesPerCallWithDefault;
     }
-    const remainder = n %
-    return remainder === 0 ?
+    const remainder = n % maxImagesPerCallWithDefault;
+    return remainder === 0 ? maxImagesPerCallWithDefault : remainder;
   });
   const results = await Promise.all(
     callImageCounts.map(
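For the `maxImagesPerCall` override added above, the batching arithmetic is unchanged; only the per-call limit can now come from the call site instead of the model. A small sketch with hypothetical numbers:

// n = 7 requested images, per-call limit 3  =>  callCount = Math.ceil(7 / 3) = 3
const n = 7, limit = 3;
const callCount = Math.ceil(n / limit);
const callImageCounts = Array.from({ length: callCount }, (_, i) =>
  i < callCount - 1 ? limit : n % limit === 0 ? limit : n % limit
);
// => [3, 3, 1]  (a full batch per call, remainder in the last call)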
@@ -3161,8 +2883,8 @@ var DefaultGenerateImageResult = class {
 };
 
 // core/generate-object/generate-object.ts
-var
-var
+var import_provider21 = require("@ai-sdk/provider");
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
 
 // core/generate-text/extract-content-text.ts
 function extractContentText(content) {
@@ -3176,7 +2898,7 @@ function extractContentText(content) {
 }
 
 // core/prompt/convert-to-language-model-prompt.ts
-var
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 
 // src/util/download.ts
 async function download({ url }) {
@@ -3203,6 +2925,89 @@ async function download({ url }) {
   }
 }
 
+// core/prompt/data-content.ts
+var import_provider18 = require("@ai-sdk/provider");
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var import_zod2 = require("zod");
+
+// core/prompt/split-data-url.ts
+function splitDataUrl(dataUrl) {
+  try {
+    const [header, base64Content] = dataUrl.split(",");
+    return {
+      mediaType: header.split(";")[0].split(":")[1],
+      base64Content
+    };
+  } catch (error) {
+    return {
+      mediaType: void 0,
+      base64Content: void 0
+    };
+  }
+}
+
+// core/prompt/data-content.ts
+var dataContentSchema = import_zod2.z.union([
+  import_zod2.z.string(),
+  import_zod2.z.instanceof(Uint8Array),
+  import_zod2.z.instanceof(ArrayBuffer),
+  import_zod2.z.custom(
+    // Buffer might not be available in some environments such as CloudFlare:
+    (value) => {
+      var _a17, _b;
+      return (_b = (_a17 = globalThis.Buffer) == null ? void 0 : _a17.isBuffer(value)) != null ? _b : false;
+    },
+    { message: "Must be a Buffer" }
+  )
+]);
+function convertToLanguageModelV2DataContent(content) {
+  if (content instanceof Uint8Array) {
+    return { data: content, mediaType: void 0 };
+  }
+  if (content instanceof ArrayBuffer) {
+    return { data: new Uint8Array(content), mediaType: void 0 };
+  }
+  if (typeof content === "string") {
+    try {
+      content = new URL(content);
+    } catch (error) {
+    }
+  }
+  if (content instanceof URL && content.protocol === "data:") {
+    const { mediaType: dataUrlMediaType, base64Content } = splitDataUrl(
+      content.toString()
+    );
+    if (dataUrlMediaType == null || base64Content == null) {
+      throw new import_provider18.AISDKError({
+        name: "InvalidDataContentError",
+        message: `Invalid data URL format in content ${content.toString()}`
+      });
+    }
+    return { data: base64Content, mediaType: dataUrlMediaType };
+  }
+  return { data: content, mediaType: void 0 };
+}
+function convertDataContentToUint8Array(content) {
+  if (content instanceof Uint8Array) {
+    return content;
+  }
+  if (typeof content === "string") {
+    try {
+      return (0, import_provider_utils10.convertBase64ToUint8Array)(content);
+    } catch (error) {
+      throw new InvalidDataContentError({
+        message: "Invalid data content. Content string is not a base64-encoded media.",
+        content,
+        cause: error
+      });
+    }
+  }
+  if (content instanceof ArrayBuffer) {
+    return new Uint8Array(content);
+  }
+  throw new InvalidDataContentError({ content });
+}
+
 // core/prompt/convert-to-language-model-prompt.ts
 async function convertToLanguageModelPrompt({
   prompt,
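The data-content helpers added above resolve data URLs by splitting on the comma and reading the media type out of the header. A small sketch of the expected results, using an example data URL only:

splitDataUrl("data:image/png;base64,iVBORw0KGgo=");
// => { mediaType: "image/png", base64Content: "iVBORw0KGgo=" }
convertToLanguageModelV2DataContent("data:image/png;base64,iVBORw0KGgo=");
// => { data: "iVBORw0KGgo=", mediaType: "image/png" }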
@@ -3339,7 +3144,7 @@ async function downloadAssets(messages, downloadImplementation, supportedUrls) {
     }
     return { mediaType, data };
   }).filter(
-    (part) => part.data instanceof URL && part.mediaType != null && !(0,
+    (part) => part.data instanceof URL && part.mediaType != null && !(0, import_provider_utils11.isUrlSupported)({
       url: part.data.toString(),
       mediaType: part.mediaType,
       supportedUrls
@@ -3423,8 +3228,8 @@ function prepareCallSettings({
   topK,
   presencePenalty,
   frequencyPenalty,
-
-
+  seed,
+  stopSequences
 }) {
   if (maxOutputTokens != null) {
     if (!Number.isInteger(maxOutputTokens)) {
@@ -3498,19 +3303,19 @@ function prepareCallSettings({
   }
   return {
     maxOutputTokens,
-    temperature
+    temperature,
     topP,
     topK,
     presencePenalty,
     frequencyPenalty,
-    stopSequences
+    stopSequences,
     seed
   };
 }
 
 // core/prompt/standardize-prompt.ts
-var
-var
+var import_provider19 = require("@ai-sdk/provider");
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_zod8 = require("zod");
 
 // core/prompt/message.ts
@@ -3646,19 +3451,19 @@ var coreMessageSchema = modelMessageSchema;
 // core/prompt/standardize-prompt.ts
 async function standardizePrompt(prompt) {
   if (prompt.prompt == null && prompt.messages == null) {
-    throw new
+    throw new import_provider19.InvalidPromptError({
       prompt,
       message: "prompt or messages must be defined"
     });
   }
   if (prompt.prompt != null && prompt.messages != null) {
-    throw new
+    throw new import_provider19.InvalidPromptError({
       prompt,
       message: "prompt and messages cannot be defined at the same time"
     });
   }
   if (prompt.system != null && typeof prompt.system !== "string") {
-    throw new
+    throw new import_provider19.InvalidPromptError({
       prompt,
       message: "system must be a string"
     });
@@ -3671,23 +3476,23 @@ async function standardizePrompt(prompt) {
   } else if (prompt.messages != null) {
     messages = prompt.messages;
   } else {
-    throw new
+    throw new import_provider19.InvalidPromptError({
       prompt,
       message: "prompt or messages must be defined"
     });
   }
   if (messages.length === 0) {
-    throw new
+    throw new import_provider19.InvalidPromptError({
       prompt,
       message: "messages must not be empty"
     });
   }
-  const validationResult = await (0,
+  const validationResult = await (0, import_provider_utils12.safeValidateTypes)({
     value: messages,
     schema: import_zod8.z.array(modelMessageSchema)
   });
   if (!validationResult.success) {
-    throw new
+    throw new import_provider19.InvalidPromptError({
       prompt,
       message: "messages must be an array of ModelMessage",
       cause: validationResult.error
@@ -3700,8 +3505,25 @@ async function standardizePrompt(prompt) {
|
|
3700
3505
|
}
|
3701
3506
|
|
3702
3507
|
// core/generate-object/output-strategy.ts
|
3703
|
-
var
|
3704
|
-
var
|
3508
|
+
var import_provider20 = require("@ai-sdk/provider");
|
3509
|
+
var import_provider_utils13 = require("@ai-sdk/provider-utils");
|
3510
|
+
|
3511
|
+
// src/util/async-iterable-stream.ts
|
3512
|
+
function createAsyncIterableStream(source) {
|
3513
|
+
const stream = source.pipeThrough(new TransformStream());
|
3514
|
+
stream[Symbol.asyncIterator] = () => {
|
3515
|
+
const reader = stream.getReader();
|
3516
|
+
return {
|
3517
|
+
async next() {
|
3518
|
+
const { done, value } = await reader.read();
|
3519
|
+
return done ? { done: true, value: void 0 } : { done: false, value };
|
3520
|
+
}
|
3521
|
+
};
|
3522
|
+
};
|
3523
|
+
return stream;
|
3524
|
+
}
|
3525
|
+
|
3526
|
+
// core/generate-object/output-strategy.ts
|
3705
3527
|
var noSchemaOutputStrategy = {
|
3706
3528
|
type: "no-schema",
|
3707
3529
|
jsonSchema: void 0,
|
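
Note: the `createAsyncIterableStream` helper added above wires `Symbol.asyncIterator` onto a piped-through ReadableStream so callers can consume it with `for await`. A standalone TypeScript sketch of the same idea, written as an async-generator variant (the `iterateStream` name and the sample stream are illustrative, not part of the package):

// Variant sketch: expose a ReadableStream as an async iterable for for-await consumption.
async function* iterateStream<T>(stream: ReadableStream<T>): AsyncGenerator<T> {
  const reader = stream.getReader();
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) return;
      yield value;
    }
  } finally {
    reader.releaseLock();
  }
}

async function demo() {
  const source = new ReadableStream<string>({
    start(controller) {
      controller.enqueue('{"partial":');
      controller.enqueue("true}");
      controller.close();
    },
  });
  for await (const chunk of iterateStream(source)) {
    console.log(chunk); // logs each enqueued chunk in order
  }
}
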
@@ -3721,7 +3543,7 @@ var noSchemaOutputStrategy = {
  } : { success: true, value };
  },
  createElementStream() {
- throw new
+ throw new import_provider20.UnsupportedFunctionalityError({
  functionality: "element streams in no-schema mode"
  });
  }
@@ -3740,10 +3562,10 @@ var objectOutputStrategy = (schema) => ({
  };
  },
  async validateFinalResult(value) {
- return (0,
+ return (0, import_provider_utils13.safeValidateTypes)({ value, schema });
  },
  createElementStream() {
- throw new
+ throw new import_provider20.UnsupportedFunctionalityError({
  functionality: "element streams in object mode"
  });
  }
@@ -3771,10 +3593,10 @@ var arrayOutputStrategy = (schema) => {
  isFinalDelta
  }) {
  var _a17;
- if (!(0,
+ if (!(0, import_provider20.isJSONObject)(value) || !(0, import_provider20.isJSONArray)(value.elements)) {
  return {
  success: false,
- error: new
+ error: new import_provider20.TypeValidationError({
  value,
  cause: "value must be an object that contains an array of elements"
  })
@@ -3784,7 +3606,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = await (0,
+ const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -3814,10 +3636,10 @@ var arrayOutputStrategy = (schema) => {
  };
  },
  async validateFinalResult(value) {
- if (!(0,
+ if (!(0, import_provider20.isJSONObject)(value) || !(0, import_provider20.isJSONArray)(value.elements)) {
  return {
  success: false,
- error: new
+ error: new import_provider20.TypeValidationError({
  value,
  cause: "value must be an object that contains an array of elements"
  })
@@ -3825,7 +3647,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = await (0,
+ const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -3880,10 +3702,10 @@ var enumOutputStrategy = (enumValues) => {
  additionalProperties: false
  },
  async validateFinalResult(value) {
- if (!(0,
+ if (!(0, import_provider20.isJSONObject)(value) || typeof value.result !== "string") {
  return {
  success: false,
- error: new
+ error: new import_provider20.TypeValidationError({
  value,
  cause: 'value must be an object that contains a string in the "result" property.'
  })
@@ -3892,17 +3714,17 @@ var enumOutputStrategy = (enumValues) => {
  const result = value.result;
  return enumValues.includes(result) ? { success: true, value: result } : {
  success: false,
- error: new
+ error: new import_provider20.TypeValidationError({
  value,
  cause: "value must be a string in the enum"
  })
  };
  },
  async validatePartialResult({ value, textDelta }) {
- if (!(0,
+ if (!(0, import_provider20.isJSONObject)(value) || typeof value.result !== "string") {
  return {
  success: false,
- error: new
+ error: new import_provider20.TypeValidationError({
  value,
  cause: 'value must be an object that contains a string in the "result" property.'
  })
@@ -3915,7 +3737,7 @@ var enumOutputStrategy = (enumValues) => {
  if (value.result.length === 0 || possibleEnumValues.length === 0) {
  return {
  success: false,
- error: new
+ error: new import_provider20.TypeValidationError({
  value,
  cause: "value must be a string in the enum"
  })
@@ -3930,7 +3752,7 @@ var enumOutputStrategy = (enumValues) => {
  };
  },
  createElementStream() {
- throw new
+ throw new import_provider20.UnsupportedFunctionalityError({
  functionality: "element streams in enum mode"
  });
  }
@@ -3943,9 +3765,9 @@ function getOutputStrategy({
  }) {
  switch (output) {
  case "object":
- return objectOutputStrategy((0,
+ return objectOutputStrategy((0, import_provider_utils13.asSchema)(schema));
  case "array":
- return arrayOutputStrategy((0,
+ return arrayOutputStrategy((0, import_provider_utils13.asSchema)(schema));
  case "enum":
  return enumOutputStrategy(enumValues);
  case "no-schema":
@@ -4076,7 +3898,7 @@ function validateObjectGenerationInput({
  }

  // core/generate-object/generate-object.ts
- var originalGenerateId = (0,
+ var originalGenerateId = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function generateObject(options) {
  const {
  model,
@@ -4252,7 +4074,7 @@ async function generateObject(options) {
  request = (_a17 = generateResult.request) != null ? _a17 : {};
  response = generateResult.responseData;
  async function processResult(result2) {
- const parseResult = await (0,
+ const parseResult = await (0, import_provider_utils14.safeParseJSON)({ text: result2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -4287,7 +4109,7 @@ async function generateObject(options) {
  try {
  object2 = await processResult(result);
  } catch (error) {
- if (repairText != null && NoObjectGeneratedError.isInstance(error) && (
+ if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider21.JSONParseError.isInstance(error.cause) || import_provider21.TypeValidationError.isInstance(error.cause))) {
  const repairedText = await repairText({
  text: result,
  error: error.cause
@@ -4348,7 +4170,7 @@ var DefaultGenerateObjectResult = class {
  };

  // core/generate-object/stream-object.ts
- var
+ var import_provider_utils15 = require("@ai-sdk/provider-utils");

  // src/util/create-resolvable-promise.ts
  function createResolvablePromise() {
@@ -4492,7 +4314,7 @@ function now() {
  }

  // core/generate-object/stream-object.ts
- var originalGenerateId2 = (0,
+ var originalGenerateId2 = (0, import_provider_utils15.createIdGenerator)({ prefix: "aiobj", size: 24 });
  function streamObject(options) {
  const {
  model,
@@ -4997,8 +4819,8 @@ var DefaultStreamObjectResult = class {
  };

  // src/error/no-speech-generated-error.ts
- var
- var NoSpeechGeneratedError = class extends
+ var import_provider22 = require("@ai-sdk/provider");
+ var NoSpeechGeneratedError = class extends import_provider22.AISDKError {
  constructor(options) {
  super({
  name: "AI_NoSpeechGeneratedError",
@@ -5087,23 +4909,10 @@ var DefaultSpeechResult = class {
  };

  // core/generate-text/generate-text.ts
- var
-
- // src/util/split-on-last-whitespace.ts
- var lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
- function splitOnLastWhitespace(text2) {
- const match = text2.match(lastWhitespaceRegexp);
- return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : void 0;
- }
-
- // src/util/remove-text-after-last-whitespace.ts
- function removeTextAfterLastWhitespace(text2) {
- const match = splitOnLastWhitespace(text2);
- return match ? match.prefix + match.whitespace : text2;
- }
+ var import_provider_utils18 = require("@ai-sdk/provider-utils");

  // core/prompt/prepare-tools-and-tool-choice.ts
- var
+ var import_provider_utils16 = require("@ai-sdk/provider-utils");

  // src/util/is-non-empty-object.ts
  function isNonEmptyObject(object2) {
@@ -5135,7 +4944,7 @@ function prepareToolsAndToolChoice({
  type: "function",
  name: name17,
  description: tool2.description,
- parameters: (0,
+ parameters: (0, import_provider_utils16.asSchema)(tool2.parameters).jsonSchema
  };
  case "provider-defined":
  return {
@@ -5203,18 +5012,9 @@ function asContent({
  ...toolResults
  ];
  }
- function extractFiles(content) {
- return content.filter((part) => part.type === "file").map((part) => part.file);
- }
- function extractReasoning(content) {
- return content.filter((part) => part.type === "reasoning");
- }
- function extractSources(content) {
- return content.filter((part) => part.type === "source");
- }

  // core/generate-text/parse-tool-call.ts
- var
+ var import_provider_utils17 = require("@ai-sdk/provider-utils");
  async function parseToolCall({
  toolCall,
  tools,
@@ -5238,7 +5038,7 @@ async function parseToolCall({
  tools,
  parameterSchema: ({ toolName }) => {
  const { parameters } = tools[toolName];
- return (0,
+ return (0, import_provider_utils17.asSchema)(parameters).jsonSchema;
  },
  system,
  messages,
@@ -5268,8 +5068,8 @@ async function doParseToolCall({
  availableTools: Object.keys(tools)
  });
  }
- const schema = (0,
- const parseResult = toolCall.args.trim() === "" ? await (0,
+ const schema = (0, import_provider_utils17.asSchema)(tool2.parameters);
+ const parseResult = toolCall.args.trim() === "" ? await (0, import_provider_utils17.safeValidateTypes)({ value: {}, schema }) : await (0, import_provider_utils17.safeParseJSON)({ text: toolCall.args, schema });
  if (parseResult.success === false) {
  throw new InvalidToolArgumentsError({
  toolName,
@@ -5285,85 +5085,111 @@ async function doParseToolCall({
  };
  }

- // core/generate-text/
-
-
-
-
+ // core/generate-text/step-result.ts
+ var DefaultStepResult = class {
+ constructor({
+ content,
+ finishReason,
+ usage,
+ warnings,
+ request,
+ response,
+ providerMetadata
+ }) {
+ this.content = content;
+ this.finishReason = finishReason;
+ this.usage = usage;
+ this.warnings = warnings;
+ this.request = request;
+ this.response = response;
+ this.providerMetadata = providerMetadata;
+ }
+ get text() {
+ return this.content.filter((part) => part.type === "text").map((part) => part.text).join("");
+ }
+ get reasoning() {
+ return this.content.filter((part) => part.type === "reasoning");
+ }
+ get reasoningText() {
+ return this.reasoning.length === 0 ? void 0 : this.reasoning.map((part) => part.text).join("");
+ }
+ get files() {
+ return this.content.filter((part) => part.type === "file").map((part) => part.file);
+ }
+ get sources() {
+ return this.content.filter((part) => part.type === "source");
+ }
+ get toolCalls() {
+ return this.content.filter((part) => part.type === "tool-call");
+ }
+ get toolResults() {
+ return this.content.filter((part) => part.type === "tool-result");
+ }
+ };

  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
-
-
- reasoning,
- tools,
- toolCalls,
- toolResults,
- messageId,
- generateMessageId
+ content: inputContent,
+ tools
  }) {
  const responseMessages = [];
- const content =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ const content = inputContent.filter((part) => part.type !== "tool-result" && part.type !== "source").filter((part) => part.type !== "text" || part.text.length > 0).map((part) => {
+ switch (part.type) {
+ case "text":
+ return part;
+ case "reasoning":
+ return {
+ type: "reasoning",
+ text: part.text,
+ providerOptions: part.providerMetadata
+ };
+ case "file":
+ return {
+ type: "file",
+ data: part.file.base64,
+ mediaType: part.file.mediaType
+ };
+ case "tool-call":
+ return part;
+ }
+ });
  if (content.length > 0) {
  responseMessages.push({
  role: "assistant",
- content
- id: messageId
+ content
  });
  }
-
+ const toolResultContent = inputContent.filter((part) => part.type === "tool-result").map((toolResult) => {
+ const tool2 = tools[toolResult.toolName];
+ return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
+ type: "tool-result",
+ toolCallId: toolResult.toolCallId,
+ toolName: toolResult.toolName,
+ result: tool2.experimental_toToolResultContent(toolResult.result),
+ experimental_content: tool2.experimental_toToolResultContent(
+ toolResult.result
+ )
+ } : {
+ type: "tool-result",
+ toolCallId: toolResult.toolCallId,
+ toolName: toolResult.toolName,
+ result: toolResult.result
+ };
+ });
+ if (toolResultContent.length > 0) {
  responseMessages.push({
  role: "tool",
-
- content: toolResults.map((toolResult) => {
- const tool2 = tools[toolResult.toolName];
- return (tool2 == null ? void 0 : tool2.experimental_toToolResultContent) != null ? {
- type: "tool-result",
- toolCallId: toolResult.toolCallId,
- toolName: toolResult.toolName,
- result: tool2.experimental_toToolResultContent(toolResult.result),
- experimental_content: tool2.experimental_toToolResultContent(
- toolResult.result
- )
- } : {
- type: "tool-result",
- toolCallId: toolResult.toolCallId,
- toolName: toolResult.toolName,
- result: toolResult.result
- };
- })
+ content: toolResultContent
  });
  }
  return responseMessages;
  }

  // core/generate-text/generate-text.ts
- var originalGenerateId3 = (0,
+ var originalGenerateId3 = (0, import_provider_utils18.createIdGenerator)({
  prefix: "aitxt",
  size: 24
  });
- var originalGenerateMessageId = (0, import_provider_utils17.createIdGenerator)({
- prefix: "msg",
- size: 24
- });
  async function generateText({
  model,
  tools,
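
Note: the new `DefaultStepResult` above stores a single `content` array of typed parts and derives `text`, `reasoning`, `files`, `sources`, `toolCalls`, and `toolResults` from it on demand. A small TypeScript sketch of that derivation pattern (the part shapes are simplified assumptions, not the package's exact types):

type ContentPart =
  | { type: "text"; text: string }
  | { type: "reasoning"; text: string }
  | { type: "tool-call"; toolName: string; args: unknown }
  | { type: "tool-result"; toolName: string; result: unknown };

// Derive the step text the same way the getter does: concatenate the text parts.
function textOf(content: ContentPart[]): string {
  return content
    .filter((part): part is Extract<ContentPart, { type: "text" }> => part.type === "text")
    .map((part) => part.text)
    .join("");
}

const step: ContentPart[] = [
  { type: "reasoning", text: "thinking about the question" },
  { type: "text", text: "Hello, " },
  { type: "text", text: "world." },
];
console.log(textOf(step)); // "Hello, world."
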
@@ -5375,9 +5201,7 @@ async function generateText({
  abortSignal,
  headers,
  maxSteps = 1,
- experimental_generateMessageId: generateMessageId = originalGenerateMessageId,
  experimental_output: output,
- experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  providerOptions,
  experimental_activeTools: activeTools,
@@ -5433,22 +5257,14 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a17, _b, _c, _d
+ var _a17, _b, _c, _d;
  const callSettings2 = prepareCallSettings(settings);
  let currentModelResponse;
  let currentToolCalls = [];
  let currentToolResults = [];
  let stepCount = 0;
  const responseMessages = [];
- let text2 = "";
- const sources = [];
  const steps = [];
- let usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
- let stepType = "initial";
  do {
  const stepInputMessages = [
  ...initialPrompt.messages,
@@ -5514,7 +5330,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d2,
+ var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
  const result = await stepModel.doGenerate({
  ...callSettings2,
  tools: stepTools,
@@ -5528,7 +5344,7 @@ async function generateText({
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (
+ modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : stepModel.modelId,
  headers: (_g = result.response) == null ? void 0 : _g.headers,
  body: (_h = result.response) == null ? void 0 : _h.body
  };
@@ -5587,89 +5403,35 @@ async function generateText({
  messages: stepInputMessages,
  abortSignal
  });
- usage = addLanguageModelUsage(usage, currentModelResponse.usage);
- let nextStepType = "done";
- if (++stepCount < maxSteps) {
- if (continueSteps && currentModelResponse.finishReason === "length" && // only use continue when there are no tool calls:
- currentToolCalls.length === 0) {
- nextStepType = "continue";
- } else if (
- // there are tool calls:
- currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length
- ) {
- nextStepType = "tool-result";
- }
- }
  const stepContent = asContent({
  content: currentModelResponse.content,
  toolCalls: currentToolCalls,
  toolResults: currentToolResults
  });
-
-
-
-
-
- sources.push(
- ...currentModelResponse.content.filter(
- (part) => part.type === "source"
- )
+ responseMessages.push(
+ ...toResponseMessages({
+ content: stepContent,
+ tools: tools != null ? tools : {}
+ })
  );
-
- const lastMessage = responseMessages[responseMessages.length - 1];
- if (typeof lastMessage.content === "string") {
- lastMessage.content += stepText;
- } else {
- lastMessage.content.push({
- text: stepText,
- type: "text"
- });
- }
- } else {
- responseMessages.push(
- ...toResponseMessages({
- text: text2,
- files: extractFiles(stepContent),
- reasoning: extractReasoning(stepContent).map((part) => ({
- type: "reasoning",
- text: part.text,
- providerOptions: part.providerMetadata
- })),
- tools: tools != null ? tools : {},
- toolCalls: currentToolCalls,
- toolResults: currentToolResults,
- messageId: generateMessageId(),
- generateMessageId
- })
- );
- }
- const currentStepResult = {
- stepType,
+ const currentStepResult = new DefaultStepResult({
  content: stepContent,
- text: stepText,
- reasoningText: asReasoningText(extractReasoning(stepContent)),
- reasoning: extractReasoning(stepContent),
- files: extractFiles(stepContent),
- sources: extractSources(stepContent),
- toolCalls: currentToolCalls,
- toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage: currentModelResponse.usage,
  warnings: currentModelResponse.warnings,
-
+ providerMetadata: currentModelResponse.providerMetadata,
+ request: (_d = currentModelResponse.request) != null ? _d : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
  messages: structuredClone(responseMessages)
- }
-
- isContinued: nextStepType === "continue"
- };
+ }
+ });
  steps.push(currentStepResult);
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
-
-
+ } while (++stepCount < maxSteps && // there are tool calls:
+ currentToolCalls.length > 0 && // all current tool calls have results:
+ currentToolResults.length === currentToolCalls.length);
  span.setAttributes(
  selectTelemetryAttributes({
  telemetry,
@@ -5690,32 +5452,17 @@ async function generateText({
  }
  })
  );
- const
- { text: text2 },
- {
- response: currentModelResponse.response,
- usage,
- finishReason: currentModelResponse.finishReason
- }
- ));
+ const lastStep = steps[steps.length - 1];
  return new DefaultGenerateTextResult({
- text: text2,
- content: asContent({
- content: currentModelResponse.content,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- }),
- resolvedOutput,
- finishReason: currentModelResponse.finishReason,
- usage,
- warnings: currentModelResponse.warnings,
- request: (_f = currentModelResponse.request) != null ? _f : {},
- response: {
- ...currentModelResponse.response,
- messages: responseMessages
- },
  steps,
-
+ resolvedOutput: await (output == null ? void 0 : output.parseOutput(
+ { text: lastStep.text },
+ {
+ response: lastStep.response,
+ usage: lastStep.usage,
+ finishReason: lastStep.finishReason
+ }
+ ))
  });
  }
  });
@@ -5797,35 +5544,67 @@ async function executeTools({
  }
  var DefaultGenerateTextResult = class {
  constructor(options) {
- this.text = options.text;
- this.content = options.content;
- this.finishReason = options.finishReason;
- this.usage = options.usage;
- this.warnings = options.warnings;
- this.request = options.request;
- this.response = options.response;
  this.steps = options.steps;
- this.providerMetadata = options.providerMetadata;
  this.resolvedOutput = options.resolvedOutput;
  }
+ get finalStep() {
+ return this.steps[this.steps.length - 1];
+ }
+ get content() {
+ return this.finalStep.content;
+ }
+ get text() {
+ return this.finalStep.text;
+ }
  get files() {
- return
+ return this.finalStep.files;
  }
  get reasoningText() {
-
- return texts.length > 0 ? texts.join("") : void 0;
+ return this.finalStep.reasoningText;
  }
  get reasoning() {
- return this.
+ return this.finalStep.reasoning;
  }
  get toolCalls() {
- return this.
+ return this.finalStep.toolCalls;
  }
  get toolResults() {
- return this.
+ return this.finalStep.toolResults;
  }
  get sources() {
- return this.
+ return this.finalStep.sources;
+ }
+ get finishReason() {
+ return this.finalStep.finishReason;
+ }
+ get warnings() {
+ return this.finalStep.warnings;
+ }
+ get providerMetadata() {
+ return this.finalStep.providerMetadata;
+ }
+ get response() {
+ return this.finalStep.response;
+ }
+ get request() {
+ return this.finalStep.request;
+ }
+ get usage() {
+ return this.finalStep.usage;
+ }
+ get totalUsage() {
+ return this.steps.reduce(
+ (totalUsage, step) => {
+ return addLanguageModelUsage(totalUsage, step.usage);
+ },
+ {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0,
+ reasoningTokens: void 0,
+ cachedInputTokens: void 0
+ }
+ );
  }
  get experimental_output() {
  if (this.resolvedOutput == null) {
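
Note: with the reworked result class above, most result fields are now getters over the final step, while `totalUsage` sums usage across all steps via `addLanguageModelUsage`. A hedged consumer-side sketch (the model id and prompt are placeholders, and exact option names on this canary build may differ):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider/model

const result = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "What changed between these package versions?",
  maxSteps: 3, // multi-step tool loop, matching the do/while above
});

console.log(result.text);         // text of the final step
console.log(result.steps.length); // one step result per executed step
console.log(result.usage);        // usage of the final step
console.log(result.totalUsage);   // usage aggregated over all steps
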
@@ -5855,7 +5634,7 @@ __export(output_exports, {
  object: () => object,
  text: () => text
  });
- var
+ var import_provider_utils19 = require("@ai-sdk/provider-utils");
  var text = () => ({
  type: "text",
  responseFormat: { type: "text" },
@@ -5869,7 +5648,7 @@ var text = () => ({
  var object = ({
  schema: inputSchema
  }) => {
- const schema = (0,
+ const schema = (0, import_provider_utils19.asSchema)(inputSchema);
  return {
  type: "object",
  responseFormat: {
@@ -5895,7 +5674,7 @@ var object = ({
  }
  },
  async parseOutput({ text: text2 }, context) {
- const parseResult = await (0,
+ const parseResult = await (0, import_provider_utils19.safeParseJSON)({ text: text2 });
  if (!parseResult.success) {
  throw new NoObjectGeneratedError({
  message: "No object generated: could not parse the response.",
@@ -5906,7 +5685,7 @@ var object = ({
  finishReason: context.finishReason
  });
  }
- const validationResult = await (0,
+ const validationResult = await (0, import_provider_utils19.safeValidateTypes)({
  value: parseResult.value,
  schema
  });
@@ -5926,8 +5705,8 @@ var object = ({
  };

  // core/generate-text/smooth-stream.ts
- var
- var
+ var import_provider_utils20 = require("@ai-sdk/provider-utils");
+ var import_provider23 = require("@ai-sdk/provider");
  var CHUNKING_REGEXPS = {
  word: /\S+\s+/m,
  line: /\n+/m
@@ -5935,7 +5714,7 @@ var CHUNKING_REGEXPS = {
  function smoothStream({
  delayInMs = 10,
  chunking = "word",
- _internal: { delay: delay2 =
+ _internal: { delay: delay2 = import_provider_utils20.delay } = {}
  } = {}) {
  let detectChunk;
  if (typeof chunking === "function") {
@@ -5957,7 +5736,7 @@ function smoothStream({
  } else {
  const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
  if (chunkingRegex == null) {
- throw new
+ throw new import_provider23.InvalidArgumentError({
  argument: "chunking",
  message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
  });
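
Note: `smoothStream` above accepts `chunking: "word" | "line" | RegExp` plus a configurable delay. A usage sketch; wiring it in through streamText's `experimental_transform` option is an assumption about the public API rather than something shown in this diff, and the model and prompt are placeholders:

import { smoothStream, streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider/model

const result = streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Write a short haiku about streams.",
  experimental_transform: smoothStream({
    delayInMs: 20,    // pause between flushed chunks
    chunking: "line", // or "word", or a custom RegExp
  }),
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
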
@@ -5995,34 +5774,15 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- var
+ var import_provider_utils22 = require("@ai-sdk/provider-utils");

  // src/util/as-array.ts
  function asArray(value) {
  return value === void 0 ? [] : Array.isArray(value) ? value : [value];
  }

- // src/util/consume-stream.ts
- async function consumeStream({
- stream,
- onError
- }) {
- const reader = stream.getReader();
- try {
- while (true) {
- const { done } = await reader.read();
- if (done)
- break;
- }
- } catch (error) {
- onError == null ? void 0 : onError(error);
- } finally {
- reader.releaseLock();
- }
- }
-
  // core/generate-text/run-tools-transformation.ts
- var
+ var import_provider_utils21 = require("@ai-sdk/provider-utils");
  function runToolsTransformation({
  tools,
  generatorStream,
@@ -6108,7 +5868,7 @@ function runToolsTransformation({
  controller.enqueue(toolCall);
  const tool2 = tools[toolCall.toolName];
  if (tool2.execute != null) {
- const toolExecutionId = (0,
+ const toolExecutionId = (0, import_provider_utils21.generateId)();
  outstandingToolResults.add(toolExecutionId);
  recordSpan({
  name: "ai.toolCall",
@@ -6217,14 +5977,10 @@ function runToolsTransformation({
  }

  // core/generate-text/stream-text.ts
- var originalGenerateId4 = (0,
+ var originalGenerateId4 = (0, import_provider_utils22.createIdGenerator)({
  prefix: "aitxt",
  size: 24
  });
- var originalGenerateMessageId2 = (0, import_provider_utils21.createIdGenerator)({
- prefix: "msg",
- size: 24
- });
  function streamText({
  model,
  tools,
@@ -6236,9 +5992,7 @@ function streamText({
  abortSignal,
  headers,
  maxSteps = 1,
- experimental_generateMessageId: generateMessageId = originalGenerateMessageId2,
  experimental_output: output,
- experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  providerOptions,
  experimental_toolCallStreaming = false,
@@ -6275,7 +6029,6 @@ function streamText({
  repairToolCall,
  maxSteps,
  output,
- continueSteps,
  providerOptions,
  onChunk,
  onError,
@@ -6283,8 +6036,7 @@ function streamText({
  onStepFinish,
  now: now2,
  currentDate,
- generateId: generateId3
- generateMessageId
+ generateId: generateId3
  });
  }
  function createOutputTransformStream(output) {
@@ -6310,7 +6062,7 @@ function createOutputTransformStream(output) {
  }
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type === "step
+ if (chunk.type === "finish-step") {
  publishTextChunk({ controller });
  }
  if (chunk.type !== "text") {
@@ -6354,32 +6106,18 @@ var DefaultStreamTextResult = class {
  repairToolCall,
  maxSteps,
  output,
- continueSteps,
  providerOptions,
  now: now2,
  currentDate,
  generateId: generateId3,
- generateMessageId,
  onChunk,
  onError,
  onFinish,
  onStepFinish
  }) {
- this.
- this.usagePromise = new DelayedPromise();
+ this.totalUsagePromise = new DelayedPromise();
  this.finishReasonPromise = new DelayedPromise();
- this.providerMetadataPromise = new DelayedPromise();
- this.textPromise = new DelayedPromise();
- this.reasoningPromise = new DelayedPromise();
- this.reasoningDetailsPromise = new DelayedPromise();
- this.sourcesPromise = new DelayedPromise();
- this.filesPromise = new DelayedPromise();
- this.toolCallsPromise = new DelayedPromise();
- this.toolResultsPromise = new DelayedPromise();
- this.requestPromise = new DelayedPromise();
- this.responsePromise = new DelayedPromise();
  this.stepsPromise = new DelayedPromise();
- this.contentPromise = new DelayedPromise();
  if (maxSteps < 1) {
  throw new InvalidArgumentError({
  parameter: "maxSteps",
@@ -6388,23 +6126,14 @@ var DefaultStreamTextResult = class {
  });
  }
  this.output = output;
-
- let recordedContinuationText = "";
- let recordedFullText = "";
+ this.generateId = generateId3;
  let activeReasoningPart = void 0;
  let recordedContent = [];
- const
- const recordedResponse = {
- id: generateId3(),
- timestamp: currentDate(),
- modelId: model.modelId,
- messages: []
- };
- let recordedToolCalls = [];
- let recordedToolResults = [];
+ const recordedResponseMessages = [];
  let recordedFinishReason = void 0;
- let
- let
+ let recordedTotalUsage = void 0;
+ let recordedRequest = {};
+ let recordedWarnings = [];
  const recordedSteps = [];
  let rootSpan;
  const eventProcessor = new TransformStream({
@@ -6418,9 +6147,6 @@ var DefaultStreamTextResult = class {
  await (onError == null ? void 0 : onError({ error: part.error }));
  }
  if (part.type === "text") {
- recordedStepText += part.text;
- recordedContinuationText += part.text;
- recordedFullText += part.text;
  const latestContent = recordedContent[recordedContent.length - 1];
  if ((latestContent == null ? void 0 : latestContent.type) === "text") {
  latestContent.text += part.text;
@@ -6433,12 +6159,12 @@ var DefaultStreamTextResult = class {
  activeReasoningPart = {
  type: "reasoning",
  text: part.text,
-
+ providerMetadata: part.providerMetadata
  };
  recordedContent.push(activeReasoningPart);
  } else {
  activeReasoningPart.text += part.text;
- activeReasoningPart.
+ activeReasoningPart.providerMetadata = part.providerMetadata;
  }
  }
  if (part.type === "reasoning-part-finish" && activeReasoningPart != null) {
@@ -6449,129 +6175,76 @@ var DefaultStreamTextResult = class {
  }
  if (part.type === "source") {
  recordedContent.push(part);
- recordedSources.push(part);
  }
  if (part.type === "tool-call") {
  recordedContent.push(part);
- recordedToolCalls.push(part);
  }
  if (part.type === "tool-result") {
  recordedContent.push(part);
- recordedToolResults.push(part);
  }
- if (part.type === "step
+ if (part.type === "start-step") {
+ recordedRequest = part.request;
+ recordedWarnings = part.warnings;
+ }
+ if (part.type === "finish-step") {
  const stepMessages = toResponseMessages({
-
-
- reasoning: extractReasoning(recordedContent),
- tools: tools != null ? tools : {},
- toolCalls: recordedToolCalls,
- toolResults: recordedToolResults,
- messageId: part.messageId,
- generateMessageId
+ content: recordedContent,
+ tools: tools != null ? tools : {}
  });
- const
- let nextStepType = "done";
- if (currentStep + 1 < maxSteps) {
- if (continueSteps && part.finishReason === "length" && // only use continue when there are no tool calls:
- recordedToolCalls.length === 0) {
- nextStepType = "continue";
- } else if (
- // there are tool calls:
- recordedToolCalls.length > 0 && // all current tool calls have results:
- recordedToolResults.length === recordedToolCalls.length
- ) {
- nextStepType = "tool-result";
- }
- }
- const currentStepResult = {
- stepType,
+ const currentStepResult = new DefaultStepResult({
  content: recordedContent,
- text: recordedStepText,
- reasoningText: asReasoningText(extractReasoning(recordedContent)),
- reasoning: extractReasoning(recordedContent),
- files: extractFiles(recordedContent),
- sources: extractSources(recordedContent),
- toolCalls: recordedToolCalls,
- toolResults: recordedToolResults,
  finishReason: part.finishReason,
  usage: part.usage,
- warnings:
- request:
+ warnings: recordedWarnings,
+ request: recordedRequest,
  response: {
  ...part.response,
- messages: [...
+ messages: [...recordedResponseMessages, ...stepMessages]
  },
- providerMetadata: part.providerMetadata
-
- };
+ providerMetadata: part.providerMetadata
+ });
  await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
  recordedSteps.push(currentStepResult);
  recordedContent = [];
- recordedToolCalls = [];
- recordedToolResults = [];
- recordedStepText = "";
  activeReasoningPart = void 0;
-
- stepType = nextStepType;
- }
- if (nextStepType !== "continue") {
- recordedResponse.messages.push(...stepMessages);
- recordedContinuationText = "";
- }
+ recordedResponseMessages.push(...stepMessages);
  }
  if (part.type === "finish") {
-
- recordedResponse.timestamp = part.response.timestamp;
- recordedResponse.modelId = part.response.modelId;
- recordedResponse.headers = part.response.headers;
- recordedUsage = part.usage;
+ recordedTotalUsage = part.totalUsage;
  recordedFinishReason = part.finishReason;
  }
  },
  async flush(controller) {
- var _a17;
  try {
  if (recordedSteps.length === 0) {
  return;
  }
- const lastStep = recordedSteps[recordedSteps.length - 1];
- self.contentPromise.resolve(lastStep.content);
- self.warningsPromise.resolve(lastStep.warnings);
- self.requestPromise.resolve(lastStep.request);
- self.responsePromise.resolve(lastStep.response);
- self.toolCallsPromise.resolve(lastStep.toolCalls);
- self.toolResultsPromise.resolve(lastStep.toolResults);
- self.providerMetadataPromise.resolve(lastStep.providerMetadata);
- self.reasoningPromise.resolve(lastStep.reasoningText);
- self.reasoningDetailsPromise.resolve(lastStep.reasoning);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
- const
+ const totalUsage = recordedTotalUsage != null ? recordedTotalUsage : {
  inputTokens: void 0,
  outputTokens: void 0,
  totalTokens: void 0
  };
  self.finishReasonPromise.resolve(finishReason);
- self.
- self.textPromise.resolve(recordedFullText);
- self.sourcesPromise.resolve(recordedSources);
- self.filesPromise.resolve(lastStep.files);
+ self.totalUsagePromise.resolve(totalUsage);
  self.stepsPromise.resolve(recordedSteps);
+ const finalStep = recordedSteps[recordedSteps.length - 1];
  await (onFinish == null ? void 0 : onFinish({
  finishReason,
-
-
-
-
-
-
-
-
-
-
-
-
-
+ totalUsage,
+ usage: finalStep.usage,
+ content: finalStep.content,
+ text: finalStep.text,
+ reasoningText: finalStep.reasoningText,
+ reasoning: finalStep.reasoning,
+ files: finalStep.files,
+ sources: finalStep.sources,
+ toolCalls: finalStep.toolCalls,
+ toolResults: finalStep.toolResults,
+ request: finalStep.request,
+ response: finalStep.response,
+ warnings: finalStep.warnings,
+ providerMetadata: finalStep.providerMetadata,
  steps: recordedSteps
  }));
  rootSpan.setAttributes(
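
Note: the stream part types enqueued above were renamed; "start-step" and "finish-step" replace the old "step-start"/"step-finish" parts, and the final "finish" part now carries `totalUsage`. A sketch of observing them on `fullStream` (only fields visible in this diff are used; the model and prompt are placeholders):

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // placeholder provider/model

const result = streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Summarize the changelog.",
});

for await (const part of result.fullStream) {
  switch (part.type) {
    case "start-step":
      console.log("step started, warnings:", part.warnings);
      break;
    case "text":
      process.stdout.write(part.text);
      break;
    case "finish-step":
      console.log("\nstep finished, usage:", part.usage);
      break;
    case "finish":
      console.log("total usage:", part.totalUsage);
      break;
  }
}
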
@@ -6579,18 +6252,18 @@ var DefaultStreamTextResult = class {
|
|
6579
6252
|
telemetry,
|
6580
6253
|
attributes: {
|
6581
6254
|
"ai.response.finishReason": finishReason,
|
6582
|
-
"ai.response.text": { output: () =>
|
6255
|
+
"ai.response.text": { output: () => finalStep.text },
|
6583
6256
|
"ai.response.toolCalls": {
|
6584
6257
|
output: () => {
|
6585
|
-
var
|
6586
|
-
return ((
|
6258
|
+
var _a17;
|
6259
|
+
return ((_a17 = finalStep.toolCalls) == null ? void 0 : _a17.length) ? JSON.stringify(finalStep.toolCalls) : void 0;
|
6587
6260
|
}
|
6588
6261
|
},
|
6589
|
-
"ai.usage.inputTokens":
|
6590
|
-
"ai.usage.outputTokens":
|
6591
|
-
"ai.usage.totalTokens":
|
6592
|
-
"ai.usage.reasoningTokens":
|
6593
|
-
"ai.usage.cachedInputTokens":
|
6262
|
+
"ai.usage.inputTokens": totalUsage.inputTokens,
|
6263
|
+
"ai.usage.outputTokens": totalUsage.outputTokens,
|
6264
|
+
"ai.usage.totalTokens": totalUsage.totalTokens,
|
6265
|
+
"ai.usage.reasoningTokens": totalUsage.reasoningTokens,
|
6266
|
+
"ai.usage.cachedInputTokens": totalUsage.cachedInputTokens
|
6594
6267
|
}
|
6595
6268
|
})
|
6596
6269
|
);
|
@@ -6649,11 +6322,7 @@ var DefaultStreamTextResult = class {
|
|
6649
6322
|
async function streamStep({
|
6650
6323
|
currentStep,
|
6651
6324
|
responseMessages,
|
6652
|
-
usage
|
6653
|
-
stepType: stepType2,
|
6654
|
-
previousStepText,
|
6655
|
-
hasLeadingWhitespace,
|
6656
|
-
messageId
|
6325
|
+
usage
|
6657
6326
|
}) {
|
6658
6327
|
const initialPrompt = await standardizePrompt({
|
6659
6328
|
system,
|
@@ -6751,8 +6420,7 @@ var DefaultStreamTextResult = class {
|
|
6751
6420
|
const stepToolCalls = [];
|
6752
6421
|
const stepToolResults = [];
|
6753
6422
|
let warnings;
|
6754
|
-
const
|
6755
|
-
const stepFiles = [];
|
6423
|
+
const stepContent = [];
|
6756
6424
|
let activeReasoningPart2 = void 0;
|
6757
6425
|
let stepFinishReason = "unknown";
|
6758
6426
|
let stepUsage = {
|
@@ -6763,25 +6431,17 @@ var DefaultStreamTextResult = class {
|
|
6763
6431
|
let stepProviderMetadata;
|
6764
6432
|
let stepFirstChunk = true;
|
6765
6433
|
let stepText = "";
|
6766
|
-
let fullStepText = stepType2 === "continue" ? previousStepText : "";
|
6767
6434
|
let stepResponse = {
|
6768
6435
|
id: generateId3(),
|
6769
6436
|
timestamp: currentDate(),
|
6770
6437
|
modelId: model.modelId
|
6771
6438
|
};
|
6772
|
-
let chunkBuffer = "";
|
6773
|
-
let chunkTextPublished = false;
|
6774
|
-
let inWhitespacePrefix = true;
|
6775
|
-
let hasWhitespaceSuffix = false;
|
6776
6439
|
async function publishTextChunk({
|
6777
6440
|
controller,
|
6778
6441
|
chunk
|
6779
6442
|
}) {
|
6780
6443
|
controller.enqueue(chunk);
|
6781
6444
|
stepText += chunk.text;
|
6782
|
-
fullStepText += chunk.text;
|
6783
|
-
chunkTextPublished = true;
|
6784
|
-
hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
|
6785
6445
|
}
|
6786
6446
|
self.addStream(
|
6787
6447
|
transformedStream.pipeThrough(
|
@@ -6790,6 +6450,7 @@ var DefaultStreamTextResult = class {
|
|
6790
6450
|
var _a17, _b, _c, _d;
|
6791
6451
|
if (chunk.type === "stream-start") {
|
6792
6452
|
warnings = chunk.warnings;
|
6453
|
+
controller.enqueue({ type: "start" });
|
6793
6454
|
return;
|
6794
6455
|
}
|
6795
6456
|
if (stepFirstChunk) {
|
@@ -6802,8 +6463,7 @@ var DefaultStreamTextResult = class {
|
|
6802
6463
|
"ai.response.msToFirstChunk": msToFirstChunk
|
6803
6464
|
});
|
6804
6465
|
controller.enqueue({
|
6805
|
-
type: "step
|
6806
|
-
messageId,
|
6466
|
+
type: "start-step",
|
6807
6467
|
request: stepRequest,
|
6808
6468
|
warnings: warnings != null ? warnings : []
|
6809
6469
|
});
|
@@ -6814,27 +6474,7 @@ var DefaultStreamTextResult = class {
|
|
6814
6474
|
const chunkType = chunk.type;
|
6815
6475
|
switch (chunkType) {
|
6816
6476
|
case "text": {
|
6817
|
-
|
6818
|
-
const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
|
6819
|
-
if (trimmedChunkText.length === 0) {
|
6820
|
-
break;
|
6821
|
-
}
|
6822
|
-
inWhitespacePrefix = false;
|
6823
|
-
chunkBuffer += trimmedChunkText;
|
6824
|
-
const split = splitOnLastWhitespace(chunkBuffer);
|
6825
|
-
if (split != null) {
|
6826
|
-
chunkBuffer = split.suffix;
|
6827
|
-
await publishTextChunk({
|
6828
|
-
controller,
|
6829
|
-
chunk: {
|
6830
|
-
type: "text",
|
6831
|
-
text: split.prefix + split.whitespace
|
6832
|
-
}
|
6833
|
-
});
|
6834
|
-
}
|
6835
|
-
} else {
|
6836
|
-
await publishTextChunk({ controller, chunk });
|
6837
|
-
}
|
6477
|
+
await publishTextChunk({ controller, chunk });
|
6838
6478
|
break;
|
6839
6479
|
}
|
6840
6480
|
case "reasoning": {
|
@@ -6843,12 +6483,12 @@ var DefaultStreamTextResult = class {
|
|
6843
6483
|
activeReasoningPart2 = {
|
6844
6484
|
type: "reasoning",
|
6845
6485
|
text: chunk.text,
|
6846
|
-
|
6486
|
+
providerMetadata: chunk.providerMetadata
|
6847
6487
|
};
|
6848
|
-
|
6488
|
+
stepContent.push(activeReasoningPart2);
|
6849
6489
|
} else {
|
6850
6490
|
activeReasoningPart2.text += chunk.text;
|
6851
|
-
activeReasoningPart2.
|
6491
|
+
activeReasoningPart2.providerMetadata = chunk.providerMetadata;
|
6852
6492
|
}
|
6853
6493
|
break;
|
6854
6494
|
}
|
@@ -6860,11 +6500,13 @@ var DefaultStreamTextResult = class {
|
|
6860
6500
|
case "tool-call": {
|
6861
6501
|
controller.enqueue(chunk);
|
6862
6502
|
stepToolCalls.push(chunk);
|
6503
|
+
stepContent.push(chunk);
|
6863
6504
|
break;
|
6864
6505
|
}
|
6865
6506
|
case "tool-result": {
|
6866
6507
|
controller.enqueue(chunk);
|
6867
6508
|
stepToolResults.push(chunk);
|
6509
|
+
stepContent.push(chunk);
|
6868
6510
|
break;
|
6869
6511
|
}
|
6870
6512
|
case "response-metadata": {
|
@@ -6888,11 +6530,15 @@ var DefaultStreamTextResult = class {
|
|
6888
6530
|
break;
|
6889
6531
|
}
|
6890
6532
|
case "file": {
|
6891
|
-
|
6533
|
+
stepContent.push(chunk);
|
6534
|
+
controller.enqueue(chunk);
|
6535
|
+
break;
|
6536
|
+
}
|
6537
|
+
case "source": {
|
6538
|
+
stepContent.push(chunk);
|
6892
6539
|
controller.enqueue(chunk);
|
6893
6540
|
break;
|
6894
6541
|
}
|
6895
|
-
case "source":
|
6896
6542
|
case "tool-call-streaming-start":
|
6897
6543
|
case "tool-call-delta": {
|
6898
6544
|
controller.enqueue(chunk);
|
@@ -6912,27 +6558,6 @@ var DefaultStreamTextResult = class {
|
|
6912
6558
|
// invoke onFinish callback and resolve toolResults promise when the stream is about to close:
|
6913
6559
|
async flush(controller) {
|
6914
6560
|
const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
|
6915
|
-
let nextStepType = "done";
|
6916
|
-
if (currentStep + 1 < maxSteps) {
|
6917
|
-
if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
|
6918
|
-
stepToolCalls.length === 0) {
|
6919
|
-
nextStepType = "continue";
|
6920
|
-
} else if (
|
6921
|
-
// there are tool calls:
|
6922
|
-
stepToolCalls.length > 0 && // all current tool calls have results:
|
6923
|
-
stepToolResults.length === stepToolCalls.length
|
6924
|
-
) {
|
6925
|
-
nextStepType = "tool-result";
|
6926
|
-
}
|
6927
|
-
}
|
6928
|
-
if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
|
6929
|
-
stepType2 === "continue" && !chunkTextPublished)) {
|
6930
|
-
await publishTextChunk({
|
6931
|
-
controller,
|
6932
|
-
chunk: { type: "text", text: chunkBuffer }
|
6933
|
-
});
|
6934
|
-
chunkBuffer = "";
|
6935
|
-
}
|
6936
6561
|
try {
|
6937
6562
|
doStreamSpan.setAttributes(
|
6938
6563
|
selectTelemetryAttributes({
|
@@ -6965,69 +6590,37 @@ var DefaultStreamTextResult = class {
|
|
6965
6590
|
doStreamSpan.end();
|
6966
6591
|
}
|
6967
6592
|
controller.enqueue({
|
6968
|
-
type: "step
|
6593
|
+
type: "finish-step",
|
6969
6594
|
finishReason: stepFinishReason,
|
6970
6595
|
usage: stepUsage,
|
6971
6596
|
providerMetadata: stepProviderMetadata,
|
6972
|
-
request: stepRequest,
|
6973
6597
|
response: {
|
6974
6598
|
...stepResponse,
|
6975
6599
|
headers: response == null ? void 0 : response.headers
|
6976
|
-
}
|
6977
|
-
warnings,
|
6978
|
-
isContinued: nextStepType === "continue",
|
6979
|
-
messageId
|
6600
|
+
}
|
6980
6601
|
});
|
 const combinedUsage = addLanguageModelUsage(usage, stepUsage);
-if (
+if (currentStep + 1 < maxSteps && // there are tool calls:
+stepToolCalls.length > 0 && // all current tool calls have results:
+stepToolResults.length === stepToolCalls.length) {
+responseMessages.push(
+...toResponseMessages({
+content: stepContent,
+tools: tools != null ? tools : {}
+})
+);
+await streamStep({
+currentStep: currentStep + 1,
+responseMessages,
+usage: combinedUsage
+});
+} else {
 controller.enqueue({
 type: "finish",
 finishReason: stepFinishReason,
-
-providerMetadata: stepProviderMetadata,
-response: {
-...stepResponse,
-headers: response == null ? void 0 : response.headers
-}
+totalUsage: combinedUsage
 });
 self.closeStream();
-} else {
-if (stepType2 === "continue") {
-const lastMessage = responseMessages[responseMessages.length - 1];
-if (typeof lastMessage.content === "string") {
-lastMessage.content += stepText;
-} else {
-lastMessage.content.push({
-text: stepText,
-type: "text"
-});
-}
-} else {
-responseMessages.push(
-...toResponseMessages({
-text: stepText,
-files: stepFiles,
-reasoning: stepReasoning,
-tools: tools != null ? tools : {},
-toolCalls: stepToolCalls,
-toolResults: stepToolResults,
-messageId,
-generateMessageId
-})
-);
-}
-await streamStep({
-currentStep: currentStep + 1,
-responseMessages,
-usage: combinedUsage,
-stepType: nextStepType,
-previousStepText: fullStepText,
-hasLeadingWhitespace: hasWhitespaceSuffix,
-messageId: (
-// keep the same id when continuing a step:
-nextStepType === "continue" ? messageId : generateMessageId()
-)
-});
 }
 }
 })
@@ -7041,11 +6634,7 @@ var DefaultStreamTextResult = class {
 inputTokens: void 0,
 outputTokens: void 0,
 totalTokens: void 0
-}
-previousStepText: "",
-stepType: "initial",
-hasLeadingWhitespace: false,
-messageId: generateMessageId()
+}
 });
 }
 }).catch((error) => {
@@ -7060,50 +6649,56 @@ var DefaultStreamTextResult = class {
 self.closeStream();
 });
 }
-get
-return this.
+get steps() {
+return this.stepsPromise.value;
 }
-get
-return this.
+get finalStep() {
+return this.steps.then((steps) => steps[steps.length - 1]);
 }
-get
-return this.
+get content() {
+return this.finalStep.then((step) => step.content);
 }
-get
-return this.
+get warnings() {
+return this.finalStep.then((step) => step.warnings);
 }
 get providerMetadata() {
-return this.
+return this.finalStep.then((step) => step.providerMetadata);
 }
 get text() {
-return this.
+return this.finalStep.then((step) => step.text);
 }
 get reasoningText() {
-return this.
+return this.finalStep.then((step) => step.reasoningText);
 }
 get reasoning() {
-return this.
+return this.finalStep.then((step) => step.reasoning);
 }
 get sources() {
-return this.
+return this.finalStep.then((step) => step.sources);
 }
 get files() {
-return this.
+return this.finalStep.then((step) => step.files);
 }
 get toolCalls() {
-return this.
+return this.finalStep.then((step) => step.toolCalls);
 }
 get toolResults() {
-return this.
+return this.finalStep.then((step) => step.toolResults);
+}
+get usage() {
+return this.finalStep.then((step) => step.usage);
 }
 get request() {
-return this.
+return this.finalStep.then((step) => step.request);
 }
 get response() {
-return this.
+return this.finalStep.then((step) => step.response);
 }
-get
-return this.
+get totalUsage() {
+return this.totalUsagePromise.value;
+}
+get finishReason() {
+return this.finishReasonPromise.value;
 }
 /**
 Split out a new stream from the original stream.
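The getters above now all resolve off the final entry of the steps array, with only totalUsage and finishReason keeping dedicated promises. A minimal sketch of that delegation pattern follows; it uses simplified, hypothetical types (StepSketch and StreamTextResultSketch are stand-ins for illustration, not the SDK's actual classes).

// Sketch only: a trimmed-down model of "derive every result getter from the last step".
interface StepSketch {
  text: string;
  toolCalls: unknown[];
  usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number };
}

class StreamTextResultSketch {
  constructor(private readonly stepsPromise: Promise<StepSketch[]>) {}

  get steps(): Promise<StepSketch[]> {
    return this.stepsPromise;
  }

  // the last entry in `steps` carries the final text, tool calls, and usage
  get finalStep(): Promise<StepSketch> {
    return this.steps.then((steps) => steps[steps.length - 1]);
  }

  get text(): Promise<string> {
    return this.finalStep.then((step) => step.text);
  }

  get toolCalls(): Promise<unknown[]> {
    return this.finalStep.then((step) => step.toolCalls);
  }

  get usage(): Promise<StepSketch["usage"]> {
    return this.finalStep.then((step) => step.usage);
  }
}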
@@ -7170,25 +6765,32 @@ var DefaultStreamTextResult = class {
 );
 }
 toDataStream({
-
-
-
+newMessageId,
+originalMessages = [],
+onFinish,
+messageMetadata,
 sendReasoning = false,
 sendSources = false,
-
+experimental_sendStart = true,
+experimental_sendFinish = true,
+onError = () => "An error occurred."
+// mask error messages for safety by default
 } = {}) {
-
+const lastMessage = originalMessages[originalMessages.length - 1];
+const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+const messageId = isContinuation ? lastMessage.id : newMessageId;
+const baseStream = this.fullStream.pipeThrough(
 new TransformStream({
-transform: async (
-const
-switch (
+transform: async (part, controller) => {
+const partType = part.type;
+switch (partType) {
 case "text": {
-controller.enqueue({ type: "text", value:
+controller.enqueue({ type: "text", value: part.text });
 break;
 }
 case "reasoning": {
 if (sendReasoning) {
-controller.enqueue({ type: "reasoning", value:
+controller.enqueue({ type: "reasoning", value: part });
 }
 break;
 }
@@ -7205,15 +6807,15 @@ var DefaultStreamTextResult = class {
 controller.enqueue({
 type: "file",
 value: {
-mediaType:
-url: `data:${
+mediaType: part.file.mediaType,
+url: `data:${part.file.mediaType};base64,${part.file.base64}`
 }
 });
 break;
 }
 case "source": {
 if (sendSources) {
-controller.enqueue({ type: "source", value:
+controller.enqueue({ type: "source", value: part });
 }
 break;
 }
@@ -7221,8 +6823,8 @@ var DefaultStreamTextResult = class {
 controller.enqueue({
 type: "tool-call-streaming-start",
 value: {
-toolCallId:
-toolName:
+toolCallId: part.toolCallId,
+toolName: part.toolName
 }
 });
 break;
@@ -7231,8 +6833,8 @@ var DefaultStreamTextResult = class {
 controller.enqueue({
 type: "tool-call-delta",
 value: {
-toolCallId:
-argsTextDelta:
+toolCallId: part.toolCallId,
+argsTextDelta: part.argsTextDelta
 }
 });
 break;
@@ -7241,9 +6843,9 @@ var DefaultStreamTextResult = class {
 controller.enqueue({
 type: "tool-call",
 value: {
-toolCallId:
-toolName:
-args:
+toolCallId: part.toolCallId,
+toolName: part.toolName,
+args: part.args
 }
 });
 break;
@@ -7252,8 +6854,8 @@ var DefaultStreamTextResult = class {
 controller.enqueue({
 type: "tool-result",
 value: {
-toolCallId:
-result:
+toolCallId: part.toolCallId,
+result: part.result
 }
 });
 break;
@@ -7261,69 +6863,100 @@ var DefaultStreamTextResult = class {
 case "error": {
 controller.enqueue({
 type: "error",
-value: onError(
+value: onError(part.error)
 });
 break;
 }
-case "step
+case "start-step": {
 controller.enqueue({
 type: "start-step",
 value: {
-
+metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
 }
 });
 break;
 }
-case "step
+case "finish-step": {
 controller.enqueue({
 type: "finish-step",
 value: {
-
-usage: sendUsage ? chunk.usage : void 0,
-isContinued: chunk.isContinued
+metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
 }
 });
 break;
 }
+case "start": {
+if (experimental_sendStart) {
+controller.enqueue({
+type: "start",
+value: {
+messageId,
+metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
+}
+});
+}
+break;
+}
 case "finish": {
 if (experimental_sendFinish) {
 controller.enqueue({
-type: "finish
+type: "finish",
 value: {
-
-usage: sendUsage ? chunk.usage : void 0
+metadata: messageMetadata == null ? void 0 : messageMetadata({ part })
 }
 });
 }
 break;
 }
 default: {
-const exhaustiveCheck =
+const exhaustiveCheck = partType;
 throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
 }
 }
 }
 })
 );
+return onFinish == null ? baseStream : processChatResponse({
+stream: baseStream,
+lastMessage,
+newMessageId: messageId != null ? messageId : this.generateId(),
+onFinish: ({ message }) => {
+const isContinuation2 = message.id === (lastMessage == null ? void 0 : lastMessage.id);
+onFinish({
+isContinuation: isContinuation2,
+responseMessage: message,
+messages: [
+...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+message
+]
+});
+}
+});
 }
 pipeDataStreamToResponse(response, {
-
-
+newMessageId,
+originalMessages,
+onFinish,
+messageMetadata,
 sendReasoning,
 sendSources,
 experimental_sendFinish,
 experimental_sendStart,
+onError,
 ...init
 } = {}) {
 pipeDataStreamToResponse({
 response,
 dataStream: this.toDataStream({
-
-
+newMessageId,
+originalMessages,
+onFinish,
+messageMetadata,
 sendReasoning,
 sendSources,
 experimental_sendFinish,
-experimental_sendStart
+experimental_sendStart,
+onError
 }),
 ...init
 });
@@ -7336,22 +6969,28 @@ var DefaultStreamTextResult = class {
 });
 }
 toDataStreamResponse({
-
-
+newMessageId,
+originalMessages,
+onFinish,
+messageMetadata,
 sendReasoning,
 sendSources,
 experimental_sendFinish,
 experimental_sendStart,
+onError,
 ...init
 } = {}) {
 return createDataStreamResponse({
 dataStream: this.toDataStream({
-
-
+newMessageId,
+originalMessages,
+onFinish,
+messageMetadata,
 sendReasoning,
 sendSources,
 experimental_sendFinish,
-experimental_sendStart
+experimental_sendStart,
+onError
 }),
 ...init
 });
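The reworked toDataStream / toDataStreamResponse options above (newMessageId, originalMessages, onFinish, messageMetadata, sendReasoning, sendSources, experimental_sendStart, experimental_sendFinish, onError) can be exercised roughly as in the sketch below. The option names and callback shapes are taken from the hunks in this diff; the route wiring and the untyped model/messages parameters are illustrative assumptions, not part of this package.

import { streamText } from "ai";

// Hedged usage sketch: `model` and `messages` come from the caller and are left untyped here.
export async function handleChat(model: any, messages: any[]): Promise<Response> {
  const result = streamText({ model, messages });

  return result.toDataStreamResponse({
    originalMessages: messages, // reuses the last assistant message id when continuing it
    sendReasoning: true, // forward reasoning parts to the client
    sendSources: false,
    messageMetadata: ({ part }) => ({ partType: part.type }), // attached to start/finish parts
    onError: (error) => "An error occurred.", // error text is masked by default
    onFinish: ({ messages: updatedMessages, responseMessage, isContinuation }) => {
      // persist updatedMessages: the original list plus the new (or continued) assistant message
      console.log(isContinuation, responseMessage.id, updatedMessages.length);
    },
  });
}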
@@ -7364,39 +7003,6 @@ var DefaultStreamTextResult = class {
 }
 };
 
-// src/util/merge-objects.ts
-function mergeObjects(target, source) {
-if (target === void 0 && source === void 0) {
-return void 0;
-}
-if (target === void 0) {
-return source;
-}
-if (source === void 0) {
-return target;
-}
-const result = { ...target };
-for (const key in source) {
-if (Object.prototype.hasOwnProperty.call(source, key)) {
-const sourceValue = source[key];
-if (sourceValue === void 0)
-continue;
-const targetValue = key in target ? target[key] : void 0;
-const isSourceObject = sourceValue !== null && typeof sourceValue === "object" && !Array.isArray(sourceValue) && !(sourceValue instanceof Date) && !(sourceValue instanceof RegExp);
-const isTargetObject = targetValue !== null && targetValue !== void 0 && typeof targetValue === "object" && !Array.isArray(targetValue) && !(targetValue instanceof Date) && !(targetValue instanceof RegExp);
-if (isSourceObject && isTargetObject) {
-result[key] = mergeObjects(
-targetValue,
-sourceValue
-);
-} else {
-result[key] = sourceValue;
-}
-}
-}
-return result;
-}
-
 // core/middleware/default-settings-middleware.ts
 function defaultSettingsMiddleware({
 settings
@@ -7404,33 +7010,7 @@ function defaultSettingsMiddleware({
 return {
 middlewareVersion: "v2",
 transformParams: async ({ params }) => {
-
-return {
-...settings,
-...params,
-// map all values that are null to undefined
-maxOutputTokens: settings.maxOutputTokens !== null ? (_a17 = params.maxOutputTokens) != null ? _a17 : settings.maxOutputTokens : void 0,
-temperature: settings.temperature !== null ? (
-// temperature: special case 0 or null
-params.temperature === 0 || params.temperature == null ? (_b = settings.temperature) != null ? _b : params.temperature : params.temperature
-) : void 0,
-stopSequences: settings.stopSequences !== null ? (_c = params.stopSequences) != null ? _c : settings.stopSequences : void 0,
-topP: settings.topP !== null ? (_d = params.topP) != null ? _d : settings.topP : void 0,
-topK: settings.topK !== null ? (_e = params.topK) != null ? _e : settings.topK : void 0,
-presencePenalty: settings.presencePenalty !== null ? (_f = params.presencePenalty) != null ? _f : settings.presencePenalty : void 0,
-frequencyPenalty: settings.frequencyPenalty !== null ? (_g = params.frequencyPenalty) != null ? _g : settings.frequencyPenalty : void 0,
-responseFormat: settings.responseFormat !== null ? (_h = params.responseFormat) != null ? _h : settings.responseFormat : void 0,
-seed: settings.seed !== null ? (_i = params.seed) != null ? _i : settings.seed : void 0,
-tools: settings.tools !== null ? (_j = params.tools) != null ? _j : settings.tools : void 0,
-toolChoice: settings.toolChoice !== null ? (_k = params.toolChoice) != null ? _k : settings.toolChoice : void 0,
-// headers: deep merge
-headers: mergeObjects(settings.headers, params.headers),
-// provider options: deep merge
-providerOptions: mergeObjects(
-settings.providerOptions,
-params.providerOptions
-)
-};
+return mergeObjects(settings, params);
 }
 };
 }
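defaultSettingsMiddleware now returns mergeObjects(settings, params) instead of merging each setting by hand. Below is a condensed sketch of that deep-merge behavior, adapted from the mergeObjects body removed a few hunks above (params win over settings, undefined values are skipped, arrays/Dates/RegExps are treated as leaf values). The helper presumably moves to a shared util in this release; the name deepMergeSketch and the standalone file are illustrative, not the shipped code.

// deep-merge-sketch.ts
function isPlainObject(value: unknown): value is Record<string, unknown> {
  return (
    value !== null &&
    typeof value === "object" &&
    !Array.isArray(value) &&
    !(value instanceof Date) &&
    !(value instanceof RegExp)
  );
}

function deepMergeSketch(
  target: Record<string, unknown> | undefined,
  source: Record<string, unknown> | undefined
): Record<string, unknown> | undefined {
  if (target === undefined) return source;
  if (source === undefined) return target;
  const result: Record<string, unknown> = { ...target };
  for (const [key, sourceValue] of Object.entries(source)) {
    if (sourceValue === undefined) continue; // undefined never overrides a default
    const targetValue = result[key];
    result[key] =
      isPlainObject(sourceValue) && isPlainObject(targetValue)
        ? deepMergeSketch(targetValue, sourceValue) // recurse into nested objects (e.g. providerOptions)
        : sourceValue; // the params value wins over the default
  }
  return result;
}

// e.g. deepMergeSketch({ temperature: 0.7, headers: { a: "1" } }, { headers: { b: "2" } })
//      -> { temperature: 0.7, headers: { a: "1", b: "2" } }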
@@ -7652,7 +7232,7 @@ var doWrap = ({
 };
 
 // core/registry/custom-provider.ts
-var
+var import_provider24 = require("@ai-sdk/provider");
 function customProvider({
 languageModels,
 textEmbeddingModels,
@@ -7667,7 +7247,7 @@ function customProvider({
 if (fallbackProvider) {
 return fallbackProvider.languageModel(modelId);
 }
-throw new
+throw new import_provider24.NoSuchModelError({ modelId, modelType: "languageModel" });
 },
 textEmbeddingModel(modelId) {
 if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -7676,7 +7256,7 @@ function customProvider({
 if (fallbackProvider) {
 return fallbackProvider.textEmbeddingModel(modelId);
 }
-throw new
+throw new import_provider24.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
 },
 imageModel(modelId) {
 if (imageModels != null && modelId in imageModels) {
@@ -7685,19 +7265,19 @@ function customProvider({
 if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
 return fallbackProvider.imageModel(modelId);
 }
-throw new
+throw new import_provider24.NoSuchModelError({ modelId, modelType: "imageModel" });
 }
 };
 }
 var experimental_customProvider = customProvider;
 
 // core/registry/no-such-provider-error.ts
-var
+var import_provider25 = require("@ai-sdk/provider");
 var name16 = "AI_NoSuchProviderError";
 var marker16 = `vercel.ai.error.${name16}`;
 var symbol16 = Symbol.for(marker16);
 var _a16;
-var NoSuchProviderError = class extends
+var NoSuchProviderError = class extends import_provider25.NoSuchModelError {
 constructor({
 modelId,
 modelType,
@@ -7711,13 +7291,13 @@ var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
 this.availableProviders = availableProviders;
 }
 static isInstance(error) {
-return
+return import_provider25.AISDKError.hasMarker(error, marker16);
 }
 };
 _a16 = symbol16;
 
 // core/registry/provider-registry.ts
-var
+var import_provider26 = require("@ai-sdk/provider");
 function createProviderRegistry(providers, {
 separator = ":"
 } = {}) {
@@ -7756,7 +7336,7 @@ var DefaultProviderRegistry = class {
 splitId(id, modelType) {
 const index = id.indexOf(this.separator);
 if (index === -1) {
-throw new
+throw new import_provider26.NoSuchModelError({
 modelId: id,
 modelType,
 message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -7769,7 +7349,7 @@ var DefaultProviderRegistry = class {
 const [providerId, modelId] = this.splitId(id, "languageModel");
 const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
 if (model == null) {
-throw new
+throw new import_provider26.NoSuchModelError({ modelId: id, modelType: "languageModel" });
 }
 return model;
 }
@@ -7779,7 +7359,7 @@ var DefaultProviderRegistry = class {
 const provider = this.getProvider(providerId);
 const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
 if (model == null) {
-throw new
+throw new import_provider26.NoSuchModelError({
 modelId: id,
 modelType: "textEmbeddingModel"
 });
@@ -7792,14 +7372,14 @@ var DefaultProviderRegistry = class {
 const provider = this.getProvider(providerId);
 const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
 if (model == null) {
-throw new
+throw new import_provider26.NoSuchModelError({ modelId: id, modelType: "imageModel" });
 }
 return model;
 }
 };
 
 // core/tool/mcp/mcp-client.ts
-var
+var import_provider_utils24 = require("@ai-sdk/provider-utils");
 
 // core/tool/tool.ts
 function tool(tool2) {
@@ -7807,7 +7387,7 @@ function tool(tool2) {
 }
 
 // core/tool/mcp/mcp-sse-transport.ts
-var
+var import_provider_utils23 = require("@ai-sdk/provider-utils");
 
 // core/tool/mcp/json-rpc-message.ts
 var import_zod10 = require("zod");
@@ -7978,7 +7558,7 @@ var SseMCPTransport = class {
 (_b = this.onerror) == null ? void 0 : _b.call(this, error);
 return reject(error);
 }
-const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0,
+const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils23.createEventSourceParserStream)());
 const reader = stream.getReader();
 const processEvents = async () => {
 var _a18, _b2, _c2;
@@ -8302,7 +7882,7 @@ var MCPClient = class {
 if (schemas !== "automatic" && !(name17 in schemas)) {
 continue;
 }
-const parameters = schemas === "automatic" ? (0,
+const parameters = schemas === "automatic" ? (0, import_provider_utils24.jsonSchema)({
 ...inputSchema,
 properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
 additionalProperties: false
@@ -8366,8 +7946,8 @@ var MCPClient = class {
 };
 
 // src/error/no-transcript-generated-error.ts
-var
-var NoTranscriptGeneratedError = class extends
+var import_provider27 = require("@ai-sdk/provider");
+var NoTranscriptGeneratedError = class extends import_provider27.AISDKError {
 constructor(options) {
 super({
 name: "AI_NoTranscriptGeneratedError",
@@ -8460,7 +8040,6 @@ var DefaultTranscriptionResult = class {
 TypeValidationError,
 UnsupportedFunctionalityError,
 appendClientMessage,
-appendResponseMessages,
 asSchema,
 assistantModelMessageSchema,
 callChatApi,
@@ -8503,7 +8082,6 @@ var DefaultTranscriptionResult = class {
 parsePartialJson,
 pipeDataStreamToResponse,
 pipeTextStreamToResponse,
-processDataStream,
 processTextStream,
 shouldResubmitMessages,
 simulateReadableStream,