ai 4.0.0-canary.9 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +116 -0
- package/README.md +10 -14
- package/dist/index.d.mts +16 -15
- package/dist/index.d.ts +16 -15
- package/dist/index.js +1098 -1047
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1101 -1050
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -7
- package/test/dist/index.d.mts +3 -3
- package/test/dist/index.d.ts +3 -3
- package/test/dist/index.js +3 -3
- package/test/dist/index.js.map +1 -1
- package/test/dist/index.mjs +3 -3
- package/test/dist/index.mjs.map +1 -1
package/dist/index.mjs
CHANGED
@@ -6,10 +6,12 @@ var __export = (target, all) => {
 
 // streams/index.ts
 import {
-
-
-
-
+  formatAssistantStreamPart as formatAssistantStreamPart2,
+  formatDataStreamPart as formatDataStreamPart3,
+  parseAssistantStreamPart,
+  parseDataStreamPart,
+  processDataStream,
+  processTextStream
 } from "@ai-sdk/ui-utils";
 import { generateId as generateId2 } from "@ai-sdk/provider-utils";
 
@@ -2293,21 +2295,6 @@ import {
   parsePartialJson
 } from "@ai-sdk/ui-utils";
 
-// util/create-resolvable-promise.ts
-function createResolvablePromise() {
-  let resolve;
-  let reject;
-  const promise = new Promise((res, rej) => {
-    resolve = res;
-    reject = rej;
-  });
-  return {
-    promise,
-    resolve,
-    reject
-  };
-}
-
 // util/delayed-promise.ts
 var DelayedPromise = class {
   constructor() {
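The hunk above removes `util/create-resolvable-promise.ts` from this position in the bundle; the next hunk re-adds it unchanged further down, next to the new stitchable-stream helper. For reference, this is the standard externally-resolvable promise pattern. A minimal typed sketch of the same pattern follows (the type annotations are an illustration; the bundled output above is plain JavaScript):

```ts
// Sketch of the pattern used by createResolvablePromise: a promise
// whose resolve/reject functions are handed out to the caller.
function createResolvablePromise<T>(): {
  promise: Promise<T>;
  resolve: (value: T) => void;
  reject: (error: unknown) => void;
} {
  let resolve!: (value: T) => void;
  let reject!: (error: unknown) => void;
  const promise = new Promise<T>((res, rej) => {
    resolve = res;
    reject = rej;
  });
  return { promise, resolve, reject };
}
```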
@@ -2399,9 +2386,91 @@ function writeToServerResponse({
   read();
 }
 
+// util/create-resolvable-promise.ts
+function createResolvablePromise() {
+  let resolve;
+  let reject;
+  const promise = new Promise((res, rej) => {
+    resolve = res;
+    reject = rej;
+  });
+  return {
+    promise,
+    resolve,
+    reject
+  };
+}
+
+// core/util/create-stitchable-stream.ts
+function createStitchableStream() {
+  let innerStreamReaders = [];
+  let controller = null;
+  let isClosed = false;
+  let waitForNewStream = createResolvablePromise();
+  const processPull = async () => {
+    if (isClosed && innerStreamReaders.length === 0) {
+      controller == null ? void 0 : controller.close();
+      return;
+    }
+    if (innerStreamReaders.length === 0) {
+      waitForNewStream = createResolvablePromise();
+      await waitForNewStream.promise;
+      return processPull();
+    }
+    try {
+      const { value, done } = await innerStreamReaders[0].read();
+      if (done) {
+        innerStreamReaders.shift();
+        if (innerStreamReaders.length > 0) {
+          await processPull();
+        } else if (isClosed) {
+          controller == null ? void 0 : controller.close();
+        }
+      } else {
+        controller == null ? void 0 : controller.enqueue(value);
+      }
+    } catch (error) {
+      controller == null ? void 0 : controller.error(error);
+      innerStreamReaders.shift();
+      if (isClosed && innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+  return {
+    stream: new ReadableStream({
+      start(controllerParam) {
+        controller = controllerParam;
+      },
+      pull: processPull,
+      async cancel() {
+        for (const reader of innerStreamReaders) {
+          await reader.cancel();
+        }
+        innerStreamReaders = [];
+        isClosed = true;
+      }
+    }),
+    addStream: (innerStream) => {
+      if (isClosed) {
+        throw new Error("Cannot add inner stream: outer stream is closed");
+      }
+      innerStreamReaders.push(innerStream.getReader());
+      waitForNewStream.resolve();
+    },
+    close: () => {
+      isClosed = true;
+      waitForNewStream.resolve();
+      if (innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+}
+
 // core/generate-object/stream-object.ts
 var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
-async function streamObject({
+function streamObject({
   model,
   schema: inputSchema,
   schemaName,
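The `createStitchableStream` helper added above concatenates multiple `ReadableStream`s into one outer stream. Compared with the version removed later in this diff, it gains a `waitForNewStream` resolvable promise, so a pull on the outer stream now parks until `addStream` or `close` is called instead of returning early. A hypothetical usage sketch (the helper is internal to the bundle, not an exported API, and the values enqueued here are invented for illustration):

```ts
// Illustration only: createStitchableStream is an internal helper of
// the bundled code above, not part of the package's public API.
const { stream, addStream, close } = createStitchableStream();

const reader = stream.getReader();

// Inner streams can be appended while the outer stream is being read.
addStream(
  new ReadableStream({
    start(controller) {
      controller.enqueue("first");
      controller.close();
    },
  }),
);
addStream(
  new ReadableStream({
    start(controller) {
      controller.enqueue("second");
      controller.close();
    },
  }),
);
close(); // signal that no further inner streams will be added

for (;;) {
  const { value, done } = await reader.read();
  if (done) break;
  console.log(value); // "first", then "second"
}
```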
@@ -2435,400 +2504,433 @@ async function streamObject({
   if (outputStrategy.type === "no-schema" && mode === void 0) {
     mode = "json";
   }
-
+  return new DefaultStreamObjectResult({
     model,
     telemetry,
     headers,
-    settings
+    settings,
+    maxRetries,
+    abortSignal,
+    outputStrategy,
+    system,
+    prompt,
+    messages,
+    schemaName,
+    schemaDescription,
+    inputProviderMetadata: providerMetadata,
+    mode,
+    onFinish,
+    generateId: generateId3,
+    currentDate,
+    now: now2
   });
-
-
-
-
-
+}
+var DefaultStreamObjectResult = class {
+  constructor({
+    model,
+    headers,
+    telemetry,
+    settings,
+    maxRetries,
+    abortSignal,
+    outputStrategy,
+    system,
+    prompt,
+    messages,
+    schemaName,
+    schemaDescription,
+    inputProviderMetadata,
+    mode,
+    onFinish,
+    generateId: generateId3,
+    currentDate,
+    now: now2
+  }) {
+    this.objectPromise = new DelayedPromise();
+    this.usagePromise = new DelayedPromise();
+    this.providerMetadataPromise = new DelayedPromise();
+    this.warningsPromise = new DelayedPromise();
+    this.requestPromise = new DelayedPromise();
+    this.responsePromise = new DelayedPromise();
+    this.stitchableStream = createStitchableStream();
+    const baseTelemetryAttributes = getBaseTelemetryAttributes({
+      model,
       telemetry,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      headers,
+      settings: { ...settings, maxRetries }
+    });
+    const tracer = getTracer(telemetry);
+    const retry = retryWithExponentialBackoff({ maxRetries });
+    const self = this;
+    recordSpan({
+      name: "ai.streamObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.streamObject",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type,
+          "ai.settings.mode": mode
+        }
+      }),
+      tracer,
+      endWhenDone: false,
+      fn: async (rootSpan) => {
+        if (mode === "auto" || mode == null) {
+          mode = model.defaultObjectGenerationMode;
+        }
+        let callOptions;
+        let transformer;
+        switch (mode) {
+          case "json": {
+            const standardizedPrompt = standardizePrompt({
+              prompt: {
+                system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+                  prompt: system,
+                  schema: outputStrategy.jsonSchema
+                }),
+                prompt,
+                messages
+              },
+              tools: void 0
+            });
+            callOptions = {
+              mode: {
+                type: "object-json",
+                schema: outputStrategy.jsonSchema,
+                name: schemaName,
+                description: schemaDescription
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: await convertToLanguageModelPrompt({
+                prompt: standardizedPrompt,
+                modelSupportsImageUrls: model.supportsImageUrls,
+                modelSupportsUrl: model.supportsUrl
               }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: model.supportsUrl
-          }),
-          providerMetadata,
-          abortSignal,
-          headers
-        };
-        transformer = {
-          transform: (chunk, controller) => {
-            switch (chunk.type) {
-              case "text-delta":
-                controller.enqueue(chunk.textDelta);
-                break;
-              case "response-metadata":
-              case "finish":
-              case "error":
-                controller.enqueue(chunk);
-                break;
+              providerMetadata: inputProviderMetadata,
+              abortSignal,
+              headers
+            };
+            transformer = {
+              transform: (chunk, controller) => {
+                switch (chunk.type) {
+                  case "text-delta":
+                    controller.enqueue(chunk.textDelta);
+                    break;
+                  case "response-metadata":
+                  case "finish":
+                  case "error":
+                    controller.enqueue(chunk);
+                    break;
+                }
                 }
-          }
-
-
+            };
+            break;
+          }
+          case "tool": {
+            const standardizedPrompt = standardizePrompt({
+              prompt: { system, prompt, messages },
+              tools: void 0
+            });
+            callOptions = {
+              mode: {
+                type: "object-tool",
+                tool: {
+                  type: "function",
+                  name: schemaName != null ? schemaName : "json",
+                  description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+                  parameters: outputStrategy.jsonSchema
+                }
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: await convertToLanguageModelPrompt({
+                prompt: standardizedPrompt,
+                modelSupportsImageUrls: model.supportsImageUrls,
+                modelSupportsUrl: model.supportsUrl
+              }),
+              providerMetadata: inputProviderMetadata,
+              abortSignal,
+              headers
+            };
+            transformer = {
+              transform(chunk, controller) {
+                switch (chunk.type) {
+                  case "tool-call-delta":
+                    controller.enqueue(chunk.argsTextDelta);
+                    break;
+                  case "response-metadata":
+                  case "finish":
+                  case "error":
+                    controller.enqueue(chunk);
+                    break;
+                }
+              }
+            };
+            break;
+          }
+          case void 0: {
+            throw new Error(
+              "Model does not have a default object generation mode."
+            );
+          }
+          default: {
+            const _exhaustiveCheck = mode;
+            throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+          }
         }
-
-
-
-
-
-
-
-
-
-
-
-
-
+        const {
+          result: { stream, warnings, rawResponse, request },
+          doStreamSpan,
+          startTimestampMs
+        } = await retry(
+          () => recordSpan({
+            name: "ai.streamObject.doStream",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.streamObject.doStream",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => callOptions.inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(callOptions.prompt)
+                },
+                "ai.settings.mode": mode,
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.presence_penalty": settings.presencePenalty,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_k": settings.topK,
+                "gen_ai.request.top_p": settings.topP
               }
-            },
-            ...prepareCallSettings(settings),
-            inputFormat: standardizedPrompt.type,
-            prompt: await convertToLanguageModelPrompt({
-              prompt: standardizedPrompt,
-              modelSupportsImageUrls: model.supportsImageUrls,
-              modelSupportsUrl: model.supportsUrl
             }),
-
-
-
-
-
-
+            tracer,
+            endWhenDone: false,
+            fn: async (doStreamSpan2) => ({
+              startTimestampMs: now2(),
+              doStreamSpan: doStreamSpan2,
+              result: await model.doStream(callOptions)
+            })
+          })
+        );
+        self.requestPromise.resolve(request != null ? request : {});
+        let usage;
+        let finishReason;
+        let providerMetadata;
+        let object;
+        let error;
+        let accumulatedText = "";
+        let textDelta = "";
+        let response = {
+          id: generateId3(),
+          timestamp: currentDate(),
+          modelId: model.modelId
+        };
+        let latestObjectJson = void 0;
+        let latestObject = void 0;
+        let isFirstChunk = true;
+        let isFirstDelta = true;
+        const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+          new TransformStream({
+            async transform(chunk, controller) {
+              var _a11, _b, _c;
+              if (isFirstChunk) {
+                const msToFirstChunk = now2() - startTimestampMs;
+                isFirstChunk = false;
+                doStreamSpan.addEvent("ai.stream.firstChunk", {
+                  "ai.stream.msToFirstChunk": msToFirstChunk
+                });
+                doStreamSpan.setAttributes({
+                  "ai.stream.msToFirstChunk": msToFirstChunk
+                });
+              }
+              if (typeof chunk === "string") {
+                accumulatedText += chunk;
+                textDelta += chunk;
+                const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
+                if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
+                  const validationResult = outputStrategy.validatePartialResult({
+                    value: currentObjectJson,
+                    textDelta,
+                    latestObject,
+                    isFirstDelta,
+                    isFinalDelta: parseState === "successful-parse"
+                  });
+                  if (validationResult.success && !isDeepEqualData(
+                    latestObject,
+                    validationResult.value.partial
+                  )) {
+                    latestObjectJson = currentObjectJson;
+                    latestObject = validationResult.value.partial;
+                    controller.enqueue({
+                      type: "object",
+                      object: latestObject
+                    });
+                    controller.enqueue({
+                      type: "text-delta",
+                      textDelta: validationResult.value.textDelta
+                    });
+                    textDelta = "";
+                    isFirstDelta = false;
+                  }
+                }
+                return;
+              }
               switch (chunk.type) {
-              case "
-
+                case "response-metadata": {
+                  response = {
+                    id: (_a11 = chunk.id) != null ? _a11 : response.id,
+                    timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+                    modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+                  };
                   break;
-
-              case "finish":
-
+                }
+                case "finish": {
+                  if (textDelta !== "") {
+                    controller.enqueue({ type: "text-delta", textDelta });
+                  }
+                  finishReason = chunk.finishReason;
+                  usage = calculateLanguageModelUsage(chunk.usage);
+                  providerMetadata = chunk.providerMetadata;
+                  controller.enqueue({ ...chunk, usage, response });
+                  self.usagePromise.resolve(usage);
+                  self.providerMetadataPromise.resolve(providerMetadata);
+                  self.responsePromise.resolve({
+                    ...response,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
+                  });
+                  const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+                  if (validationResult.success) {
+                    object = validationResult.value;
+                    self.objectPromise.resolve(object);
+                  } else {
+                    error = validationResult.error;
+                    self.objectPromise.reject(error);
+                  }
+                  break;
+                }
+                default: {
                   controller.enqueue(chunk);
                   break;
+                }
+              }
+            },
+            // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+            async flush(controller) {
+              try {
+                const finalUsage = usage != null ? usage : {
+                  promptTokens: NaN,
+                  completionTokens: NaN,
+                  totalTokens: NaN
+                };
+                doStreamSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.response.finishReason": finishReason,
+                      "ai.response.object": {
+                        output: () => JSON.stringify(object)
+                      },
+                      "ai.response.id": response.id,
+                      "ai.response.model": response.modelId,
+                      "ai.response.timestamp": response.timestamp.toISOString(),
+                      "ai.usage.promptTokens": finalUsage.promptTokens,
+                      "ai.usage.completionTokens": finalUsage.completionTokens,
+                      // standardized gen-ai llm span attributes:
+                      "gen_ai.response.finish_reasons": [finishReason],
+                      "gen_ai.response.id": response.id,
+                      "gen_ai.response.model": response.modelId,
+                      "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+                      "gen_ai.usage.output_tokens": finalUsage.completionTokens
+                    }
+                  })
+                );
+                doStreamSpan.end();
+                rootSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.usage.promptTokens": finalUsage.promptTokens,
+                      "ai.usage.completionTokens": finalUsage.completionTokens,
+                      "ai.response.object": {
+                        output: () => JSON.stringify(object)
+                      }
+                    }
+                  })
+                );
+                await (onFinish == null ? void 0 : onFinish({
+                  usage: finalUsage,
+                  object,
+                  error,
+                  response: {
+                    ...response,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
+                  },
+                  warnings,
+                  experimental_providerMetadata: providerMetadata
+                }));
+              } catch (error2) {
+                controller.error(error2);
+              } finally {
+                rootSpan.end();
               }
             }
-        };
-        break;
-      }
-      case void 0: {
-        throw new Error(
-          "Model does not have a default object generation mode."
-        );
-      }
-      default: {
-        const _exhaustiveCheck = mode;
-        throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
-      }
-    }
-    const {
-      result: { stream, warnings, rawResponse, request },
-      doStreamSpan,
-      startTimestampMs
-    } = await retry(
-      () => recordSpan({
-        name: "ai.streamObject.doStream",
-        attributes: selectTelemetryAttributes({
-          telemetry,
-          attributes: {
-            ...assembleOperationName({
-              operationId: "ai.streamObject.doStream",
-              telemetry
-            }),
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": {
-              input: () => callOptions.inputFormat
-            },
-            "ai.prompt.messages": {
-              input: () => JSON.stringify(callOptions.prompt)
-            },
-            "ai.settings.mode": mode,
-            // standardized gen-ai llm span attributes:
-            "gen_ai.system": model.provider,
-            "gen_ai.request.model": model.modelId,
-            "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-            "gen_ai.request.max_tokens": settings.maxTokens,
-            "gen_ai.request.presence_penalty": settings.presencePenalty,
-            "gen_ai.request.temperature": settings.temperature,
-            "gen_ai.request.top_k": settings.topK,
-            "gen_ai.request.top_p": settings.topP
-          }
-        }),
-        tracer,
-        endWhenDone: false,
-        fn: async (doStreamSpan2) => ({
-          startTimestampMs: now2(),
-          doStreamSpan: doStreamSpan2,
-          result: await model.doStream(callOptions)
           })
+        );
+        self.stitchableStream.addStream(transformedStream);
+      }
+    }).catch((error) => {
+      self.stitchableStream.addStream(
+        new ReadableStream({
+          start(controller) {
+            controller.error(error);
+          }
         })
       );
-
-
-
-      warnings,
-      rawResponse,
-      request: request != null ? request : {},
-      onFinish,
-      rootSpan,
-      doStreamSpan,
-      telemetry,
-      startTimestampMs,
-      modelId: model.modelId,
-      now: now2,
-      currentDate,
-      generateId: generateId3
-    });
-  }
-  });
-}
-var DefaultStreamObjectResult = class {
-  constructor({
-    stream,
-    warnings,
-    rawResponse,
-    request,
-    outputStrategy,
-    onFinish,
-    rootSpan,
-    doStreamSpan,
-    telemetry,
-    startTimestampMs,
-    modelId,
-    now: now2,
-    currentDate,
-    generateId: generateId3
-  }) {
-    this.warnings = warnings;
+    }).finally(() => {
+      self.stitchableStream.close();
+    });
     this.outputStrategy = outputStrategy;
-    this.request = Promise.resolve(request);
-    this.objectPromise = new DelayedPromise();
-    const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
-    this.usage = usagePromise;
-    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
-    this.response = responsePromise;
-    const {
-      resolve: resolveProviderMetadata,
-      promise: providerMetadataPromise
-    } = createResolvablePromise();
-    this.experimental_providerMetadata = providerMetadataPromise;
-    let usage;
-    let finishReason;
-    let providerMetadata;
-    let object;
-    let error;
-    let accumulatedText = "";
-    let textDelta = "";
-    let response = {
-      id: generateId3(),
-      timestamp: currentDate(),
-      modelId
-    };
-    let latestObjectJson = void 0;
-    let latestObject = void 0;
-    let isFirstChunk = true;
-    let isFirstDelta = true;
-    const self = this;
-    this.originalStream = stream.pipeThrough(
-      new TransformStream({
-        async transform(chunk, controller) {
-          var _a11, _b, _c;
-          if (isFirstChunk) {
-            const msToFirstChunk = now2() - startTimestampMs;
-            isFirstChunk = false;
-            doStreamSpan.addEvent("ai.stream.firstChunk", {
-              "ai.stream.msToFirstChunk": msToFirstChunk
-            });
-            doStreamSpan.setAttributes({
-              "ai.stream.msToFirstChunk": msToFirstChunk
-            });
-          }
-          if (typeof chunk === "string") {
-            accumulatedText += chunk;
-            textDelta += chunk;
-            const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
-            if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
-              const validationResult = outputStrategy.validatePartialResult({
-                value: currentObjectJson,
-                textDelta,
-                latestObject,
-                isFirstDelta,
-                isFinalDelta: parseState === "successful-parse"
-              });
-              if (validationResult.success && !isDeepEqualData(latestObject, validationResult.value.partial)) {
-                latestObjectJson = currentObjectJson;
-                latestObject = validationResult.value.partial;
-                controller.enqueue({
-                  type: "object",
-                  object: latestObject
-                });
-                controller.enqueue({
-                  type: "text-delta",
-                  textDelta: validationResult.value.textDelta
-                });
-                textDelta = "";
-                isFirstDelta = false;
-              }
-            }
-            return;
-          }
-          switch (chunk.type) {
-            case "response-metadata": {
-              response = {
-                id: (_a11 = chunk.id) != null ? _a11 : response.id,
-                timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
-                modelId: (_c = chunk.modelId) != null ? _c : response.modelId
-              };
-              break;
-            }
-            case "finish": {
-              if (textDelta !== "") {
-                controller.enqueue({ type: "text-delta", textDelta });
-              }
-              finishReason = chunk.finishReason;
-              usage = calculateLanguageModelUsage(chunk.usage);
-              providerMetadata = chunk.providerMetadata;
-              controller.enqueue({ ...chunk, usage, response });
-              resolveUsage(usage);
-              resolveProviderMetadata(providerMetadata);
-              resolveResponse({
-                ...response,
-                headers: rawResponse == null ? void 0 : rawResponse.headers
-              });
-              const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
-              if (validationResult.success) {
-                object = validationResult.value;
-                self.objectPromise.resolve(object);
-              } else {
-                error = validationResult.error;
-                self.objectPromise.reject(error);
-              }
-              break;
-            }
-            default: {
-              controller.enqueue(chunk);
-              break;
-            }
-          }
-        },
-        // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
-        async flush(controller) {
-          try {
-            const finalUsage = usage != null ? usage : {
-              promptTokens: NaN,
-              completionTokens: NaN,
-              totalTokens: NaN
-            };
-            doStreamSpan.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.response.finishReason": finishReason,
-                  "ai.response.object": {
-                    output: () => JSON.stringify(object)
-                  },
-                  "ai.response.id": response.id,
-                  "ai.response.model": response.modelId,
-                  "ai.response.timestamp": response.timestamp.toISOString(),
-                  "ai.usage.promptTokens": finalUsage.promptTokens,
-                  "ai.usage.completionTokens": finalUsage.completionTokens,
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [finishReason],
-                  "gen_ai.response.id": response.id,
-                  "gen_ai.response.model": response.modelId,
-                  "gen_ai.usage.input_tokens": finalUsage.promptTokens,
-                  "gen_ai.usage.output_tokens": finalUsage.completionTokens
-                }
-              })
-            );
-            doStreamSpan.end();
-            rootSpan.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.usage.promptTokens": finalUsage.promptTokens,
-                  "ai.usage.completionTokens": finalUsage.completionTokens,
-                  "ai.response.object": {
-                    output: () => JSON.stringify(object)
-                  }
-                }
-              })
-            );
-            await (onFinish == null ? void 0 : onFinish({
-              usage: finalUsage,
-              object,
-              error,
-              response: {
-                ...response,
-                headers: rawResponse == null ? void 0 : rawResponse.headers
-              },
-              warnings,
-              experimental_providerMetadata: providerMetadata
-            }));
-          } catch (error2) {
-            controller.error(error2);
-          } finally {
-            rootSpan.end();
-          }
-        }
-      })
-    );
   }
   get object() {
     return this.objectPromise.value;
   }
+  get usage() {
+    return this.usagePromise.value;
+  }
+  get experimental_providerMetadata() {
+    return this.providerMetadataPromise.value;
+  }
+  get warnings() {
+    return this.warningsPromise.value;
+  }
+  get request() {
+    return this.requestPromise.value;
+  }
+  get response() {
+    return this.responsePromise.value;
+  }
   get partialObjectStream() {
-    return createAsyncIterableStream(this.
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         switch (chunk.type) {
           case "object":
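The net effect of this hunk: `streamObject` is no longer `async`. It returns a `DefaultStreamObjectResult` immediately; the model call, retries, and telemetry spans run inside the constructor, chunks flow through the stitchable stream, and results surface via `DelayedPromise`-backed getters (`usage`, `response`, `object`, and so on). For callers this means dropping the `await` in front of the call, roughly as in this sketch (the provider, model id, and schema are placeholders, not part of this diff):

```ts
import { streamObject } from "ai";
import { openai } from "@ai-sdk/openai"; // any provider; used here as an example
import { z } from "zod";

// With this release the call returns synchronously; streaming starts
// in the background instead of behind an initial `await`.
const result = streamObject({
  model: openai("gpt-4o-mini"),
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: "Suggest a title and tags for a post about web streams.",
});

for await (const partialObject of result.partialObjectStream) {
  console.log(partialObject); // progressively completed object
}
console.log(await result.object); // final, schema-validated object
```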
@@ -2849,10 +2951,12 @@ var DefaultStreamObjectResult = class {
     });
   }
   get elementStream() {
-    return this.outputStrategy.createElementStream(
+    return this.outputStrategy.createElementStream(
+      this.stitchableStream.stream
+    );
   }
   get textStream() {
-    return createAsyncIterableStream(this.
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         switch (chunk.type) {
           case "text-delta":
@@ -2873,7 +2977,7 @@ var DefaultStreamObjectResult = class {
     });
   }
   get fullStream() {
-    return createAsyncIterableStream(this.
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         controller.enqueue(chunk);
       }
@@ -3187,15 +3291,16 @@ async function generateText({
     };
     let stepType = "initial";
     do {
-      if (stepCount === 1) {
-        initialPrompt.type = "messages";
-      }
       const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
+      const stepInputMessages = [
+        ...initialPrompt.messages,
+        ...responseMessages
+      ];
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: {
           type: promptFormat,
           system: initialPrompt.system,
-          messages:
+          messages: stepInputMessages
         },
         modelSupportsImageUrls: model.supportsImageUrls,
         modelSupportsUrl: model.supportsUrl
@@ -3291,6 +3396,7 @@ async function generateText({
         tools,
         tracer,
         telemetry,
+        messages: stepInputMessages,
         abortSignal
       });
       const currentUsage = calculateLanguageModelUsage(
@@ -3401,6 +3507,7 @@ async function executeTools({
   tools,
   tracer,
   telemetry,
+  messages,
   abortSignal
 }) {
   const toolResults = await Promise.all(
@@ -3427,7 +3534,10 @@ async function executeTools({
         }),
         tracer,
         fn: async (span) => {
-          const result2 = await tool2.execute(toolCall.args, {
+          const result2 = await tool2.execute(toolCall.args, {
+            messages,
+            abortSignal
+          });
           try {
             span.setAttributes(
               selectTelemetryAttributes({
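The two hunks above thread a `messages` array through to tool execution: `tool2.execute(toolCall.args, { messages, abortSignal })` now receives the step's input messages alongside the abort signal, so a tool can inspect the conversation so far and honor cancellation. A sketch of a tool using these options via the public `tool` helper (the fetch URL is a placeholder, not anything referenced in this diff):

```ts
import { tool } from "ai";
import { z } from "zod";

const weatherTool = tool({
  description: "Get the current weather for a city",
  parameters: z.object({ city: z.string() }),
  // The second argument gains `messages` in this release; `abortSignal`
  // forwards the caller's signal to long-running tool work.
  execute: async ({ city }, { messages, abortSignal }) => {
    console.log(`invoked after ${messages.length} prior messages`);
    const res = await fetch(
      `https://weather.example.com/?city=${encodeURIComponent(city)}`, // placeholder
      { signal: abortSignal },
    );
    return res.json();
  },
});
```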
@@ -3474,69 +3584,7 @@ var DefaultGenerateTextResult = class {
 
 // core/generate-text/stream-text.ts
 import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
-import {
-
-// core/util/create-stitchable-stream.ts
-function createStitchableStream() {
-  let innerStreamReaders = [];
-  let controller = null;
-  let isClosed = false;
-  const processPull = async () => {
-    if (isClosed && innerStreamReaders.length === 0) {
-      controller == null ? void 0 : controller.close();
-      return;
-    }
-    if (innerStreamReaders.length === 0) {
-      return;
-    }
-    try {
-      const { value, done } = await innerStreamReaders[0].read();
-      if (done) {
-        innerStreamReaders.shift();
-        if (innerStreamReaders.length > 0) {
-          await processPull();
-        } else if (isClosed) {
-          controller == null ? void 0 : controller.close();
-        }
-      } else {
-        controller == null ? void 0 : controller.enqueue(value);
-      }
-    } catch (error) {
-      controller == null ? void 0 : controller.error(error);
-      innerStreamReaders.shift();
-      if (isClosed && innerStreamReaders.length === 0) {
-        controller == null ? void 0 : controller.close();
-      }
-    }
-  };
-  return {
-    stream: new ReadableStream({
-      start(controllerParam) {
-        controller = controllerParam;
-      },
-      pull: processPull,
-      async cancel() {
-        for (const reader of innerStreamReaders) {
-          await reader.cancel();
-        }
-        innerStreamReaders = [];
-        isClosed = true;
-      }
-    }),
-    addStream: (innerStream) => {
-      if (isClosed) {
-        throw new Error("Cannot add inner stream: outer stream is closed");
-      }
-      innerStreamReaders.push(innerStream.getReader());
-    },
-    close: () => {
-      isClosed = true;
-      if (innerStreamReaders.length === 0) {
-        controller == null ? void 0 : controller.close();
-      }
-    }
-  };
-}
+import { formatDataStreamPart } from "@ai-sdk/ui-utils";
 
 // core/util/merge-streams.ts
 function mergeStreams(stream1, stream2) {
@@ -3634,6 +3682,7 @@ function runToolsTransformation({
   toolCallStreaming,
   tracer,
   telemetry,
+  messages,
   abortSignal
 }) {
   let toolResultsStreamController = null;
@@ -3729,7 +3778,10 @@ function runToolsTransformation({
             }
           }),
           tracer,
-          fn: async (span) => tool2.execute(toolCall.args, {
+          fn: async (span) => tool2.execute(toolCall.args, {
+            messages,
+            abortSignal
+          }).then(
             (result) => {
               toolResultsStreamController.enqueue({
                 ...toolCall,
@@ -3821,7 +3873,7 @@ function runToolsTransformation({
 
 // core/generate-text/stream-text.ts
 var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
-async function streamText({
+function streamText({
   model,
   tools,
   toolChoice,
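The removed line above is `streamText`'s old `async` signature (the next hunk's header still shows the enclosing old context as `async function streamText({`); the replacement makes `streamText` synchronous, mirroring the `streamObject` change earlier in this diff. A caller-side sketch (provider and model id are placeholders):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai"; // example provider

// v4-style call: no `await` in front of streamText.
const result = streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Write a haiku about diff viewers.",
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
console.log(await result.usage); // resolves once the stream finishes
```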
@@ -3837,594 +3889,587 @@ async function streamText({
|
|
3837
3889
|
experimental_providerMetadata: providerMetadata,
|
3838
3890
|
experimental_toolCallStreaming: toolCallStreaming = false,
|
3839
3891
|
experimental_activeTools: activeTools,
|
3840
|
-
onChunk,
|
3841
|
-
onFinish,
|
3842
|
-
onStepFinish,
|
3843
|
-
_internal: {
|
3844
|
-
now: now2 = now,
|
3845
|
-
generateId: generateId3 = originalGenerateId4,
|
3846
|
-
currentDate = () => /* @__PURE__ */ new Date()
|
3847
|
-
} = {},
|
3848
|
-
...settings
|
3849
|
-
}) {
|
3850
|
-
|
3851
|
-
|
3852
|
-
|
3853
|
-
|
3854
|
-
|
3855
|
-
|
3856
|
-
|
3857
|
-
|
3858
|
-
|
3859
|
-
|
3860
|
-
|
3861
|
-
|
3862
|
-
|
3863
|
-
|
3864
|
-
|
3865
|
-
|
3866
|
-
|
3867
|
-
|
3868
|
-
|
3869
|
-
|
3870
|
-
|
3871
|
-
|
3872
|
-
|
3873
|
-
...assembleOperationName({ operationId: "ai.streamText", telemetry }),
|
3874
|
-
...baseTelemetryAttributes,
|
3875
|
-
// specific settings that only make sense on the outer level:
|
3876
|
-
"ai.prompt": {
|
3877
|
-
input: () => JSON.stringify({ system, prompt, messages })
|
3878
|
-
},
|
3879
|
-
"ai.settings.maxSteps": maxSteps
|
3880
|
-
}
|
3881
|
-
}),
|
3882
|
-
tracer,
|
3883
|
-
endWhenDone: false,
|
3884
|
-
fn: async (rootSpan) => {
|
3885
|
-
const retry = retryWithExponentialBackoff({ maxRetries });
|
3886
|
-
const startStep = async ({
|
3887
|
-
responseMessages
|
3888
|
-
}) => {
|
3889
|
-
const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
|
3890
|
-
const promptMessages = await convertToLanguageModelPrompt({
|
3891
|
-
prompt: {
|
3892
|
-
type: promptFormat,
|
3893
|
-
system: initialPrompt.system,
|
3894
|
-
messages: [...initialPrompt.messages, ...responseMessages]
|
3895
|
-
},
|
3896
|
-
modelSupportsImageUrls: model.supportsImageUrls,
|
3897
|
-
modelSupportsUrl: model.supportsUrl
|
3898
|
-
});
|
3899
|
-
const mode = {
|
3900
|
-
type: "regular",
|
3901
|
-
...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
|
3902
|
-
};
|
3903
|
-
const {
|
3904
|
-
result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
|
3905
|
-
doStreamSpan: doStreamSpan2,
|
3906
|
-
startTimestampMs: startTimestampMs2
|
3907
|
-
} = await retry(
|
3908
|
-
() => recordSpan({
|
3909
|
-
name: "ai.streamText.doStream",
|
3910
|
-
attributes: selectTelemetryAttributes({
|
3911
|
-
telemetry,
|
3912
|
-
attributes: {
|
3913
|
-
...assembleOperationName({
|
3914
|
-
operationId: "ai.streamText.doStream",
|
3915
|
-
telemetry
|
3916
|
-
}),
|
3917
|
-
...baseTelemetryAttributes,
|
3918
|
-
"ai.prompt.format": {
|
3919
|
-
input: () => promptFormat
|
3920
|
-
},
|
3921
|
-
"ai.prompt.messages": {
|
3922
|
-
input: () => JSON.stringify(promptMessages)
|
3923
|
-
},
|
3924
|
-
"ai.prompt.tools": {
|
3925
|
-
// convert the language model level tools:
|
3926
|
-
input: () => {
|
3927
|
-
var _a11;
|
3928
|
-
return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
|
3929
|
-
}
|
3930
|
-
},
|
3931
|
-
"ai.prompt.toolChoice": {
|
3932
|
-
input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
|
3933
|
-
},
|
3934
|
-
// standardized gen-ai llm span attributes:
|
3935
|
-
"gen_ai.system": model.provider,
|
3936
|
-
"gen_ai.request.model": model.modelId,
|
3937
|
-
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
3938
|
-
"gen_ai.request.max_tokens": settings.maxTokens,
|
3939
|
-
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
3940
|
-
"gen_ai.request.stop_sequences": settings.stopSequences,
|
3941
|
-
"gen_ai.request.temperature": settings.temperature,
|
3942
|
-
"gen_ai.request.top_k": settings.topK,
|
3943
|
-
"gen_ai.request.top_p": settings.topP
|
3944
|
-
}
|
3945
|
-
}),
|
3946
|
-
tracer,
|
3947
|
-
endWhenDone: false,
|
3948
|
-
fn: async (doStreamSpan3) => ({
|
3949
|
-
startTimestampMs: now2(),
|
3950
|
-
// get before the call
|
3951
|
-
doStreamSpan: doStreamSpan3,
|
3952
|
-
result: await model.doStream({
|
3953
|
-
mode,
|
3954
|
-
...prepareCallSettings(settings),
|
3955
|
-
inputFormat: promptFormat,
|
3956
|
-
prompt: promptMessages,
|
3957
|
-
providerMetadata,
|
3958
|
-
abortSignal,
|
3959
|
-
headers
|
3960
|
-
})
|
3961
|
-
})
|
3962
|
-
})
|
3963
|
-
);
|
3964
|
-
return {
|
3965
|
-
result: {
|
3966
|
-
stream: runToolsTransformation({
|
3967
|
-
tools,
|
3968
|
-
generatorStream: stream2,
|
3969
|
-
toolCallStreaming,
|
3970
|
-
tracer,
|
3971
|
-
telemetry,
|
3972
|
-
abortSignal
|
3973
|
-
}),
|
3974
|
-
warnings: warnings2,
|
3975
|
-
request: request2 != null ? request2 : {},
|
3976
|
-
rawResponse: rawResponse2
|
3977
|
-
},
|
3978
|
-
doStreamSpan: doStreamSpan2,
|
3979
|
-
startTimestampMs: startTimestampMs2
|
3980
|
-
};
|
3981
|
-
};
|
3982
|
-
const {
|
3983
|
-
result: { stream, warnings, rawResponse, request },
|
3984
|
-
doStreamSpan,
|
3985
|
-
startTimestampMs
|
3986
|
-
} = await startStep({ responseMessages: [] });
|
3987
|
-
return new DefaultStreamTextResult({
|
3988
|
-
stream,
|
3989
|
-
warnings,
|
3990
|
-
rawResponse,
|
3991
|
-
request,
|
3992
|
-
onChunk,
|
3993
|
-
onFinish,
|
3994
|
-
onStepFinish,
|
3995
|
-
rootSpan,
|
3996
|
-
doStreamSpan,
|
3997
|
-
telemetry,
|
3998
|
-
startTimestampMs,
|
3999
|
-
maxSteps,
|
4000
|
-
continueSteps,
|
4001
|
-
startStep,
|
4002
|
-
modelId: model.modelId,
|
4003
|
-
now: now2,
|
4004
|
-
currentDate,
|
4005
|
-
generateId: generateId3,
|
4006
|
-
tools
|
4007
|
-
});
|
4008
|
-
}
|
3892
|
+
onChunk,
|
3893
|
+
onFinish,
|
3894
|
+
onStepFinish,
|
3895
|
+
_internal: {
|
3896
|
+
now: now2 = now,
|
3897
|
+
generateId: generateId3 = originalGenerateId4,
|
3898
|
+
currentDate = () => /* @__PURE__ */ new Date()
|
3899
|
+
} = {},
|
3900
|
+
...settings
|
3901
|
+
}) {
|
3902
|
+
return new DefaultStreamTextResult({
|
3903
|
+
model,
|
3904
|
+
telemetry,
|
3905
|
+
headers,
|
3906
|
+
settings,
|
3907
|
+
maxRetries,
|
3908
|
+
abortSignal,
|
3909
|
+
system,
|
3910
|
+
prompt,
|
3911
|
+
messages,
|
3912
|
+
tools,
|
3913
|
+
toolChoice,
|
3914
|
+
toolCallStreaming,
|
3915
|
+
activeTools,
|
3916
|
+
maxSteps,
|
3917
|
+
continueSteps,
|
3918
|
+
providerMetadata,
|
3919
|
+
onChunk,
|
3920
|
+
onFinish,
|
3921
|
+
onStepFinish,
|
3922
|
+
now: now2,
|
3923
|
+
currentDate,
|
3924
|
+
generateId: generateId3
|
4009
3925
|
});
|
4010
3926
|
}
|
4011
3927
|
var DefaultStreamTextResult = class {
|
4012
3928
|
constructor({
|
4013
|
-
|
4014
|
-
warnings,
|
4015
|
-
rawResponse,
|
4016
|
-
request,
|
4017
|
-
onChunk,
|
4018
|
-
onFinish,
|
4019
|
-
onStepFinish,
|
4020
|
-
rootSpan,
|
4021
|
-
doStreamSpan,
|
3929
|
+
model,
|
4022
3930
|
telemetry,
|
4023
|
-
|
3931
|
+
headers,
|
3932
|
+
settings,
|
3933
|
+
maxRetries,
|
3934
|
+
abortSignal,
|
3935
|
+
system,
|
3936
|
+
prompt,
|
3937
|
+
messages,
|
3938
|
+
tools,
|
3939
|
+
toolChoice,
|
3940
|
+
toolCallStreaming,
|
3941
|
+
activeTools,
|
4024
3942
|
maxSteps,
|
4025
3943
|
continueSteps,
|
4026
|
-
|
4027
|
-
|
3944
|
+
providerMetadata,
|
3945
|
+
onChunk,
|
3946
|
+
onFinish,
|
3947
|
+
onStepFinish,
|
4028
3948
|
now: now2,
|
4029
3949
|
currentDate,
|
4030
|
-
generateId: generateId3
|
4031
|
-
tools
|
3950
|
+
generateId: generateId3
|
4032
3951
|
}) {
|
4033
|
-
this.
|
4034
|
-
this.
|
4035
|
-
|
4036
|
-
this.
|
4037
|
-
|
4038
|
-
this.
|
4039
|
-
|
4040
|
-
this.
|
4041
|
-
|
4042
|
-
this.
|
4043
|
-
|
4044
|
-
|
4045
|
-
|
4046
|
-
|
4047
|
-
|
4048
|
-
|
4049
|
-
|
4050
|
-
}
|
4051
|
-
|
4052
|
-
const
|
4053
|
-
|
4054
|
-
|
4055
|
-
|
4056
|
-
|
4057
|
-
|
4058
|
-
const {
|
4059
|
-
|
4060
|
-
|
4061
|
-
|
4062
|
-
} = createStitchableStream();
|
4063
|
-
this.originalStream = stitchableStream;
|
4064
|
-
const stepResults = [];
|
3952
|
+
this.warningsPromise = new DelayedPromise();
|
3953
|
+
this.usagePromise = new DelayedPromise();
|
3954
|
+
this.finishReasonPromise = new DelayedPromise();
|
3955
|
+
this.providerMetadataPromise = new DelayedPromise();
|
3956
|
+
this.textPromise = new DelayedPromise();
|
3957
|
+
this.toolCallsPromise = new DelayedPromise();
|
3958
|
+
this.toolResultsPromise = new DelayedPromise();
|
3959
|
+
this.requestPromise = new DelayedPromise();
|
3960
|
+
this.responsePromise = new DelayedPromise();
|
3961
|
+
this.stepsPromise = new DelayedPromise();
|
3962
|
+
this.stitchableStream = createStitchableStream();
|
3963
|
+
if (maxSteps < 1) {
|
3964
|
+
throw new InvalidArgumentError({
|
3965
|
+
parameter: "maxSteps",
|
3966
|
+
value: maxSteps,
|
3967
|
+
message: "maxSteps must be at least 1"
|
3968
|
+
});
|
3969
|
+
}
|
3970
|
+
const tracer = getTracer(telemetry);
|
3971
|
+
const baseTelemetryAttributes = getBaseTelemetryAttributes({
|
3972
|
+
model,
|
3973
|
+
telemetry,
|
3974
|
+
headers,
|
3975
|
+
settings: { ...settings, maxRetries }
|
3976
|
+
});
|
3977
|
+
const initialPrompt = standardizePrompt({
|
3978
|
+
prompt: { system, prompt, messages },
|
3979
|
+
tools
|
3980
|
+
});
|
4065
3981
|
const self = this;
|
4066
|
-
|
4067
|
-
|
4068
|
-
|
4069
|
-
|
4070
|
-
|
4071
|
-
|
4072
|
-
|
4073
|
-
|
4074
|
-
|
4075
|
-
|
4076
|
-
|
4077
|
-
|
4078
|
-
|
4079
|
-
|
4080
|
-
|
4081
|
-
|
4082
|
-
|
4083
|
-
|
4084
|
-
|
4085
|
-
|
4086
|
-
|
4087
|
-
|
4088
|
-
|
4089
|
-
|
4090
|
-
|
4091
|
-
|
4092
|
-
|
4093
|
-
|
4094
|
-
|
4095
|
-
|
4096
|
-
|
4097
|
-
|
4098
|
-
|
4099
|
-
|
4100
|
-
|
4101
|
-
|
4102
|
-
|
4103
|
-
|
4104
|
-
|
4105
|
-
|
4106
|
-
|
4107
|
-
|
4108
|
-
|
4109
|
-
|
4110
|
-
|
4111
|
-
|
4112
|
-
|
4113
|
-
|
4114
|
-
|
4115
|
-
|
4116
|
-
|
4117
|
-
|
4118
|
-
|
4119
|
-
|
4120
|
-
|
4121
|
-
|
4122
|
-
|
4123
|
-
|
4124
|
-
|
4125
|
-
|
4126
|
-
|
4127
|
-
|
4128
|
-
|
4129
|
-
|
4130
|
-
|
4131
|
-
|
4132
|
-
|
4133
|
-
|
4134
|
-
|
4135
|
-
|
4136
|
-
|
4137
|
-
const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
|
4138
|
-
if (trimmedChunkText.length === 0) {
|
4139
|
-
break;
|
3982
|
+
recordSpan({
|
3983
|
+
name: "ai.streamText",
|
3984
|
+
attributes: selectTelemetryAttributes({
|
3985
|
+
telemetry,
|
3986
|
+
attributes: {
|
3987
|
+
...assembleOperationName({ operationId: "ai.streamText", telemetry }),
|
3988
|
+
...baseTelemetryAttributes,
|
3989
|
+
// specific settings that only make sense on the outer level:
|
3990
|
+
"ai.prompt": {
|
3991
|
+
input: () => JSON.stringify({ system, prompt, messages })
|
3992
|
+
},
|
3993
|
+
"ai.settings.maxSteps": maxSteps
|
3994
|
+
}
|
3995
|
+
}),
|
3996
|
+
tracer,
|
3997
|
+
endWhenDone: false,
|
3998
|
+
fn: async (rootSpan) => {
|
3999
|
+
const retry = retryWithExponentialBackoff({ maxRetries });
|
4000
|
+
const stepResults = [];
|
4001
|
+
async function streamStep({
|
4002
|
+
currentStep,
|
4003
|
+
responseMessages,
|
4004
|
+
usage,
|
4005
|
+
stepType,
|
4006
|
+
previousStepText,
|
4007
|
+
hasLeadingWhitespace
|
4008
|
+
}) {
|
4009
|
+
const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
|
4010
|
+
const stepInputMessages = [
|
4011
|
+
...initialPrompt.messages,
|
4012
|
+
...responseMessages
|
4013
|
+
];
|
4014
|
+
const promptMessages = await convertToLanguageModelPrompt({
|
4015
|
+
prompt: {
|
4016
|
+
type: promptFormat,
|
4017
|
+
system: initialPrompt.system,
|
4018
|
+
messages: stepInputMessages
|
4019
|
+
},
|
4020
|
+
modelSupportsImageUrls: model.supportsImageUrls,
|
4021
|
+
modelSupportsUrl: model.supportsUrl
|
4022
|
+
});
|
4023
|
+
const mode = {
|
4024
|
+
type: "regular",
|
4025
|
+
...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
|
4026
|
+
};
|
4027
|
+
const {
|
4028
|
+
result: { stream, warnings, rawResponse, request },
|
4029
|
+
doStreamSpan,
|
4030
|
+
startTimestampMs
|
4031
|
+
} = await retry(
|
4032
|
+
() => recordSpan({
|
4033
|
+
name: "ai.streamText.doStream",
|
4034
|
+
attributes: selectTelemetryAttributes({
|
4035
|
+
telemetry,
|
4036
|
+
attributes: {
|
4037
|
+
...assembleOperationName({
|
4038
|
+
operationId: "ai.streamText.doStream",
|
4039
|
+
telemetry
|
4040
|
+
}),
|
4041
|
+
...baseTelemetryAttributes,
|
4042
|
+
"ai.prompt.format": {
|
4043
|
+
input: () => promptFormat
|
4044
|
+
},
|
4045
|
+
"ai.prompt.messages": {
|
4046
|
+
input: () => JSON.stringify(promptMessages)
|
4047
|
+
},
|
4048
|
+
"ai.prompt.tools": {
|
4049
|
+
// convert the language model level tools:
|
4050
|
+
input: () => {
|
4051
|
+
var _a11;
|
4052
|
+
return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
|
4140
4053
|
}
|
4141
|
-
|
4142
|
-
|
4143
|
-
|
4144
|
-
|
4145
|
-
|
4146
|
-
|
4147
|
-
|
4148
|
-
|
4149
|
-
|
4150
|
-
|
4054
|
+
},
|
4055
|
+
"ai.prompt.toolChoice": {
|
4056
|
+
input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
|
4057
|
+
},
|
4058
|
+
// standardized gen-ai llm span attributes:
|
4059
|
+
"gen_ai.system": model.provider,
|
4060
|
+
"gen_ai.request.model": model.modelId,
|
4061
|
+
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
4062
|
+
"gen_ai.request.max_tokens": settings.maxTokens,
|
4063
|
+
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
4064
|
+
"gen_ai.request.stop_sequences": settings.stopSequences,
|
4065
|
+
"gen_ai.request.temperature": settings.temperature,
|
4066
|
+
"gen_ai.request.top_k": settings.topK,
|
4067
|
+
"gen_ai.request.top_p": settings.topP
|
4068
|
+
}
|
4069
|
+
}),
|
4070
|
+
tracer,
|
4071
|
+
endWhenDone: false,
|
4072
|
+
fn: async (doStreamSpan2) => ({
|
4073
|
+
startTimestampMs: now2(),
|
4074
|
+
// get before the call
|
4075
|
+
doStreamSpan: doStreamSpan2,
|
4076
|
+
result: await model.doStream({
|
4077
|
+
mode,
|
4078
|
+
...prepareCallSettings(settings),
|
4079
|
+
inputFormat: promptFormat,
|
4080
|
+
prompt: promptMessages,
|
4081
|
+
providerMetadata,
|
4082
|
+
abortSignal,
|
4083
|
+
headers
|
4084
|
+
})
|
4085
|
+
})
|
4086
|
+
})
|
4087
|
+
);
|
4088
|
+
const transformedStream = runToolsTransformation({
|
4089
|
+
tools,
|
4090
|
+
generatorStream: stream,
|
4091
|
+
toolCallStreaming,
|
4092
|
+
tracer,
|
4093
|
+
telemetry,
|
4094
|
+
messages: stepInputMessages,
|
4095
|
+
abortSignal
|
4096
|
+
});
|
4097
|
+
const stepRequest = request != null ? request : {};
|
4098
|
+
const stepToolCalls = [];
|
4099
|
+
const stepToolResults = [];
|
4100
|
+
let stepFinishReason = "unknown";
|
4101
|
+
let stepUsage = {
|
4102
|
+
promptTokens: 0,
|
4103
|
+
completionTokens: 0,
|
4104
|
+
totalTokens: 0
|
4105
|
+
};
|
4106
|
+
let stepProviderMetadata;
|
4107
|
+
let stepFirstChunk = true;
|
4108
|
+
let stepText = "";
|
4109
|
+
let fullStepText = stepType === "continue" ? previousStepText : "";
|
4110
|
+
let stepLogProbs;
|
4111
|
+
let stepResponse = {
|
4112
|
+
id: generateId3(),
|
4113
|
+
timestamp: currentDate(),
|
4114
|
+
modelId: model.modelId
|
4115
|
+
};
|
4116
|
+
let chunkBuffer = "";
|
4117
|
+
let chunkTextPublished = false;
|
4118
|
+
let inWhitespacePrefix = true;
|
4119
|
+
let hasWhitespaceSuffix = false;
|
4120
|
+
async function publishTextChunk({
|
4121
|
+
controller,
|
4122
|
+
chunk
|
4123
|
+
}) {
|
4124
|
+
controller.enqueue(chunk);
|
4125
|
+
stepText += chunk.textDelta;
|
4126
|
+
fullStepText += chunk.textDelta;
|
4127
|
+
chunkTextPublished = true;
|
4128
|
+
hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
|
4129
|
+
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4130
|
+
}
|
4131
|
+
self.stitchableStream.addStream(
|
4132
|
+
transformedStream.pipeThrough(
|
4133
|
+
new TransformStream({
|
4134
|
+
async transform(chunk, controller) {
|
4135
|
+
var _a11, _b, _c;
|
4136
|
+
if (stepFirstChunk) {
|
4137
|
+
const msToFirstChunk = now2() - startTimestampMs;
|
4138
|
+
stepFirstChunk = false;
|
4139
|
+
doStreamSpan.addEvent("ai.stream.firstChunk", {
|
4140
|
+
"ai.response.msToFirstChunk": msToFirstChunk
|
4141
|
+
});
|
4142
|
+
doStreamSpan.setAttributes({
|
4143
|
+
"ai.response.msToFirstChunk": msToFirstChunk
|
4144
|
+
});
|
4145
|
+
}
|
4146
|
+
if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
|
4147
|
+
return;
|
4148
|
+
}
|
4149
|
+
const chunkType = chunk.type;
|
4150
|
+
switch (chunkType) {
|
4151
|
+
case "text-delta": {
|
4152
|
+
if (continueSteps) {
|
4153
|
+
const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
|
4154
|
+
if (trimmedChunkText.length === 0) {
|
4155
|
+
break;
|
4156
|
+
}
|
4157
|
+
inWhitespacePrefix = false;
|
4158
|
+
chunkBuffer += trimmedChunkText;
|
4159
|
+
const split = splitOnLastWhitespace(chunkBuffer);
|
4160
|
+
if (split != null) {
|
4161
|
+
chunkBuffer = split.suffix;
|
4162
|
+
await publishTextChunk({
|
4163
|
+
controller,
|
4164
|
+
chunk: {
|
4165
|
+
type: "text-delta",
|
4166
|
+
textDelta: split.prefix + split.whitespace
|
4167
|
+
}
|
4168
|
+
});
|
4151
4169
|
}
|
4170
|
+
} else {
|
4171
|
+
await publishTextChunk({ controller, chunk });
|
4172
|
+
}
|
4173
|
+
break;
|
4174
|
+
}
|
4175
|
+
case "tool-call": {
|
4176
|
+
controller.enqueue(chunk);
|
4177
|
+
stepToolCalls.push(chunk);
|
4178
|
+
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4179
|
+
break;
|
4180
|
+
}
|
4181
|
+
case "tool-result": {
|
4182
|
+
controller.enqueue(chunk);
|
4183
|
+
stepToolResults.push(chunk);
|
4184
|
+
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4185
|
+
break;
|
4186
|
+
}
|
4187
|
+
case "response-metadata": {
|
4188
|
+
stepResponse = {
|
4189
|
+
id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
|
4190
|
+
timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
|
4191
|
+
modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
|
4192
|
+
};
|
4193
|
+
break;
|
4194
|
+
}
|
4195
|
+
case "finish": {
|
4196
|
+
stepUsage = chunk.usage;
|
4197
|
+
stepFinishReason = chunk.finishReason;
|
4198
|
+
stepProviderMetadata = chunk.experimental_providerMetadata;
|
4199
|
+
stepLogProbs = chunk.logprobs;
|
4200
|
+
const msToFinish = now2() - startTimestampMs;
|
4201
|
+
doStreamSpan.addEvent("ai.stream.finish");
|
4202
|
+
doStreamSpan.setAttributes({
|
4203
|
+
"ai.response.msToFinish": msToFinish,
|
4204
|
+
"ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
|
4152
4205
|
});
|
4206
|
+
break;
|
4153
4207
|
}
|
-          [old lines 4154-4158 not rendered]
-          case "tool-call": {
-            controller.enqueue(chunk);
-            stepToolCalls.push(chunk);
-            await (onChunk == null ? void 0 : onChunk({ chunk }));
-            break;
-          }
-          case "tool-result": {
-            controller.enqueue(chunk);
-            stepToolResults.push(chunk);
-            await (onChunk == null ? void 0 : onChunk({ chunk }));
-            break;
-          }
-          case "response-metadata": {
-            stepResponse = {
-              id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
-              timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
-              modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
-            };
-            break;
-          }
-          case "finish": {
-            stepUsage = chunk.usage;
-            stepFinishReason = chunk.finishReason;
-            stepProviderMetadata = chunk.experimental_providerMetadata;
-            stepLogProbs = chunk.logprobs;
-            const msToFinish = now2() - startTimestamp;
-            doStreamSpan2.addEvent("ai.stream.finish");
-            doStreamSpan2.setAttributes({
-              "ai.response.msToFinish": msToFinish,
-              "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
-            });
-            break;
-          }
-          case "tool-call-streaming-start":
-          case "tool-call-delta": {
-            controller.enqueue(chunk);
-            await (onChunk == null ? void 0 : onChunk({ chunk }));
-            break;
-          }
-          case "error": {
-            controller.enqueue(chunk);
-            stepFinishReason = "error";
-            break;
-          }
-          default: {
-            const exhaustiveCheck = chunkType;
-            throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
-          }
-        }
-      },
-      // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
-      async flush(controller) {
-        var _a11, _b;
-        const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
-        let nextStepType = "done";
-        if (currentStep + 1 < maxSteps) {
-          if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
-          stepToolCalls.length === 0) {
-            nextStepType = "continue";
-          } else if (
-            // there are tool calls:
-            stepToolCalls.length > 0 && // all current tool calls have results:
-            stepToolResults.length === stepToolCalls.length
-          ) {
-            nextStepType = "tool-result";
-          }
-        }
-        if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
-        stepType === "continue" && !chunkTextPublished)) {
-          await publishTextChunk({
-            controller,
-            chunk: {
-              type: "text-delta",
-              textDelta: chunkBuffer
-            }
-          });
-          chunkBuffer = "";
-        }
-        try {
-          doStreamSpan2.setAttributes(
-            selectTelemetryAttributes({
-              telemetry,
-              attributes: {
-                "ai.response.finishReason": stepFinishReason,
-                "ai.response.text": { output: () => stepText },
-                "ai.response.toolCalls": {
-                  output: () => stepToolCallsJson
-                },
-                "ai.response.id": stepResponse.id,
-                "ai.response.model": stepResponse.modelId,
-                "ai.response.timestamp": stepResponse.timestamp.toISOString(),
-                "ai.usage.promptTokens": stepUsage.promptTokens,
-                "ai.usage.completionTokens": stepUsage.completionTokens,
-                // standardized gen-ai llm span attributes:
-                "gen_ai.response.finish_reasons": [stepFinishReason],
-                "gen_ai.response.id": stepResponse.id,
-                "gen_ai.response.model": stepResponse.modelId,
-                "gen_ai.usage.input_tokens": stepUsage.promptTokens,
-                "gen_ai.usage.output_tokens": stepUsage.completionTokens
+          case "tool-call-streaming-start":
+          case "tool-call-delta": {
+            controller.enqueue(chunk);
+            await (onChunk == null ? void 0 : onChunk({ chunk }));
+            break;
           }
-          [old lines 4259-4268 not rendered]
-              experimental_providerMetadata: stepProviderMetadata,
-              logprobs: stepLogProbs,
-              response: {
-                ...stepResponse
+          case "error": {
+            controller.enqueue(chunk);
+            stepFinishReason = "error";
+            break;
+          }
+          default: {
+            const exhaustiveCheck = chunkType;
+            throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
+          }
+        }
       },
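The compiled default branch above (const exhaustiveCheck = chunkType; throw ...) is what a TypeScript never-typed exhaustiveness check looks like after transpilation: in source form it makes the compiler reject any chunk variant the switch forgets. A reduced sketch with a hypothetical two-variant union:

type Chunk =
  | { type: "text-delta"; textDelta: string }
  | { type: "tool-call"; toolName: string };

function handle(chunk: Chunk) {
  switch (chunk.type) {
    case "text-delta":
      return chunk.textDelta;
    case "tool-call":
      return chunk.toolName;
    default: {
      // compile error here if a Chunk variant is not handled above
      const exhaustiveCheck: never = chunk;
      throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
    }
  }
}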
-        [old lines 4274-4283 not rendered]
+      // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+      async flush(controller) {
+        const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+        let nextStepType = "done";
+        if (currentStep + 1 < maxSteps) {
+          if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+          stepToolCalls.length === 0) {
+            nextStepType = "continue";
+          } else if (
+            // there are tool calls:
+            stepToolCalls.length > 0 && // all current tool calls have results:
+            stepToolResults.length === stepToolCalls.length
+          ) {
+            nextStepType = "tool-result";
+          }
+        }
+        if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+        stepType === "continue" && !chunkTextPublished)) {
+          await publishTextChunk({
+            controller,
+            chunk: {
+              type: "text-delta",
+              textDelta: chunkBuffer
+            }
+          });
+          chunkBuffer = "";
+        }
+        try {
+          doStreamSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.response.finishReason": stepFinishReason,
+                "ai.response.text": { output: () => stepText },
+                "ai.response.toolCalls": {
+                  output: () => stepToolCallsJson
+                },
+                "ai.response.id": stepResponse.id,
+                "ai.response.model": stepResponse.modelId,
+                "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+                "ai.usage.promptTokens": stepUsage.promptTokens,
+                "ai.usage.completionTokens": stepUsage.completionTokens,
+                // standardized gen-ai llm span attributes:
+                "gen_ai.response.finish_reasons": [stepFinishReason],
+                "gen_ai.response.id": stepResponse.id,
+                "gen_ai.response.model": stepResponse.modelId,
+                "gen_ai.usage.input_tokens": stepUsage.promptTokens,
+                "gen_ai.usage.output_tokens": stepUsage.completionTokens
+              }
+            })
+          );
+        } catch (error) {
+        } finally {
+          doStreamSpan.end();
+        }
+        controller.enqueue({
+          type: "step-finish",
+          finishReason: stepFinishReason,
+          usage: stepUsage,
+          experimental_providerMetadata: stepProviderMetadata,
+          logprobs: stepLogProbs,
+          response: {
+            ...stepResponse
+          },
+          isContinued: nextStepType === "continue"
         });
-        [old lines 4285-4288 not rendered]
+        if (stepType === "continue") {
+          const lastMessage = responseMessages[responseMessages.length - 1];
+          if (typeof lastMessage.content === "string") {
+            lastMessage.content += stepText;
+          } else {
+            lastMessage.content.push({
+              text: stepText,
+              type: "text"
+            });
+          }
+        } else {
+          responseMessages.push(
+            ...toResponseMessages({
+              text: stepText,
+              tools: tools != null ? tools : {},
+              toolCalls: stepToolCalls,
+              toolResults: stepToolResults
+            })
+          );
+        }
+        const currentStepResult = {
+          stepType,
           text: stepText,
-          tools: tools != null ? tools : {},
           toolCalls: stepToolCalls,
-          toolResults: stepToolResults
-        [old lines 4293-4323 not rendered]
-          result,
-          doStreamSpan: doStreamSpan3,
-          startTimestampMs: startTimestamp2
-        } = await startStep({ responseMessages });
-        self.rawWarnings = result.warnings;
-        self.rawResponse = result.rawResponse;
-        addStepStream({
-          stream: result.stream,
-          startTimestamp: startTimestamp2,
-          doStreamSpan: doStreamSpan3,
-          currentStep: currentStep + 1,
-          responseMessages,
-          usage: combinedUsage,
-          stepType: nextStepType,
-          previousStepText: fullStepText,
-          stepRequest: result.request,
-          hasLeadingWhitespace: hasWhitespaceSuffix
-        });
-        return;
-      }
-      try {
-        controller.enqueue({
-          type: "finish",
-          finishReason: stepFinishReason,
-          usage: combinedUsage,
-          experimental_providerMetadata: stepProviderMetadata,
-          logprobs: stepLogProbs,
-          response: {
-            ...stepResponse
+          toolResults: stepToolResults,
+          finishReason: stepFinishReason,
+          usage: stepUsage,
+          warnings,
+          logprobs: stepLogProbs,
+          request: stepRequest,
+          response: {
+            ...stepResponse,
+            headers: rawResponse == null ? void 0 : rawResponse.headers,
+            // deep clone msgs to avoid mutating past messages in multi-step:
+            messages: JSON.parse(JSON.stringify(responseMessages))
+          },
+          experimental_providerMetadata: stepProviderMetadata,
+          isContinued: nextStepType === "continue"
+        };
+        stepResults.push(currentStepResult);
+        await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+        const combinedUsage = {
+          promptTokens: usage.promptTokens + stepUsage.promptTokens,
+          completionTokens: usage.completionTokens + stepUsage.completionTokens,
+          totalTokens: usage.totalTokens + stepUsage.totalTokens
+        };
+        if (nextStepType !== "done") {
+          await streamStep({
+            currentStep: currentStep + 1,
+            responseMessages,
+            usage: combinedUsage,
+            stepType: nextStepType,
+            previousStepText: fullStepText,
+            hasLeadingWhitespace: hasWhitespaceSuffix
+          });
+          return;
         }
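Spelled out, the flush logic above picks the next step type from the step budget, the finish reason, and the tool-call/result counts, then recurses into streamStep. A pure-function sketch of that decision (names are illustrative, not the bundle's):

type NextStepType = "done" | "continue" | "tool-result";

function decideNextStep(opts: {
  currentStep: number;
  maxSteps: number;
  continueSteps: boolean;
  finishReason: string;
  toolCallCount: number;
  toolResultCount: number;
}): NextStepType {
  if (opts.currentStep + 1 >= opts.maxSteps) return "done";
  // continuation only when the model ran out of length and made no tool calls
  if (opts.continueSteps && opts.finishReason === "length" && opts.toolCallCount === 0) {
    return "continue";
  }
  // another step only when every tool call already has a result
  if (opts.toolCallCount > 0 && opts.toolResultCount === opts.toolCallCount) {
    return "tool-result";
  }
  return "done";
}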
-        [old lines 4354-4363 not rendered]
+        try {
+          controller.enqueue({
+            type: "finish",
+            finishReason: stepFinishReason,
+            usage: combinedUsage,
+            experimental_providerMetadata: stepProviderMetadata,
+            logprobs: stepLogProbs,
+            response: {
+              ...stepResponse
+            }
+          });
+          self.stitchableStream.close();
+          rootSpan.setAttributes(
+            selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                "ai.response.finishReason": stepFinishReason,
+                "ai.response.text": { output: () => fullStepText },
+                "ai.response.toolCalls": {
+                  output: () => stepToolCallsJson
+                },
+                "ai.usage.promptTokens": combinedUsage.promptTokens,
+                "ai.usage.completionTokens": combinedUsage.completionTokens
+              }
+            })
+          );
+          self.usagePromise.resolve(combinedUsage);
+          self.finishReasonPromise.resolve(stepFinishReason);
+          self.textPromise.resolve(fullStepText);
+          self.toolCallsPromise.resolve(stepToolCalls);
+          self.providerMetadataPromise.resolve(stepProviderMetadata);
+          self.toolResultsPromise.resolve(stepToolResults);
+          self.requestPromise.resolve(stepRequest);
+          self.responsePromise.resolve({
+            ...stepResponse,
+            headers: rawResponse == null ? void 0 : rawResponse.headers,
+            messages: responseMessages
+          });
+          self.stepsPromise.resolve(stepResults);
+          self.warningsPromise.resolve(warnings != null ? warnings : []);
+          await (onFinish == null ? void 0 : onFinish({
+            finishReason: stepFinishReason,
+            logprobs: stepLogProbs,
+            usage: combinedUsage,
+            text: fullStepText,
+            toolCalls: stepToolCalls,
+            // The tool results are inferred as a never[] type, because they are
+            // optional and the execute method with an inferred result type is
+            // optional as well. Therefore we need to cast the toolResults to any.
+            // The type exposed to the users will be correctly inferred.
+            toolResults: stepToolResults,
+            request: stepRequest,
+            response: {
+              ...stepResponse,
+              headers: rawResponse == null ? void 0 : rawResponse.headers,
+              messages: responseMessages
             },
-            [old lines 4365-4398 not rendered]
-            messages: responseMessages
-          },
-          warnings,
-          experimental_providerMetadata: stepProviderMetadata,
-          steps: stepResults,
-          responseMessages
-        }));
-      } catch (error) {
-        controller.error(error);
-      } finally {
-        rootSpan.end();
-      }
-    }
-  })
-  )
+            warnings,
+            experimental_providerMetadata: stepProviderMetadata,
+            steps: stepResults
+          }));
+        } catch (error) {
+          controller.error(error);
+        } finally {
+          rootSpan.end();
+        }
+      }
+    })
+  )
+);
+      }
+      await streamStep({
+        currentStep: 0,
+        responseMessages: [],
+        usage: {
+          promptTokens: 0,
+          completionTokens: 0,
+          totalTokens: 0
+        },
+        previousStepText: "",
+        stepType: "initial",
+        hasLeadingWhitespace: false
+      });
+    }
+  }).catch((error) => {
+    self.stitchableStream.addStream(
+      new ReadableStream({
+        start(controller) {
+          controller.error(error);
+        }
+      })
     );
-    [old line 4415 not rendered]
-    addStepStream({
-      stream,
-      startTimestamp: startTimestampMs,
-      doStreamSpan,
-      currentStep: 0,
-      responseMessages: [],
-      usage: void 0,
-      stepType: "initial",
-      stepRequest: request,
-      hasLeadingWhitespace: false
+    self.stitchableStream.close();
   });
 }
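Note how the .catch above routes setup failures into the same stitched output: an immediately-erroring ReadableStream is appended and the stitchable stream is closed, so consumers observe the error on the stream they are already reading instead of via a detached rejection. A standalone sketch of the pattern, assuming only the stream/addStream/close shape used here (setUpPipeline is hypothetical):

declare function setUpPipeline(): Promise<void>;
declare const stitchable: {
  stream: ReadableStream<string>;
  addStream: (stream: ReadableStream<string>) => void;
  close: () => void;
};

setUpPipeline().catch((error) => {
  stitchable.addStream(
    new ReadableStream({
      start(controller) {
        controller.error(error); // the reader's next read() rejects with `error`
      },
    })
  );
  stitchable.close(); // no further inner streams will be stitched in
});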
+  get warnings() {
+    return this.warningsPromise.value;
+  }
+  get usage() {
+    return this.usagePromise.value;
+  }
+  get finishReason() {
+    return this.finishReasonPromise.value;
+  }
+  get experimental_providerMetadata() {
+    return this.providerMetadataPromise.value;
+  }
+  get text() {
+    return this.textPromise.value;
+  }
+  get toolCalls() {
+    return this.toolCallsPromise.value;
+  }
+  get toolResults() {
+    return this.toolResultsPromise.value;
+  }
+  get request() {
+    return this.requestPromise.value;
+  }
+  get response() {
+    return this.responsePromise.value;
+  }
+  get steps() {
+    return this.stepsPromise.value;
+  }
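Each getter above exposes a DelayedPromise that the flush handler resolves once the final step finishes, so callers can consume the stream and still await the aggregate values afterwards. Usage sketch (the model instance is assumed, e.g. from a provider package):

import { streamText } from "ai";

declare const model: Parameters<typeof streamText>[0]["model"]; // any provider model

const result = streamText({ model, prompt: "Write a haiku." });

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

// resolved by the flush handler above after the last step:
console.log(await result.finishReason); // "stop" | "length" | "tool-calls" | ...
console.log(await result.usage);        // usage combined across all steps
console.log((await result.steps).length);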
   /**
   Split out a new stream from the original stream.
   The original stream is replaced to allow for further splitting,
@@ -4434,8 +4479,8 @@ var DefaultStreamTextResult = class {
   However, the LLM results are expected to be small enough to not cause issues.
    */
   teeStream() {
-    const [stream1, stream2] = this.
-    this.
+    const [stream1, stream2] = this.stitchableStream.stream.tee();
+    this.stitchableStream.stream = stream2;
     return stream1;
   }
   get textStream() {
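teeStream hands out one branch of a tee() and keeps the other as the new source, so textStream and the other stream views can each be read independently while every branch still sees all chunks (at the cost of buffering for slow readers). The pattern in isolation (source stream assumed):

declare let current: ReadableStream<string>; // the stream being split

function teeOff(): ReadableStream<string> {
  const [branch, rest] = current.tee(); // both branches replay all chunks
  current = rest;                       // keep one side for the next consumer
  return branch;                        // hand the other side out
}

const a = teeOff();
const b = teeOff(); // a and b each receive every chunk of the original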
@@ -4475,12 +4520,12 @@ var DefaultStreamTextResult = class {
         const chunkType = chunk.type;
         switch (chunkType) {
           case "text-delta": {
-            controller.enqueue(
+            controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
             break;
           }
           case "tool-call-streaming-start": {
             controller.enqueue(
-              [old line 4483 not rendered]
+              formatDataStreamPart("tool_call_streaming_start", {
                 toolCallId: chunk.toolCallId,
                 toolName: chunk.toolName
               })
@@ -4489,7 +4534,7 @@ var DefaultStreamTextResult = class {
           }
           case "tool-call-delta": {
             controller.enqueue(
-              [old line 4492 not rendered]
+              formatDataStreamPart("tool_call_delta", {
                 toolCallId: chunk.toolCallId,
                 argsTextDelta: chunk.argsTextDelta
               })
@@ -4498,7 +4543,7 @@ var DefaultStreamTextResult = class {
           }
           case "tool-call": {
             controller.enqueue(
-              [old line 4501 not rendered]
+              formatDataStreamPart("tool_call", {
                 toolCallId: chunk.toolCallId,
                 toolName: chunk.toolName,
                 args: chunk.args
@@ -4508,7 +4553,7 @@ var DefaultStreamTextResult = class {
           }
           case "tool-result": {
             controller.enqueue(
-              [old line 4511 not rendered]
+              formatDataStreamPart("tool_result", {
                 toolCallId: chunk.toolCallId,
                 result: chunk.result
               })
@@ -4517,13 +4562,13 @@ var DefaultStreamTextResult = class {
           }
           case "error": {
             controller.enqueue(
-              [old line 4520 not rendered]
+              formatDataStreamPart("error", getErrorMessage3(chunk.error))
             );
             break;
           }
           case "step-finish": {
             controller.enqueue(
-              [old line 4526 not rendered]
+              formatDataStreamPart("finish_step", {
                 finishReason: chunk.finishReason,
                 usage: sendUsage ? {
                   promptTokens: chunk.usage.promptTokens,
@@ -4536,7 +4581,7 @@ var DefaultStreamTextResult = class {
           }
           case "finish": {
             controller.enqueue(
-              [old line 4539 not rendered]
+              formatDataStreamPart("finish_message", {
                 finishReason: chunk.finishReason,
                 usage: sendUsage ? {
                   promptTokens: chunk.usage.promptTokens,
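All of these branches now serialize through formatDataStreamPart, i.e. the data stream protocol where each part is a type code, a colon, a JSON payload, and a newline. For instance (text parts use code 0 in this protocol version):

import { formatDataStreamPart } from "ai"; // re-exported below from @ai-sdk/ui-utils

formatDataStreamPart("text", "Hello");
// -> '0:"Hello"\n'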
@@ -4806,7 +4851,7 @@ function magnitude(vector) {
 
 // streams/assistant-response.ts
 import {
-  [old line 4809 not rendered]
+  formatAssistantStreamPart
 } from "@ai-sdk/ui-utils";
 function AssistantResponse({ threadId, messageId }, process2) {
   const stream = new ReadableStream({
@@ -4815,17 +4860,21 @@ function AssistantResponse({ threadId, messageId }, process2) {
       const textEncoder = new TextEncoder();
       const sendMessage = (message) => {
         controller.enqueue(
-          textEncoder.encode(
+          textEncoder.encode(
+            formatAssistantStreamPart("assistant_message", message)
+          )
         );
       };
       const sendDataMessage = (message) => {
         controller.enqueue(
-          textEncoder.encode(
+          textEncoder.encode(
+            formatAssistantStreamPart("data_message", message)
+          )
         );
       };
       const sendError = (errorMessage) => {
         controller.enqueue(
-          textEncoder.encode(
+          textEncoder.encode(formatAssistantStreamPart("error", errorMessage))
         );
       };
       const forwardStream = async (stream2) => {
@@ -4836,7 +4885,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
           case "thread.message.created": {
             controller.enqueue(
               textEncoder.encode(
-                [old line 4839 not rendered]
+                formatAssistantStreamPart("assistant_message", {
                   id: value.data.id,
                   role: "assistant",
                   content: [{ type: "text", text: { value: "" } }]
@@ -4850,7 +4899,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
             if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
               controller.enqueue(
                 textEncoder.encode(
-                  [old line 4853 not rendered]
+                  formatAssistantStreamPart("text", content.text.value)
                 )
               );
             }
@@ -4867,7 +4916,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
       };
       controller.enqueue(
         textEncoder.encode(
-          [old line 4870 not rendered]
+          formatAssistantStreamPart("assistant_control_data", {
             threadId,
             messageId
           })
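The AssistantResponse changes above are mechanical: every inline encoding call now goes through the imported formatAssistantStreamPart (process2 is just the transpiled name of the process callback). For orientation, a sketched call site in a route handler; the OpenAI client setup is an assumption, not part of this package:

import { AssistantResponse } from "ai";
import OpenAI from "openai";

const openai = new OpenAI();

export async function POST(req: Request) {
  const { threadId, message } = await req.json();

  // add the user message to the thread, then stream the run back
  const createdMessage = await openai.beta.threads.messages.create(threadId, {
    role: "user",
    content: message,
  });

  return AssistantResponse(
    { threadId, messageId: createdMessage.id },
    async ({ forwardStream }) => {
      // forwardStream pipes the run events through the encoder shown above
      await forwardStream(
        openai.beta.threads.runs.stream(threadId, { assistant_id: "asst_..." }) // id illustrative
      );
    }
  );
}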
@@ -4932,7 +4981,7 @@ function createCallbacksTransformer(callbacks = {}) {
 }
 
 // streams/stream-data.ts
-import {
+import { formatDataStreamPart as formatDataStreamPart2 } from "@ai-sdk/ui-utils";
 
 // util/constants.ts
 var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
@@ -4984,7 +5033,7 @@ var StreamData = class {
       throw new Error("Stream controller is not initialized.");
     }
     this.controller.enqueue(
-      this.encoder.encode(
+      this.encoder.encode(formatDataStreamPart2("data", [value]))
     );
   }
   appendMessageAnnotation(value) {
@@ -4995,7 +5044,7 @@ var StreamData = class {
       throw new Error("Stream controller is not initialized.");
     }
     this.controller.enqueue(
-      this.encoder.encode(
+      this.encoder.encode(formatDataStreamPart2("message_annotations", [value]))
     );
   }
 };
@@ -5005,7 +5054,7 @@ function createStreamDataTransformer() {
   return new TransformStream({
     transform: async (chunk, controller) => {
       const message = decoder.decode(chunk);
-      controller.enqueue(encoder.encode(
+      controller.enqueue(encoder.encode(formatDataStreamPart2("text", message)));
     }
   });
 }
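StreamData likewise now delegates its wire format to the aliased formatDataStreamPart2 instead of formatting inline; usage is unchanged. The HANGING_STREAM_WARNING_TIME_MS constant above appears to back the 15-second "stream is hanging" warning for instances that are never closed. Usage sketch:

import { StreamData } from "ai";

const data = new StreamData();
data.append({ status: "started" });                // emitted as a "data" part
data.appendMessageAnnotation({ confidence: 0.9 }); // emitted as a "message_annotations" part

// ... merge `data` into a streaming response ...

data.close(); // required; otherwise the 15 s hanging-stream warning fires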
@@ -5138,14 +5187,16 @@ export {
   experimental_createProviderRegistry,
   experimental_customProvider,
   experimental_wrapLanguageModel,
-  [old line 5141 not rendered]
+  formatAssistantStreamPart2 as formatAssistantStreamPart,
+  formatDataStreamPart3 as formatDataStreamPart,
   generateId2 as generateId,
   generateObject,
   generateText,
   jsonSchema,
-  [old lines 5146-5148 not rendered]
+  parseAssistantStreamPart,
+  parseDataStreamPart,
+  processDataStream,
+  processTextStream,
   streamObject,
   streamText,
   tool
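Net effect of this final hunk: the stream-part helpers are split into assistant- and data-stream variants (formatAssistantStreamPart, formatDataStreamPart, parseAssistantStreamPart, parseDataStreamPart), and the processing helpers are re-exported from @ai-sdk/ui-utils. A consumption sketch for one of the new exports; the handler names follow the part types seen above, so treat the exact callback set as an assumption:

import { processDataStream } from "ai";

const response = await fetch("/api/chat", { method: "POST" });

await processDataStream({
  stream: response.body!, // ReadableStream<Uint8Array> of data-protocol parts
  onTextPart: (text) => process.stdout.write(text),
  onDataPart: (data) => console.log("data:", data),
  onFinishMessagePart: ({ finishReason }) => console.log("finished:", finishReason),
});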