ai 4.0.0-canary.10 → 4.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/README.md +10 -14
- package/dist/index.d.mts +6 -6
- package/dist/index.d.ts +6 -6
- package/dist/index.js +1073 -1036
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1076 -1039
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs CHANGED
```diff
@@ -6,10 +6,12 @@ var __export = (target, all) => {
 
 // streams/index.ts
 import {
-… (4 lines)
+  formatAssistantStreamPart as formatAssistantStreamPart2,
+  formatDataStreamPart as formatDataStreamPart3,
+  parseAssistantStreamPart,
+  parseDataStreamPart,
+  processDataStream,
+  processTextStream
 } from "@ai-sdk/ui-utils";
 import { generateId as generateId2 } from "@ai-sdk/provider-utils";
 
@@ -2293,21 +2295,6 @@ import {
   parsePartialJson
 } from "@ai-sdk/ui-utils";
 
-// util/create-resolvable-promise.ts
-function createResolvablePromise() {
-  let resolve;
-  let reject;
-  const promise = new Promise((res, rej) => {
-    resolve = res;
-    reject = rej;
-  });
-  return {
-    promise,
-    resolve,
-    reject
-  };
-}
-
 // util/delayed-promise.ts
 var DelayedPromise = class {
   constructor() {
```
```diff
@@ -2399,9 +2386,91 @@ function writeToServerResponse({
   read();
 }
 
+// util/create-resolvable-promise.ts
+function createResolvablePromise() {
+  let resolve;
+  let reject;
+  const promise = new Promise((res, rej) => {
+    resolve = res;
+    reject = rej;
+  });
+  return {
+    promise,
+    resolve,
+    reject
+  };
+}
+
+// core/util/create-stitchable-stream.ts
+function createStitchableStream() {
+  let innerStreamReaders = [];
+  let controller = null;
+  let isClosed = false;
+  let waitForNewStream = createResolvablePromise();
+  const processPull = async () => {
+    if (isClosed && innerStreamReaders.length === 0) {
+      controller == null ? void 0 : controller.close();
+      return;
+    }
+    if (innerStreamReaders.length === 0) {
+      waitForNewStream = createResolvablePromise();
+      await waitForNewStream.promise;
+      return processPull();
+    }
+    try {
+      const { value, done } = await innerStreamReaders[0].read();
+      if (done) {
+        innerStreamReaders.shift();
+        if (innerStreamReaders.length > 0) {
+          await processPull();
+        } else if (isClosed) {
+          controller == null ? void 0 : controller.close();
+        }
+      } else {
+        controller == null ? void 0 : controller.enqueue(value);
+      }
+    } catch (error) {
+      controller == null ? void 0 : controller.error(error);
+      innerStreamReaders.shift();
+      if (isClosed && innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+  return {
+    stream: new ReadableStream({
+      start(controllerParam) {
+        controller = controllerParam;
+      },
+      pull: processPull,
+      async cancel() {
+        for (const reader of innerStreamReaders) {
+          await reader.cancel();
+        }
+        innerStreamReaders = [];
+        isClosed = true;
+      }
+    }),
+    addStream: (innerStream) => {
+      if (isClosed) {
+        throw new Error("Cannot add inner stream: outer stream is closed");
+      }
+      innerStreamReaders.push(innerStream.getReader());
+      waitForNewStream.resolve();
+    },
+    close: () => {
+      isClosed = true;
+      waitForNewStream.resolve();
+      if (innerStreamReaders.length === 0) {
+        controller == null ? void 0 : controller.close();
+      }
+    }
+  };
+}
+
 // core/generate-object/stream-object.ts
 var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
-async function streamObject({
+function streamObject({
   model,
   schema: inputSchema,
   schemaName,
```
```diff
@@ -2435,400 +2504,433 @@ async function streamObject({
   if (outputStrategy.type === "no-schema" && mode === void 0) {
     mode = "json";
   }
-… (1 line)
+  return new DefaultStreamObjectResult({
     model,
     telemetry,
     headers,
-    settings
-… (14 lines)
-      "ai.prompt": {
-        input: () => JSON.stringify({ system, prompt, messages })
-      },
-      "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-      "ai.schema.name": schemaName,
-      "ai.schema.description": schemaDescription,
-      "ai.settings.output": outputStrategy.type,
-      "ai.settings.mode": mode
-    }
-  }),
-  tracer,
-  endWhenDone: false,
-  fn: async (rootSpan) => {
-    if (mode === "auto" || mode == null) {
-      mode = model.defaultObjectGenerationMode;
-    }
-    let callOptions;
-    let transformer;
-    switch (mode) {
-      case "json": {
-        const standardizedPrompt = standardizePrompt({
-          prompt: {
-            system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
-              prompt: system,
-              schema: outputStrategy.jsonSchema
-            }),
-            prompt,
-            messages
-          },
-          tools: void 0
-        });
-        callOptions = {
-          mode: {
-            type: "object-json",
-            schema: outputStrategy.jsonSchema,
-            name: schemaName,
-            description: schemaDescription
-          },
-          ...prepareCallSettings(settings),
-          inputFormat: standardizedPrompt.type,
-          prompt: await convertToLanguageModelPrompt({
-            prompt: standardizedPrompt,
-            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: model.supportsUrl
-          }),
-          providerMetadata,
-          abortSignal,
-          headers
-        };
-        transformer = {
-          transform: (chunk, controller) => {
-            switch (chunk.type) {
-              case "text-delta":
-                controller.enqueue(chunk.textDelta);
-                break;
-              case "response-metadata":
-              case "finish":
-              case "error":
-                controller.enqueue(chunk);
-                break;
-            }
-          }
-        };
-        break;
-      }
-      case "tool": {
-        const standardizedPrompt = standardizePrompt({
-          prompt: { system, prompt, messages },
-          tools: void 0
-        });
-        callOptions = {
-          mode: {
-            type: "object-tool",
-            tool: {
-              type: "function",
-              name: schemaName != null ? schemaName : "json",
-              description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-              parameters: outputStrategy.jsonSchema
-            }
-          },
-          ...prepareCallSettings(settings),
-          inputFormat: standardizedPrompt.type,
-          prompt: await convertToLanguageModelPrompt({
-            prompt: standardizedPrompt,
-            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: model.supportsUrl
-          }),
-          providerMetadata,
-          abortSignal,
-          headers
-        };
-        transformer = {
-          transform(chunk, controller) {
-            switch (chunk.type) {
-              case "tool-call-delta":
-                controller.enqueue(chunk.argsTextDelta);
-                break;
-              case "response-metadata":
-              case "finish":
-              case "error":
-                controller.enqueue(chunk);
-                break;
-            }
-          }
-        };
-        break;
-      }
-      case void 0: {
-        throw new Error(
-          "Model does not have a default object generation mode."
-        );
-      }
-      default: {
-        const _exhaustiveCheck = mode;
-        throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
-      }
-    }
-    const {
-      result: { stream, warnings, rawResponse, request },
-      doStreamSpan,
-      startTimestampMs
-    } = await retry(
-      () => recordSpan({
-        name: "ai.streamObject.doStream",
-        attributes: selectTelemetryAttributes({
-          telemetry,
-          attributes: {
-            ...assembleOperationName({
-              operationId: "ai.streamObject.doStream",
-              telemetry
-            }),
-            ...baseTelemetryAttributes,
-            "ai.prompt.format": {
-              input: () => callOptions.inputFormat
-            },
-            "ai.prompt.messages": {
-              input: () => JSON.stringify(callOptions.prompt)
-            },
-            "ai.settings.mode": mode,
-            // standardized gen-ai llm span attributes:
-            "gen_ai.system": model.provider,
-            "gen_ai.request.model": model.modelId,
-            "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-            "gen_ai.request.max_tokens": settings.maxTokens,
-            "gen_ai.request.presence_penalty": settings.presencePenalty,
-            "gen_ai.request.temperature": settings.temperature,
-            "gen_ai.request.top_k": settings.topK,
-            "gen_ai.request.top_p": settings.topP
-          }
-        }),
-        tracer,
-        endWhenDone: false,
-        fn: async (doStreamSpan2) => ({
-          startTimestampMs: now2(),
-          doStreamSpan: doStreamSpan2,
-          result: await model.doStream(callOptions)
-        })
-      })
-    );
-    return new DefaultStreamObjectResult({
-      outputStrategy,
-      stream: stream.pipeThrough(new TransformStream(transformer)),
-      warnings,
-      rawResponse,
-      request: request != null ? request : {},
-      onFinish,
-      rootSpan,
-      doStreamSpan,
-      telemetry,
-      startTimestampMs,
-      modelId: model.modelId,
-      now: now2,
-      currentDate,
-      generateId: generateId3
-    });
-  }
+    settings,
+    maxRetries,
+    abortSignal,
+    outputStrategy,
+    system,
+    prompt,
+    messages,
+    schemaName,
+    schemaDescription,
+    inputProviderMetadata: providerMetadata,
+    mode,
+    onFinish,
+    generateId: generateId3,
+    currentDate,
+    now: now2
   });
 }
 var DefaultStreamObjectResult = class {
   constructor({
-… (4 lines)
+    model,
+    headers,
+    telemetry,
+    settings,
+    maxRetries,
+    abortSignal,
     outputStrategy,
+    system,
+    prompt,
+    messages,
+    schemaName,
+    schemaDescription,
+    inputProviderMetadata,
+    mode,
     onFinish,
-… (1 line)
-    doStreamSpan,
-    telemetry,
-    startTimestampMs,
-    modelId,
-    now: now2,
+    generateId: generateId3,
     currentDate,
-… (1 line)
+    now: now2
   }) {
-    this.warnings = warnings;
-    this.outputStrategy = outputStrategy;
-    this.request = Promise.resolve(request);
     this.objectPromise = new DelayedPromise();
-… (1 line)
-    this.
-… (1 line)
-    this.
-… (10 lines)
-    let accumulatedText = "";
-    let textDelta = "";
-    let response = {
-      id: generateId3(),
-      timestamp: currentDate(),
-      modelId
-    };
-    let latestObjectJson = void 0;
-    let latestObject = void 0;
-    let isFirstChunk = true;
-    let isFirstDelta = true;
+    this.usagePromise = new DelayedPromise();
+    this.providerMetadataPromise = new DelayedPromise();
+    this.warningsPromise = new DelayedPromise();
+    this.requestPromise = new DelayedPromise();
+    this.responsePromise = new DelayedPromise();
+    this.stitchableStream = createStitchableStream();
+    const baseTelemetryAttributes = getBaseTelemetryAttributes({
+      model,
+      telemetry,
+      headers,
+      settings: { ...settings, maxRetries }
+    });
+    const tracer = getTracer(telemetry);
+    const retry = retryWithExponentialBackoff({ maxRetries });
     const self = this;
-… (9 lines)
+    recordSpan({
+      name: "ai.streamObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.streamObject",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type,
+          "ai.settings.mode": mode
+        }
+      }),
+      tracer,
+      endWhenDone: false,
+      fn: async (rootSpan) => {
+        if (mode === "auto" || mode == null) {
+          mode = model.defaultObjectGenerationMode;
+        }
+        let callOptions;
+        let transformer;
+        switch (mode) {
+          case "json": {
+            const standardizedPrompt = standardizePrompt({
+              prompt: {
+                system: outputStrategy.jsonSchema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs ? system : injectJsonInstruction({
+                  prompt: system,
+                  schema: outputStrategy.jsonSchema
+                }),
+                prompt,
+                messages
+              },
+              tools: void 0
             });
-… (2 lines)
+            callOptions = {
+              mode: {
+                type: "object-json",
+                schema: outputStrategy.jsonSchema,
+                name: schemaName,
+                description: schemaDescription
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: await convertToLanguageModelPrompt({
+                prompt: standardizedPrompt,
+                modelSupportsImageUrls: model.supportsImageUrls,
+                modelSupportsUrl: model.supportsUrl
+              }),
+              providerMetadata: inputProviderMetadata,
+              abortSignal,
+              headers
+            };
+            transformer = {
+              transform: (chunk, controller) => {
+                switch (chunk.type) {
+                  case "text-delta":
+                    controller.enqueue(chunk.textDelta);
+                    break;
+                  case "response-metadata":
+                  case "finish":
+                  case "error":
+                    controller.enqueue(chunk);
+                    break;
+                }
+              }
+            };
+            break;
+          }
+          case "tool": {
+            const standardizedPrompt = standardizePrompt({
+              prompt: { system, prompt, messages },
+              tools: void 0
             });
+            callOptions = {
+              mode: {
+                type: "object-tool",
+                tool: {
+                  type: "function",
+                  name: schemaName != null ? schemaName : "json",
+                  description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
+                  parameters: outputStrategy.jsonSchema
+                }
+              },
+              ...prepareCallSettings(settings),
+              inputFormat: standardizedPrompt.type,
+              prompt: await convertToLanguageModelPrompt({
+                prompt: standardizedPrompt,
+                modelSupportsImageUrls: model.supportsImageUrls,
+                modelSupportsUrl: model.supportsUrl
+              }),
+              providerMetadata: inputProviderMetadata,
+              abortSignal,
+              headers
+            };
+            transformer = {
+              transform(chunk, controller) {
+                switch (chunk.type) {
+                  case "tool-call-delta":
+                    controller.enqueue(chunk.argsTextDelta);
+                    break;
+                  case "response-metadata":
+                  case "finish":
+                  case "error":
+                    controller.enqueue(chunk);
+                    break;
+                }
+              }
+            };
+            break;
           }
-… (18 lines)
+          case void 0: {
+            throw new Error(
+              "Model does not have a default object generation mode."
+            );
+          }
+          default: {
+            const _exhaustiveCheck = mode;
+            throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+          }
+        }
+        const {
+          result: { stream, warnings, rawResponse, request },
+          doStreamSpan,
+          startTimestampMs
+        } = await retry(
+          () => recordSpan({
+            name: "ai.streamObject.doStream",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.streamObject.doStream",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": {
+                  input: () => callOptions.inputFormat
+                },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(callOptions.prompt)
+                },
+                "ai.settings.mode": mode,
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                "gen_ai.request.max_tokens": settings.maxTokens,
+                "gen_ai.request.presence_penalty": settings.presencePenalty,
+                "gen_ai.request.temperature": settings.temperature,
+                "gen_ai.request.top_k": settings.topK,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
+            tracer,
+            endWhenDone: false,
+            fn: async (doStreamSpan2) => ({
+              startTimestampMs: now2(),
+              doStreamSpan: doStreamSpan2,
+              result: await model.doStream(callOptions)
+            })
+          })
+        );
+        self.requestPromise.resolve(request != null ? request : {});
+        let usage;
+        let finishReason;
+        let providerMetadata;
+        let object;
+        let error;
+        let accumulatedText = "";
+        let textDelta = "";
+        let response = {
+          id: generateId3(),
+          timestamp: currentDate(),
+          modelId: model.modelId
+        };
+        let latestObjectJson = void 0;
+        let latestObject = void 0;
+        let isFirstChunk = true;
+        let isFirstDelta = true;
+        const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
+          new TransformStream({
+            async transform(chunk, controller) {
+              var _a11, _b, _c;
+              if (isFirstChunk) {
+                const msToFirstChunk = now2() - startTimestampMs;
+                isFirstChunk = false;
+                doStreamSpan.addEvent("ai.stream.firstChunk", {
+                  "ai.stream.msToFirstChunk": msToFirstChunk
                 });
-… (2 lines)
-            textDelta: validationResult.value.textDelta
+                doStreamSpan.setAttributes({
+                  "ai.stream.msToFirstChunk": msToFirstChunk
                 });
-            textDelta = "";
-            isFirstDelta = false;
-          }
-        }
-        return;
-      }
-      switch (chunk.type) {
-        case "response-metadata": {
-          response = {
-            id: (_a11 = chunk.id) != null ? _a11 : response.id,
-            timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
-            modelId: (_c = chunk.modelId) != null ? _c : response.modelId
-          };
-          break;
-        }
-        case "finish": {
-          if (textDelta !== "") {
-            controller.enqueue({ type: "text-delta", textDelta });
               }
-… (17 lines)
+              if (typeof chunk === "string") {
+                accumulatedText += chunk;
+                textDelta += chunk;
+                const { value: currentObjectJson, state: parseState } = parsePartialJson(accumulatedText);
+                if (currentObjectJson !== void 0 && !isDeepEqualData(latestObjectJson, currentObjectJson)) {
+                  const validationResult = outputStrategy.validatePartialResult({
+                    value: currentObjectJson,
+                    textDelta,
+                    latestObject,
+                    isFirstDelta,
+                    isFinalDelta: parseState === "successful-parse"
+                  });
+                  if (validationResult.success && !isDeepEqualData(
+                    latestObject,
+                    validationResult.value.partial
+                  )) {
+                    latestObjectJson = currentObjectJson;
+                    latestObject = validationResult.value.partial;
+                    controller.enqueue({
+                      type: "object",
+                      object: latestObject
+                    });
+                    controller.enqueue({
+                      type: "text-delta",
+                      textDelta: validationResult.value.textDelta
+                    });
+                    textDelta = "";
+                    isFirstDelta = false;
+                  }
+                }
+                return;
               }
-… (8 lines)
-      // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
-      async flush(controller) {
-        try {
-          const finalUsage = usage != null ? usage : {
-            promptTokens: NaN,
-            completionTokens: NaN,
-            totalTokens: NaN
-          };
-          doStreamSpan.setAttributes(
-            selectTelemetryAttributes({
-              telemetry,
-              attributes: {
-                "ai.response.finishReason": finishReason,
-                "ai.response.object": {
-                  output: () => JSON.stringify(object)
-                },
-                "ai.response.id": response.id,
-                "ai.response.model": response.modelId,
-                "ai.response.timestamp": response.timestamp.toISOString(),
-                "ai.usage.promptTokens": finalUsage.promptTokens,
-                "ai.usage.completionTokens": finalUsage.completionTokens,
-                // standardized gen-ai llm span attributes:
-                "gen_ai.response.finish_reasons": [finishReason],
-                "gen_ai.response.id": response.id,
-                "gen_ai.response.model": response.modelId,
-                "gen_ai.usage.input_tokens": finalUsage.promptTokens,
-                "gen_ai.usage.output_tokens": finalUsage.completionTokens
+              switch (chunk.type) {
+                case "response-metadata": {
+                  response = {
+                    id: (_a11 = chunk.id) != null ? _a11 : response.id,
+                    timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+                    modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+                  };
+                  break;
                 }
-… (11 lines)
+                case "finish": {
+                  if (textDelta !== "") {
+                    controller.enqueue({ type: "text-delta", textDelta });
+                  }
+                  finishReason = chunk.finishReason;
+                  usage = calculateLanguageModelUsage(chunk.usage);
+                  providerMetadata = chunk.providerMetadata;
+                  controller.enqueue({ ...chunk, usage, response });
+                  self.usagePromise.resolve(usage);
+                  self.providerMetadataPromise.resolve(providerMetadata);
+                  self.responsePromise.resolve({
+                    ...response,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
+                  });
+                  const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
+                  if (validationResult.success) {
+                    object = validationResult.value;
+                    self.objectPromise.resolve(object);
+                  } else {
+                    error = validationResult.error;
+                    self.objectPromise.reject(error);
                  }
+                  break;
                }
-… (17 lines)
+                default: {
+                  controller.enqueue(chunk);
+                  break;
+                }
+              }
+            },
+            // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+            async flush(controller) {
+              try {
+                const finalUsage = usage != null ? usage : {
+                  promptTokens: NaN,
+                  completionTokens: NaN,
+                  totalTokens: NaN
+                };
+                doStreamSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.response.finishReason": finishReason,
+                      "ai.response.object": {
+                        output: () => JSON.stringify(object)
+                      },
+                      "ai.response.id": response.id,
+                      "ai.response.model": response.modelId,
+                      "ai.response.timestamp": response.timestamp.toISOString(),
+                      "ai.usage.promptTokens": finalUsage.promptTokens,
+                      "ai.usage.completionTokens": finalUsage.completionTokens,
+                      // standardized gen-ai llm span attributes:
+                      "gen_ai.response.finish_reasons": [finishReason],
+                      "gen_ai.response.id": response.id,
+                      "gen_ai.response.model": response.modelId,
+                      "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+                      "gen_ai.usage.output_tokens": finalUsage.completionTokens
+                    }
+                  })
+                );
+                doStreamSpan.end();
+                rootSpan.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.usage.promptTokens": finalUsage.promptTokens,
+                      "ai.usage.completionTokens": finalUsage.completionTokens,
+                      "ai.response.object": {
+                        output: () => JSON.stringify(object)
+                      }
+                    }
+                  })
+                );
+                await (onFinish == null ? void 0 : onFinish({
+                  usage: finalUsage,
+                  object,
+                  error,
+                  response: {
+                    ...response,
+                    headers: rawResponse == null ? void 0 : rawResponse.headers
+                  },
+                  warnings,
+                  experimental_providerMetadata: providerMetadata
+                }));
+              } catch (error2) {
+                controller.error(error2);
+              } finally {
+                rootSpan.end();
+              }
+            }
+          })
+        );
+        self.stitchableStream.addStream(transformedStream);
+      }
+    }).catch((error) => {
+      self.stitchableStream.addStream(
+        new ReadableStream({
+          start(controller) {
+            controller.error(error);
           }
-        }
-… (1 line)
-      )
+        })
+      );
+    }).finally(() => {
+      self.stitchableStream.close();
+    });
+    this.outputStrategy = outputStrategy;
   }
   get object() {
     return this.objectPromise.value;
   }
+  get usage() {
+    return this.usagePromise.value;
+  }
+  get experimental_providerMetadata() {
+    return this.providerMetadataPromise.value;
+  }
+  get warnings() {
+    return this.warningsPromise.value;
+  }
+  get request() {
+    return this.requestPromise.value;
+  }
+  get response() {
+    return this.responsePromise.value;
+  }
   get partialObjectStream() {
-    return createAsyncIterableStream(this.
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         switch (chunk.type) {
           case "object":
```
```diff
@@ -2849,10 +2951,12 @@ var DefaultStreamObjectResult = class {
     });
   }
   get elementStream() {
-    return this.outputStrategy.createElementStream(
+    return this.outputStrategy.createElementStream(
+      this.stitchableStream.stream
+    );
   }
   get textStream() {
-    return createAsyncIterableStream(this.
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         switch (chunk.type) {
           case "text-delta":
@@ -2873,7 +2977,7 @@ var DefaultStreamObjectResult = class {
     });
   }
   get fullStream() {
-    return createAsyncIterableStream(this.
+    return createAsyncIterableStream(this.stitchableStream.stream, {
       transform(chunk, controller) {
         controller.enqueue(chunk);
       }
```
```diff
@@ -3474,69 +3578,7 @@ var DefaultGenerateTextResult = class {
 
 // core/generate-text/stream-text.ts
 import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
-import {
-… (1 line)
-// core/util/create-stitchable-stream.ts
-function createStitchableStream() {
-  let innerStreamReaders = [];
-  let controller = null;
-  let isClosed = false;
-  const processPull = async () => {
-    if (isClosed && innerStreamReaders.length === 0) {
-      controller == null ? void 0 : controller.close();
-      return;
-    }
-    if (innerStreamReaders.length === 0) {
-      return;
-    }
-    try {
-      const { value, done } = await innerStreamReaders[0].read();
-      if (done) {
-        innerStreamReaders.shift();
-        if (innerStreamReaders.length > 0) {
-          await processPull();
-        } else if (isClosed) {
-          controller == null ? void 0 : controller.close();
-        }
-      } else {
-        controller == null ? void 0 : controller.enqueue(value);
-      }
-    } catch (error) {
-      controller == null ? void 0 : controller.error(error);
-      innerStreamReaders.shift();
-      if (isClosed && innerStreamReaders.length === 0) {
-        controller == null ? void 0 : controller.close();
-      }
-    }
-  };
-  return {
-    stream: new ReadableStream({
-      start(controllerParam) {
-        controller = controllerParam;
-      },
-      pull: processPull,
-      async cancel() {
-        for (const reader of innerStreamReaders) {
-          await reader.cancel();
-        }
-        innerStreamReaders = [];
-        isClosed = true;
-      }
-    }),
-    addStream: (innerStream) => {
-      if (isClosed) {
-        throw new Error("Cannot add inner stream: outer stream is closed");
-      }
-      innerStreamReaders.push(innerStream.getReader());
-    },
-    close: () => {
-      isClosed = true;
-      if (innerStreamReaders.length === 0) {
-        controller == null ? void 0 : controller.close();
-      }
-    }
-  };
-}
+import { formatDataStreamPart } from "@ai-sdk/ui-utils";
 
 // core/util/merge-streams.ts
 function mergeStreams(stream1, stream2) {
@@ -3821,7 +3863,7 @@ function runToolsTransformation({
 
 // core/generate-text/stream-text.ts
 var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt", size: 24 });
-async function streamText({
+function streamText({
   model,
   tools,
   toolChoice,
```
@@ -3837,593 +3879,582 @@ async function streamText({
|
|
3837
3879
|
experimental_providerMetadata: providerMetadata,
|
3838
3880
|
experimental_toolCallStreaming: toolCallStreaming = false,
|
3839
3881
|
experimental_activeTools: activeTools,
|
3840
|
-
onChunk,
|
3841
|
-
onFinish,
|
3842
|
-
onStepFinish,
|
3843
|
-
_internal: {
|
3844
|
-
now: now2 = now,
|
3845
|
-
generateId: generateId3 = originalGenerateId4,
|
3846
|
-
currentDate = () => /* @__PURE__ */ new Date()
|
3847
|
-
} = {},
|
3848
|
-
...settings
|
3849
|
-
}) {
|
3850
|
-
|
3851
|
-
|
3852
|
-
|
3853
|
-
|
3854
|
-
|
3855
|
-
|
3856
|
-
|
3857
|
-
|
3858
|
-
|
3859
|
-
|
3860
|
-
|
3861
|
-
|
3862
|
-
|
3863
|
-
|
3864
|
-
|
3865
|
-
|
3866
|
-
|
3867
|
-
|
3868
|
-
|
3869
|
-
|
3870
|
-
|
3871
|
-
|
3872
|
-
|
3873
|
-
...assembleOperationName({ operationId: "ai.streamText", telemetry }),
|
3874
|
-
...baseTelemetryAttributes,
|
3875
|
-
// specific settings that only make sense on the outer level:
|
3876
|
-
"ai.prompt": {
|
3877
|
-
input: () => JSON.stringify({ system, prompt, messages })
|
3878
|
-
},
|
3879
|
-
"ai.settings.maxSteps": maxSteps
|
3880
|
-
}
|
3881
|
-
}),
|
3882
|
-
tracer,
|
3883
|
-
endWhenDone: false,
|
3884
|
-
fn: async (rootSpan) => {
|
3885
|
-
const retry = retryWithExponentialBackoff({ maxRetries });
|
3886
|
-
const startStep = async ({
|
3887
|
-
responseMessages
|
3888
|
-
}) => {
|
3889
|
-
const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
|
3890
|
-
const promptMessages = await convertToLanguageModelPrompt({
|
3891
|
-
prompt: {
|
3892
|
-
type: promptFormat,
|
3893
|
-
system: initialPrompt.system,
|
3894
|
-
messages: [...initialPrompt.messages, ...responseMessages]
|
3895
|
-
},
|
3896
|
-
modelSupportsImageUrls: model.supportsImageUrls,
|
3897
|
-
modelSupportsUrl: model.supportsUrl
|
3898
|
-
});
|
3899
|
-
const mode = {
|
3900
|
-
type: "regular",
|
3901
|
-
...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
|
3902
|
-
};
|
3903
|
-
const {
|
3904
|
-
result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
|
3905
|
-
doStreamSpan: doStreamSpan2,
|
3906
|
-
startTimestampMs: startTimestampMs2
|
3907
|
-
} = await retry(
|
3908
|
-
() => recordSpan({
|
3909
|
-
name: "ai.streamText.doStream",
|
3910
|
-
attributes: selectTelemetryAttributes({
|
3911
|
-
telemetry,
|
3912
|
-
attributes: {
|
3913
|
-
...assembleOperationName({
|
3914
|
-
operationId: "ai.streamText.doStream",
|
3915
|
-
telemetry
|
3916
|
-
}),
|
3917
|
-
...baseTelemetryAttributes,
|
3918
|
-
"ai.prompt.format": {
|
3919
|
-
input: () => promptFormat
|
3920
|
-
},
|
3921
|
-
"ai.prompt.messages": {
|
3922
|
-
input: () => JSON.stringify(promptMessages)
|
3923
|
-
},
|
3924
|
-
"ai.prompt.tools": {
|
3925
|
-
// convert the language model level tools:
|
3926
|
-
input: () => {
|
3927
|
-
var _a11;
|
3928
|
-
return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
|
3929
|
-
}
|
3930
|
-
},
|
3931
|
-
"ai.prompt.toolChoice": {
|
3932
|
-
input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
|
3933
|
-
},
|
3934
|
-
// standardized gen-ai llm span attributes:
|
3935
|
-
"gen_ai.system": model.provider,
|
3936
|
-
"gen_ai.request.model": model.modelId,
|
3937
|
-
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
3938
|
-
"gen_ai.request.max_tokens": settings.maxTokens,
|
3939
|
-
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
3940
|
-
"gen_ai.request.stop_sequences": settings.stopSequences,
|
3941
|
-
"gen_ai.request.temperature": settings.temperature,
|
3942
|
-
"gen_ai.request.top_k": settings.topK,
|
3943
|
-
"gen_ai.request.top_p": settings.topP
|
3944
|
-
}
|
3945
|
-
}),
|
3946
|
-
tracer,
|
3947
|
-
endWhenDone: false,
|
3948
|
-
fn: async (doStreamSpan3) => ({
|
3949
|
-
startTimestampMs: now2(),
|
3950
|
-
// get before the call
|
3951
|
-
doStreamSpan: doStreamSpan3,
|
3952
|
-
result: await model.doStream({
|
3953
|
-
mode,
|
3954
|
-
...prepareCallSettings(settings),
|
3955
|
-
inputFormat: promptFormat,
|
3956
|
-
prompt: promptMessages,
|
3957
|
-
providerMetadata,
|
3958
|
-
abortSignal,
|
3959
|
-
headers
|
3960
|
-
})
|
3961
|
-
})
|
3962
|
-
})
|
3963
|
-
);
|
3964
|
-
return {
|
3965
|
-
result: {
|
3966
|
-
stream: runToolsTransformation({
|
3967
|
-
tools,
|
3968
|
-
generatorStream: stream2,
|
3969
|
-
toolCallStreaming,
|
3970
|
-
tracer,
|
3971
|
-
telemetry,
|
3972
|
-
abortSignal
|
3973
|
-
}),
|
3974
|
-
warnings: warnings2,
|
3975
|
-
request: request2 != null ? request2 : {},
|
3976
|
-
rawResponse: rawResponse2
|
3977
|
-
},
|
3978
|
-
doStreamSpan: doStreamSpan2,
|
3979
|
-
startTimestampMs: startTimestampMs2
|
3980
|
-
};
|
3981
|
-
};
|
3982
|
-
const {
|
3983
|
-
result: { stream, warnings, rawResponse, request },
|
3984
|
-
doStreamSpan,
|
3985
|
-
startTimestampMs
|
3986
|
-
} = await startStep({ responseMessages: [] });
|
3987
|
-
return new DefaultStreamTextResult({
|
3988
|
-
stream,
|
3989
|
-
warnings,
|
3990
|
-
rawResponse,
|
3991
|
-
request,
|
3992
|
-
onChunk,
|
3993
|
-
onFinish,
|
3994
|
-
onStepFinish,
|
3995
|
-
rootSpan,
|
3996
|
-
doStreamSpan,
|
3997
|
-
telemetry,
|
3998
|
-
startTimestampMs,
|
3999
|
-
maxSteps,
|
4000
|
-
continueSteps,
|
4001
|
-
startStep,
|
4002
|
-
modelId: model.modelId,
|
4003
|
-
now: now2,
|
4004
|
-
currentDate,
|
4005
|
-
generateId: generateId3,
|
4006
|
-
tools
|
4007
|
-
});
|
4008
|
-
}
|
3882
|
+
onChunk,
|
3883
|
+
onFinish,
|
3884
|
+
onStepFinish,
|
3885
|
+
_internal: {
|
3886
|
+
now: now2 = now,
|
3887
|
+
generateId: generateId3 = originalGenerateId4,
|
3888
|
+
currentDate = () => /* @__PURE__ */ new Date()
|
3889
|
+
} = {},
|
3890
|
+
...settings
|
3891
|
+
}) {
|
3892
|
+
return new DefaultStreamTextResult({
|
3893
|
+
model,
|
3894
|
+
telemetry,
|
3895
|
+
headers,
|
3896
|
+
settings,
|
3897
|
+
maxRetries,
|
3898
|
+
abortSignal,
|
3899
|
+
system,
|
3900
|
+
prompt,
|
3901
|
+
messages,
|
3902
|
+
tools,
|
3903
|
+
toolChoice,
|
3904
|
+
toolCallStreaming,
|
3905
|
+
activeTools,
|
3906
|
+
maxSteps,
|
3907
|
+
continueSteps,
|
3908
|
+
providerMetadata,
|
3909
|
+
onChunk,
|
3910
|
+
onFinish,
|
3911
|
+
onStepFinish,
|
3912
|
+
now: now2,
|
3913
|
+
currentDate,
|
3914
|
+
generateId: generateId3
|
4009
3915
|
});
|
4010
3916
|
}
|
4011
3917
|
var DefaultStreamTextResult = class {
|
4012
3918
|
constructor({
|
4013
|
-
|
4014
|
-
warnings,
|
4015
|
-
rawResponse,
|
4016
|
-
request,
|
4017
|
-
onChunk,
|
4018
|
-
onFinish,
|
4019
|
-
onStepFinish,
|
4020
|
-
rootSpan,
|
4021
|
-
doStreamSpan,
|
3919
|
+
model,
|
4022
3920
|
telemetry,
|
4023
|
-
|
3921
|
+
headers,
|
3922
|
+
settings,
|
3923
|
+
maxRetries,
|
3924
|
+
abortSignal,
|
3925
|
+
system,
|
3926
|
+
prompt,
|
3927
|
+
messages,
|
3928
|
+
tools,
|
3929
|
+
toolChoice,
|
3930
|
+
toolCallStreaming,
|
3931
|
+
activeTools,
|
4024
3932
|
maxSteps,
|
4025
3933
|
continueSteps,
|
4026
|
-
|
4027
|
-
|
3934
|
+
providerMetadata,
|
3935
|
+
onChunk,
|
3936
|
+
onFinish,
|
3937
|
+
onStepFinish,
|
4028
3938
|
now: now2,
|
4029
3939
|
currentDate,
|
4030
|
-
generateId: generateId3
|
4031
|
-
tools
|
3940
|
+
generateId: generateId3
|
4032
3941
|
}) {
|
4033
|
-
this.
|
4034
|
-
this.
|
4035
|
-
|
4036
|
-
this.
|
4037
|
-
|
4038
|
-
this.
|
4039
|
-
|
4040
|
-
this.
|
4041
|
-
|
4042
|
-
this.
|
4043
|
-
|
4044
|
-
|
4045
|
-
|
4046
|
-
|
4047
|
-
|
4048
|
-
|
4049
|
-
|
4050
|
-
}
|
4051
|
-
|
4052
|
-
const
|
4053
|
-
|
4054
|
-
|
4055
|
-
|
4056
|
-
|
4057
|
-
|
4058
|
-
const {
|
4059
|
-
|
4060
|
-
|
4061
|
-
|
4062
|
-
} = createStitchableStream();
|
4063
|
-
this.originalStream = stitchableStream;
|
4064
|
-
const stepResults = [];
|
3942
|
+
this.warningsPromise = new DelayedPromise();
|
3943
|
+
this.usagePromise = new DelayedPromise();
|
3944
|
+
this.finishReasonPromise = new DelayedPromise();
|
3945
|
+
this.providerMetadataPromise = new DelayedPromise();
|
3946
|
+
this.textPromise = new DelayedPromise();
|
3947
|
+
this.toolCallsPromise = new DelayedPromise();
|
3948
|
+
this.toolResultsPromise = new DelayedPromise();
|
3949
|
+
this.requestPromise = new DelayedPromise();
|
3950
|
+
this.responsePromise = new DelayedPromise();
|
3951
|
+
this.stepsPromise = new DelayedPromise();
|
3952
|
+
this.stitchableStream = createStitchableStream();
|
3953
|
+
if (maxSteps < 1) {
|
3954
|
+
throw new InvalidArgumentError({
|
3955
|
+
parameter: "maxSteps",
|
3956
|
+
value: maxSteps,
|
3957
|
+
message: "maxSteps must be at least 1"
|
3958
|
+
});
|
3959
|
+
}
|
3960
|
+
const tracer = getTracer(telemetry);
|
3961
|
+
const baseTelemetryAttributes = getBaseTelemetryAttributes({
|
3962
|
+
model,
|
3963
|
+
telemetry,
|
3964
|
+
headers,
|
3965
|
+
settings: { ...settings, maxRetries }
|
3966
|
+
});
|
3967
|
+
const initialPrompt = standardizePrompt({
|
3968
|
+
prompt: { system, prompt, messages },
|
3969
|
+
tools
|
3970
|
+
});
|
4065
3971
|
const self = this;
|
4066
|
-
|
4067
|
-
|
4068
|
-
|
4069
|
-
|
4070
|
-
|
4071
|
-
|
4072
|
-
|
4073
|
-
|
4074
|
-
|
4075
|
-
|
4076
|
-
|
4077
|
-
|
4078
|
-
|
4079
|
-
|
4080
|
-
|
4081
|
-
|
4082
|
-
|
4083
|
-
|
4084
|
-
|
4085
|
-
|
4086
|
-
|
4087
|
-
|
4088
|
-
|
4089
|
-
|
4090
|
-
|
4091
|
-
|
4092
|
-
|
4093
|
-
|
4094
|
-
|
4095
|
-
|
4096
|
-
|
4097
|
-
|
4098
|
-
|
4099
|
-
|
4100
|
-
|
4101
|
-
|
4102
|
-
|
4103
|
-
|
4104
|
-
|
4105
|
-
|
4106
|
-
|
4107
|
-
|
4108
|
-
|
4109
|
-
|
4110
|
-
|
4111
|
-
|
4112
|
-
|
4113
|
-
|
4114
|
-
|
4115
|
-
|
4116
|
-
|
4117
|
-
|
4118
|
-
|
4119
|
-
|
4120
|
-
|
4121
|
-
|
4122
|
-
|
4123
|
-
|
4124
|
-
|
4125
|
-
|
4126
|
-
|
4127
|
-
|
4128
|
-
|
4129
|
-
|
4130
|
-
|
4131
|
-
|
4132
|
-
|
4133
|
-
const chunkType = chunk.type;
|
4134
|
-
switch (chunkType) {
|
4135
|
-
case "text-delta": {
|
4136
|
-
if (continueSteps) {
|
4137
|
-
const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
|
4138
|
-
if (trimmedChunkText.length === 0) {
|
4139
|
-
break;
|
3972
|
+
recordSpan({
|
3973
|
+
name: "ai.streamText",
|
3974
|
+
attributes: selectTelemetryAttributes({
|
3975
|
+
telemetry,
|
3976
|
+
attributes: {
|
3977
|
+
...assembleOperationName({ operationId: "ai.streamText", telemetry }),
|
3978
|
+
...baseTelemetryAttributes,
|
3979
|
+
// specific settings that only make sense on the outer level:
|
3980
|
+
"ai.prompt": {
|
3981
|
+
input: () => JSON.stringify({ system, prompt, messages })
|
3982
|
+
},
|
3983
|
+
"ai.settings.maxSteps": maxSteps
|
3984
|
+
}
|
3985
|
+
}),
|
3986
|
+
tracer,
|
3987
|
+
endWhenDone: false,
|
3988
|
+
fn: async (rootSpan) => {
|
3989
|
+
const retry = retryWithExponentialBackoff({ maxRetries });
|
3990
|
+
const stepResults = [];
|
3991
|
+
async function streamStep({
|
3992
|
+
currentStep,
|
3993
|
+
responseMessages,
|
3994
|
+
usage,
|
3995
|
+
stepType,
|
3996
|
+
previousStepText,
|
3997
|
+
hasLeadingWhitespace
|
3998
|
+
}) {
|
3999
|
+
const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
|
4000
|
+
const promptMessages = await convertToLanguageModelPrompt({
|
4001
|
+
prompt: {
|
4002
|
+
type: promptFormat,
|
4003
|
+
system: initialPrompt.system,
|
4004
|
+
messages: [...initialPrompt.messages, ...responseMessages]
|
4005
|
+
},
|
4006
|
+
modelSupportsImageUrls: model.supportsImageUrls,
|
4007
|
+
modelSupportsUrl: model.supportsUrl
|
4008
|
+
});
|
4009
|
+
const mode = {
|
4010
|
+
type: "regular",
|
4011
|
+
...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
|
4012
|
+
};
|
4013
|
+
const {
|
4014
|
+
result: { stream, warnings, rawResponse, request },
|
4015
|
+
doStreamSpan,
|
4016
|
+
startTimestampMs
|
4017
|
+
} = await retry(
|
4018
|
+
() => recordSpan({
|
4019
|
+
name: "ai.streamText.doStream",
|
4020
|
+
attributes: selectTelemetryAttributes({
|
4021
|
+
telemetry,
|
4022
|
+
attributes: {
|
4023
|
+
...assembleOperationName({
|
4024
|
+
operationId: "ai.streamText.doStream",
|
4025
|
+
telemetry
|
4026
|
+
}),
|
4027
|
+
...baseTelemetryAttributes,
|
4028
|
+
"ai.prompt.format": {
|
4029
|
+
input: () => promptFormat
|
4030
|
+
},
|
4031
|
+
"ai.prompt.messages": {
|
4032
|
+
input: () => JSON.stringify(promptMessages)
|
4033
|
+
},
|
4034
|
+
"ai.prompt.tools": {
|
4035
|
+
// convert the language model level tools:
|
4036
|
+
input: () => {
|
4037
|
+
var _a11;
|
4038
|
+
return (_a11 = mode.tools) == null ? void 0 : _a11.map((tool2) => JSON.stringify(tool2));
|
4140
4039
|
}
|
4141
|
-
|
4142
|
-
|
4143
|
-
|
4144
|
-
|
4145
|
-
|
4146
|
-
|
4147
|
-
|
4148
|
-
|
4149
|
-
|
4150
|
-
|
4040
|
+
},
|
4041
|
+
"ai.prompt.toolChoice": {
|
4042
|
+
input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
|
4043
|
+
},
|
4044
|
+
// standardized gen-ai llm span attributes:
|
4045
|
+
"gen_ai.system": model.provider,
|
4046
|
+
"gen_ai.request.model": model.modelId,
|
4047
|
+
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
4048
|
+
"gen_ai.request.max_tokens": settings.maxTokens,
|
4049
|
+
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
4050
|
+
"gen_ai.request.stop_sequences": settings.stopSequences,
|
4051
|
+
"gen_ai.request.temperature": settings.temperature,
|
4052
|
+
"gen_ai.request.top_k": settings.topK,
|
4053
|
+
"gen_ai.request.top_p": settings.topP
|
4054
|
+
}
|
4055
|
+
}),
|
4056
|
+
tracer,
|
4057
|
+
endWhenDone: false,
|
4058
|
+
fn: async (doStreamSpan2) => ({
|
4059
|
+
startTimestampMs: now2(),
|
4060
|
+
// get before the call
|
4061
|
+
doStreamSpan: doStreamSpan2,
|
4062
|
+
result: await model.doStream({
|
4063
|
+
mode,
|
4064
|
+
...prepareCallSettings(settings),
|
4065
|
+
inputFormat: promptFormat,
|
4066
|
+
prompt: promptMessages,
|
4067
|
+
providerMetadata,
|
4068
|
+
abortSignal,
|
4069
|
+
headers
|
4070
|
+
})
|
4071
|
+
})
|
4072
|
+
})
|
4073
|
+
);
|
4074
|
+
const transformedStream = runToolsTransformation({
|
4075
|
+
tools,
|
4076
|
+
generatorStream: stream,
|
4077
|
+
toolCallStreaming,
|
4078
|
+
tracer,
|
4079
|
+
telemetry,
|
4080
|
+
abortSignal
|
4081
|
+
});
|
4082
|
+
const stepRequest = request != null ? request : {};
|
4083
|
+
const stepToolCalls = [];
|
4084
|
+
const stepToolResults = [];
|
4085
|
+
let stepFinishReason = "unknown";
|
4086
|
+
let stepUsage = {
|
4087
|
+
promptTokens: 0,
|
4088
|
+
completionTokens: 0,
|
4089
|
+
totalTokens: 0
|
4090
|
+
};
|
4091
|
+
let stepProviderMetadata;
|
4092
|
+
let stepFirstChunk = true;
|
4093
|
+
let stepText = "";
|
4094
|
+
let fullStepText = stepType === "continue" ? previousStepText : "";
|
4095
|
+
let stepLogProbs;
|
4096
|
+
let stepResponse = {
|
4097
|
+
id: generateId3(),
|
4098
|
+
timestamp: currentDate(),
|
4099
|
+
modelId: model.modelId
|
4100
|
+
};
|
4101
|
+
let chunkBuffer = "";
|
4102
|
+
let chunkTextPublished = false;
|
4103
|
+
let inWhitespacePrefix = true;
|
4104
|
+
let hasWhitespaceSuffix = false;
|
4105
|
+
async function publishTextChunk({
|
4106
|
+
controller,
|
4107
|
+
chunk
|
4108
|
+
}) {
|
4109
|
+
controller.enqueue(chunk);
|
4110
|
+
stepText += chunk.textDelta;
|
4111
|
+
fullStepText += chunk.textDelta;
|
4112
|
+
chunkTextPublished = true;
|
4113
|
+
hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
|
4114
|
+
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4115
|
+
}
|
4116
|
+
self.stitchableStream.addStream(
|
4117
|
+
transformedStream.pipeThrough(
|
4118
|
+
new TransformStream({
|
4119
|
+
async transform(chunk, controller) {
|
4120
|
+
var _a11, _b, _c;
|
4121
|
+
if (stepFirstChunk) {
|
4122
|
+
const msToFirstChunk = now2() - startTimestampMs;
|
4123
|
+
stepFirstChunk = false;
|
4124
|
+
doStreamSpan.addEvent("ai.stream.firstChunk", {
|
4125
|
+
"ai.response.msToFirstChunk": msToFirstChunk
|
4126
|
+
});
|
4127
|
+
doStreamSpan.setAttributes({
|
4128
|
+
"ai.response.msToFirstChunk": msToFirstChunk
|
4129
|
+
});
|
4130
|
+
}
|
4131
|
+
if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
|
4132
|
+
return;
|
4133
|
+
}
|
4134
|
+
const chunkType = chunk.type;
|
4135
|
+
switch (chunkType) {
|
4136
|
+
case "text-delta": {
|
4137
|
+
if (continueSteps) {
|
4138
|
+
const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
|
4139
|
+
if (trimmedChunkText.length === 0) {
|
4140
|
+
break;
|
4141
|
+
}
|
4142
|
+
inWhitespacePrefix = false;
|
4143
|
+
chunkBuffer += trimmedChunkText;
|
4144
|
+
const split = splitOnLastWhitespace(chunkBuffer);
|
4145
|
+
if (split != null) {
|
4146
|
+
chunkBuffer = split.suffix;
|
4147
|
+
await publishTextChunk({
|
4148
|
+
controller,
|
4149
|
+
chunk: {
|
4150
|
+
type: "text-delta",
|
4151
|
+
textDelta: split.prefix + split.whitespace
|
4152
|
+
}
|
4153
|
+
});
|
4151
4154
|
}
|
4155
|
+
} else {
|
4156
|
+
await publishTextChunk({ controller, chunk });
|
4157
|
+
}
|
4158
|
+
break;
|
4159
|
+
}
|
4160
|
+
case "tool-call": {
|
4161
|
+
controller.enqueue(chunk);
|
4162
|
+
stepToolCalls.push(chunk);
|
4163
|
+
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4164
|
+
break;
|
4165
|
+
}
|
4166
|
+
case "tool-result": {
|
4167
|
+
controller.enqueue(chunk);
|
4168
|
+
stepToolResults.push(chunk);
|
4169
|
+
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4170
|
+
break;
|
4171
|
+
}
|
4172
|
+
case "response-metadata": {
|
4173
|
+
stepResponse = {
|
4174
|
+
id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
|
4175
|
+
timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
|
4176
|
+
modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
|
4177
|
+
};
|
4178
|
+
break;
|
4179
|
+
}
|
4180
|
+
case "finish": {
|
4181
|
+
stepUsage = chunk.usage;
|
4182
|
+
stepFinishReason = chunk.finishReason;
|
4183
|
+
stepProviderMetadata = chunk.experimental_providerMetadata;
|
4184
|
+
stepLogProbs = chunk.logprobs;
|
4185
|
+
const msToFinish = now2() - startTimestampMs;
|
4186
|
+
doStreamSpan.addEvent("ai.stream.finish");
|
4187
|
+
doStreamSpan.setAttributes({
|
4188
|
+
"ai.response.msToFinish": msToFinish,
|
4189
|
+
"ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
|
4152
4190
|
});
|
4191
|
+
break;
|
4153
4192
|
}
|
4154
|
-
|
4155
|
-
|
4156
|
-
|
4157
|
-
|
4158
|
-
|
4159
|
-
case "tool-call": {
|
4160
|
-
controller.enqueue(chunk);
|
4161
|
-
stepToolCalls.push(chunk);
|
4162
|
-
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4163
|
-
break;
|
4164
|
-
}
|
4165
|
-
case "tool-result": {
|
4166
|
-
controller.enqueue(chunk);
|
4167
|
-
stepToolResults.push(chunk);
|
4168
|
-
await (onChunk == null ? void 0 : onChunk({ chunk }));
|
4169
|
-
break;
|
4170
|
-
}
|
4171
|
-
case "response-metadata": {
|
4172
|
-
stepResponse = {
|
4173
|
-
id: (_a11 = chunk.id) != null ? _a11 : stepResponse.id,
|
4174
|
-
timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
|
4175
|
-
modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
|
4176
|
-
};
|
4177
|
-
break;
|
4178
|
-
}
|
4179
|
-
case "finish": {
|
4180
|
-
stepUsage = chunk.usage;
|
4181
|
-
stepFinishReason = chunk.finishReason;
|
4182
|
-
stepProviderMetadata = chunk.experimental_providerMetadata;
|
4183
|
-
stepLogProbs = chunk.logprobs;
|
4184
|
-
const msToFinish = now2() - startTimestamp;
|
4185
|
-
doStreamSpan2.addEvent("ai.stream.finish");
|
4186
|
-
doStreamSpan2.setAttributes({
|
4187
|
-
"ai.response.msToFinish": msToFinish,
- "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
- });
- break;
- }
- case "tool-call-streaming-start":
- case "tool-call-delta": {
- controller.enqueue(chunk);
- await (onChunk == null ? void 0 : onChunk({ chunk }));
- break;
- }
- case "error": {
- controller.enqueue(chunk);
- stepFinishReason = "error";
- break;
- }
- default: {
- const exhaustiveCheck = chunkType;
- throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
- }
- }
- },
- // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
- async flush(controller) {
- var _a11, _b;
- const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
- let nextStepType = "done";
- if (currentStep + 1 < maxSteps) {
- if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
- stepToolCalls.length === 0) {
- nextStepType = "continue";
- } else if (
- // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length
- ) {
- nextStepType = "tool-result";
- }
- }
- if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
- stepType === "continue" && !chunkTextPublished)) {
- await publishTextChunk({
- controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
- });
- chunkBuffer = "";
- }
- try {
- doStreamSpan2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": stepFinishReason,
- "ai.response.text": { output: () => stepText },
- "ai.response.toolCalls": {
- output: () => stepToolCallsJson
- },
- "ai.response.id": stepResponse.id,
- "ai.response.model": stepResponse.modelId,
- "ai.response.timestamp": stepResponse.timestamp.toISOString(),
- "ai.usage.promptTokens": stepUsage.promptTokens,
- "ai.usage.completionTokens": stepUsage.completionTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [stepFinishReason],
- "gen_ai.response.id": stepResponse.id,
- "gen_ai.response.model": stepResponse.modelId,
- "gen_ai.usage.input_tokens": stepUsage.promptTokens,
- "gen_ai.usage.output_tokens": stepUsage.completionTokens
+ case "tool-call-streaming-start":
+ case "tool-call-delta": {
+ controller.enqueue(chunk);
+ await (onChunk == null ? void 0 : onChunk({ chunk }));
+ break;
  }
- … [10 removed lines lost in extraction]
- experimental_providerMetadata: stepProviderMetadata,
- logprobs: stepLogProbs,
- response: {
- ...stepResponse
+ case "error": {
+ controller.enqueue(chunk);
+ stepFinishReason = "error";
+ break;
+ }
+ default: {
+ const exhaustiveCheck = chunkType;
+ throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
+ }
+ }
  },
- … [10 removed lines lost in extraction]
+ // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+ async flush(controller) {
+ const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+ let nextStepType = "done";
+ if (currentStep + 1 < maxSteps) {
+ if (continueSteps && stepFinishReason === "length" && // only use continue when there are no tool calls:
+ stepToolCalls.length === 0) {
+ nextStepType = "continue";
+ } else if (
+ // there are tool calls:
+ stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length
+ ) {
+ nextStepType = "tool-result";
+ }
+ }
+ if (continueSteps && chunkBuffer.length > 0 && (nextStepType !== "continue" || // when the next step is a regular step, publish the buffer
+ stepType === "continue" && !chunkTextPublished)) {
+ await publishTextChunk({
+ controller,
+ chunk: {
+ type: "text-delta",
+ textDelta: chunkBuffer
+ }
+ });
+ chunkBuffer = "";
+ }
+ try {
+ doStreamSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": stepFinishReason,
+ "ai.response.text": { output: () => stepText },
+ "ai.response.toolCalls": {
+ output: () => stepToolCallsJson
+ },
+ "ai.response.id": stepResponse.id,
+ "ai.response.model": stepResponse.modelId,
+ "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+ "ai.usage.promptTokens": stepUsage.promptTokens,
+ "ai.usage.completionTokens": stepUsage.completionTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [stepFinishReason],
+ "gen_ai.response.id": stepResponse.id,
+ "gen_ai.response.model": stepResponse.modelId,
+ "gen_ai.usage.input_tokens": stepUsage.promptTokens,
+ "gen_ai.usage.output_tokens": stepUsage.completionTokens
+ }
+ })
+ );
+ } catch (error) {
+ } finally {
+ doStreamSpan.end();
+ }
+ controller.enqueue({
+ type: "step-finish",
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ experimental_providerMetadata: stepProviderMetadata,
+ logprobs: stepLogProbs,
+ response: {
+ ...stepResponse
+ },
+ isContinued: nextStepType === "continue"
  });
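The rewritten flush handler above decides whether streamText should run another step: "continue" when the step stopped at the length limit with no tool calls, "tool-result" when every tool call already has a result, and "done" otherwise. A minimal sketch of that decision, with illustrative names (not the SDK's public API):

    // Sketch of the next-step decision encoded in the flush handler above.
    type NextStepType = "done" | "continue" | "tool-result";

    function decideNextStep(opts: {
      currentStep: number;
      maxSteps: number;
      continueSteps: boolean;
      finishReason: string;
      toolCallCount: number;
      toolResultCount: number;
    }): NextStepType {
      const { currentStep, maxSteps, continueSteps, finishReason, toolCallCount, toolResultCount } = opts;
      if (currentStep + 1 >= maxSteps) return "done"; // step budget exhausted
      // continue only when output was cut off by length and no tools were called:
      if (continueSteps && finishReason === "length" && toolCallCount === 0) return "continue";
      // run another step only when every tool call already has a result:
      if (toolCallCount > 0 && toolResultCount === toolCallCount) return "tool-result";
      return "done";
    }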
- … [4 removed lines lost in extraction]
+ if (stepType === "continue") {
+ const lastMessage = responseMessages[responseMessages.length - 1];
+ if (typeof lastMessage.content === "string") {
+ lastMessage.content += stepText;
+ } else {
+ lastMessage.content.push({
+ text: stepText,
+ type: "text"
+ });
+ }
+ } else {
+ responseMessages.push(
+ ...toResponseMessages({
+ text: stepText,
+ tools: tools != null ? tools : {},
+ toolCalls: stepToolCalls,
+ toolResults: stepToolResults
+ })
+ );
+ }
+ const currentStepResult = {
+ stepType,
  text: stepText,
- tools: tools != null ? tools : {},
  toolCalls: stepToolCalls,
- toolResults: stepToolResults
- … [31 removed lines lost in extraction]
- result,
- doStreamSpan: doStreamSpan3,
- startTimestampMs: startTimestamp2
- } = await startStep({ responseMessages });
- self.rawWarnings = result.warnings;
- self.rawResponse = result.rawResponse;
- addStepStream({
- stream: result.stream,
- startTimestamp: startTimestamp2,
- doStreamSpan: doStreamSpan3,
- currentStep: currentStep + 1,
- responseMessages,
- usage: combinedUsage,
- stepType: nextStepType,
- previousStepText: fullStepText,
- stepRequest: result.request,
- hasLeadingWhitespace: hasWhitespaceSuffix
- });
- return;
- }
- try {
- controller.enqueue({
- type: "finish",
- finishReason: stepFinishReason,
- usage: combinedUsage,
- experimental_providerMetadata: stepProviderMetadata,
- logprobs: stepLogProbs,
- response: {
- ...stepResponse
+ toolResults: stepToolResults,
+ finishReason: stepFinishReason,
+ usage: stepUsage,
+ warnings,
+ logprobs: stepLogProbs,
+ request: stepRequest,
+ response: {
+ ...stepResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
+ // deep clone msgs to avoid mutating past messages in multi-step:
+ messages: JSON.parse(JSON.stringify(responseMessages))
+ },
+ experimental_providerMetadata: stepProviderMetadata,
+ isContinued: nextStepType === "continue"
+ };
+ stepResults.push(currentStepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+ const combinedUsage = {
+ promptTokens: usage.promptTokens + stepUsage.promptTokens,
+ completionTokens: usage.completionTokens + stepUsage.completionTokens,
+ totalTokens: usage.totalTokens + stepUsage.totalTokens
+ };
+ if (nextStepType !== "done") {
+ await streamStep({
+ currentStep: currentStep + 1,
+ responseMessages,
+ usage: combinedUsage,
+ stepType: nextStepType,
+ previousStepText: fullStepText,
+ hasLeadingWhitespace: hasWhitespaceSuffix
+ });
+ return;
  }
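Each recursive streamStep call above threads the running token totals forward by summing the previous totals with the current step's usage, field by field. A trivial sketch of that accumulation:

    // Minimal sketch of the per-step usage accumulation shown above.
    interface Usage { promptTokens: number; completionTokens: number; totalTokens: number }

    const addUsage = (a: Usage, b: Usage): Usage => ({
      promptTokens: a.promptTokens + b.promptTokens,
      completionTokens: a.completionTokens + b.completionTokens,
      totalTokens: a.totalTokens + b.totalTokens,
    });

    // addUsage({ promptTokens: 10, completionTokens: 20, totalTokens: 30 },
    //          { promptTokens: 5, completionTokens: 7, totalTokens: 12 })
    // -> { promptTokens: 15, completionTokens: 27, totalTokens: 42 }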
- … [10 removed lines lost in extraction]
+ try {
+ controller.enqueue({
+ type: "finish",
+ finishReason: stepFinishReason,
+ usage: combinedUsage,
+ experimental_providerMetadata: stepProviderMetadata,
+ logprobs: stepLogProbs,
+ response: {
+ ...stepResponse
+ }
+ });
+ self.stitchableStream.close();
+ rootSpan.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": stepFinishReason,
+ "ai.response.text": { output: () => fullStepText },
+ "ai.response.toolCalls": {
+ output: () => stepToolCallsJson
+ },
+ "ai.usage.promptTokens": combinedUsage.promptTokens,
+ "ai.usage.completionTokens": combinedUsage.completionTokens
+ }
+ })
+ );
+ self.usagePromise.resolve(combinedUsage);
+ self.finishReasonPromise.resolve(stepFinishReason);
+ self.textPromise.resolve(fullStepText);
+ self.toolCallsPromise.resolve(stepToolCalls);
+ self.providerMetadataPromise.resolve(stepProviderMetadata);
+ self.toolResultsPromise.resolve(stepToolResults);
+ self.requestPromise.resolve(stepRequest);
+ self.responsePromise.resolve({
+ ...stepResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
+ messages: responseMessages
+ });
+ self.stepsPromise.resolve(stepResults);
+ self.warningsPromise.resolve(warnings != null ? warnings : []);
+ await (onFinish == null ? void 0 : onFinish({
+ finishReason: stepFinishReason,
+ logprobs: stepLogProbs,
+ usage: combinedUsage,
+ text: fullStepText,
+ toolCalls: stepToolCalls,
+ // The tool results are inferred as a never[] type, because they are
+ // optional and the execute method with an inferred result type is
+ // optional as well. Therefore we need to cast the toolResults to any.
+ // The type exposed to the users will be correctly inferred.
+ toolResults: stepToolResults,
+ request: stepRequest,
+ response: {
+ ...stepResponse,
+ headers: rawResponse == null ? void 0 : rawResponse.headers,
+ messages: responseMessages
  },
- … [34 removed lines lost in extraction]
- messages: responseMessages
- },
- warnings,
- experimental_providerMetadata: stepProviderMetadata,
- steps: stepResults
- }));
- } catch (error) {
- controller.error(error);
- } finally {
- rootSpan.end();
- }
- }
- })
- )
+ warnings,
+ experimental_providerMetadata: stepProviderMetadata,
+ steps: stepResults
+ }));
+ } catch (error) {
+ controller.error(error);
+ } finally {
+ rootSpan.end();
+ }
+ }
+ })
+ )
+ );
+ }
+ await streamStep({
+ currentStep: 0,
+ responseMessages: [],
+ usage: {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0
+ },
+ previousStepText: "",
+ stepType: "initial",
+ hasLeadingWhitespace: false
+ });
+ }
+ }).catch((error) => {
+ self.stitchableStream.addStream(
+ new ReadableStream({
+ start(controller) {
+ controller.error(error);
+ }
+ })
  );
- … [1 removed line lost in extraction]
- addStepStream({
- stream,
- startTimestamp: startTimestampMs,
- doStreamSpan,
- currentStep: 0,
- responseMessages: [],
- usage: void 0,
- stepType: "initial",
- stepRequest: request,
- hasLeadingWhitespace: false
+ self.stitchableStream.close();
  });
  }
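Note the new .catch handler above: instead of rejecting, the error is forwarded into the stitched output stream as a ReadableStream that errors immediately, and the stitchable stream is then closed so readers terminate through the normal stream path. A hedged sketch of that pattern (the stitchable interface here is illustrative):

    // Error-forwarding pattern from the .catch above: push a stream that
    // immediately errors so downstream readers observe the failure via read().
    function forwardErrorToStream(
      stitchable: { addStream(s: ReadableStream<unknown>): void; close(): void },
      error: unknown,
    ): void {
      stitchable.addStream(
        new ReadableStream({
          start(controller) {
            controller.error(error); // a reader's read() now rejects with `error`
          },
        }),
      );
      stitchable.close();
    }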
+ get warnings() {
+ return this.warningsPromise.value;
+ }
+ get usage() {
+ return this.usagePromise.value;
+ }
+ get finishReason() {
+ return this.finishReasonPromise.value;
+ }
+ get experimental_providerMetadata() {
+ return this.providerMetadataPromise.value;
+ }
+ get text() {
+ return this.textPromise.value;
+ }
+ get toolCalls() {
+ return this.toolCallsPromise.value;
+ }
+ get toolResults() {
+ return this.toolResultsPromise.value;
+ }
+ get request() {
+ return this.requestPromise.value;
+ }
+ get response() {
+ return this.responsePromise.value;
+ }
+ get steps() {
+ return this.stepsPromise.value;
+ }
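These new getters replace eagerly assigned fields: each one hands out a deferred promise that flush resolves when the stream closes. A minimal sketch of a deferred promise with a .value getter, in the spirit of the pattern above (illustrative, not the SDK's internal class):

    // A deferred promise: resolved later (e.g. in flush), consumed via `.value`.
    class Deferred<T> {
      private _resolve!: (value: T) => void;
      readonly promise: Promise<T>;
      constructor() {
        this.promise = new Promise<T>((resolve) => (this._resolve = resolve));
      }
      resolve(value: T): void { this._resolve(value); }
      get value(): Promise<T> { return this.promise; }
    }

    const textPromise = new Deferred<string>();
    // later, when streaming finishes: textPromise.resolve(fullStepText);
    // consumer side: const text = await result.text;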
  /**
  Split out a new stream from the original stream.
  The original stream is replaced to allow for further splitting,
@@ -4433,8 +4464,8 @@ var DefaultStreamTextResult = class {
  However, the LLM results are expected to be small enough to not cause issues.
  */
  teeStream() {
- const [stream1, stream2] = this.…
- this.…
+ const [stream1, stream2] = this.stitchableStream.stream.tee();
+ this.stitchableStream.stream = stream2;
  return stream1;
  }
4471
|
get textStream() {
|
@@ -4474,12 +4505,12 @@ var DefaultStreamTextResult = class {
|
|
4474
4505
|
const chunkType = chunk.type;
|
4475
4506
|
switch (chunkType) {
|
4476
4507
|
case "text-delta": {
|
4477
|
-
controller.enqueue(
|
4508
|
+
controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
|
4478
4509
|
break;
|
4479
4510
|
}
|
4480
4511
|
case "tool-call-streaming-start": {
|
4481
4512
|
controller.enqueue(
|
4482
|
-
|
4513
|
+
formatDataStreamPart("tool_call_streaming_start", {
|
4483
4514
|
toolCallId: chunk.toolCallId,
|
4484
4515
|
toolName: chunk.toolName
|
4485
4516
|
})
|
@@ -4488,7 +4519,7 @@ var DefaultStreamTextResult = class {
|
|
4488
4519
|
}
|
4489
4520
|
case "tool-call-delta": {
|
4490
4521
|
controller.enqueue(
|
4491
|
-
|
4522
|
+
formatDataStreamPart("tool_call_delta", {
|
4492
4523
|
toolCallId: chunk.toolCallId,
|
4493
4524
|
argsTextDelta: chunk.argsTextDelta
|
4494
4525
|
})
|
@@ -4497,7 +4528,7 @@ var DefaultStreamTextResult = class {
|
|
4497
4528
|
}
|
4498
4529
|
case "tool-call": {
|
4499
4530
|
controller.enqueue(
|
4500
|
-
|
4531
|
+
formatDataStreamPart("tool_call", {
|
4501
4532
|
toolCallId: chunk.toolCallId,
|
4502
4533
|
toolName: chunk.toolName,
|
4503
4534
|
args: chunk.args
|
@@ -4507,7 +4538,7 @@ var DefaultStreamTextResult = class {
|
|
4507
4538
|
}
|
4508
4539
|
case "tool-result": {
|
4509
4540
|
controller.enqueue(
|
4510
|
-
|
4541
|
+
formatDataStreamPart("tool_result", {
|
4511
4542
|
toolCallId: chunk.toolCallId,
|
4512
4543
|
result: chunk.result
|
4513
4544
|
})
|
@@ -4516,13 +4547,13 @@ var DefaultStreamTextResult = class {
|
|
4516
4547
|
}
|
4517
4548
|
case "error": {
|
4518
4549
|
controller.enqueue(
|
4519
|
-
|
4550
|
+
formatDataStreamPart("error", getErrorMessage3(chunk.error))
|
4520
4551
|
);
|
4521
4552
|
break;
|
4522
4553
|
}
|
4523
4554
|
case "step-finish": {
|
4524
4555
|
controller.enqueue(
|
4525
|
-
|
4556
|
+
formatDataStreamPart("finish_step", {
|
4526
4557
|
finishReason: chunk.finishReason,
|
4527
4558
|
usage: sendUsage ? {
|
4528
4559
|
promptTokens: chunk.usage.promptTokens,
|
@@ -4535,7 +4566,7 @@ var DefaultStreamTextResult = class {
|
|
4535
4566
|
}
|
4536
4567
|
case "finish": {
|
4537
4568
|
controller.enqueue(
|
4538
|
-
|
4569
|
+
formatDataStreamPart("finish_message", {
|
4539
4570
|
finishReason: chunk.finishReason,
|
4540
4571
|
usage: sendUsage ? {
|
4541
4572
|
promptTokens: chunk.usage.promptTokens,
|
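Every switch arm above now serializes its chunk with formatDataStreamPart, which encodes a typed part as a prefixed JSON line on the data stream protocol. A hedged illustration, assuming the part ids this protocol version uses (importable from "ai" per the export changes below):

    import { formatDataStreamPart } from "ai";

    // text parts are serialized with the `0:` prefix:
    formatDataStreamPart("text", "Hello"); // -> '0:"Hello"\n'

    // step boundaries become finish_step parts:
    formatDataStreamPart("finish_step", {
      finishReason: "stop",
      usage: { promptTokens: 10, completionTokens: 5 },
      isContinued: false,
    });
    // -> roughly 'e:{"finishReason":"stop","usage":{...},"isContinued":false}\n'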
@@ -4805,7 +4836,7 @@ function magnitude(vector) {

  // streams/assistant-response.ts
  import {
- … [1 removed line lost in extraction]
+ formatAssistantStreamPart
  } from "@ai-sdk/ui-utils";
  function AssistantResponse({ threadId, messageId }, process2) {
  const stream = new ReadableStream({
@@ -4814,17 +4845,21 @@ function AssistantResponse({ threadId, messageId }, process2) {
  const textEncoder = new TextEncoder();
  const sendMessage = (message) => {
  controller.enqueue(
- textEncoder.encode(…
+ textEncoder.encode(
+ formatAssistantStreamPart("assistant_message", message)
+ )
  );
  };
  const sendDataMessage = (message) => {
  controller.enqueue(
- textEncoder.encode(…
+ textEncoder.encode(
+ formatAssistantStreamPart("data_message", message)
+ )
  );
  };
  const sendError = (errorMessage) => {
  controller.enqueue(
- textEncoder.encode(…
+ textEncoder.encode(formatAssistantStreamPart("error", errorMessage))
  );
  };
  const forwardStream = async (stream2) => {
@@ -4835,7 +4870,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  case "thread.message.created": {
  controller.enqueue(
  textEncoder.encode(
- … [1 removed line lost in extraction]
+ formatAssistantStreamPart("assistant_message", {
  id: value.data.id,
  role: "assistant",
  content: [{ type: "text", text: { value: "" } }]
@@ -4849,7 +4884,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
  controller.enqueue(
  textEncoder.encode(
- … [1 removed line lost in extraction]
+ formatAssistantStreamPart("text", content.text.value)
  )
  );
  }
@@ -4866,7 +4901,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
  };
  controller.enqueue(
  textEncoder.encode(
- … [1 removed line lost in extraction]
+ formatAssistantStreamPart("assistant_control_data", {
  threadId,
  messageId
  })
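AssistantResponse now emits its frames through formatAssistantStreamPart. A hedged usage sketch in a route handler; the request shape, the "msg_123" id, and the commented-out OpenAI run stream are assumptions for illustration:

    import { AssistantResponse } from "ai";

    export async function POST(req: Request) {
      const { threadId } = await req.json(); // request shape is assumed
      // ... add the user message to the thread and start a run here ...
      return AssistantResponse(
        { threadId, messageId: "msg_123" /* hypothetical message id */ },
        async ({ forwardStream, sendDataMessage }) => {
          // forwardStream would pipe an Assistants run stream into the
          // assistant stream parts shown above (assistant_message, text, ...):
          // await forwardStream(openai.beta.threads.runs.stream(threadId, { ... }));
          sendDataMessage({ role: "data", data: { status: "run started" } });
        },
      );
    }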
@@ -4931,7 +4966,7 @@ function createCallbacksTransformer(callbacks = {}) {
  }

  // streams/stream-data.ts
- import {…
+ import { formatDataStreamPart as formatDataStreamPart2 } from "@ai-sdk/ui-utils";

  // util/constants.ts
  var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
@@ -4983,7 +5018,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode(…
+ this.encoder.encode(formatDataStreamPart2("data", [value]))
  );
  }
  appendMessageAnnotation(value) {
@@ -4994,7 +5029,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode(…
+ this.encoder.encode(formatDataStreamPart2("message_annotations", [value]))
  );
  }
  };
@@ -5004,7 +5039,7 @@ function createStreamDataTransformer() {
  return new TransformStream({
  transform: async (chunk, controller) => {
  const message = decoder.decode(chunk);
- controller.enqueue(encoder.encode(…
+ controller.enqueue(encoder.encode(formatDataStreamPart2("text", message)));
  }
  });
  }
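StreamData now encodes its frames as "data" and "message_annotations" stream parts. Day-to-day usage is unchanged; a hedged sketch (payloads are illustrative):

    import { StreamData } from "ai";

    const data = new StreamData();
    data.append({ status: "indexing" });            // emitted as a "data" part
    data.appendMessageAnnotation({ source: "kb" }); // emitted as a "message_annotations" part
    // ... merge `data` into the response stream, then once the response is done:
    await data.close();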
@@ -5137,14 +5172,16 @@ export {
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_wrapLanguageModel,
- … [1 removed line lost in extraction]
+ formatAssistantStreamPart2 as formatAssistantStreamPart,
+ formatDataStreamPart3 as formatDataStreamPart,
  generateId2 as generateId,
  generateObject,
  generateText,
  jsonSchema,
- … [3 removed lines lost in extraction]
+ parseAssistantStreamPart,
+ parseDataStreamPart,
+ processDataStream,
+ processTextStream,
  streamObject,
  streamText,
  tool
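The export list now re-exports the stream-part helpers from @ai-sdk/ui-utils under their public names, so consumers can import them directly from the package:

    import {
      formatAssistantStreamPart,
      formatDataStreamPart,
      parseAssistantStreamPart,
      parseDataStreamPart,
      processDataStream,
      processTextStream,
    } from "ai";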