ai 6.0.0-beta.137 → 6.0.0-beta.139
- package/CHANGELOG.md +19 -0
- package/README.md +4 -4
- package/dist/index.d.mts +61 -3
- package/dist/index.d.ts +61 -3
- package/dist/index.js +140 -39
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +140 -39
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +66 -2
- package/dist/internal/index.d.ts +66 -2
- package/dist/internal/index.js +30 -1
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.mjs
CHANGED
@@ -495,12 +495,65 @@ function asLanguageModelV3(model) {
   });
   return new Proxy(model, {
     get(target, prop) {
-
-
-
+      switch (prop) {
+        case "specificationVersion":
+          return "v3";
+        case "doGenerate":
+          return async (...args) => {
+            const result = await target.doGenerate(...args);
+            return {
+              ...result,
+              usage: convertV2UsageToV3(result.usage)
+            };
+          };
+        case "doStream":
+          return async (...args) => {
+            const result = await target.doStream(...args);
+            return {
+              ...result,
+              stream: convertV2StreamToV3(result.stream)
+            };
+          };
+        default:
+          return target[prop];
+      }
     }
   });
 }
+function convertV2StreamToV3(stream) {
+  return stream.pipeThrough(
+    new TransformStream({
+      transform(chunk, controller) {
+        switch (chunk.type) {
+          case "finish":
+            controller.enqueue({
+              ...chunk,
+              usage: convertV2UsageToV3(chunk.usage)
+            });
+            break;
+          default:
+            controller.enqueue(chunk);
+            break;
+        }
+      }
+    })
+  );
+}
+function convertV2UsageToV3(usage) {
+  return {
+    inputTokens: {
+      total: usage.inputTokens,
+      noCache: void 0,
+      cacheRead: usage.cachedInputTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: usage.outputTokens,
+      text: void 0,
+      reasoning: usage.reasoningTokens
+    }
+  };
+}
 
 // src/model/as-speech-model-v3.ts
 function asSpeechModelV3(model) {
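The net effect of the new proxy arms: a V2 model's flat usage record is lifted into the nested V3 shape before it leaves `doGenerate` or `doStream`. A minimal sketch of that mapping, with a hypothetical V2 usage value and field names taken from `convertV2UsageToV3` above:

```ts
// Hypothetical V2 usage record; convertV2UsageToV3 reads exactly these four
// fields (inputTokens, outputTokens, cachedInputTokens, reasoningTokens).
const v2Usage = {
  inputTokens: 120,
  outputTokens: 45,
  cachedInputTokens: 100,
  reasoningTokens: 12,
};

// What convertV2UsageToV3(v2Usage) returns, per the hunk above:
const v3Usage = {
  inputTokens: {
    total: 120,            // v2Usage.inputTokens
    noCache: undefined,    // V2 has no separate uncached-input count
    cacheRead: 100,        // v2Usage.cachedInputTokens
    cacheWrite: undefined, // V2 has no cache-write count
  },
  outputTokens: {
    total: 45,             // v2Usage.outputTokens
    text: undefined,       // V2 does not break out text tokens
    reasoning: 12,         // v2Usage.reasoningTokens
  },
};
```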
@@ -813,7 +866,7 @@ import {
 } from "@ai-sdk/provider-utils";
 
 // src/version.ts
-var VERSION = true ? "6.0.0-beta.137" : "0.0.0-test";
+var VERSION = true ? "6.0.0-beta.139" : "0.0.0-test";
 
 // src/util/download/download.ts
 var download = async ({ url }) => {
@@ -1910,10 +1963,74 @@ function stringifyForTelemetry(prompt) {
 }
 
 // src/types/usage.ts
+function asLanguageModelUsage(usage) {
+  return {
+    inputTokens: usage.inputTokens.total,
+    inputTokenDetails: {
+      noCacheTokens: usage.inputTokens.noCache,
+      cacheReadTokens: usage.inputTokens.cacheRead,
+      cacheWriteTokens: usage.inputTokens.cacheWrite
+    },
+    outputTokens: usage.outputTokens.total,
+    outputTokenDetails: {
+      textTokens: usage.outputTokens.text,
+      reasoningTokens: usage.outputTokens.reasoning
+    },
+    totalTokens: addTokenCounts(
+      usage.inputTokens.total,
+      usage.outputTokens.total
+    ),
+    raw: usage.raw,
+    reasoningTokens: usage.outputTokens.reasoning,
+    cachedInputTokens: usage.inputTokens.cacheRead
+  };
+}
+function createNullLanguageModelUsage() {
+  return {
+    inputTokens: void 0,
+    inputTokenDetails: {
+      noCacheTokens: void 0,
+      cacheReadTokens: void 0,
+      cacheWriteTokens: void 0
+    },
+    outputTokens: void 0,
+    outputTokenDetails: {
+      textTokens: void 0,
+      reasoningTokens: void 0
+    },
+    totalTokens: void 0,
+    raw: void 0
+  };
+}
 function addLanguageModelUsage(usage1, usage2) {
+  var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j;
   return {
     inputTokens: addTokenCounts(usage1.inputTokens, usage2.inputTokens),
+    inputTokenDetails: {
+      noCacheTokens: addTokenCounts(
+        (_a15 = usage1.inputTokenDetails) == null ? void 0 : _a15.noCacheTokens,
+        (_b = usage2.inputTokenDetails) == null ? void 0 : _b.noCacheTokens
+      ),
+      cacheReadTokens: addTokenCounts(
+        (_c = usage1.inputTokenDetails) == null ? void 0 : _c.cacheReadTokens,
+        (_d = usage2.inputTokenDetails) == null ? void 0 : _d.cacheReadTokens
+      ),
+      cacheWriteTokens: addTokenCounts(
+        (_e = usage1.inputTokenDetails) == null ? void 0 : _e.cacheWriteTokens,
+        (_f = usage2.inputTokenDetails) == null ? void 0 : _f.cacheWriteTokens
+      )
+    },
     outputTokens: addTokenCounts(usage1.outputTokens, usage2.outputTokens),
+    outputTokenDetails: {
+      textTokens: addTokenCounts(
+        (_g = usage1.outputTokenDetails) == null ? void 0 : _g.textTokens,
+        (_h = usage2.outputTokenDetails) == null ? void 0 : _h.textTokens
+      ),
+      reasoningTokens: addTokenCounts(
+        (_i = usage1.outputTokenDetails) == null ? void 0 : _i.reasoningTokens,
+        (_j = usage2.outputTokenDetails) == null ? void 0 : _j.reasoningTokens
+      )
+    },
     totalTokens: addTokenCounts(usage1.totalTokens, usage2.totalTokens),
     reasoningTokens: addTokenCounts(
       usage1.reasoningTokens,
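`asLanguageModelUsage` flattens the nested V3 record back into the public `LanguageModelUsage` shape, keeping the pre-existing top-level `reasoningTokens` and `cachedInputTokens` fields as aliases into the new detail records. Continuing the sketch above (values follow the compiled function; `totalTokens` assumes `addTokenCounts`, which is not part of this diff, sums its two defined arguments):

```ts
// What asLanguageModelUsage(v3Usage) yields for the record above:
const publicUsage = {
  inputTokens: 120,
  inputTokenDetails: {
    noCacheTokens: undefined,
    cacheReadTokens: 100,
    cacheWriteTokens: undefined,
  },
  outputTokens: 45,
  outputTokenDetails: {
    textTokens: undefined,
    reasoningTokens: 12,
  },
  totalTokens: 165,       // addTokenCounts(120, 45)
  raw: undefined,         // v3Usage carries no raw provider usage here
  reasoningTokens: 12,    // alias of outputTokenDetails.reasoningTokens
  cachedInputTokens: 100, // alias of inputTokenDetails.cacheReadTokens
};
```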
@@ -3456,14 +3573,14 @@ async function generateText({
             result.providerMetadata
           ),
           // TODO rename telemetry attributes to inputTokens and outputTokens
-          "ai.usage.promptTokens": result.usage.inputTokens,
-          "ai.usage.completionTokens": result.usage.outputTokens,
+          "ai.usage.promptTokens": result.usage.inputTokens.total,
+          "ai.usage.completionTokens": result.usage.outputTokens.total,
           // standardized gen-ai llm span attributes:
           "gen_ai.response.finish_reasons": [result.finishReason],
           "gen_ai.response.id": responseData.id,
           "gen_ai.response.model": responseData.modelId,
-          "gen_ai.usage.input_tokens": result.usage.inputTokens,
-          "gen_ai.usage.output_tokens": result.usage.outputTokens
+          "gen_ai.usage.input_tokens": result.usage.inputTokens.total,
+          "gen_ai.usage.output_tokens": result.usage.outputTokens.total
         }
       })
     );
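Because the in-flight `result.usage` inside `generateText` is now the nested V3 record, every telemetry attribute that previously read a scalar must reach one level deeper. A simplified sketch of the same change, not the literal call sequence: the compiled code collects these values into the attribute object shown above, while here a hypothetical OpenTelemetry span is set directly (the `@opentelemetry/api` usage is an assumption for illustration):

```ts
import { trace } from "@opentelemetry/api";

const span = trace.getTracer("example").startSpan("ai.generateText");

// Hypothetical nested usage record, shaped like the V3 usage above:
const usage = { inputTokens: { total: 120 }, outputTokens: { total: 45 } };

// beta.137 read plain numbers (usage.inputTokens); beta.139 must read
// the scalar total one level down:
span.setAttribute("gen_ai.usage.input_tokens", usage.inputTokens.total);
span.setAttribute("gen_ai.usage.output_tokens", usage.outputTokens.total);

span.end();
```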
@@ -3563,7 +3680,7 @@ async function generateText({
       const currentStepResult = new DefaultStepResult({
         content: stepContent,
         finishReason: currentModelResponse.finishReason,
-        usage: currentModelResponse.usage,
+        usage: asLanguageModelUsage(currentModelResponse.usage),
         warnings: currentModelResponse.warnings,
         providerMetadata: currentModelResponse.providerMetadata,
         request: (_f = currentModelResponse.request) != null ? _f : {},
@@ -3604,8 +3721,8 @@ async function generateText({
           currentModelResponse.providerMetadata
         ),
         // TODO rename telemetry attributes to inputTokens and outputTokens
-        "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
-        "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+        "ai.usage.promptTokens": currentModelResponse.usage.inputTokens.total,
+        "ai.usage.completionTokens": currentModelResponse.usage.outputTokens.total
       }
     })
   );
@@ -5051,7 +5168,7 @@ function runToolsTransformation({
         finishChunk = {
           type: "finish",
           finishReason: chunk.finishReason,
-          usage: chunk.usage,
+          usage: asLanguageModelUsage(chunk.usage),
           providerMetadata: chunk.providerMetadata
         };
         break;
@@ -5534,11 +5651,7 @@ var DefaultStreamTextResult = class {
         return;
       }
       const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
-      const totalUsage = recordedTotalUsage != null ? recordedTotalUsage : {
-        inputTokens: void 0,
-        outputTokens: void 0,
-        totalTokens: void 0
-      };
+      const totalUsage = recordedTotalUsage != null ? recordedTotalUsage : createNullLanguageModelUsage();
       self._finishReason.resolve(finishReason);
       self._totalUsage.resolve(totalUsage);
       self._steps.resolve(recordedSteps);
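The same `createNullLanguageModelUsage()` call replaces identical inline `{ inputTokens: void 0, ... }` literals at three more sites below. Centralizing the fallback in one factory matters because the empty shape grew; a side-by-side of the two fallback shapes (values per the hunks above and below):

```ts
// Old inline fallback (beta.137): only three top-level counts existed.
const oldEmpty = {
  inputTokens: undefined,
  outputTokens: undefined,
  totalTokens: undefined,
};

// New fallback via createNullLanguageModelUsage() (beta.139): the detail
// records are present even when every count is undefined, so reads like
// newEmpty.inputTokenDetails.cacheReadTokens cannot throw.
const newEmpty = {
  inputTokens: undefined,
  inputTokenDetails: {
    noCacheTokens: undefined,
    cacheReadTokens: undefined,
    cacheWriteTokens: undefined,
  },
  outputTokens: undefined,
  outputTokenDetails: {
    textTokens: undefined,
    reasoningTokens: undefined,
  },
  totalTokens: undefined,
  raw: undefined,
};

console.log(newEmpty.inputTokenDetails.cacheReadTokens); // undefined, no crash
```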
@@ -5859,11 +5972,7 @@ var DefaultStreamTextResult = class {
       let warnings;
       const activeToolCallToolNames = {};
       let stepFinishReason = "unknown";
-      let stepUsage = {
-        inputTokens: void 0,
-        outputTokens: void 0,
-        totalTokens: void 0
-      };
+      let stepUsage = createNullLanguageModelUsage();
       let stepProviderMetadata;
       let stepFirstChunk = true;
       let stepResponse = {
@@ -6129,11 +6238,7 @@ var DefaultStreamTextResult = class {
       await streamStep({
         currentStep: 0,
         responseMessages: initialResponseMessages,
-        usage: {
-          inputTokens: void 0,
-          outputTokens: void 0,
-          totalTokens: void 0
-        }
+        usage: createNullLanguageModelUsage()
       });
     }
   }).catch((error) => {
@@ -8607,7 +8712,7 @@ async function generateObject(options) {
       throw new NoObjectGeneratedError({
         message: "No object generated: the model did not return a response.",
         response: responseData,
-        usage: result2.usage,
+        usage: asLanguageModelUsage(result2.usage),
         finishReason: result2.finishReason
       });
     }
@@ -8624,14 +8729,14 @@ async function generateObject(options) {
           result2.providerMetadata
         ),
         // TODO rename telemetry attributes to inputTokens and outputTokens
-        "ai.usage.promptTokens": result2.usage.inputTokens,
-        "ai.usage.completionTokens": result2.usage.outputTokens,
+        "ai.usage.promptTokens": result2.usage.inputTokens.total,
+        "ai.usage.completionTokens": result2.usage.outputTokens.total,
         // standardized gen-ai llm span attributes:
         "gen_ai.response.finish_reasons": [result2.finishReason],
         "gen_ai.response.id": responseData.id,
         "gen_ai.response.model": responseData.modelId,
-        "gen_ai.usage.input_tokens": result2.usage.inputTokens,
-        "gen_ai.usage.output_tokens": result2.usage.outputTokens
+        "gen_ai.usage.input_tokens": result2.usage.inputTokens.total,
+        "gen_ai.usage.output_tokens": result2.usage.outputTokens.total
       }
     })
   );
@@ -8646,7 +8751,7 @@ async function generateObject(options) {
     );
     result = generateResult.objectText;
     finishReason = generateResult.finishReason;
-    usage = generateResult.usage;
+    usage = asLanguageModelUsage(generateResult.usage);
    warnings = generateResult.warnings;
     resultProviderMetadata = generateResult.providerMetadata;
     request = (_a15 = generateResult.request) != null ? _a15 : {};
@@ -9083,11 +9188,7 @@ var DefaultStreamObjectResult = class {
     );
     self._request.resolve(request != null ? request : {});
     let warnings;
-    let usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = createNullLanguageModelUsage();
     let finishReason;
     let providerMetadata;
     let object2;
@@ -9167,7 +9268,7 @@ var DefaultStreamObjectResult = class {
           controller.enqueue({ type: "text-delta", textDelta });
         }
         finishReason = chunk.finishReason;
-        usage = chunk.usage;
+        usage = asLanguageModelUsage(chunk.usage);
         providerMetadata = chunk.providerMetadata;
         controller.enqueue({
           ...chunk,
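Taken together, the visible change for callers of `generateText`, `streamText`, `generateObject`, and `streamObject` is a richer `usage` object. A hedged usage sketch, assuming any configured provider model; the field names are those produced by `asLanguageModelUsage` above:

```ts
import { generateText, type LanguageModel } from "ai";

// Sketch only: `model` stands in for any configured provider model.
declare const model: LanguageModel;

const { usage } = await generateText({ model, prompt: "Hello!" });

usage.inputTokens;                         // total input tokens, as before
usage.inputTokenDetails?.cacheReadTokens;  // new: cache-read breakdown
usage.outputTokenDetails?.reasoningTokens; // new: reasoning breakdown
usage.cachedInputTokens;                   // legacy field, still populated
```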