ai 7.0.0-beta.4 → 7.0.0-beta.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.js +49 -15
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +49 -15
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +1 -1
- package/dist/internal/index.mjs +1 -1
- package/package.json +2 -2
- package/src/generate-text/generate-text.ts +43 -9
- package/src/generate-text/stream-text.ts +26 -6
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # ai
 
+## 7.0.0-beta.6
+
+### Patch Changes
+
+- Updated dependencies [c949e25]
+  - @ai-sdk/gateway@4.0.0-beta.3
+
+## 7.0.0-beta.5
+
+### Patch Changes
+
+- ebd4da2: feat(ai): add missing usage attributes
+
 ## 7.0.0-beta.4
 
 ### Patch Changes
package/dist/index.js
CHANGED
@@ -1370,7 +1370,7 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
 
 // src/version.ts
-var VERSION = true ? "7.0.0-beta.4" : "0.0.0-test";
+var VERSION = true ? "7.0.0-beta.6" : "0.0.0-test";
 
 // src/util/download/download.ts
 var download = async ({
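The only change in this hunk is the release string baked into the bundle. The `true ? ... : "0.0.0-test"` ternary suggests a compile-time flag that the build replaces with `true` for published versions. A hedged reconstruction of what src/version.ts might look like — only the two string literals are confirmed by the dist output; the identifier is a guess:

// Hypothetical sketch of src/version.ts.
declare const __RELEASE__: boolean; // assumed build-time define
export const VERSION = __RELEASE__ ? '7.0.0-beta.6' : '0.0.0-test';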
@@ -4346,7 +4346,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a21, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+      var _a21, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t;
      const initialMessages = initialPrompt.messages;
      const responseMessages = [];
      const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
@@ -4563,6 +4563,7 @@ async function generateText({
        headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
        body: (_h2 = result.response) == null ? void 0 : _h2.body
      };
+      const usage = asLanguageModelUsage(result.usage);
      span2.setAttributes(
        await selectTelemetryAttributes({
          telemetry,
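The added asLanguageModelUsage call converts the nested usage into the flatter legacy shape; in the next hunk the attributes mix both (totalTokens comes from the converted object, everything else reads the raw one). A sketch of what such a conversion could do, reusing the InferredUsage shape above — an assumption based on the attribute mappings in this diff, not the package's actual implementation:

// Hypothetical conversion; field choices mirror the mappings visible below
// (cacheRead -> cachedInputTokens, reasoning -> reasoningTokens, etc.).
function asLanguageModelUsageSketch(usage: InferredUsage) {
  return {
    inputTokens: usage.inputTokens.total,
    outputTokens: usage.outputTokens.total,
    totalTokens:
      (usage.inputTokens.total ?? 0) + (usage.outputTokens.total ?? 0),
    reasoningTokens: usage.outputTokens.reasoning,
    cachedInputTokens: usage.inputTokens.cacheRead,
  };
}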
@@ -4586,9 +4587,16 @@
            "ai.response.providerMetadata": JSON.stringify(
              result.providerMetadata
            ),
-            // TODO rename telemetry attributes to inputTokens and outputTokens
-            "ai.usage.promptTokens": result.usage.inputTokens.total,
-            "ai.usage.completionTokens": result.usage.outputTokens.total,
+            "ai.usage.inputTokens": result.usage.inputTokens.total,
+            "ai.usage.inputTokenDetails.noCacheTokens": result.usage.inputTokens.noCache,
+            "ai.usage.inputTokenDetails.cacheReadTokens": result.usage.inputTokens.cacheRead,
+            "ai.usage.inputTokenDetails.cacheWriteTokens": result.usage.inputTokens.cacheWrite,
+            "ai.usage.outputTokens": result.usage.outputTokens.total,
+            "ai.usage.outputTokenDetails.textTokens": result.usage.outputTokens.text,
+            "ai.usage.outputTokenDetails.reasoningTokens": result.usage.outputTokens.reasoning,
+            "ai.usage.totalTokens": usage.totalTokens,
+            "ai.usage.reasoningTokens": result.usage.outputTokens.reasoning,
+            "ai.usage.cachedInputTokens": result.usage.inputTokens.cacheRead,
            // standardized gen-ai llm span attributes:
            "gen_ai.response.finish_reasons": [
              result.finishReason.unified
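Note that alongside the new inputTokenDetails/outputTokenDetails keys, the legacy ai.usage.reasoningTokens and ai.usage.cachedInputTokens keys keep being emitted with the same values, so existing dashboards continue to work. All of these land as plain attributes on the OpenTelemetry span. A hedged consumer-side sketch using @opentelemetry/sdk-trace-base (assumes that package is installed; the attribute keys are taken verbatim from the diff):

import type { Context } from '@opentelemetry/api';
import type {
  ReadableSpan,
  Span,
  SpanProcessor,
} from '@opentelemetry/sdk-trace-base';

// Logs the usage attributes added in this version as spans complete.
class UsageLoggingProcessor implements SpanProcessor {
  onStart(_span: Span, _context: Context): void {}
  onEnd(span: ReadableSpan): void {
    const total = span.attributes['ai.usage.totalTokens'];
    if (total === undefined) return; // not an ai telemetry span
    const cacheRead =
      span.attributes['ai.usage.inputTokenDetails.cacheReadTokens'];
    const reasoning =
      span.attributes['ai.usage.outputTokenDetails.reasoningTokens'];
    console.log(
      `${span.name}: total=${total} cacheRead=${cacheRead ?? 0} reasoning=${reasoning ?? 0}`,
    );
  }
  shutdown(): Promise<void> {
    return Promise.resolve();
  }
  forceFlush(): Promise<void> {
    return Promise.resolve();
  }
}

Register the processor via your tracer provider's spanProcessors option when setting up the OpenTelemetry SDK.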
@@ -4789,10 +4797,7 @@
            },
            "ai.response.providerMetadata": JSON.stringify(
              currentModelResponse.providerMetadata
-            ),
-            // TODO rename telemetry attributes to inputTokens and outputTokens
-            "ai.usage.promptTokens": currentModelResponse.usage.inputTokens.total,
-            "ai.usage.completionTokens": currentModelResponse.usage.outputTokens.total
+            )
          }
        })
      );
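This hunk deletes the outer generateText span's legacy prompt/completion keys; the removed TODO comment announced exactly this rename, and the replacement attributes arrive in the next hunk. The mapping, stated as data:

// Old -> new outer-span attribute keys, per the removed TODO comment.
const renamedUsageKeys = {
  'ai.usage.promptTokens': 'ai.usage.inputTokens',
  'ai.usage.completionTokens': 'ai.usage.outputTokens',
} as const;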
@@ -4809,6 +4814,23 @@
          cachedInputTokens: void 0
        }
      );
+      span.setAttributes(
+        await selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            "ai.usage.inputTokens": totalUsage.inputTokens,
+            "ai.usage.inputTokenDetails.noCacheTokens": (_n = totalUsage.inputTokenDetails) == null ? void 0 : _n.noCacheTokens,
+            "ai.usage.inputTokenDetails.cacheReadTokens": (_o = totalUsage.inputTokenDetails) == null ? void 0 : _o.cacheReadTokens,
+            "ai.usage.inputTokenDetails.cacheWriteTokens": (_p = totalUsage.inputTokenDetails) == null ? void 0 : _p.cacheWriteTokens,
+            "ai.usage.outputTokens": totalUsage.outputTokens,
+            "ai.usage.outputTokenDetails.textTokens": (_q = totalUsage.outputTokenDetails) == null ? void 0 : _q.textTokens,
+            "ai.usage.outputTokenDetails.reasoningTokens": (_r = totalUsage.outputTokenDetails) == null ? void 0 : _r.reasoningTokens,
+            "ai.usage.totalTokens": totalUsage.totalTokens,
+            "ai.usage.reasoningTokens": (_s = totalUsage.outputTokenDetails) == null ? void 0 : _s.reasoningTokens,
+            "ai.usage.cachedInputTokens": (_t = totalUsage.inputTokenDetails) == null ? void 0 : _t.cacheReadTokens
+          }
+        })
+      );
      await notify({
        event: {
          stepNumber: lastStep.stepNumber,
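The `(_n = totalUsage.inputTokenDetails) == null ? void 0 : _n.noCacheTokens` pattern throughout this hunk is downleveled optional chaining; the `_n` through `_t` temps hoisted onto the `fn: async (span)` var list earlier exist solely for it. The presumed source form of one added line (an inference — the diff only ships dist output):

// Presumed src/generate-text/generate-text.ts form of the dist line above:
'ai.usage.inputTokenDetails.noCacheTokens':
  totalUsage.inputTokenDetails?.noCacheTokens,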
@@ -6907,6 +6929,7 @@ var DefaultStreamTextResult = class {
          }
        },
        async flush(controller) {
+          var _a21, _b, _c, _d, _e, _f, _g;
          try {
            if (recordedSteps.length === 0) {
              const error = (abortSignal == null ? void 0 : abortSignal.aborted) ? abortSignal.reason : new NoOutputGeneratedError({
@@ -6970,18 +6993,23 @@
              },
              "ai.response.toolCalls": {
                output: () => {
-                  var _a21;
-                  return ((_a21 = finalStep.toolCalls) == null ? void 0 : _a21.length) ? JSON.stringify(finalStep.toolCalls) : void 0;
+                  var _a22;
+                  return ((_a22 = finalStep.toolCalls) == null ? void 0 : _a22.length) ? JSON.stringify(finalStep.toolCalls) : void 0;
                }
              },
              "ai.response.providerMetadata": JSON.stringify(
                finalStep.providerMetadata
              ),
              "ai.usage.inputTokens": totalUsage.inputTokens,
+              "ai.usage.inputTokenDetails.noCacheTokens": (_a21 = totalUsage.inputTokenDetails) == null ? void 0 : _a21.noCacheTokens,
+              "ai.usage.inputTokenDetails.cacheReadTokens": (_b = totalUsage.inputTokenDetails) == null ? void 0 : _b.cacheReadTokens,
+              "ai.usage.inputTokenDetails.cacheWriteTokens": (_c = totalUsage.inputTokenDetails) == null ? void 0 : _c.cacheWriteTokens,
              "ai.usage.outputTokens": totalUsage.outputTokens,
+              "ai.usage.outputTokenDetails.textTokens": (_d = totalUsage.outputTokenDetails) == null ? void 0 : _d.textTokens,
+              "ai.usage.outputTokenDetails.reasoningTokens": (_e = totalUsage.outputTokenDetails) == null ? void 0 : _e.reasoningTokens,
              "ai.usage.totalTokens": totalUsage.totalTokens,
-              "ai.usage.reasoningTokens": totalUsage.reasoningTokens,
-              "ai.usage.cachedInputTokens": totalUsage.cachedInputTokens
+              "ai.usage.reasoningTokens": (_f = totalUsage.outputTokenDetails) == null ? void 0 : _f.reasoningTokens,
+              "ai.usage.cachedInputTokens": (_g = totalUsage.inputTokenDetails) == null ? void 0 : _g.cacheReadTokens
            }
          })
        );
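The -/+ pair inside the output callback is a pure temp rename: the enclosing flush now hoists `var _a21, ..., _g;`, so the bundler hands the nested callback the next free name, `_a22`. Both versions presumably compile from the same source line (an assumption):

// Presumed source of the toolCalls telemetry output callback:
output: () =>
  finalStep.toolCalls?.length
    ? JSON.stringify(finalStep.toolCalls)
    : undefined,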
@@ -7593,6 +7621,7 @@ var DefaultStreamTextResult = class {
        },
        // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
        async flush(controller) {
+          var _a22, _b2, _c2, _d2, _e2, _f2, _g2;
          const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
          try {
            doStreamSpan.setAttributes(
@@ -7607,10 +7636,15 @@
              "ai.response.model": stepResponse.modelId,
              "ai.response.timestamp": stepResponse.timestamp.toISOString(),
              "ai.usage.inputTokens": stepUsage.inputTokens,
+              "ai.usage.inputTokenDetails.noCacheTokens": (_a22 = stepUsage.inputTokenDetails) == null ? void 0 : _a22.noCacheTokens,
+              "ai.usage.inputTokenDetails.cacheReadTokens": (_b2 = stepUsage.inputTokenDetails) == null ? void 0 : _b2.cacheReadTokens,
+              "ai.usage.inputTokenDetails.cacheWriteTokens": (_c2 = stepUsage.inputTokenDetails) == null ? void 0 : _c2.cacheWriteTokens,
              "ai.usage.outputTokens": stepUsage.outputTokens,
+              "ai.usage.outputTokenDetails.textTokens": (_d2 = stepUsage.outputTokenDetails) == null ? void 0 : _d2.textTokens,
+              "ai.usage.outputTokenDetails.reasoningTokens": (_e2 = stepUsage.outputTokenDetails) == null ? void 0 : _e2.reasoningTokens,
              "ai.usage.totalTokens": stepUsage.totalTokens,
-              "ai.usage.reasoningTokens": stepUsage.reasoningTokens,
-              "ai.usage.cachedInputTokens": stepUsage.cachedInputTokens,
+              "ai.usage.reasoningTokens": (_f2 = stepUsage.outputTokenDetails) == null ? void 0 : _f2.reasoningTokens,
+              "ai.usage.cachedInputTokens": (_g2 = stepUsage.inputTokenDetails) == null ? void 0 : _g2.cacheReadTokens,
              // standardized gen-ai llm span attributes:
              "gen_ai.response.finish_reasons": [
                stepFinishReason
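To actually record any of these attributes, the generateText/streamText call has to opt into telemetry. A minimal sketch, assuming the experimental_telemetry option and gateway-style model strings carried over from earlier ai releases into the 7.0 betas (verify both against the beta docs before relying on them):

import { generateText } from 'ai';

const result = await generateText({
  model: 'openai/gpt-4o', // assumed gateway model id; any provider model works
  prompt: 'Summarize the release notes for 7.0.0-beta.6.',
  experimental_telemetry: { isEnabled: true }, // assumed option name
});
// The span attributes in this diff mirror what totalUsage aggregates.
console.log(result.totalUsage);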