@ai-sdk/xai 2.0.57 → 2.0.58
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.js +10 -10
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +10 -10
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dist/index.js
CHANGED
|
@@ -468,7 +468,7 @@ var XaiChatLanguageModel = class {
|
|
|
468
468
|
};
|
|
469
469
|
}
|
|
470
470
|
async doGenerate(options) {
|
|
471
|
-
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
|
|
471
|
+
var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
|
|
472
472
|
const { args: body, warnings } = await this.getArgs(options);
|
|
473
473
|
const url = `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`;
|
|
474
474
|
const {
|
|
@@ -550,10 +550,10 @@ var XaiChatLanguageModel = class {
|
|
|
550
550
|
finishReason: mapXaiFinishReason(choice.finish_reason),
|
|
551
551
|
usage: {
|
|
552
552
|
inputTokens: (_b = response.usage) == null ? void 0 : _b.prompt_tokens,
|
|
553
|
-
outputTokens: (_c = response.usage) == null ? void 0 : _c.completion_tokens,
|
|
554
|
-
totalTokens: (_d = response.usage) == null ? void 0 : _d.total_tokens,
|
|
555
|
-
reasoningTokens: (_g = (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
|
|
556
|
-
cachedInputTokens: (_j = (_i = (_h = response.usage) == null ? void 0 : _h.prompt_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0
|
|
553
|
+
outputTokens: ((_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : 0) + ((_g = (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : 0),
|
|
554
|
+
totalTokens: (_h = response.usage) == null ? void 0 : _h.total_tokens,
|
|
555
|
+
reasoningTokens: (_k = (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0,
|
|
556
|
+
cachedInputTokens: (_n = (_m = (_l = response.usage) == null ? void 0 : _l.prompt_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
|
|
557
557
|
},
|
|
558
558
|
request: { body },
|
|
559
559
|
response: {
|
|
@@ -638,7 +638,7 @@ var XaiChatLanguageModel = class {
|
|
|
638
638
|
controller.enqueue({ type: "stream-start", warnings });
|
|
639
639
|
},
|
|
640
640
|
transform(chunk, controller) {
|
|
641
|
-
var _a2, _b, _c, _d;
|
|
641
|
+
var _a2, _b, _c, _d, _e, _f, _g;
|
|
642
642
|
if (options.includeRawChunks) {
|
|
643
643
|
controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
|
|
644
644
|
}
|
|
@@ -666,10 +666,10 @@ var XaiChatLanguageModel = class {
|
|
|
666
666
|
}
|
|
667
667
|
if (value.usage != null) {
|
|
668
668
|
usage.inputTokens = value.usage.prompt_tokens;
|
|
669
|
-
usage.outputTokens = value.usage.completion_tokens;
|
|
669
|
+
usage.outputTokens = ((_a2 = value.usage.completion_tokens) != null ? _a2 : 0) + ((_c = (_b = value.usage.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : 0);
|
|
670
670
|
usage.totalTokens = value.usage.total_tokens;
|
|
671
|
-
usage.reasoningTokens = (_b = (_a2 = value.usage.completion_tokens_details) == null ? void 0 : _a2.reasoning_tokens) != null ? _b : void 0;
|
|
672
|
-
usage.cachedInputTokens = (_d = (_c = value.usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : void 0;
|
|
671
|
+
usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
|
|
672
|
+
usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
|
|
673
673
|
}
|
|
674
674
|
const choice = value.choices[0];
|
|
675
675
|
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
@@ -2237,7 +2237,7 @@ var xaiTools = {
|
|
|
2237
2237
|
};
|
|
2238
2238
|
|
|
2239
2239
|
// src/version.ts
|
|
2240
|
-
var VERSION = true ? "2.0.57" : "0.0.0-test";
|
|
2240
|
+
var VERSION = true ? "2.0.58" : "0.0.0-test";
|
|
2241
2241
|
|
|
2242
2242
|
// src/xai-provider.ts
|
|
2243
2243
|
var xaiErrorStructure = {
|