@ai-sdk/xai 2.0.60 → 2.0.62
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.js +69 -38
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +69 -38
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
|
@@ -27,6 +27,30 @@ import {
|
|
|
27
27
|
} from "@ai-sdk/provider-utils";
|
|
28
28
|
import { z as z3 } from "zod/v4";
|
|
29
29
|
|
|
30
|
+
// src/convert-xai-chat-usage.ts
/**
 * Normalize the raw usage payload of an xAI chat-completions response
 * (snake_case fields) into the SDK's camelCase usage shape.
 *
 * @param {object|null|undefined} usage - raw `usage` object from the API;
 *   may be null/undefined when the response carried no usage data.
 * @returns {object} `{ inputTokens, outputTokens, totalTokens,
 *   reasoningTokens, cachedInputTokens }` — fields are `undefined` when
 *   unknown or zero.
 */
function convertXaiChatUsage(usage) {
  // No usage reported at all: every field is explicitly undefined.
  if (usage == null) {
    return {
      inputTokens: undefined,
      outputTokens: undefined,
      totalTokens: undefined,
      reasoningTokens: undefined,
      cachedInputTokens: undefined,
    };
  }

  const cacheReadTokens = usage.prompt_tokens_details?.cached_tokens ?? 0;
  const reasoningTokens = usage.completion_tokens_details?.reasoning_tokens ?? 0;

  // NOTE(review): heuristic — presumably prompt_tokens already includes the
  // cached tokens whenever it is at least as large as the cached count;
  // otherwise the cached tokens are added on top. Confirm against xAI docs.
  const promptTokensIncludesCached = cacheReadTokens <= usage.prompt_tokens;

  return {
    inputTokens: promptTokensIncludesCached
      ? usage.prompt_tokens
      : usage.prompt_tokens + cacheReadTokens,
    // Reasoning tokens are reported separately but counted as output here.
    outputTokens: usage.completion_tokens + reasoningTokens,
    totalTokens: usage.total_tokens,
    // A zero count collapses to undefined (0 is falsy) = "not reported".
    reasoningTokens: reasoningTokens || undefined,
    cachedInputTokens: cacheReadTokens || undefined,
  };
}
|
|
53
|
+
|
|
30
54
|
// src/convert-to-xai-chat-messages.ts
|
|
31
55
|
import {
|
|
32
56
|
UnsupportedFunctionalityError
|
|
@@ -457,7 +481,7 @@ var XaiChatLanguageModel = class {
|
|
|
457
481
|
};
|
|
458
482
|
}
|
|
459
483
|
async doGenerate(options) {
|
|
460
|
-
var _a
|
|
484
|
+
var _a;
|
|
461
485
|
const { args: body, warnings } = await this.getArgs(options);
|
|
462
486
|
const url = `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`;
|
|
463
487
|
const {
|
|
@@ -537,13 +561,7 @@ var XaiChatLanguageModel = class {
|
|
|
537
561
|
return {
|
|
538
562
|
content,
|
|
539
563
|
finishReason: mapXaiFinishReason(choice.finish_reason),
|
|
540
|
-
usage:
|
|
541
|
-
inputTokens: (_b = response.usage) == null ? void 0 : _b.prompt_tokens,
|
|
542
|
-
outputTokens: ((_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens) != null ? _d : 0) + ((_g = (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : 0),
|
|
543
|
-
totalTokens: (_h = response.usage) == null ? void 0 : _h.total_tokens,
|
|
544
|
-
reasoningTokens: (_k = (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0,
|
|
545
|
-
cachedInputTokens: (_n = (_m = (_l = response.usage) == null ? void 0 : _l.prompt_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
|
|
546
|
-
},
|
|
564
|
+
usage: convertXaiChatUsage(response.usage),
|
|
547
565
|
request: { body },
|
|
548
566
|
response: {
|
|
549
567
|
...getResponseMetadata(response),
|
|
@@ -627,7 +645,6 @@ var XaiChatLanguageModel = class {
|
|
|
627
645
|
controller.enqueue({ type: "stream-start", warnings });
|
|
628
646
|
},
|
|
629
647
|
transform(chunk, controller) {
|
|
630
|
-
var _a2, _b, _c, _d, _e, _f, _g;
|
|
631
648
|
if (options.includeRawChunks) {
|
|
632
649
|
controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
|
|
633
650
|
}
|
|
@@ -654,11 +671,12 @@ var XaiChatLanguageModel = class {
|
|
|
654
671
|
}
|
|
655
672
|
}
|
|
656
673
|
if (value.usage != null) {
|
|
657
|
-
|
|
658
|
-
usage.
|
|
659
|
-
usage.
|
|
660
|
-
usage.
|
|
661
|
-
usage.
|
|
674
|
+
const converted = convertXaiChatUsage(value.usage);
|
|
675
|
+
usage.inputTokens = converted.inputTokens;
|
|
676
|
+
usage.outputTokens = converted.outputTokens;
|
|
677
|
+
usage.totalTokens = converted.totalTokens;
|
|
678
|
+
usage.reasoningTokens = converted.reasoningTokens;
|
|
679
|
+
usage.cachedInputTokens = converted.cachedInputTokens;
|
|
662
680
|
}
|
|
663
681
|
const choice = value.choices[0];
|
|
664
682
|
if ((choice == null ? void 0 : choice.finish_reason) != null) {
|
|
@@ -859,6 +877,28 @@ import {
|
|
|
859
877
|
postJsonToApi as postJsonToApi2
|
|
860
878
|
} from "@ai-sdk/provider-utils";
|
|
861
879
|
|
|
880
|
+
// src/responses/convert-xai-responses-usage.ts
/**
 * Normalize the raw usage payload of an xAI Responses-API result
 * (snake_case fields) into the SDK's camelCase usage shape.
 *
 * @param {object|null|undefined} usage - raw `usage` object from the API;
 *   may be null/undefined when the response carried no usage data.
 * @returns {object} usage with `inputTokens`, `outputTokens`,
 *   `totalTokens` (always present) plus `reasoningTokens` and
 *   `cachedInputTokens` (undefined when unknown or zero).
 */
function convertXaiResponsesUsage(usage) {
  if (usage == null) {
    // Missing usage zeroes the three core counters; unlike the chat path,
    // this branch deliberately omits the reasoning/cached keys.
    return { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
  }

  const cacheReadTokens = usage.input_tokens_details?.cached_tokens ?? 0;
  const reasoningTokens = usage.output_tokens_details?.reasoning_tokens ?? 0;

  // NOTE(review): heuristic — presumably input_tokens already includes the
  // cached tokens whenever it is at least as large as the cached count;
  // otherwise the cached tokens are added on top. Confirm against xAI docs.
  const inputTokensIncludesCached = cacheReadTokens <= usage.input_tokens;

  return {
    inputTokens: inputTokensIncludesCached
      ? usage.input_tokens
      : usage.input_tokens + cacheReadTokens,
    outputTokens: usage.output_tokens,
    totalTokens: usage.total_tokens,
    // A zero count collapses to undefined (0 is falsy) = "not reported".
    reasoningTokens: reasoningTokens || undefined,
    cachedInputTokens: cacheReadTokens || undefined,
  };
}
|
|
901
|
+
|
|
862
902
|
// src/responses/xai-responses-api.ts
|
|
863
903
|
import { z as z4 } from "zod/v4";
|
|
864
904
|
var annotationSchema = z4.union([
|
|
@@ -1724,7 +1764,7 @@ var XaiResponsesLanguageModel = class {
|
|
|
1724
1764
|
};
|
|
1725
1765
|
}
|
|
1726
1766
|
async doGenerate(options) {
|
|
1727
|
-
var _a, _b, _c, _d, _e, _f, _g
|
|
1767
|
+
var _a, _b, _c, _d, _e, _f, _g;
|
|
1728
1768
|
const {
|
|
1729
1769
|
args: body,
|
|
1730
1770
|
warnings,
|
|
@@ -1847,17 +1887,7 @@ var XaiResponsesLanguageModel = class {
|
|
|
1847
1887
|
return {
|
|
1848
1888
|
content,
|
|
1849
1889
|
finishReason: mapXaiResponsesFinishReason(response.status),
|
|
1850
|
-
usage: response.usage
|
|
1851
|
-
inputTokens: response.usage.input_tokens,
|
|
1852
|
-
outputTokens: response.usage.output_tokens,
|
|
1853
|
-
totalTokens: response.usage.total_tokens,
|
|
1854
|
-
reasoningTokens: (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens,
|
|
1855
|
-
cachedInputTokens: (_i = response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens
|
|
1856
|
-
} : {
|
|
1857
|
-
inputTokens: 0,
|
|
1858
|
-
outputTokens: 0,
|
|
1859
|
-
totalTokens: 0
|
|
1860
|
-
},
|
|
1890
|
+
usage: convertXaiResponsesUsage(response.usage),
|
|
1861
1891
|
request: { body },
|
|
1862
1892
|
response: {
|
|
1863
1893
|
...getResponseMetadata(response),
|
|
@@ -1910,7 +1940,7 @@ var XaiResponsesLanguageModel = class {
|
|
|
1910
1940
|
controller.enqueue({ type: "stream-start", warnings });
|
|
1911
1941
|
},
|
|
1912
1942
|
transform(chunk, controller) {
|
|
1913
|
-
var _a2, _b, _c, _d, _e, _f, _g, _h
|
|
1943
|
+
var _a2, _b, _c, _d, _e, _f, _g, _h;
|
|
1914
1944
|
if (options.includeRawChunks) {
|
|
1915
1945
|
controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
|
|
1916
1946
|
}
|
|
@@ -2035,11 +2065,12 @@ var XaiResponsesLanguageModel = class {
|
|
|
2035
2065
|
if (event.type === "response.done" || event.type === "response.completed") {
|
|
2036
2066
|
const response2 = event.response;
|
|
2037
2067
|
if (response2.usage) {
|
|
2038
|
-
|
|
2039
|
-
usage.
|
|
2040
|
-
usage.outputTokens =
|
|
2041
|
-
usage.totalTokens =
|
|
2042
|
-
usage.reasoningTokens =
|
|
2068
|
+
const converted = convertXaiResponsesUsage(response2.usage);
|
|
2069
|
+
usage.inputTokens = converted.inputTokens;
|
|
2070
|
+
usage.outputTokens = converted.outputTokens;
|
|
2071
|
+
usage.totalTokens = converted.totalTokens;
|
|
2072
|
+
usage.reasoningTokens = converted.reasoningTokens;
|
|
2073
|
+
usage.cachedInputTokens = converted.cachedInputTokens;
|
|
2043
2074
|
}
|
|
2044
2075
|
if (response2.status) {
|
|
2045
2076
|
finishReason = mapXaiResponsesFinishReason(response2.status);
|
|
@@ -2103,15 +2134,15 @@ var XaiResponsesLanguageModel = class {
|
|
|
2103
2134
|
"x_semantic_search",
|
|
2104
2135
|
"x_thread_fetch"
|
|
2105
2136
|
];
|
|
2106
|
-
let toolName = (
|
|
2107
|
-
if (webSearchSubTools.includes((
|
|
2137
|
+
let toolName = (_c = part.name) != null ? _c : "";
|
|
2138
|
+
if (webSearchSubTools.includes((_d = part.name) != null ? _d : "") || part.type === "web_search_call") {
|
|
2108
2139
|
toolName = webSearchToolName != null ? webSearchToolName : "web_search";
|
|
2109
|
-
} else if (xSearchSubTools.includes((
|
|
2140
|
+
} else if (xSearchSubTools.includes((_e = part.name) != null ? _e : "") || part.type === "x_search_call") {
|
|
2110
2141
|
toolName = xSearchToolName != null ? xSearchToolName : "x_search";
|
|
2111
2142
|
} else if (part.name === "code_execution" || part.type === "code_interpreter_call" || part.type === "code_execution_call") {
|
|
2112
2143
|
toolName = codeExecutionToolName != null ? codeExecutionToolName : "code_execution";
|
|
2113
2144
|
}
|
|
2114
|
-
const toolInput = part.type === "custom_tool_call" ? (
|
|
2145
|
+
const toolInput = part.type === "custom_tool_call" ? (_f = part.input) != null ? _f : "" : (_g = part.arguments) != null ? _g : "";
|
|
2115
2146
|
const shouldEmit = part.type === "custom_tool_call" ? event.type === "response.output_item.done" : !seenToolCalls.has(part.id);
|
|
2116
2147
|
if (shouldEmit && !seenToolCalls.has(part.id)) {
|
|
2117
2148
|
seenToolCalls.add(part.id);
|
|
@@ -2164,7 +2195,7 @@ var XaiResponsesLanguageModel = class {
|
|
|
2164
2195
|
sourceType: "url",
|
|
2165
2196
|
id: self.config.generateId(),
|
|
2166
2197
|
url: annotation.url,
|
|
2167
|
-
title: (
|
|
2198
|
+
title: (_h = annotation.title) != null ? _h : annotation.url
|
|
2168
2199
|
});
|
|
2169
2200
|
}
|
|
2170
2201
|
}
|
|
@@ -2272,7 +2303,7 @@ var xaiTools = {
|
|
|
2272
2303
|
};
|
|
2273
2304
|
|
|
2274
2305
|
// src/version.ts
|
|
2275
|
-
var VERSION = true ? "2.0.
|
|
2306
|
+
var VERSION = true ? "2.0.62" : "0.0.0-test";
|
|
2276
2307
|
|
|
2277
2308
|
// src/xai-provider.ts
|
|
2278
2309
|
var xaiErrorStructure = {
|