@ai-sdk/xai 3.0.0-beta.37 → 3.0.0-beta.39

This diff shows the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the changes between package versions as published in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
1
1
  # @ai-sdk/xai
2
2
 
3
+ ## 3.0.0-beta.39
4
+
5
+ ### Patch Changes
6
+
7
+ - 4b4c37b: fix(xai): add cache input tokens
8
+
9
+ ## 3.0.0-beta.38
10
+
11
+ ### Patch Changes
12
+
13
+ - 8a2c18e: fix(provider/xai): remove json schema unsupported warning
14
+
3
15
  ## 3.0.0-beta.37
4
16
 
5
17
  ### Patch Changes
package/dist/index.js CHANGED
@@ -389,13 +389,6 @@ var XaiChatLanguageModel = class {
389
389
  setting: "stopSequences"
390
390
  });
391
391
  }
392
- if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) {
393
- warnings.push({
394
- type: "unsupported-setting",
395
- setting: "responseFormat",
396
- details: "JSON response format schema is not supported"
397
- });
398
- }
399
392
  const { messages, warnings: messageWarnings } = convertToXaiChatMessages(prompt);
400
393
  warnings.push(...messageWarnings);
401
394
  const {
@@ -473,7 +466,7 @@ var XaiChatLanguageModel = class {
473
466
  };
474
467
  }
475
468
  async doGenerate(options) {
476
- var _a, _b, _c;
469
+ var _a, _b, _c, _d, _e;
477
470
  const { args: body, warnings } = await this.getArgs(options);
478
471
  const {
479
472
  responseHeaders,
@@ -535,7 +528,8 @@ var XaiChatLanguageModel = class {
535
528
  inputTokens: response.usage.prompt_tokens,
536
529
  outputTokens: response.usage.completion_tokens,
537
530
  totalTokens: response.usage.total_tokens,
538
- reasoningTokens: (_c = (_b = response.usage.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0
531
+ reasoningTokens: (_c = (_b = response.usage.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0,
532
+ cachedInputTokens: (_e = (_d = response.usage.prompt_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0
539
533
  },
540
534
  request: { body },
541
535
  response: {
@@ -569,7 +563,9 @@ var XaiChatLanguageModel = class {
569
563
  const usage = {
570
564
  inputTokens: void 0,
571
565
  outputTokens: void 0,
572
- totalTokens: void 0
566
+ totalTokens: void 0,
567
+ reasoningTokens: void 0,
568
+ cachedInputTokens: void 0
573
569
  };
574
570
  let isFirstChunk = true;
575
571
  const contentBlocks = {};
@@ -582,7 +578,7 @@ var XaiChatLanguageModel = class {
582
578
  controller.enqueue({ type: "stream-start", warnings });
583
579
  },
584
580
  transform(chunk, controller) {
585
- var _a2, _b;
581
+ var _a2, _b, _c, _d;
586
582
  if (options.includeRawChunks) {
587
583
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
588
584
  }
@@ -613,6 +609,7 @@ var XaiChatLanguageModel = class {
613
609
  usage.outputTokens = value.usage.completion_tokens;
614
610
  usage.totalTokens = value.usage.total_tokens;
615
611
  usage.reasoningTokens = (_b = (_a2 = value.usage.completion_tokens_details) == null ? void 0 : _a2.reasoning_tokens) != null ? _b : void 0;
612
+ usage.cachedInputTokens = (_d = (_c = value.usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : void 0;
616
613
  }
617
614
  const choice = value.choices[0];
618
615
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -708,8 +705,17 @@ var xaiUsageSchema = import_v43.z.object({
708
705
  prompt_tokens: import_v43.z.number(),
709
706
  completion_tokens: import_v43.z.number(),
710
707
  total_tokens: import_v43.z.number(),
708
+ prompt_tokens_details: import_v43.z.object({
709
+ text_tokens: import_v43.z.number().nullish(),
710
+ audio_tokens: import_v43.z.number().nullish(),
711
+ image_tokens: import_v43.z.number().nullish(),
712
+ cached_tokens: import_v43.z.number().nullish()
713
+ }).nullish(),
711
714
  completion_tokens_details: import_v43.z.object({
712
- reasoning_tokens: import_v43.z.number().nullish()
715
+ reasoning_tokens: import_v43.z.number().nullish(),
716
+ audio_tokens: import_v43.z.number().nullish(),
717
+ accepted_prediction_tokens: import_v43.z.number().nullish(),
718
+ rejected_prediction_tokens: import_v43.z.number().nullish()
713
719
  }).nullish()
714
720
  });
715
721
  var xaiChatResponseSchema = import_v43.z.object({
@@ -1840,7 +1846,7 @@ var xaiTools = {
1840
1846
  };
1841
1847
 
1842
1848
  // src/version.ts
1843
- var VERSION = true ? "3.0.0-beta.37" : "0.0.0-test";
1849
+ var VERSION = true ? "3.0.0-beta.39" : "0.0.0-test";
1844
1850
 
1845
1851
  // src/xai-provider.ts
1846
1852
  var xaiErrorStructure = {