@ai-sdk/xai 2.0.48 → 2.0.50

This diff compares publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/xai

+ ## 2.0.50
+
+ ### Patch Changes
+
+ - 13a913c: fix(provider/xai): send reasoning-end before text-start in streaming
+
+ ## 2.0.49
+
+ ### Patch Changes
+
+ - 0c419f8: fix(provider/xai): handle error responses returned with 200 status
+
  ## 2.0.48

  ### Patch Changes
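The 2.0.49 entry above addresses error payloads that the xAI API can return with an HTTP 200 status: the non-streaming success path now inspects the parsed body and throws an `APICallError` when it carries an `error` field (marking `"The service is currently unavailable"` as retryable) or no `choices`. A minimal standalone sketch of that check follows; the helper name and the trimmed response type are illustrative, not the package's internal API.

```ts
// Sketch of the 2.0.49 pattern: an xAI error payload can arrive with HTTP 200,
// so the success path inspects the parsed body itself. The response shape is
// reduced to the fields the check needs; assertSuccessfulXaiResponse is a
// hypothetical helper, not part of @ai-sdk/xai.
import { APICallError } from "@ai-sdk/provider";

interface XaiChatResponse {
  error?: string | null;
  code?: string | null;
  choices?: unknown[] | null;
}

function assertSuccessfulXaiResponse(
  response: XaiChatResponse,
  ctx: {
    url: string;
    requestBodyValues: Record<string, unknown>;
    responseHeaders?: Record<string, string>;
  },
): void {
  if (response.error != null) {
    throw new APICallError({
      message: response.error,
      url: ctx.url,
      requestBodyValues: ctx.requestBodyValues,
      statusCode: 200, // the HTTP layer reported success
      responseHeaders: ctx.responseHeaders,
      responseBody: JSON.stringify(response),
      // xAI signals transient failures via this code string
      isRetryable: response.code === "The service is currently unavailable",
    });
  }
  if (!response.choices || response.choices.length === 0) {
    throw new APICallError({
      message: "No choices returned from the API",
      url: ctx.url,
      requestBodyValues: ctx.requestBodyValues,
      statusCode: 200,
      responseHeaders: ctx.responseHeaders,
      responseBody: JSON.stringify(response),
    });
  }
}
```

The streaming path in `doStream` applies the same idea: when the SSE endpoint answers with a JSON body, it is parsed against `xaiStreamErrorSchema` and surfaced as an `APICallError` before the event-source handler takes over (see the `successfulResponseHandler` hunk below).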
package/dist/index.js CHANGED
@@ -34,10 +34,11 @@ module.exports = __toCommonJS(src_exports);

  // src/xai-provider.ts
  var import_openai_compatible = require("@ai-sdk/openai-compatible");
- var import_provider4 = require("@ai-sdk/provider");
+ var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils11 = require("@ai-sdk/provider-utils");

  // src/xai-chat-language-model.ts
+ var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
  var import_v43 = require("zod/v4");

@@ -467,14 +468,15 @@ var XaiChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  const { args: body, warnings } = await this.getArgs(options);
+ const url = `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`;
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
  } = await (0, import_provider_utils3.postJsonToApi)({
- url: `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`,
+ url,
  headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: xaiFailedResponseHandler,
@@ -484,6 +486,27 @@ var XaiChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error != null) {
+ throw new import_provider3.APICallError({
+ message: response.error,
+ url,
+ requestBodyValues: body,
+ statusCode: 200,
+ responseHeaders,
+ responseBody: JSON.stringify(rawResponse),
+ isRetryable: response.code === "The service is currently unavailable"
+ });
+ }
+ if (!response.choices || response.choices.length === 0) {
+ throw new import_provider3.APICallError({
+ message: "No choices returned from the API",
+ url,
+ requestBodyValues: body,
+ statusCode: 200,
+ responseHeaders,
+ responseBody: JSON.stringify(rawResponse)
+ });
+ }
  const choice = response.choices[0];
  const content = [];
  if (choice.message.content != null && choice.message.content.length > 0) {
@@ -513,12 +536,12 @@ var XaiChatLanguageModel = class {
  }
  }
  if (response.citations != null) {
- for (const url of response.citations) {
+ for (const url2 of response.citations) {
  content.push({
  type: "source",
  sourceType: "url",
  id: this.config.generateId(),
- url
+ url: url2
  });
  }
  }
@@ -526,11 +549,11 @@ var XaiChatLanguageModel = class {
  content,
  finishReason: mapXaiFinishReason(choice.finish_reason),
  usage: {
- inputTokens: response.usage.prompt_tokens,
- outputTokens: response.usage.completion_tokens,
- totalTokens: response.usage.total_tokens,
- reasoningTokens: (_c = (_b = response.usage.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0,
- cachedInputTokens: (_e = (_d = response.usage.prompt_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0
+ inputTokens: (_b = response.usage) == null ? void 0 : _b.prompt_tokens,
+ outputTokens: (_c = response.usage) == null ? void 0 : _c.completion_tokens,
+ totalTokens: (_d = response.usage) == null ? void 0 : _d.total_tokens,
+ reasoningTokens: (_g = (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+ cachedInputTokens: (_j = (_i = (_h = response.usage) == null ? void 0 : _h.prompt_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0
  },
  request: { body },
  response: {
@@ -551,12 +574,47 @@ var XaiChatLanguageModel = class {
  include_usage: true
  }
  };
+ const url = `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`;
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
- url: `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`,
+ url,
  headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: xaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(xaiChatChunkSchema),
+ successfulResponseHandler: async ({ response: response2 }) => {
+ const responseHeaders2 = (0, import_provider_utils3.extractResponseHeaders)(response2);
+ const contentType = response2.headers.get("content-type");
+ if (contentType == null ? void 0 : contentType.includes("application/json")) {
+ const responseBody = await response2.text();
+ const parsedError = await (0, import_provider_utils3.safeParseJSON)({
+ text: responseBody,
+ schema: xaiStreamErrorSchema
+ });
+ if (parsedError.success) {
+ throw new import_provider3.APICallError({
+ message: parsedError.value.error,
+ url,
+ requestBodyValues: body,
+ statusCode: 200,
+ responseHeaders: responseHeaders2,
+ responseBody,
+ isRetryable: parsedError.value.code === "The service is currently unavailable"
+ });
+ }
+ throw new import_provider3.APICallError({
+ message: "Invalid JSON response",
+ url,
+ requestBodyValues: body,
+ statusCode: 200,
+ responseHeaders: responseHeaders2,
+ responseBody
+ });
+ }
+ return (0, import_provider_utils3.createEventSourceResponseHandler)(xaiChatChunkSchema)({
+ response: response2,
+ url,
+ requestBodyValues: body
+ });
+ },
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
@@ -571,6 +629,7 @@ var XaiChatLanguageModel = class {
  let isFirstChunk = true;
  const contentBlocks = {};
  const lastReasoningDeltas = {};
+ let activeReasoningBlockId = void 0;
  const self = this;
  return {
  stream: response.pipeThrough(
@@ -596,12 +655,12 @@ var XaiChatLanguageModel = class {
  isFirstChunk = false;
  }
  if (value.citations != null) {
- for (const url of value.citations) {
+ for (const url2 of value.citations) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
  id: self.config.generateId(),
- url
+ url: url2
  });
  }
  }
@@ -623,13 +682,21 @@ var XaiChatLanguageModel = class {
  const choiceIndex = choice.index;
  if (delta.content != null && delta.content.length > 0) {
  const textContent = delta.content;
+ if (activeReasoningBlockId != null && !contentBlocks[activeReasoningBlockId].ended) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: activeReasoningBlockId
+ });
+ contentBlocks[activeReasoningBlockId].ended = true;
+ activeReasoningBlockId = void 0;
+ }
  const lastMessage = body.messages[body.messages.length - 1];
  if ((lastMessage == null ? void 0 : lastMessage.role) === "assistant" && textContent === lastMessage.content) {
  return;
  }
  const blockId = `text-${value.id || choiceIndex}`;
  if (contentBlocks[blockId] == null) {
- contentBlocks[blockId] = { type: "text" };
+ contentBlocks[blockId] = { type: "text", ended: false };
  controller.enqueue({
  type: "text-start",
  id: blockId
@@ -648,7 +715,8 @@ var XaiChatLanguageModel = class {
  }
  lastReasoningDeltas[blockId] = delta.reasoning_content;
  if (contentBlocks[blockId] == null) {
- contentBlocks[blockId] = { type: "reasoning" };
+ contentBlocks[blockId] = { type: "reasoning", ended: false };
+ activeReasoningBlockId = blockId;
  controller.enqueue({
  type: "reasoning-start",
  id: blockId
@@ -661,6 +729,14 @@ var XaiChatLanguageModel = class {
  });
  }
  if (delta.tool_calls != null) {
+ if (activeReasoningBlockId != null && !contentBlocks[activeReasoningBlockId].ended) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: activeReasoningBlockId
+ });
+ contentBlocks[activeReasoningBlockId].ended = true;
+ activeReasoningBlockId = void 0;
+ }
  for (const toolCall of delta.tool_calls) {
  const toolCallId = toolCall.id;
  controller.enqueue({
@@ -688,10 +764,12 @@ var XaiChatLanguageModel = class {
  },
  flush(controller) {
  for (const [blockId, block] of Object.entries(contentBlocks)) {
- controller.enqueue({
- type: block.type === "text" ? "text-end" : "reasoning-end",
- id: blockId
- });
+ if (!block.ended) {
+ controller.enqueue({
+ type: block.type === "text" ? "text-end" : "reasoning-end",
+ id: blockId
+ });
+ }
  }
  controller.enqueue({ type: "finish", finishReason, usage });
  }
@@ -743,10 +821,12 @@ var xaiChatResponseSchema = import_v43.z.object({
  index: import_v43.z.number(),
  finish_reason: import_v43.z.string().nullish()
  })
- ),
- object: import_v43.z.literal("chat.completion"),
- usage: xaiUsageSchema,
- citations: import_v43.z.array(import_v43.z.string().url()).nullish()
+ ).nullish(),
+ object: import_v43.z.literal("chat.completion").nullish(),
+ usage: xaiUsageSchema.nullish(),
+ citations: import_v43.z.array(import_v43.z.string().url()).nullish(),
+ code: import_v43.z.string().nullish(),
+ error: import_v43.z.string().nullish()
  });
  var xaiChatChunkSchema = import_v43.z.object({
  id: import_v43.z.string().nullish(),
@@ -776,6 +856,10 @@ var xaiChatChunkSchema = import_v43.z.object({
  usage: xaiUsageSchema.nullish(),
  citations: import_v43.z.array(import_v43.z.string().url()).nullish()
  });
+ var xaiStreamErrorSchema = import_v43.z.object({
+ code: import_v43.z.string(),
+ error: import_v43.z.string()
+ });

  // src/responses/xai-responses-language-model.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
@@ -1246,7 +1330,7 @@ async function convertToXaiResponsesInput({
  }

  // src/responses/xai-responses-prepare-tools.ts
- var import_provider3 = require("@ai-sdk/provider");
+ var import_provider4 = require("@ai-sdk/provider");
  var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // src/tool/web-search.ts
@@ -1486,7 +1570,7 @@ async function prepareResponsesTools({
  }
  default: {
  const _exhaustiveCheck = type;
- throw new import_provider3.UnsupportedFunctionalityError({
+ throw new import_provider4.UnsupportedFunctionalityError({
  functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
@@ -2024,7 +2108,7 @@ var xaiTools = {
  };

  // src/version.ts
- var VERSION = true ? "2.0.48" : "0.0.0-test";
+ var VERSION = true ? "2.0.50" : "0.0.0-test";

  // src/xai-provider.ts
  var xaiErrorStructure = {
@@ -2080,7 +2164,7 @@ function createXai(options = {}) {
  provider.chat = createChatLanguageModel;
  provider.responses = createResponsesLanguageModel;
  provider.textEmbeddingModel = (modelId) => {
- throw new import_provider5.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider5.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  };
  provider.imageModel = createImageModel;
  provider.image = createImageModel;
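The 2.0.50 hunks above change stream-part ordering: the transform now remembers the open reasoning block and emits `reasoning-end` as soon as text content or a tool call arrives, instead of deferring every block-end to `flush`. A simplified sketch of that bookkeeping follows, using stand-in part and block shapes rather than the provider's internal types; `createBlockTracker` is an illustrative helper, not part of the package.

```ts
// Sketch of the 2.0.50 ordering fix, outside the provider's real TransformStream:
// the open reasoning block is closed as soon as text or a tool call begins,
// rather than only in flush(). Shapes are simplified stand-ins.
type StreamPart =
  | { type: "reasoning-start" | "reasoning-end" | "text-start" | "text-end"; id: string }
  | { type: "reasoning-delta" | "text-delta"; id: string; delta: string };

function createBlockTracker(emit: (part: StreamPart) => void) {
  const blocks: Record<string, { type: "text" | "reasoning"; ended: boolean }> = {};
  let activeReasoningBlockId: string | undefined;

  const endActiveReasoning = () => {
    if (activeReasoningBlockId != null && !blocks[activeReasoningBlockId].ended) {
      emit({ type: "reasoning-end", id: activeReasoningBlockId });
      blocks[activeReasoningBlockId].ended = true;
      activeReasoningBlockId = undefined;
    }
  };

  return {
    onReasoningDelta(id: string, delta: string) {
      if (blocks[id] == null) {
        blocks[id] = { type: "reasoning", ended: false };
        activeReasoningBlockId = id;
        emit({ type: "reasoning-start", id });
      }
      emit({ type: "reasoning-delta", id, delta });
    },
    onTextDelta(id: string, delta: string) {
      // reasoning-end is now guaranteed to precede text-start
      endActiveReasoning();
      if (blocks[id] == null) {
        blocks[id] = { type: "text", ended: false };
        emit({ type: "text-start", id });
      }
      emit({ type: "text-delta", id, delta });
    },
    onToolCall() {
      // the reasoning block is also closed before tool-call parts
      endActiveReasoning();
    },
    flush() {
      // any block that was never explicitly ended is still closed here, as before
      for (const [id, block] of Object.entries(blocks)) {
        if (!block.ended) {
          emit({ type: block.type === "text" ? "text-end" : "reasoning-end", id });
        }
      }
    },
  };
}
```

With this bookkeeping, `reasoning-end` precedes the first `text-start` and any tool-call parts in the emitted stream, while `flush` continues to close whatever blocks remain open at the end.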