@providerprotocol/ai 0.0.20 → 0.0.22

This diff shows the content of publicly released package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (54)
  1. package/dist/anthropic/index.d.ts +184 -14
  2. package/dist/anthropic/index.js +306 -107
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-P5IRTEM5.js → chunk-7WYBJPJJ.js} +2 -2
  5. package/dist/chunk-I2VHCGQE.js +49 -0
  6. package/dist/chunk-I2VHCGQE.js.map +1 -0
  7. package/dist/{chunk-UMKWXGO3.js → chunk-M4BMM5IB.js} +86 -2
  8. package/dist/chunk-M4BMM5IB.js.map +1 -0
  9. package/dist/{chunk-SKY2JLA7.js → chunk-MKDLXV4O.js} +1 -1
  10. package/dist/chunk-MKDLXV4O.js.map +1 -0
  11. package/dist/{chunk-Z7RBRCRN.js → chunk-NWS5IKNR.js} +37 -11
  12. package/dist/chunk-NWS5IKNR.js.map +1 -0
  13. package/dist/{chunk-U3FZWV4U.js → chunk-RFWLEFAB.js} +100 -43
  14. package/dist/chunk-RFWLEFAB.js.map +1 -0
  15. package/dist/{chunk-U4JJC2YX.js → chunk-RS7C25LS.js} +36 -11
  16. package/dist/chunk-RS7C25LS.js.map +1 -0
  17. package/dist/google/index.d.ts +35 -24
  18. package/dist/google/index.js +273 -99
  19. package/dist/google/index.js.map +1 -1
  20. package/dist/http/index.d.ts +3 -3
  21. package/dist/http/index.js +4 -4
  22. package/dist/index.d.ts +103 -38
  23. package/dist/index.js +346 -153
  24. package/dist/index.js.map +1 -1
  25. package/dist/ollama/index.d.ts +14 -16
  26. package/dist/ollama/index.js +68 -16
  27. package/dist/ollama/index.js.map +1 -1
  28. package/dist/openai/index.d.ts +25 -133
  29. package/dist/openai/index.js +208 -122
  30. package/dist/openai/index.js.map +1 -1
  31. package/dist/openrouter/index.d.ts +28 -53
  32. package/dist/openrouter/index.js +179 -72
  33. package/dist/openrouter/index.js.map +1 -1
  34. package/dist/provider-DWEAzeM5.d.ts +1329 -0
  35. package/dist/proxy/index.d.ts +2 -3
  36. package/dist/proxy/index.js +174 -17
  37. package/dist/proxy/index.js.map +1 -1
  38. package/dist/{retry-DR7YRJDz.d.ts → retry-DmPmqZL6.d.ts} +12 -3
  39. package/dist/{stream-DRHy6q1a.d.ts → stream-DbkLOIbJ.d.ts} +15 -5
  40. package/dist/xai/index.d.ts +16 -88
  41. package/dist/xai/index.js +167 -86
  42. package/dist/xai/index.js.map +1 -1
  43. package/package.json +4 -1
  44. package/dist/chunk-MSR5P65T.js +0 -39
  45. package/dist/chunk-MSR5P65T.js.map +0 -1
  46. package/dist/chunk-SKY2JLA7.js.map +0 -1
  47. package/dist/chunk-U3FZWV4U.js.map +0 -1
  48. package/dist/chunk-U4JJC2YX.js.map +0 -1
  49. package/dist/chunk-UMKWXGO3.js.map +0 -1
  50. package/dist/chunk-Z7RBRCRN.js.map +0 -1
  51. package/dist/content-DEl3z_W2.d.ts +0 -276
  52. package/dist/image-Dhq-Yuq4.d.ts +0 -456
  53. package/dist/provider-BBMBZuGn.d.ts +0 -570
  54. /package/dist/{chunk-P5IRTEM5.js.map → chunk-7WYBJPJJ.js.map} +0 -0
@@ -1,4 +1,4 @@
- import { C as ContentBlock, I as ImageBlock, a as AudioBlock, V as VideoBlock, A as AssistantContent, U as UserContent } from './content-DEl3z_W2.js';
+ import { C as ContentBlock, l as ImageBlock, m as AudioBlock, V as VideoBlock, A as AssistantContent, U as UserContent } from './provider-DWEAzeM5.js';
 
  /**
  * @fileoverview JSON Schema types for tool parameters and structured outputs.
@@ -752,7 +752,12 @@ interface Turn<TData = unknown> {
  readonly messages: Message[];
  /** The final assistant response (last AssistantMessage in the turn) */
  readonly response: AssistantMessage;
- /** Tool executions that occurred during this turn */
+ /**
+ * Tool executions that occurred during this turn.
+ *
+ * Execution order reflects completion timing, not call order.
+ * Correlate with tool calls using toolCallId.
+ */
  readonly toolExecutions: ToolExecution[];
  /** Aggregate token usage for the entire turn */
  readonly usage: TokenUsage;
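A note on the reordered field above: since `toolExecutions` is ordered by completion rather than by call, consumers that need call order can key executions by `toolCallId`. A minimal sketch (the `toolCallId` field on `ToolExecution` is implied by the new doc comment; the surrounding `turn` value is assumed to be a completed `Turn`):

```typescript
// Sketch: index executions by the call that produced them, since array order
// reflects completion timing, not the order the model issued the calls.
const executionsByCall = new Map(
  turn.toolExecutions.map((exec) => [exec.toolCallId, exec]),
);
```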
@@ -767,6 +772,9 @@ interface Turn<TData = unknown> {
  /**
  * Turn serialized to JSON format.
  * Messages are converted to MessageJSON, response is omitted (computed from messages).
+ *
+ * @remarks
+ * This type is derived from {@link Turn} and should stay in sync with it.
  */
  type TurnJSON = Omit<Turn, 'messages' | 'response'> & {
  messages: MessageJSON[];
@@ -850,7 +858,7 @@ type StreamEventType =
  | 'video_delta'
  /** Incremental tool call data (arguments being streamed) */
  | 'tool_call_delta'
- /** Tool execution has started */
+ /** Tool execution has started (may be emitted after completion in some implementations) */
  | 'tool_execution_start'
  /** Tool execution has completed */
  | 'tool_execution_end'
@@ -938,10 +946,12 @@ interface StreamEvent {
  interface StreamResult<TData = unknown> extends AsyncIterable<StreamEvent> {
  /**
  * Promise that resolves to the complete Turn after streaming finishes.
+ * Rejects if the stream is aborted or terminated early.
  */
  readonly turn: Promise<Turn<TData>>;
  /**
  * Aborts the stream, stopping further events and cancelling the request.
+ * This will cause {@link StreamResult.turn} to reject.
  */
  abort(): void;
  }
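A hedged usage sketch of the contract documented above. Only `turn`, `abort()`, and async iteration are taken from this file; the `type` field on `StreamEvent` and the `'text_delta'` value are assumed from `StreamEventType`, and `turn.response.text` is taken from an example elsewhere in this diff:

```typescript
// `turn` now rejects when the stream is aborted, so guard the await.
async function consume(result: StreamResult<unknown>): Promise<void> {
  for await (const event of result) {
    if (event.type === 'text_delta') {
      // handle incremental text
    }
  }
  try {
    const turn = await result.turn;
    console.log(turn.response.text);
  } catch {
    console.warn('stream aborted before a complete Turn was produced');
  }
}
```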
@@ -950,7 +960,7 @@ interface StreamResult<TData = unknown> extends AsyncIterable<StreamEvent> {
  *
  * @typeParam TData - Type of the structured output data
  * @param generator - Async generator that yields stream events
- * @param turnPromise - Promise that resolves to the complete Turn
+ * @param turnPromiseOrFactory - Promise or factory that resolves to the complete Turn
  * @param abortController - Controller for aborting the stream
  * @returns A StreamResult that can be iterated and awaited
  *
@@ -964,7 +974,7 @@ interface StreamResult<TData = unknown> extends AsyncIterable<StreamEvent> {
  * );
  * ```
  */
- declare function createStreamResult<TData = unknown>(generator: AsyncGenerator<StreamEvent, void, unknown>, turnPromise: Promise<Turn<TData>>, abortController: AbortController): StreamResult<TData>;
+ declare function createStreamResult<TData = unknown>(generator: AsyncGenerator<StreamEvent, void, unknown>, turnPromiseOrFactory: Promise<Turn<TData>> | (() => Promise<Turn<TData>>), abortController: AbortController): StreamResult<TData>;
  /**
  * Creates a text delta stream event.
  *
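The widened second parameter means callers may now defer building the Turn promise. A sketch under that assumption (`streamEvents` and `collectTurn` are hypothetical helpers, not part of the package):

```typescript
const controller = new AbortController();
const result = createStreamResult(
  streamEvents(),       // AsyncGenerator<StreamEvent, void, unknown>
  () => collectTurn(),  // factory form: the Turn promise is created lazily
  controller,
);
```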
@@ -1,31 +1,4 @@
- import { d as Provider, f as ModelReference, b as LLMHandler } from '../provider-BBMBZuGn.js';
- import { n as ImageHandler } from '../image-Dhq-Yuq4.js';
- import '../content-DEl3z_W2.js';
-
- /**
- * @fileoverview xAI Image Generation API Handler
- *
- * This module implements the image handler for xAI's Image Generation API (Aurora).
- * Supports the grok-2-image-1212 model.
- *
- * @see {@link https://docs.x.ai/docs/image-generation xAI Image Generation Reference}
- * @module providers/xai/image
- */
-
- /**
- * xAI image generation parameters.
- * Passed through unchanged to the API.
- *
- * Note: xAI does NOT support negative_prompt or seed parameters.
- */
- interface XAIImageParams {
- /** Number of images to generate (1-10) */
- n?: number;
- /** Response format */
- response_format?: 'url' | 'b64_json';
- /** User identifier */
- user?: string;
- }
+ import { g as Provider } from '../provider-DWEAzeM5.js';
 
  /**
  * xAI Chat Completions API parameters (OpenAI-compatible).
@@ -163,7 +136,7 @@ interface XAIResponsesParams {
  * const model = llm({
  * model: xai('grok-4-1-fast', { api: 'responses' }),
  * params: {
- * builtInTools: [
+ * tools: [
  * tools.webSearch(),
  * tools.xSearch({ from_date: '2025-01-01' }),
  * tools.codeExecution(),
@@ -172,7 +145,7 @@ interface XAIResponsesParams {
  * });
  * ```
  */
- builtInTools?: XAIBuiltInTool[];
+ tools?: XAIBuiltInTool[];
  /**
  * Maximum agent reasoning turns.
  * Limits the number of assistant turns, not individual tool calls.
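For callers upgrading from 0.0.20, the rename above amounts to moving the same array to the new key. A migration sketch mirroring the doc example:

```typescript
// Before (0.0.20)
// params: { builtInTools: [tools.webSearch(), tools.codeExecution()] }

// After (0.0.22)
const model = llm({
  model: xai('grok-4-1-fast', { api: 'responses' }),
  params: {
    tools: [tools.webSearch(), tools.codeExecution()],
  },
});
```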
@@ -334,20 +307,12 @@ interface XAIXSearchTool {
  * ```typescript
  * const tool: XAICodeExecutionTool = {
  * type: 'code_interpreter',
- * container: {
- * pip_packages: ['numpy', 'pandas'],
- * },
  * };
  * ```
  */
  interface XAICodeExecutionTool {
  /** Tool type identifier */
  type: 'code_interpreter';
- /** Container configuration */
- container?: {
- /** Additional pip packages to install */
- pip_packages?: string[];
- };
  }
  /**
  * File/collections search tool for document retrieval.
@@ -488,23 +453,14 @@ declare function xSearchTool(options?: {
  * Enables Grok to write and execute Python code in a sandbox.
  * Pricing: $5 per 1,000 successful tool invocations.
  *
- * @param options - Optional configuration for the execution environment
  * @returns A code execution tool configuration object
  *
  * @example
  * ```typescript
- * // Basic code execution
  * const codeExec = codeExecutionTool();
- *
- * // With additional packages
- * const codeExecWithPackages = codeExecutionTool({
- * pip_packages: ['numpy', 'pandas', 'scipy'],
- * });
  * ```
  */
- declare function codeExecutionTool(options?: {
- pip_packages?: string[];
- }): XAICodeExecutionTool;
+ declare function codeExecutionTool(): XAICodeExecutionTool;
  /**
  * Creates a file/collections search tool configuration.
  *
@@ -567,7 +523,7 @@ declare function mcpTool(options: {
  * const model = llm({
  * model: xai('grok-4-1-fast', { api: 'responses' }),
  * params: {
- * builtInTools: [
+ * tools: [
  * tools.webSearch(),
  * tools.xSearch({ from_date: '2025-01-01' }),
  * tools.codeExecution(),
@@ -634,10 +590,15 @@ interface XAIHeaders {
  }
 
  /**
- * Union type for LLM parameters across all xAI API modes.
- * This type enables the provider to handle parameters from any of the three APIs.
+ * @fileoverview xAI Provider Factory
+ *
+ * This module provides the main xAI provider implementation that supports three
+ * API modes: Chat Completions (default, OpenAI-compatible), Responses (stateful),
+ * and Messages (Anthropic-compatible).
+ *
+ * @module providers/xai
  */
- type XAILLMParamsUnion = XAICompletionsParams | XAIResponsesParams | XAIMessagesParams;
+
  /**
  * Configuration options for creating xAI model references.
  */
@@ -654,42 +615,9 @@ interface XAIProviderOptions {
  api?: XAIAPIMode;
  }
  /**
- * xAI provider with configurable API mode
- *
- * xAI's APIs are compatible with OpenAI and Anthropic SDKs, supporting three API modes:
- * - Chat Completions API (OpenAI-compatible) - default, recommended
- * - Responses API (OpenAI Responses-compatible) - stateful conversations
- * - Messages API (Anthropic-compatible) - for migration from Anthropic
- *
- * @example
- * // Using the Chat Completions API (default)
- * const model = xai('grok-4');
- *
- * @example
- * // Using the Responses API (stateful)
- * const model = xai('grok-4', { api: 'responses' });
- *
- * @example
- * // Using the Messages API (Anthropic-compatible)
- * const model = xai('grok-4', { api: 'messages' });
+ * Type alias for the xAI provider with its options.
  */
- interface XAIProvider extends Provider<XAIProviderOptions> {
- /**
- * Create a model reference
- * @param modelId - The model identifier (e.g., 'grok-4', 'grok-4.1-fast', 'grok-3-mini')
- * @param options - Provider options including API selection
- */
- (modelId: string, options?: XAIProviderOptions): ModelReference<XAIProviderOptions>;
- /** Provider name */
- readonly name: 'xai';
- /** Provider version */
- readonly version: string;
- /** Supported modalities */
- readonly modalities: {
- llm: LLMHandler<XAILLMParamsUnion>;
- image: ImageHandler<XAIImageParams>;
- };
- }
+ type XAIProvider = Provider<XAIProviderOptions>;
  /**
  * xAI provider
  *
@@ -766,6 +694,6 @@ interface XAIProvider extends Provider<XAIProviderOptions> {
  * console.log(turn.response.text);
  * ```
  */
- declare const xai: XAIProvider;
+ declare const xai: Provider<XAIProviderOptions>;
 
  export { type XAIAPIMode, type XAIAgentTool, type XAIBuiltInTool, type XAICodeExecutionTool, type XAICompletionsParams, type XAIConfig, type XAIFileSearchTool, type XAIHeaders, type XAIMcpTool, type XAIMessagesParams, type XAIModelOptions, type XAIModelReference, type XAIProvider, type XAIProviderOptions, type XAIResponsesParams, type XAISearchParameters, type XAIServerSideToolUsage, type XAIWebSearchTool, type XAIXSearchTool, tools, xai };
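The dedicated `XAIProvider` interface is gone, but the call sites shown in the removed doc comments should still type-check against the generic `Provider<XAIProviderOptions>` callable, assuming `Provider` keeps the `(modelId, options)` signature:

```typescript
const chat = xai('grok-4');                           // Chat Completions API (default)
const stateful = xai('grok-4', { api: 'responses' }); // Responses API
const messages = xai('grok-4', { api: 'messages' });  // Messages API (Anthropic-compatible)
```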
package/dist/xai/index.js CHANGED
@@ -1,24 +1,30 @@
  import {
  Image
  } from "../chunk-WAKD3OO5.js";
+ import {
+ parseJsonResponse
+ } from "../chunk-I2VHCGQE.js";
  import {
  AssistantMessage,
+ createProvider,
+ generateId,
  isAssistantMessage,
  isToolResultMessage,
  isUserMessage
- } from "../chunk-UMKWXGO3.js";
+ } from "../chunk-M4BMM5IB.js";
  import {
  parseSSEStream
- } from "../chunk-Z7RBRCRN.js";
+ } from "../chunk-NWS5IKNR.js";
  import {
  resolveApiKey
- } from "../chunk-P5IRTEM5.js";
+ } from "../chunk-7WYBJPJJ.js";
  import {
  UPPError,
  doFetch,
  doStreamFetch,
- normalizeHttpError
- } from "../chunk-U3FZWV4U.js";
+ normalizeHttpError,
+ toError
+ } from "../chunk-RFWLEFAB.js";
 
  // src/providers/xai/transform.completions.ts
  function transformRequest(request, modelId) {
@@ -54,9 +60,40 @@ function transformRequest(request, modelId) {
  return xaiRequest;
  }
  function normalizeSystem(system) {
- if (!system) return void 0;
+ if (system === void 0 || system === null) return void 0;
  if (typeof system === "string") return system;
- return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+ if (!Array.isArray(system)) {
+ throw new UPPError(
+ "System prompt must be a string or an array of text blocks",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ const texts = [];
+ for (const block of system) {
+ if (!block || typeof block !== "object" || !("text" in block)) {
+ throw new UPPError(
+ "System prompt array must contain objects with a text field",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ const textValue = block.text;
+ if (typeof textValue !== "string") {
+ throw new UPPError(
+ "System prompt text must be a string",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ if (textValue.length > 0) {
+ texts.push(textValue);
+ }
+ }
+ return texts.length > 0 ? texts.join("\n\n") : void 0;
  }
  function transformMessages(messages, system) {
  const result = [];
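The stricter helper above changes failure behavior: malformed system prompts now throw an `UPPError` with code `INVALID_REQUEST` instead of being silently coerced to empty strings. Illustrative calls (the helper is internal to the bundle; the input shapes are taken from the hunk):

```typescript
normalizeSystem("You are terse.");                          // -> "You are terse."
normalizeSystem([{ text: "Rule A" }, { text: "Rule B" }]);  // -> "Rule A\n\nRule B"
normalizeSystem([]);                                        // -> undefined (no non-empty text)
normalizeSystem([{ type: "text" }]);                        // throws UPPError: block has no text field
```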
@@ -215,11 +252,12 @@ function transformResponse(data) {
  });
  }
  }
+ const responseId = data.id || generateId();
  const message = new AssistantMessage(
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: data.id,
+ id: responseId,
  metadata: {
  xai: {
  model: data.model,
@@ -369,11 +407,12 @@ function buildResponseFromState(state) {
  arguments: args
  });
  }
+ const messageId = state.id || generateId();
  const message = new AssistantMessage(
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: state.id,
+ id: messageId,
  metadata: {
  xai: {
  model: state.model,
@@ -478,7 +517,7 @@ function createCompletionsLLMHandler() {
  "xai",
  "llm"
  );
- const data = await response.json();
+ const data = await parseJsonResponse(response, "xai", "llm");
  return transformResponse(data);
  },
  stream(request) {
@@ -503,7 +542,8 @@ function createCompletionsLLMHandler() {
  body.stream_options = { include_usage: true };
  const headers = {
  "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
+ Authorization: `Bearer ${apiKey}`,
+ Accept: "text/event-stream"
  };
  if (request.config.headers) {
  for (const [key, value] of Object.entries(request.config.headers)) {
@@ -564,8 +604,9 @@ function createCompletionsLLMHandler() {
  }
  responseResolve(buildResponseFromState(state));
  } catch (error) {
- responseReject(error);
- throw error;
+ const err = toError(error);
+ responseReject(err);
+ throw err;
  }
  }
  return {
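The `toError` helper used above is imported from chunk-RFWLEFAB.js and its body is not part of this diff; the presumed intent is to guarantee that `responseReject` always receives an `Error` instance rather than an arbitrary thrown value. A hypothetical minimal equivalent, for orientation only:

```typescript
// Assumed behavior only: normalize unknown throwables to Error instances.
function toErrorSketch(value: unknown): Error {
  return value instanceof Error ? value : new Error(String(value));
}
```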
@@ -584,13 +625,19 @@
  // src/providers/xai/transform.responses.ts
  function transformRequest2(request, modelId) {
  const params = request.params ?? {};
+ const { tools: builtInTools, ...restParams } = params;
  const xaiRequest = {
- ...params,
+ ...restParams,
  model: modelId,
  input: transformInputItems(request.messages, request.system)
  };
- if (request.tools && request.tools.length > 0) {
- xaiRequest.tools = request.tools.map(transformTool2);
+ const functionTools = request.tools?.map(transformTool2) ?? [];
+ const allTools = [
+ ...functionTools,
+ ...builtInTools ?? []
+ ];
+ if (allTools.length > 0) {
+ xaiRequest.tools = allTools;
  }
  if (request.structure) {
  const schema = {
@@ -615,9 +662,40 @@ function transformRequest2(request, modelId) {
  return xaiRequest;
  }
  function normalizeSystem2(system) {
- if (!system) return void 0;
+ if (system === void 0 || system === null) return void 0;
  if (typeof system === "string") return system;
- return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+ if (!Array.isArray(system)) {
+ throw new UPPError(
+ "System prompt must be a string or an array of text blocks",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ const texts = [];
+ for (const block of system) {
+ if (!block || typeof block !== "object" || !("text" in block)) {
+ throw new UPPError(
+ "System prompt array must contain objects with a text field",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ const textValue = block.text;
+ if (typeof textValue !== "string") {
+ throw new UPPError(
+ "System prompt text must be a string",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ if (textValue.length > 0) {
+ texts.push(textValue);
+ }
+ }
+ return texts.length > 0 ? texts.join("\n\n") : void 0;
  }
  function transformInputItems(messages, system) {
  const result = [];
@@ -798,16 +876,17 @@ function transformResponse2(data) {
  });
  }
  }
+ const responseId = data.id || generateId();
  const message = new AssistantMessage(
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: data.id,
+ id: responseId,
  metadata: {
  xai: {
  model: data.model,
  status: data.status,
- response_id: data.id,
+ response_id: responseId,
  functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0,
  citations: data.citations,
  inline_citations: data.inline_citations
@@ -1033,16 +1112,17 @@ function buildResponseFromState2(state) {
  });
  }
  }
+ const responseId = state.id || generateId();
  const message = new AssistantMessage(
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: state.id,
+ id: responseId,
  metadata: {
  xai: {
  model: state.model,
  status: state.status,
- response_id: state.id,
+ response_id: responseId,
  functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
  }
  }
@@ -1135,7 +1215,7 @@ function createResponsesLLMHandler() {
  "xai",
  "llm"
  );
- const data = await response.json();
+ const data = await parseJsonResponse(response, "xai", "llm");
  if (data.status === "failed" && data.error) {
  throw new UPPError(
  data.error.message,
@@ -1167,7 +1247,8 @@ function createResponsesLLMHandler() {
  body.stream = true;
  const headers = {
  "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
+ Authorization: `Bearer ${apiKey}`,
+ Accept: "text/event-stream"
  };
  if (request.config.headers) {
  for (const [key, value] of Object.entries(request.config.headers)) {
@@ -1228,8 +1309,9 @@ function createResponsesLLMHandler() {
  }
  responseResolve(buildResponseFromState2(state));
  } catch (error) {
- responseReject(error);
- throw error;
+ const err = toError(error);
+ responseReject(err);
+ throw err;
  }
  }
  return {
@@ -1247,9 +1329,40 @@
 
  // src/providers/xai/transform.messages.ts
  function normalizeSystem3(system) {
- if (!system) return void 0;
+ if (system === void 0 || system === null) return void 0;
  if (typeof system === "string") return system;
- return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+ if (!Array.isArray(system)) {
+ throw new UPPError(
+ "System prompt must be a string or an array of text blocks",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ const texts = [];
+ for (const block of system) {
+ if (!block || typeof block !== "object" || !("text" in block)) {
+ throw new UPPError(
+ "System prompt array must contain objects with a text field",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ const textValue = block.text;
+ if (typeof textValue !== "string") {
+ throw new UPPError(
+ "System prompt text must be a string",
+ "INVALID_REQUEST",
+ "xai",
+ "llm"
+ );
+ }
+ if (textValue.length > 0) {
+ texts.push(textValue);
+ }
+ }
+ return texts.length > 0 ? texts.join("\n\n") : void 0;
  }
  function transformRequest3(request, modelId) {
  const params = request.params ?? {};
@@ -1540,11 +1653,12 @@ function buildResponseFromState3(state) {
  });
  }
  }
+ const messageId = state.messageId || generateId();
  const message = new AssistantMessage(
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: state.messageId,
+ id: messageId,
  metadata: {
  xai: {
  stop_reason: state.stopReason,
@@ -1632,7 +1746,7 @@ function createMessagesLLMHandler() {
  "xai",
  "llm"
  );
- const data = await response.json();
+ const data = await parseJsonResponse(response, "xai", "llm");
  return transformResponse3(data);
  },
  stream(request) {
@@ -1657,7 +1771,8 @@ function createMessagesLLMHandler() {
  const headers = {
  "Content-Type": "application/json",
  "x-api-key": apiKey,
- "anthropic-version": "2023-06-01"
+ "anthropic-version": "2023-06-01",
+ Accept: "text/event-stream"
  };
  if (request.config.headers) {
  for (const [key, value] of Object.entries(request.config.headers)) {
@@ -1714,8 +1829,9 @@ function createMessagesLLMHandler() {
  }
  responseResolve(buildResponseFromState3(state));
  } catch (error) {
- responseReject(error);
- throw error;
+ const err = toError(error);
+ responseReject(err);
+ throw err;
  }
  }
  return {
@@ -1806,7 +1922,7 @@ async function executeGenerate(modelId, request) {
  body: JSON.stringify(body),
  signal: request.signal
  }, request.config, "xai", "image");
- const data = await response.json();
+ const data = await parseJsonResponse(response, "xai", "image");
  return transformResponse4(data);
  }
  function transformResponse4(data) {
@@ -1850,10 +1966,9 @@ function xSearchTool(options) {
  ...options
  };
  }
- function codeExecutionTool(options) {
+ function codeExecutionTool() {
  return {
- type: "code_interpreter",
- ...options?.pip_packages && { container: { pip_packages: options.pip_packages } }
+ type: "code_interpreter"
  };
  }
  function fileSearchTool(options) {
@@ -1884,56 +1999,22 @@ var tools = {
  };
 
  // src/providers/xai/index.ts
- function createXAIProvider() {
- let currentApiMode = "completions";
- const completionsHandler = createCompletionsLLMHandler();
- const responsesHandler = createResponsesLLMHandler();
- const messagesHandler = createMessagesLLMHandler();
- const imageHandler = createImageHandler();
- const fn = function(modelId, options) {
- const apiMode = options?.api ?? "completions";
- currentApiMode = apiMode;
- return { modelId, provider };
- };
- const modalities = {
- get llm() {
- switch (currentApiMode) {
- case "responses":
- return responsesHandler;
- case "messages":
- return messagesHandler;
- case "completions":
- default:
- return completionsHandler;
- }
- },
- image: imageHandler
- };
- Object.defineProperties(fn, {
- name: {
- value: "xai",
- writable: false,
- configurable: true
+ var xai = createProvider({
+ name: "xai",
+ version: "1.0.0",
+ handlers: {
+ llm: {
+ handlers: {
+ completions: createCompletionsLLMHandler(),
+ responses: createResponsesLLMHandler(),
+ messages: createMessagesLLMHandler()
+ },
+ defaultMode: "completions",
+ getMode: (options) => options?.api ?? "completions"
  },
- version: {
- value: "1.0.0",
- writable: false,
- configurable: true
- },
- modalities: {
- value: modalities,
- writable: false,
- configurable: true
- }
- });
- const provider = fn;
- completionsHandler._setProvider?.(provider);
- responsesHandler._setProvider?.(provider);
- messagesHandler._setProvider?.(provider);
- imageHandler._setProvider?.(provider);
- return provider;
- }
- var xai = createXAIProvider();
+ image: createImageHandler()
+ }
+ });
  export {
  tools,
  xai