@providerprotocol/ai 0.0.11 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/dist/anthropic/index.d.ts +51 -15
  2. package/dist/anthropic/index.js +54 -19
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
  5. package/dist/chunk-MOU4U3PO.js.map +1 -0
  6. package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
  7. package/dist/chunk-MSR5P65T.js.map +1 -0
  8. package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
  9. package/dist/chunk-SVYROCLD.js.map +1 -0
  10. package/dist/chunk-U4JJC2YX.js +234 -0
  11. package/dist/chunk-U4JJC2YX.js.map +1 -0
  12. package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
  13. package/dist/chunk-Z7RBRCRN.js.map +1 -0
  14. package/dist/google/index.d.ts +376 -7
  15. package/dist/google/index.js +127 -15
  16. package/dist/google/index.js.map +1 -1
  17. package/dist/http/index.d.ts +222 -25
  18. package/dist/http/index.js +3 -3
  19. package/dist/index.d.ts +1482 -198
  20. package/dist/index.js +233 -49
  21. package/dist/index.js.map +1 -1
  22. package/dist/ollama/index.d.ts +92 -20
  23. package/dist/ollama/index.js +17 -7
  24. package/dist/ollama/index.js.map +1 -1
  25. package/dist/openai/index.d.ts +340 -61
  26. package/dist/openai/index.js +57 -15
  27. package/dist/openai/index.js.map +1 -1
  28. package/dist/openrouter/index.d.ts +107 -51
  29. package/dist/openrouter/index.js +36 -8
  30. package/dist/openrouter/index.js.map +1 -1
  31. package/dist/provider-mKkz7Q9U.d.ts +488 -0
  32. package/dist/retry-Dh70lgr0.d.ts +508 -0
  33. package/dist/xai/index.d.ts +97 -22
  34. package/dist/xai/index.js +55 -19
  35. package/dist/xai/index.js.map +1 -1
  36. package/package.json +8 -12
  37. package/dist/chunk-CUCRF5W6.js +0 -136
  38. package/dist/chunk-CUCRF5W6.js.map +0 -1
  39. package/dist/chunk-SUNYWHTH.js.map +0 -1
  40. package/dist/chunk-W4BB4BG2.js.map +0 -1
  41. package/dist/chunk-X5G4EHL7.js.map +0 -1
  42. package/dist/chunk-Y6Q7JCNP.js.map +0 -1
  43. package/dist/provider-CUJWjgNl.d.ts +0 -192
  44. package/dist/retry-I2661_rv.d.ts +0 -118
  45. package/src/anthropic/index.ts +0 -3
  46. package/src/core/image.ts +0 -188
  47. package/src/core/llm.ts +0 -650
  48. package/src/core/provider.ts +0 -92
  49. package/src/google/index.ts +0 -3
  50. package/src/http/errors.ts +0 -112
  51. package/src/http/fetch.ts +0 -210
  52. package/src/http/index.ts +0 -31
  53. package/src/http/keys.ts +0 -136
  54. package/src/http/retry.ts +0 -205
  55. package/src/http/sse.ts +0 -136
  56. package/src/index.ts +0 -32
  57. package/src/ollama/index.ts +0 -3
  58. package/src/openai/index.ts +0 -39
  59. package/src/openrouter/index.ts +0 -11
  60. package/src/providers/anthropic/index.ts +0 -17
  61. package/src/providers/anthropic/llm.ts +0 -196
  62. package/src/providers/anthropic/transform.ts +0 -434
  63. package/src/providers/anthropic/types.ts +0 -213
  64. package/src/providers/google/index.ts +0 -17
  65. package/src/providers/google/llm.ts +0 -203
  66. package/src/providers/google/transform.ts +0 -447
  67. package/src/providers/google/types.ts +0 -214
  68. package/src/providers/ollama/index.ts +0 -43
  69. package/src/providers/ollama/llm.ts +0 -272
  70. package/src/providers/ollama/transform.ts +0 -434
  71. package/src/providers/ollama/types.ts +0 -260
  72. package/src/providers/openai/index.ts +0 -186
  73. package/src/providers/openai/llm.completions.ts +0 -201
  74. package/src/providers/openai/llm.responses.ts +0 -211
  75. package/src/providers/openai/transform.completions.ts +0 -561
  76. package/src/providers/openai/transform.responses.ts +0 -708
  77. package/src/providers/openai/types.ts +0 -1249
  78. package/src/providers/openrouter/index.ts +0 -177
  79. package/src/providers/openrouter/llm.completions.ts +0 -201
  80. package/src/providers/openrouter/llm.responses.ts +0 -211
  81. package/src/providers/openrouter/transform.completions.ts +0 -538
  82. package/src/providers/openrouter/transform.responses.ts +0 -742
  83. package/src/providers/openrouter/types.ts +0 -717
  84. package/src/providers/xai/index.ts +0 -223
  85. package/src/providers/xai/llm.completions.ts +0 -201
  86. package/src/providers/xai/llm.messages.ts +0 -195
  87. package/src/providers/xai/llm.responses.ts +0 -211
  88. package/src/providers/xai/transform.completions.ts +0 -565
  89. package/src/providers/xai/transform.messages.ts +0 -448
  90. package/src/providers/xai/transform.responses.ts +0 -678
  91. package/src/providers/xai/types.ts +0 -938
  92. package/src/types/content.ts +0 -133
  93. package/src/types/errors.ts +0 -85
  94. package/src/types/index.ts +0 -105
  95. package/src/types/llm.ts +0 -211
  96. package/src/types/messages.ts +0 -205
  97. package/src/types/provider.ts +0 -195
  98. package/src/types/schema.ts +0 -58
  99. package/src/types/stream.ts +0 -188
  100. package/src/types/thread.ts +0 -226
  101. package/src/types/tool.ts +0 -88
  102. package/src/types/turn.ts +0 -118
  103. package/src/utils/id.ts +0 -28
  104. package/src/xai/index.ts +0 -41
@@ -1,223 +0,0 @@
1
- import type {
2
- Provider,
3
- ModelReference,
4
- LLMHandler,
5
- LLMProvider,
6
- } from '../../types/provider.ts';
7
- import { createCompletionsLLMHandler } from './llm.completions.ts';
8
- import { createResponsesLLMHandler } from './llm.responses.ts';
9
- import { createMessagesLLMHandler } from './llm.messages.ts';
10
- import type { XAICompletionsParams, XAIResponsesParams, XAIMessagesParams, XAIConfig, XAIAPIMode } from './types.ts';
11
-
12
/** Union of the per-API parameter shapes accepted by the xAI LLM modality. */
type XAILLMParamsUnion = XAICompletionsParams | XAIResponsesParams | XAIMessagesParams;
14
-
15
/**
 * xAI provider options
 */
export interface XAIProviderOptions {
  /**
   * Which API to use:
   * - 'completions': Chat Completions API (OpenAI-compatible, default)
   * - 'responses': Responses API (OpenAI Responses-compatible, stateful)
   * - 'messages': Messages API (Anthropic-compatible)
   *
   * Defaults to 'completions' when omitted.
   */
  api?: XAIAPIMode;
}
27
-
28
/**
 * xAI provider with configurable API mode
 *
 * xAI's APIs are compatible with OpenAI and Anthropic SDKs, supporting three API modes:
 * - Chat Completions API (OpenAI-compatible) - default, recommended
 * - Responses API (OpenAI Responses-compatible) - stateful conversations
 * - Messages API (Anthropic-compatible) - for migration from Anthropic
 *
 * @example
 * // Using the Chat Completions API (default)
 * const model = xai('grok-4');
 *
 * @example
 * // Using the Responses API (stateful)
 * const model = xai('grok-4', { api: 'responses' });
 *
 * @example
 * // Using the Messages API (Anthropic-compatible)
 * const model = xai('grok-4', { api: 'messages' });
 */
export interface XAIProvider extends Provider<XAIProviderOptions> {
  /**
   * Create a model reference
   * @param modelId - The model identifier (e.g., 'grok-4', 'grok-4.1-fast', 'grok-3-mini')
   * @param options - Provider options including API selection
   */
  (modelId: string, options?: XAIProviderOptions): ModelReference<XAIProviderOptions>;

  /** Provider name */
  readonly name: 'xai';

  /** Provider version */
  readonly version: string;

  /**
   * Supported modalities.
   * `llm` resolves dynamically to the handler for the most recently
   * selected API mode (see createXAIProvider).
   */
  readonly modalities: {
    llm: LLMHandler<XAILLMParamsUnion>;
  };
}
67
-
68
/**
 * Create the xAI provider
 *
 * Builds a callable provider function, attaches `name`, `version`, and
 * `modalities` as non-writable properties, then injects the provider back
 * into each API handler so every handler can expose a `provider` reference.
 */
function createXAIProvider(): XAIProvider {
  // Track which API mode is currently active for the modalities
  // Default to 'completions' (recommended for most use cases)
  // NOTE(review): this is provider-level state shared by ALL model references
  // created from this provider. Calling xai(...) with a different `api` mode
  // changes which handler `modalities.llm` returns for previously created
  // references (last call wins), and the returned reference below does not
  // carry `options`. Confirm callers bind the handler immediately after
  // creating a reference — otherwise this is a latent aliasing bug.
  let currentApiMode: XAIAPIMode = 'completions';

  // Create handlers eagerly so we can inject provider reference
  const completionsHandler = createCompletionsLLMHandler();
  const responsesHandler = createResponsesLLMHandler();
  const messagesHandler = createMessagesLLMHandler();

  const fn = function (
    modelId: string,
    options?: XAIProviderOptions
  ): ModelReference<XAIProviderOptions> {
    const apiMode = options?.api ?? 'completions';
    currentApiMode = apiMode;
    // The chosen API mode lives only in `currentApiMode` above; the
    // reference itself records just the model id and the provider.
    return { modelId, provider };
  };

  // Create a dynamic modalities object that returns the correct handler
  // for whatever API mode was selected most recently
  const modalities = {
    get llm(): LLMHandler<XAILLMParamsUnion> {
      switch (currentApiMode) {
        case 'responses':
          return responsesHandler as unknown as LLMHandler<XAILLMParamsUnion>;
        case 'messages':
          return messagesHandler as unknown as LLMHandler<XAILLMParamsUnion>;
        case 'completions':
        default:
          return completionsHandler as unknown as LLMHandler<XAILLMParamsUnion>;
      }
    },
  };

  // Define properties as non-writable. `name` must be set via
  // defineProperties because a function's own `name` property is
  // read-only (but configurable) by default.
  Object.defineProperties(fn, {
    name: {
      value: 'xai',
      writable: false,
      configurable: true,
    },
    version: {
      value: '1.0.0',
      writable: false,
      configurable: true,
    },
    modalities: {
      value: modalities,
      writable: false,
      configurable: true,
    },
  });

  const provider = fn as XAIProvider;

  // Inject provider reference into all handlers (spec compliance)
  completionsHandler._setProvider?.(provider as unknown as LLMProvider<XAICompletionsParams>);
  responsesHandler._setProvider?.(provider as unknown as LLMProvider<XAIResponsesParams>);
  messagesHandler._setProvider?.(provider as unknown as LLMProvider<XAIMessagesParams>);

  return provider;
}
133
-
134
/**
 * xAI provider
 *
 * Supports three API modes:
 * - Chat Completions API (default, OpenAI-compatible)
 * - Responses API (stateful, OpenAI Responses-compatible)
 * - Messages API (Anthropic-compatible)
 *
 * xAI's Grok models support:
 * - Real-time search via Live Search API (deprecated Dec 2025) or Agent Tools API
 * - Reasoning with `reasoning_effort` parameter (for Grok 3 Mini)
 * - Tool/function calling
 * - Image input
 * - Streaming responses
 * - Structured output (JSON mode)
 *
 * @example
 * ```ts
 * import { xai } from './providers/xai';
 * import { llm } from './core/llm';
 *
 * // Using Chat Completions API (default, recommended)
 * const model = llm({
 *   model: xai('grok-4'),
 *   params: { max_tokens: 1000 }
 * });
 *
 * // Using Responses API (stateful conversations)
 * const statefulModel = llm({
 *   model: xai('grok-4', { api: 'responses' }),
 *   params: {
 *     max_output_tokens: 1000,
 *     store: true, // Enable stateful storage
 *   }
 * });
 *
 * // Continue a previous conversation
 * const continuedModel = llm({
 *   model: xai('grok-4', { api: 'responses' }),
 *   params: {
 *     previous_response_id: 'resp_123...',
 *   }
 * });
 *
 * // Using Messages API (Anthropic-compatible)
 * const anthropicModel = llm({
 *   model: xai('grok-4', { api: 'messages' }),
 *   params: { max_tokens: 1000 }
 * });
 *
 * // Using reasoning effort (Grok 3 Mini only)
 * const reasoningModel = llm({
 *   model: xai('grok-3-mini'),
 *   params: {
 *     max_tokens: 1000,
 *     reasoning_effort: 'high', // 'low' or 'high'
 *   }
 * });
 *
 * // Using Live Search (deprecated Dec 2025)
 * const searchModel = llm({
 *   model: xai('grok-4'),
 *   params: {
 *     max_tokens: 1000,
 *     search_parameters: {
 *       mode: 'auto',
 *       sources: ['web', 'x', 'news'],
 *     }
 *   }
 * });
 *
 * // Generate
 * const turn = await model.generate('Hello!');
 * console.log(turn.response.text);
 * ```
 */
export const xai = createXAIProvider();
211
-
212
// Re-export the public xAI types so consumers can import everything
// from this entry point instead of reaching into ./types.ts
export type {
  XAICompletionsParams,
  XAIResponsesParams,
  XAIMessagesParams,
  XAIConfig,
  XAIAPIMode,
  XAIModelOptions,
  XAIModelReference,
  XAISearchParameters,
  XAIAgentTool,
} from './types.ts';
@@ -1,201 +0,0 @@
1
- import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
2
- import type { StreamEvent } from '../../types/stream.ts';
3
- import type { LLMProvider } from '../../types/provider.ts';
4
- import { UPPError } from '../../types/errors.ts';
5
- import { resolveApiKey } from '../../http/keys.ts';
6
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
7
- import { parseSSEStream } from '../../http/sse.ts';
8
- import { normalizeHttpError } from '../../http/errors.ts';
9
- import type { XAICompletionsParams, XAICompletionsResponse, XAICompletionsStreamChunk } from './types.ts';
10
- import {
11
- transformRequest,
12
- transformResponse,
13
- transformStreamEvent,
14
- createStreamState,
15
- buildResponseFromState,
16
- } from './transform.completions.ts';
17
-
18
// Default xAI Chat Completions endpoint (OpenAI-compatible);
// overridable per request via config.baseUrl
const XAI_COMPLETIONS_API_URL = 'https://api.x.ai/v1/chat/completions';

/**
 * xAI Chat Completions API capabilities
 */
const XAI_COMPLETIONS_CAPABILITIES: LLMCapabilities = {
  streaming: true,
  tools: true,
  structuredOutput: true,
  imageInput: true,
  videoInput: false,
  audioInput: false,
};
31
-
32
/**
 * Create xAI Chat Completions LLM handler
 *
 * The handler binds models by id; each bound model translates UPP requests
 * to the Chat Completions wire format (via transform.completions) and back,
 * using doFetch for one-shot calls and doStreamFetch + SSE parsing for
 * streaming.
 */
export function createCompletionsLLMHandler(): LLMHandler<XAICompletionsParams> {
  // Provider reference injected by createProvider() or xAI's custom factory
  let providerRef: LLMProvider<XAICompletionsParams> | null = null;

  return {
    _setProvider(provider: LLMProvider<XAICompletionsParams>) {
      providerRef = provider;
    },

    bind(modelId: string): BoundLLMModel<XAICompletionsParams> {
      // Use the injected provider reference; binding before injection is a
      // programming error, so fail loudly
      if (!providerRef) {
        throw new UPPError(
          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
          'INVALID_REQUEST',
          'xai',
          'llm'
        );
      }

      const model: BoundLLMModel<XAICompletionsParams> = {
        modelId,
        capabilities: XAI_COMPLETIONS_CAPABILITIES,

        // Getter keeps the reference current even if the provider is
        // (re-)injected after bind()
        get provider(): LLMProvider<XAICompletionsParams> {
          return providerRef!;
        },

        /**
         * One-shot (non-streaming) completion: resolve the API key, build
         * the provider-specific body, POST it, map the JSON payload back to
         * the UPP response shape.
         */
        async complete(request: LLMRequest<XAICompletionsParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'XAI_API_KEY',
            'xai',
            'llm'
          );

          // config.baseUrl overrides the default endpoint (e.g. proxies)
          const baseUrl = request.config.baseUrl ?? XAI_COMPLETIONS_API_URL;
          const body = transformRequest(request, modelId);

          const response = await doFetch(
            baseUrl,
            {
              method: 'POST',
              headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${apiKey}`,
              },
              body: JSON.stringify(body),
              signal: request.signal,
            },
            request.config,
            'xai',
            'llm'
          );

          const data = (await response.json()) as XAICompletionsResponse;
          return transformResponse(data);
        },

        /**
         * Streaming completion: returns an async iterable of UPP stream
         * events plus a `response` promise that settles with the final
         * accumulated response.
         *
         * NOTE(review): no work starts until iteration begins, so `response`
         * never settles if the iterator is never consumed; and when a stream
         * error is thrown to the iterator it also rejects `response` — if a
         * caller only iterates and never observes `response`, that rejection
         * is unhandled. Confirm callers always attach a handler.
         */
        stream(request: LLMRequest<XAICompletionsParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;

          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
            responseResolve = resolve;
            responseReject = reject;
          });

          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
            try {
              const apiKey = await resolveApiKey(
                request.config,
                'XAI_API_KEY',
                'xai',
                'llm'
              );

              const baseUrl = request.config.baseUrl ?? XAI_COMPLETIONS_API_URL;
              const body = transformRequest(request, modelId);
              body.stream = true;
              // Request a final usage chunk in the stream
              body.stream_options = { include_usage: true };

              const response = await doStreamFetch(
                baseUrl,
                {
                  method: 'POST',
                  headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${apiKey}`,
                  },
                  body: JSON.stringify(body),
                  signal: request.signal,
                },
                request.config,
                'xai',
                'llm'
              );

              if (!response.ok) {
                const error = await normalizeHttpError(response, 'xai', 'llm');
                responseReject(error);
                throw error;
              }

              if (!response.body) {
                const error = new UPPError(
                  'No response body for streaming request',
                  'PROVIDER_ERROR',
                  'xai',
                  'llm'
                );
                responseReject(error);
                throw error;
              }

              for await (const data of parseSSEStream(response.body)) {
                // Skip [DONE] marker
                if (data === '[DONE]') {
                  continue;
                }

                // Check for xAI error event
                if (typeof data === 'object' && data !== null) {
                  const chunk = data as XAICompletionsStreamChunk;

                  // Check for error in chunk
                  if ('error' in chunk && chunk.error) {
                    const errorData = chunk.error as { message?: string; type?: string };
                    const error = new UPPError(
                      errorData.message ?? 'Unknown error',
                      'PROVIDER_ERROR',
                      'xai',
                      'llm'
                    );
                    responseReject(error);
                    throw error;
                  }

                  // One provider chunk can map to zero or more UPP events
                  const uppEvents = transformStreamEvent(chunk, state);
                  for (const event of uppEvents) {
                    yield event;
                  }
                }
              }

              // Build final response from the accumulated stream state
              responseResolve(buildResponseFromState(state));
            } catch (error) {
              // Reject `response` and re-throw into the iterator; a second
              // reject after the explicit ones above is a harmless no-op
              responseReject(error as Error);
              throw error;
            }
          }

          return {
            [Symbol.asyncIterator]() {
              return generateEvents();
            },
            response: responsePromise,
          };
        },
      };

      return model;
    },
  };
}
@@ -1,195 +0,0 @@
1
- import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
2
- import type { StreamEvent } from '../../types/stream.ts';
3
- import type { LLMProvider } from '../../types/provider.ts';
4
- import { UPPError } from '../../types/errors.ts';
5
- import { resolveApiKey } from '../../http/keys.ts';
6
- import { doFetch, doStreamFetch } from '../../http/fetch.ts';
7
- import { parseSSEStream } from '../../http/sse.ts';
8
- import { normalizeHttpError } from '../../http/errors.ts';
9
- import type { XAIMessagesParams, XAIMessagesResponse, XAIMessagesStreamEvent } from './types.ts';
10
- import {
11
- transformRequest,
12
- transformResponse,
13
- transformStreamEvent,
14
- createStreamState,
15
- buildResponseFromState,
16
- } from './transform.messages.ts';
17
-
18
// Default xAI Messages endpoint (Anthropic-compatible);
// overridable per request via config.baseUrl
const XAI_MESSAGES_API_URL = 'https://api.x.ai/v1/messages';

/**
 * xAI Messages API capabilities (Anthropic-compatible)
 */
const XAI_MESSAGES_CAPABILITIES: LLMCapabilities = {
  streaming: true,
  tools: true,
  structuredOutput: true,
  imageInput: true,
  videoInput: false,
  audioInput: false,
};
31
-
32
/**
 * Create xAI Messages API LLM handler (Anthropic-compatible)
 *
 * Mirrors the completions handler but speaks the Anthropic-style wire
 * format: `x-api-key` + `anthropic-version` headers, typed SSE events
 * (`'type' in data`), and a single UPP event per provider event.
 */
export function createMessagesLLMHandler(): LLMHandler<XAIMessagesParams> {
  // Provider reference injected by createProvider() or xAI's custom factory
  let providerRef: LLMProvider<XAIMessagesParams> | null = null;

  return {
    _setProvider(provider: LLMProvider<XAIMessagesParams>) {
      providerRef = provider;
    },

    bind(modelId: string): BoundLLMModel<XAIMessagesParams> {
      // Use the injected provider reference; binding before injection is a
      // programming error, so fail loudly
      if (!providerRef) {
        throw new UPPError(
          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
          'INVALID_REQUEST',
          'xai',
          'llm'
        );
      }

      const model: BoundLLMModel<XAIMessagesParams> = {
        modelId,
        capabilities: XAI_MESSAGES_CAPABILITIES,

        // Getter keeps the reference current even if the provider is
        // (re-)injected after bind()
        get provider(): LLMProvider<XAIMessagesParams> {
          return providerRef!;
        },

        /**
         * One-shot (non-streaming) completion against the Messages API.
         * Uses Anthropic-compatible auth headers rather than a Bearer token.
         */
        async complete(request: LLMRequest<XAIMessagesParams>): Promise<LLMResponse> {
          const apiKey = await resolveApiKey(
            request.config,
            'XAI_API_KEY',
            'xai',
            'llm'
          );

          // config.baseUrl overrides the default endpoint (e.g. proxies)
          const baseUrl = request.config.baseUrl ?? XAI_MESSAGES_API_URL;
          const body = transformRequest(request, modelId);

          const response = await doFetch(
            baseUrl,
            {
              method: 'POST',
              headers: {
                'Content-Type': 'application/json',
                'x-api-key': apiKey,
                'anthropic-version': '2023-06-01',
              },
              body: JSON.stringify(body),
              signal: request.signal,
            },
            request.config,
            'xai',
            'llm'
          );

          const data = (await response.json()) as XAIMessagesResponse;
          return transformResponse(data);
        },

        /**
         * Streaming completion: returns an async iterable of UPP stream
         * events plus a `response` promise that settles with the final
         * accumulated response.
         *
         * NOTE(review): as in the completions handler, nothing runs until
         * iteration starts, and a stream error both throws from the iterator
         * and rejects `response` — unhandled if the caller never observes
         * `response`. Confirm callers always attach a handler.
         */
        stream(request: LLMRequest<XAIMessagesParams>): LLMStreamResult {
          const state = createStreamState();
          let responseResolve: (value: LLMResponse) => void;
          let responseReject: (error: Error) => void;

          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
            responseResolve = resolve;
            responseReject = reject;
          });

          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
            try {
              const apiKey = await resolveApiKey(
                request.config,
                'XAI_API_KEY',
                'xai',
                'llm'
              );

              const baseUrl = request.config.baseUrl ?? XAI_MESSAGES_API_URL;
              const body = transformRequest(request, modelId);
              body.stream = true;

              const response = await doStreamFetch(
                baseUrl,
                {
                  method: 'POST',
                  headers: {
                    'Content-Type': 'application/json',
                    'x-api-key': apiKey,
                    'anthropic-version': '2023-06-01',
                  },
                  body: JSON.stringify(body),
                  signal: request.signal,
                },
                request.config,
                'xai',
                'llm'
              );

              if (!response.ok) {
                const error = await normalizeHttpError(response, 'xai', 'llm');
                responseReject(error);
                throw error;
              }

              if (!response.body) {
                const error = new UPPError(
                  'No response body for streaming request',
                  'PROVIDER_ERROR',
                  'xai',
                  'llm'
                );
                responseReject(error);
                throw error;
              }

              for await (const data of parseSSEStream(response.body)) {
                // Check for xAI error event; only tagged objects are events
                if (typeof data === 'object' && data !== null && 'type' in data) {
                  const event = data as XAIMessagesStreamEvent;

                  if (event.type === 'error') {
                    const error = new UPPError(
                      event.error.message,
                      'PROVIDER_ERROR',
                      'xai',
                      'llm'
                    );
                    responseReject(error);
                    throw error;
                  }

                  // Unlike the completions handler, each provider event maps
                  // to at most one UPP event
                  const uppEvent = transformStreamEvent(event, state);
                  if (uppEvent) {
                    yield uppEvent;
                  }
                }
              }

              // Build final response from the accumulated stream state
              responseResolve(buildResponseFromState(state));
            } catch (error) {
              // Reject `response` and re-throw into the iterator; a second
              // reject after the explicit ones above is a harmless no-op
              responseReject(error as Error);
              throw error;
            }
          }

          return {
            [Symbol.asyncIterator]() {
              return generateEvents();
            },
            response: responsePromise,
          };
        },
      };

      return model;
    },
  };
}