@providerprotocol/ai 0.0.4 → 0.0.5
- package/README.md +19 -0
- package/dist/index.js +5 -1
- package/dist/index.js.map +1 -1
- package/dist/xai/index.d.ts +282 -0
- package/dist/xai/index.js +1812 -0
- package/dist/xai/index.js.map +1 -0
- package/package.json +9 -1
- package/src/core/llm.ts +6 -1
- package/src/providers/xai/index.ts +218 -0
- package/src/providers/xai/llm.completions.ts +201 -0
- package/src/providers/xai/llm.messages.ts +195 -0
- package/src/providers/xai/llm.responses.ts +211 -0
- package/src/providers/xai/transform.completions.ts +617 -0
- package/src/providers/xai/transform.messages.ts +467 -0
- package/src/providers/xai/transform.responses.ts +717 -0
- package/src/providers/xai/types.ts +908 -0
- package/src/xai/index.ts +39 -0
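The new package/src/xai/ source and dist/xai/ build output, together with the package.json change, suggest the xAI provider ships as its own entry point in 0.0.5. A minimal consumption sketch, assuming a ./xai subpath export and an `xai` factory exported from package/src/xai/index.ts (neither name is confirmed by this diff):

// Hypothetical import; the real export names live in package/src/xai/index.ts.
import { xai } from '@providerprotocol/ai/xai';

const provider = xai(); // factory name and signature assumed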
package/src/providers/xai/llm.completions.ts
@@ -0,0 +1,201 @@
+import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
+import type { StreamEvent } from '../../types/stream.ts';
+import type { LLMProvider } from '../../types/provider.ts';
+import { UPPError } from '../../types/errors.ts';
+import { resolveApiKey } from '../../http/keys.ts';
+import { doFetch, doStreamFetch } from '../../http/fetch.ts';
+import { parseSSEStream } from '../../http/sse.ts';
+import { normalizeHttpError } from '../../http/errors.ts';
+import type { XAILLMParams, XAICompletionsResponse, XAICompletionsStreamChunk } from './types.ts';
+import {
+  transformRequest,
+  transformResponse,
+  transformStreamEvent,
+  createStreamState,
+  buildResponseFromState,
+} from './transform.completions.ts';
+
+const XAI_COMPLETIONS_API_URL = 'https://api.x.ai/v1/chat/completions';
+
+/**
+ * xAI Chat Completions API capabilities
+ */
+const XAI_COMPLETIONS_CAPABILITIES: LLMCapabilities = {
+  streaming: true,
+  tools: true,
+  structuredOutput: true,
+  imageInput: true,
+  videoInput: false,
+  audioInput: false,
+};
+
+/**
+ * Create xAI Chat Completions LLM handler
+ */
+export function createCompletionsLLMHandler(): LLMHandler<XAILLMParams> {
+  // Provider reference injected by createProvider() or xAI's custom factory
+  let providerRef: LLMProvider<XAILLMParams> | null = null;
+
+  return {
+    _setProvider(provider: LLMProvider<XAILLMParams>) {
+      providerRef = provider;
+    },
+
+    bind(modelId: string): BoundLLMModel<XAILLMParams> {
+      // Use the injected provider reference
+      if (!providerRef) {
+        throw new UPPError(
+          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
+          'INVALID_REQUEST',
+          'xai',
+          'llm'
+        );
+      }
+
+      const model: BoundLLMModel<XAILLMParams> = {
+        modelId,
+        capabilities: XAI_COMPLETIONS_CAPABILITIES,
+
+        get provider(): LLMProvider<XAILLMParams> {
+          return providerRef!;
+        },
+
+        async complete(request: LLMRequest<XAILLMParams>): Promise<LLMResponse> {
+          const apiKey = await resolveApiKey(
+            request.config,
+            'XAI_API_KEY',
+            'xai',
+            'llm'
+          );
+
+          const baseUrl = request.config.baseUrl ?? XAI_COMPLETIONS_API_URL;
+          const body = transformRequest(request, modelId);
+
+          const response = await doFetch(
+            baseUrl,
+            {
+              method: 'POST',
+              headers: {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${apiKey}`,
+              },
+              body: JSON.stringify(body),
+              signal: request.signal,
+            },
+            request.config,
+            'xai',
+            'llm'
+          );
+
+          const data = (await response.json()) as XAICompletionsResponse;
+          return transformResponse(data);
+        },
+
+        stream(request: LLMRequest<XAILLMParams>): LLMStreamResult {
+          const state = createStreamState();
+          let responseResolve: (value: LLMResponse) => void;
+          let responseReject: (error: Error) => void;
+
+          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
+            responseResolve = resolve;
+            responseReject = reject;
+          });
+
+          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
+            try {
+              const apiKey = await resolveApiKey(
+                request.config,
+                'XAI_API_KEY',
+                'xai',
+                'llm'
+              );
+
+              const baseUrl = request.config.baseUrl ?? XAI_COMPLETIONS_API_URL;
+              const body = transformRequest(request, modelId);
+              body.stream = true;
+              body.stream_options = { include_usage: true };
+
+              const response = await doStreamFetch(
+                baseUrl,
+                {
+                  method: 'POST',
+                  headers: {
+                    'Content-Type': 'application/json',
+                    Authorization: `Bearer ${apiKey}`,
+                  },
+                  body: JSON.stringify(body),
+                  signal: request.signal,
+                },
+                request.config,
+                'xai',
+                'llm'
+              );
+
+              if (!response.ok) {
+                const error = await normalizeHttpError(response, 'xai', 'llm');
+                responseReject(error);
+                throw error;
+              }
+
+              if (!response.body) {
+                const error = new UPPError(
+                  'No response body for streaming request',
+                  'PROVIDER_ERROR',
+                  'xai',
+                  'llm'
+                );
+                responseReject(error);
+                throw error;
+              }
+
+              for await (const data of parseSSEStream(response.body)) {
+                // Skip [DONE] marker
+                if (data === '[DONE]') {
+                  continue;
+                }
+
+                // Check for xAI error event
+                if (typeof data === 'object' && data !== null) {
+                  const chunk = data as XAICompletionsStreamChunk;
+
+                  // Check for error in chunk
+                  if ('error' in chunk && chunk.error) {
+                    const errorData = chunk.error as { message?: string; type?: string };
+                    const error = new UPPError(
+                      errorData.message ?? 'Unknown error',
+                      'PROVIDER_ERROR',
+                      'xai',
+                      'llm'
+                    );
+                    responseReject(error);
+                    throw error;
+                  }
+
+                  const uppEvents = transformStreamEvent(chunk, state);
+                  for (const event of uppEvents) {
+                    yield event;
+                  }
+                }
+              }
+
+              // Build final response
+              responseResolve(buildResponseFromState(state));
+            } catch (error) {
+              responseReject(error as Error);
+              throw error;
+            }
+          }
+
+          return {
+            [Symbol.asyncIterator]() {
+              return generateEvents();
+            },
+            response: responsePromise,
+          };
+        },
+      };
+
+      return model;
+    },
+  };
+}
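The handler above fixes the shape shared by all three new xAI handlers: bind() returns a model whose stream() result is both an async iterable of StreamEvents and a carrier of a final aggregated response. A minimal driving sketch, assuming the provider object is wired by hand (normally createProvider() does this) and that the request carries a `messages` field (field names beyond `config` and `signal` are assumptions):

const handler = createCompletionsLLMHandler();
handler._setProvider(provider);                    // normally done by createProvider()
const model = handler.bind('grok-3');              // illustrative model id

const result = model.stream({
  config: { apiKey: process.env.XAI_API_KEY },     // resolveApiKey also resolves the XAI_API_KEY env var
  messages: [{ role: 'user', content: 'Hello' }],  // field name assumed
});

for await (const event of result) {
  // normalized StreamEvents produced by transformStreamEvent()
}
const final = await result.response;               // aggregated via buildResponseFromState()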
package/src/providers/xai/llm.messages.ts
@@ -0,0 +1,195 @@
+import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
+import type { StreamEvent } from '../../types/stream.ts';
+import type { LLMProvider } from '../../types/provider.ts';
+import { UPPError } from '../../types/errors.ts';
+import { resolveApiKey } from '../../http/keys.ts';
+import { doFetch, doStreamFetch } from '../../http/fetch.ts';
+import { parseSSEStream } from '../../http/sse.ts';
+import { normalizeHttpError } from '../../http/errors.ts';
+import type { XAILLMParams, XAIMessagesResponse, XAIMessagesStreamEvent } from './types.ts';
+import {
+  transformRequest,
+  transformResponse,
+  transformStreamEvent,
+  createStreamState,
+  buildResponseFromState,
+} from './transform.messages.ts';
+
+const XAI_MESSAGES_API_URL = 'https://api.x.ai/v1/messages';
+
+/**
+ * xAI Messages API capabilities (Anthropic-compatible)
+ */
+const XAI_MESSAGES_CAPABILITIES: LLMCapabilities = {
+  streaming: true,
+  tools: true,
+  structuredOutput: true,
+  imageInput: true,
+  videoInput: false,
+  audioInput: false,
+};
+
+/**
+ * Create xAI Messages API LLM handler (Anthropic-compatible)
+ */
+export function createMessagesLLMHandler(): LLMHandler<XAILLMParams> {
+  // Provider reference injected by createProvider() or xAI's custom factory
+  let providerRef: LLMProvider<XAILLMParams> | null = null;
+
+  return {
+    _setProvider(provider: LLMProvider<XAILLMParams>) {
+      providerRef = provider;
+    },
+
+    bind(modelId: string): BoundLLMModel<XAILLMParams> {
+      // Use the injected provider reference
+      if (!providerRef) {
+        throw new UPPError(
+          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
+          'INVALID_REQUEST',
+          'xai',
+          'llm'
+        );
+      }
+
+      const model: BoundLLMModel<XAILLMParams> = {
+        modelId,
+        capabilities: XAI_MESSAGES_CAPABILITIES,
+
+        get provider(): LLMProvider<XAILLMParams> {
+          return providerRef!;
+        },
+
+        async complete(request: LLMRequest<XAILLMParams>): Promise<LLMResponse> {
+          const apiKey = await resolveApiKey(
+            request.config,
+            'XAI_API_KEY',
+            'xai',
+            'llm'
+          );
+
+          const baseUrl = request.config.baseUrl ?? XAI_MESSAGES_API_URL;
+          const body = transformRequest(request, modelId);
+
+          const response = await doFetch(
+            baseUrl,
+            {
+              method: 'POST',
+              headers: {
+                'Content-Type': 'application/json',
+                'x-api-key': apiKey,
+                'anthropic-version': '2023-06-01',
+              },
+              body: JSON.stringify(body),
+              signal: request.signal,
+            },
+            request.config,
+            'xai',
+            'llm'
+          );
+
+          const data = (await response.json()) as XAIMessagesResponse;
+          return transformResponse(data);
+        },
+
+        stream(request: LLMRequest<XAILLMParams>): LLMStreamResult {
+          const state = createStreamState();
+          let responseResolve: (value: LLMResponse) => void;
+          let responseReject: (error: Error) => void;
+
+          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
+            responseResolve = resolve;
+            responseReject = reject;
+          });
+
+          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
+            try {
+              const apiKey = await resolveApiKey(
+                request.config,
+                'XAI_API_KEY',
+                'xai',
+                'llm'
+              );
+
+              const baseUrl = request.config.baseUrl ?? XAI_MESSAGES_API_URL;
+              const body = transformRequest(request, modelId);
+              body.stream = true;
+
+              const response = await doStreamFetch(
+                baseUrl,
+                {
+                  method: 'POST',
+                  headers: {
+                    'Content-Type': 'application/json',
+                    'x-api-key': apiKey,
+                    'anthropic-version': '2023-06-01',
+                  },
+                  body: JSON.stringify(body),
+                  signal: request.signal,
+                },
+                request.config,
+                'xai',
+                'llm'
+              );
+
+              if (!response.ok) {
+                const error = await normalizeHttpError(response, 'xai', 'llm');
+                responseReject(error);
+                throw error;
+              }
+
+              if (!response.body) {
+                const error = new UPPError(
+                  'No response body for streaming request',
+                  'PROVIDER_ERROR',
+                  'xai',
+                  'llm'
+                );
+                responseReject(error);
+                throw error;
+              }
+
+              for await (const data of parseSSEStream(response.body)) {
+                // Check for xAI error event
+                if (typeof data === 'object' && data !== null && 'type' in data) {
+                  const event = data as XAIMessagesStreamEvent;
+
+                  if (event.type === 'error') {
+                    const error = new UPPError(
+                      event.error.message,
+                      'PROVIDER_ERROR',
+                      'xai',
+                      'llm'
+                    );
+                    responseReject(error);
+                    throw error;
+                  }
+
+                  const uppEvent = transformStreamEvent(event, state);
+                  if (uppEvent) {
+                    yield uppEvent;
+                  }
+                }
+              }
+
+              // Build final response
+              responseResolve(buildResponseFromState(state));
+            } catch (error) {
+              responseReject(error as Error);
+              throw error;
+            }
+          }
+
+          return {
+            [Symbol.asyncIterator]() {
+              return generateEvents();
+            },
+            response: responsePromise,
+          };
+        },
+      };
+
+      return model;
+    },
+  };
+}
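The Messages handler differs from the completions one mainly in transport: it authenticates with x-api-key plus an anthropic-version header rather than a Bearer token, and its transformStreamEvent() yields at most one event per SSE message. Note also (true of all three handlers) that config.baseUrl is used verbatim as the request URL, not as a host prefix. A hedged sketch of a non-streaming call (field names beyond `config` and `signal` are assumptions):

const handler = createMessagesLLMHandler();
handler._setProvider(provider);                        // wired by createProvider() in normal use
const model = handler.bind('grok-3');                  // illustrative model id

const response = await model.complete({
  config: {
    apiKey: '...',                                     // sent as the x-api-key header
    baseUrl: 'https://proxy.example.com/v1/messages',  // replaces the full endpoint URL when set
  },
  messages: [{ role: 'user', content: 'Hello' }],      // field name assumed
});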
package/src/providers/xai/llm.responses.ts
@@ -0,0 +1,211 @@
+import type { LLMHandler, BoundLLMModel, LLMRequest, LLMResponse, LLMStreamResult, LLMCapabilities } from '../../types/llm.ts';
+import type { StreamEvent } from '../../types/stream.ts';
+import type { LLMProvider } from '../../types/provider.ts';
+import { UPPError } from '../../types/errors.ts';
+import { resolveApiKey } from '../../http/keys.ts';
+import { doFetch, doStreamFetch } from '../../http/fetch.ts';
+import { parseSSEStream } from '../../http/sse.ts';
+import { normalizeHttpError } from '../../http/errors.ts';
+import type { XAILLMParams, XAIResponsesResponse, XAIResponsesStreamEvent, XAIResponseErrorEvent } from './types.ts';
+import {
+  transformRequest,
+  transformResponse,
+  transformStreamEvent,
+  createStreamState,
+  buildResponseFromState,
+} from './transform.responses.ts';
+
+const XAI_RESPONSES_API_URL = 'https://api.x.ai/v1/responses';
+
+/**
+ * xAI Responses API capabilities
+ */
+const XAI_RESPONSES_CAPABILITIES: LLMCapabilities = {
+  streaming: true,
+  tools: true,
+  structuredOutput: true,
+  imageInput: true,
+  videoInput: false,
+  audioInput: false,
+};
+
+/**
+ * Create xAI Responses API LLM handler
+ */
+export function createResponsesLLMHandler(): LLMHandler<XAILLMParams> {
+  // Provider reference injected by createProvider() or xAI's custom factory
+  let providerRef: LLMProvider<XAILLMParams> | null = null;
+
+  return {
+    _setProvider(provider: LLMProvider<XAILLMParams>) {
+      providerRef = provider;
+    },
+
+    bind(modelId: string): BoundLLMModel<XAILLMParams> {
+      // Use the injected provider reference
+      if (!providerRef) {
+        throw new UPPError(
+          'Provider reference not set. Handler must be used with createProvider() or have _setProvider called.',
+          'INVALID_REQUEST',
+          'xai',
+          'llm'
+        );
+      }
+
+      const model: BoundLLMModel<XAILLMParams> = {
+        modelId,
+        capabilities: XAI_RESPONSES_CAPABILITIES,
+
+        get provider(): LLMProvider<XAILLMParams> {
+          return providerRef!;
+        },
+
+        async complete(request: LLMRequest<XAILLMParams>): Promise<LLMResponse> {
+          const apiKey = await resolveApiKey(
+            request.config,
+            'XAI_API_KEY',
+            'xai',
+            'llm'
+          );
+
+          const baseUrl = request.config.baseUrl ?? XAI_RESPONSES_API_URL;
+          const body = transformRequest(request, modelId);
+
+          const response = await doFetch(
+            baseUrl,
+            {
+              method: 'POST',
+              headers: {
+                'Content-Type': 'application/json',
+                Authorization: `Bearer ${apiKey}`,
+              },
+              body: JSON.stringify(body),
+              signal: request.signal,
+            },
+            request.config,
+            'xai',
+            'llm'
+          );
+
+          const data = (await response.json()) as XAIResponsesResponse;
+
+          // Check for error in response
+          if (data.status === 'failed' && data.error) {
+            throw new UPPError(
+              data.error.message,
+              'PROVIDER_ERROR',
+              'xai',
+              'llm'
+            );
+          }
+
+          return transformResponse(data);
+        },
+
+        stream(request: LLMRequest<XAILLMParams>): LLMStreamResult {
+          const state = createStreamState();
+          let responseResolve: (value: LLMResponse) => void;
+          let responseReject: (error: Error) => void;
+
+          const responsePromise = new Promise<LLMResponse>((resolve, reject) => {
+            responseResolve = resolve;
+            responseReject = reject;
+          });
+
+          async function* generateEvents(): AsyncGenerator<StreamEvent, void, unknown> {
+            try {
+              const apiKey = await resolveApiKey(
+                request.config,
+                'XAI_API_KEY',
+                'xai',
+                'llm'
+              );
+
+              const baseUrl = request.config.baseUrl ?? XAI_RESPONSES_API_URL;
+              const body = transformRequest(request, modelId);
+              body.stream = true;
+
+              const response = await doStreamFetch(
+                baseUrl,
+                {
+                  method: 'POST',
+                  headers: {
+                    'Content-Type': 'application/json',
+                    Authorization: `Bearer ${apiKey}`,
+                  },
+                  body: JSON.stringify(body),
+                  signal: request.signal,
+                },
+                request.config,
+                'xai',
+                'llm'
+              );
+
+              if (!response.ok) {
+                const error = await normalizeHttpError(response, 'xai', 'llm');
+                responseReject(error);
+                throw error;
+              }
+
+              if (!response.body) {
+                const error = new UPPError(
+                  'No response body for streaming request',
+                  'PROVIDER_ERROR',
+                  'xai',
+                  'llm'
+                );
+                responseReject(error);
+                throw error;
+              }
+
+              for await (const data of parseSSEStream(response.body)) {
+                // Skip [DONE] marker
+                if (data === '[DONE]') {
+                  continue;
+                }
+
+                // Check for xAI error event
+                if (typeof data === 'object' && data !== null) {
+                  const event = data as XAIResponsesStreamEvent;
+
+                  // Check for error event
+                  if (event.type === 'error') {
+                    const errorEvent = event as XAIResponseErrorEvent;
+                    const error = new UPPError(
+                      errorEvent.error.message,
+                      'PROVIDER_ERROR',
+                      'xai',
+                      'llm'
+                    );
+                    responseReject(error);
+                    throw error;
+                  }
+
+                  const uppEvents = transformStreamEvent(event, state);
+                  for (const uppEvent of uppEvents) {
+                    yield uppEvent;
+                  }
+                }
+              }
+
+              // Build final response
+              responseResolve(buildResponseFromState(state));
+            } catch (error) {
+              responseReject(error as Error);
+              throw error;
+            }
+          }
+
+          return {
+            [Symbol.asyncIterator]() {
+              return generateEvents();
+            },
+            response: responsePromise,
+          };
+        },
+      };
+
+      return model;
+    },
+  };
+}
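Unlike the other two handlers, the Responses handler also surfaces API-level failures carried in a successful HTTP body: a response with status 'failed' is rethrown as a PROVIDER_ERROR UPPError, and streaming `error` events are handled the same way. A small error-handling sketch, assuming UPPError is re-exported from the package root and that `model` was obtained as in the earlier sketches (both assumptions):

import { UPPError } from '@providerprotocol/ai';   // import path assumed

try {
  const response = await model.complete({ config: { apiKey: '...' } });
} catch (err) {
  if (err instanceof UPPError) {
    // HTTP errors (normalizeHttpError), 'status: failed' bodies, and stream
    // error events all surface here as UPPError instances
    console.error(err.message);
  }
}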