@providerprotocol/ai 0.0.4 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -0
- package/dist/anthropic/index.js +1 -24
- package/dist/anthropic/index.js.map +1 -1
- package/dist/google/index.js +3 -46
- package/dist/google/index.js.map +1 -1
- package/dist/index.js +5 -1
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.js +13 -44
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +46 -27
- package/dist/openai/index.js +2 -116
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +23 -10
- package/dist/openrouter/index.js +2 -85
- package/dist/openrouter/index.js.map +1 -1
- package/dist/xai/index.d.ts +306 -0
- package/dist/xai/index.js +1696 -0
- package/dist/xai/index.js.map +1 -0
- package/package.json +9 -1
- package/src/core/llm.ts +6 -1
- package/src/openai/index.ts +2 -1
- package/src/openrouter/index.ts +2 -1
- package/src/providers/anthropic/transform.ts +7 -29
- package/src/providers/google/transform.ts +9 -49
- package/src/providers/ollama/transform.ts +27 -49
- package/src/providers/openai/index.ts +12 -8
- package/src/providers/openai/llm.completions.ts +9 -9
- package/src/providers/openai/llm.responses.ts +9 -9
- package/src/providers/openai/transform.completions.ts +12 -79
- package/src/providers/openai/transform.responses.ts +12 -54
- package/src/providers/openai/types.ts +54 -31
- package/src/providers/openrouter/index.ts +12 -8
- package/src/providers/openrouter/llm.completions.ts +9 -9
- package/src/providers/openrouter/llm.responses.ts +9 -9
- package/src/providers/openrouter/transform.completions.ts +12 -79
- package/src/providers/openrouter/transform.responses.ts +12 -25
- package/src/providers/openrouter/types.ts +22 -28
- package/src/providers/xai/index.ts +223 -0
- package/src/providers/xai/llm.completions.ts +201 -0
- package/src/providers/xai/llm.messages.ts +195 -0
- package/src/providers/xai/llm.responses.ts +211 -0
- package/src/providers/xai/transform.completions.ts +565 -0
- package/src/providers/xai/transform.messages.ts +448 -0
- package/src/providers/xai/transform.responses.ts +678 -0
- package/src/providers/xai/types.ts +938 -0
- package/src/xai/index.ts +41 -0
package/src/providers/xai/transform.completions.ts
@@ -0,0 +1,565 @@
+import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
+import type { Message } from '../../types/messages.ts';
+import type { StreamEvent } from '../../types/stream.ts';
+import type { Tool, ToolCall } from '../../types/tool.ts';
+import type { TokenUsage } from '../../types/turn.ts';
+import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
+import {
+  AssistantMessage,
+  isUserMessage,
+  isAssistantMessage,
+  isToolResultMessage,
+} from '../../types/messages.ts';
+import type {
+  XAICompletionsParams,
+  XAICompletionsRequest,
+  XAICompletionsMessage,
+  XAIUserContent,
+  XAICompletionsTool,
+  XAICompletionsResponse,
+  XAICompletionsStreamChunk,
+  XAIToolCall,
+} from './types.ts';
+
+/**
+ * Transform UPP request to xAI Chat Completions format
+ *
+ * Params are spread directly to allow pass-through of any xAI API fields,
+ * even those not explicitly defined in our type. This enables developers to
+ * use new API features without waiting for library updates.
+ */
+export function transformRequest(
+  request: LLMRequest<XAICompletionsParams>,
+  modelId: string
+): XAICompletionsRequest {
+  const params = request.params ?? ({} as XAICompletionsParams);
+
+  // Spread params to pass through all fields, then set required fields
+  const xaiRequest: XAICompletionsRequest = {
+    ...params,
+    model: modelId,
+    messages: transformMessages(request.messages, request.system),
+  };
+
+  // Tools come from request, not params
+  if (request.tools && request.tools.length > 0) {
+    xaiRequest.tools = request.tools.map(transformTool);
+  }
+
+  // Structured output via response_format (overrides params.response_format if set)
+  if (request.structure) {
+    const schema: Record<string, unknown> = {
+      type: 'object',
+      properties: request.structure.properties,
+      required: request.structure.required,
+      ...(request.structure.additionalProperties !== undefined
+        ? { additionalProperties: request.structure.additionalProperties }
+        : { additionalProperties: false }),
+    };
+    if (request.structure.description) {
+      schema.description = request.structure.description;
+    }
+
+    xaiRequest.response_format = {
+      type: 'json_schema',
+      json_schema: {
+        name: 'json_response',
+        description: request.structure.description,
+        schema,
+        strict: true,
+      },
+    };
+  }
+
+  return xaiRequest;
+}
+
+/**
+ * Transform messages including system prompt
+ */
+function transformMessages(
+  messages: Message[],
+  system?: string
+): XAICompletionsMessage[] {
+  const result: XAICompletionsMessage[] = [];
+
+  // Add system message first if present
+  if (system) {
+    result.push({
+      role: 'system',
+      content: system,
+    });
+  }
+
+  // Transform each message
+  for (const message of messages) {
+    // Handle tool result messages specially - they need to produce multiple messages
+    if (isToolResultMessage(message)) {
+      const toolMessages = transformToolResults(message);
+      result.push(...toolMessages);
+    } else {
+      const transformed = transformMessage(message);
+      if (transformed) {
+        result.push(transformed);
+      }
+    }
+  }
+
+  return result;
+}
+
+/**
+ * Filter to only valid content blocks with a type property
+ */
+function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
+  return content.filter((c) => c && typeof c.type === 'string');
+}
+
+/**
+ * Transform a UPP Message to xAI format
+ */
+function transformMessage(message: Message): XAICompletionsMessage | null {
+  if (isUserMessage(message)) {
+    const validContent = filterValidContent(message.content);
+    // Check if we can use simple string content
+    if (validContent.length === 1 && validContent[0]?.type === 'text') {
+      return {
+        role: 'user',
+        content: (validContent[0] as TextBlock).text,
+      };
+    }
+    return {
+      role: 'user',
+      content: validContent.map(transformContentBlock),
+    };
+  }
+
+  if (isAssistantMessage(message)) {
+    const validContent = filterValidContent(message.content);
+    // Extract text content
+    const textContent = validContent
+      .filter((c): c is TextBlock => c.type === 'text')
+      .map((c) => c.text)
+      .join('');
+
+    const hasToolCalls = message.toolCalls && message.toolCalls.length > 0;
+
+    const assistantMessage: XAICompletionsMessage = {
+      role: 'assistant',
+      // xAI/OpenAI: content should be null when tool_calls are present and there's no text
+      content: hasToolCalls && !textContent ? null : textContent,
+    };
+
+    // Add tool calls if present
+    if (hasToolCalls) {
+      (assistantMessage as { tool_calls?: XAIToolCall[] }).tool_calls =
+        message.toolCalls!.map((call) => ({
+          id: call.toolCallId,
+          type: 'function' as const,
+          function: {
+            name: call.toolName,
+            arguments: JSON.stringify(call.arguments),
+          },
+        }));
+    }
+
+    return assistantMessage;
+  }
+
+  if (isToolResultMessage(message)) {
+    // Tool results are sent as individual tool messages
+    // Return the first one and handle multiple in a different way
+    // Actually, we need to return multiple messages for multiple tool results
+    // This is handled by the caller - transform each result to a message
+    const results = message.results.map((result) => ({
+      role: 'tool' as const,
+      tool_call_id: result.toolCallId,
+      content:
+        typeof result.result === 'string'
+          ? result.result
+          : JSON.stringify(result.result),
+    }));
+
+    // For now, return the first result - caller should handle multiple
+    return results[0] ?? null;
+  }
+
+  return null;
+}
+
+/**
+ * Transform multiple tool results to messages
+ */
+export function transformToolResults(
+  message: Message
+): XAICompletionsMessage[] {
+  if (!isToolResultMessage(message)) {
+    const single = transformMessage(message);
+    return single ? [single] : [];
+  }
+
+  return message.results.map((result) => ({
+    role: 'tool' as const,
+    tool_call_id: result.toolCallId,
+    content:
+      typeof result.result === 'string'
+        ? result.result
+        : JSON.stringify(result.result),
+  }));
+}
+
+/**
+ * Transform a content block to xAI format
+ */
+function transformContentBlock(block: ContentBlock): XAIUserContent {
+  switch (block.type) {
+    case 'text':
+      return { type: 'text', text: block.text };
+
+    case 'image': {
+      const imageBlock = block as ImageBlock;
+      let url: string;
+
+      if (imageBlock.source.type === 'base64') {
+        url = `data:${imageBlock.mimeType};base64,${imageBlock.source.data}`;
+      } else if (imageBlock.source.type === 'url') {
+        url = imageBlock.source.url;
+      } else if (imageBlock.source.type === 'bytes') {
+        // Convert bytes to base64
+        const base64 = btoa(
+          Array.from(imageBlock.source.data)
+            .map((b) => String.fromCharCode(b))
+            .join('')
+        );
+        url = `data:${imageBlock.mimeType};base64,${base64}`;
+      } else {
+        throw new Error('Unknown image source type');
+      }
+
+      return {
+        type: 'image_url',
+        image_url: { url },
+      };
+    }
+
+    default:
+      throw new Error(`Unsupported content type: ${block.type}`);
+  }
+}
+
+/**
+ * Transform a UPP Tool to xAI format
+ */
+function transformTool(tool: Tool): XAICompletionsTool {
+  return {
+    type: 'function',
+    function: {
+      name: tool.name,
+      description: tool.description,
+      parameters: {
+        type: 'object',
+        properties: tool.parameters.properties,
+        required: tool.parameters.required,
+        ...(tool.parameters.additionalProperties !== undefined
+          ? { additionalProperties: tool.parameters.additionalProperties }
+          : {}),
+      },
+    },
+  };
+}
+
+/**
+ * Transform xAI response to UPP LLMResponse
+ */
+export function transformResponse(data: XAICompletionsResponse): LLMResponse {
+  const choice = data.choices[0];
+  if (!choice) {
+    throw new Error('No choices in xAI response');
+  }
+
+  // Extract text content
+  const textContent: TextBlock[] = [];
+  let structuredData: unknown;
+  if (choice.message.content) {
+    textContent.push({ type: 'text', text: choice.message.content });
+    // Try to parse as JSON for structured output (native JSON mode)
+    try {
+      structuredData = JSON.parse(choice.message.content);
+    } catch {
+      // Not valid JSON - that's fine, might not be structured output
+    }
+  }
+  let hadRefusal = false;
+  if (choice.message.refusal) {
+    textContent.push({ type: 'text', text: choice.message.refusal });
+    hadRefusal = true;
+  }
+
+  // Extract tool calls
+  const toolCalls: ToolCall[] = [];
+  if (choice.message.tool_calls) {
+    for (const call of choice.message.tool_calls) {
+      let args: Record<string, unknown> = {};
+      try {
+        args = JSON.parse(call.function.arguments);
+      } catch {
+        // Invalid JSON - use empty object
+      }
+      toolCalls.push({
+        toolCallId: call.id,
+        toolName: call.function.name,
+        arguments: args,
+      });
+    }
+  }
+
+  const message = new AssistantMessage(
+    textContent,
+    toolCalls.length > 0 ? toolCalls : undefined,
+    {
+      id: data.id,
+      metadata: {
+        xai: {
+          model: data.model,
+          finish_reason: choice.finish_reason,
+          system_fingerprint: data.system_fingerprint,
+          citations: data.citations,
+          inline_citations: data.inline_citations,
+        },
+      },
+    }
+  );
+
+  const usage: TokenUsage = {
+    inputTokens: data.usage.prompt_tokens,
+    outputTokens: data.usage.completion_tokens,
+    totalTokens: data.usage.total_tokens,
+  };
+
+  // Map finish reason to stop reason
+  let stopReason = 'end_turn';
+  switch (choice.finish_reason) {
+    case 'stop':
+      stopReason = 'end_turn';
+      break;
+    case 'length':
+      stopReason = 'max_tokens';
+      break;
+    case 'tool_calls':
+      stopReason = 'tool_use';
+      break;
+    case 'content_filter':
+      stopReason = 'content_filter';
+      break;
+  }
+  if (hadRefusal && stopReason !== 'content_filter') {
+    stopReason = 'content_filter';
+  }
+
+  return {
+    message,
+    usage,
+    stopReason,
+    data: structuredData,
+  };
+}
+
+/**
+ * State for accumulating streaming response
+ */
+export interface CompletionsStreamState {
+  id: string;
+  model: string;
+  text: string;
+  toolCalls: Map<number, { id: string; name: string; arguments: string }>;
+  finishReason: string | null;
+  inputTokens: number;
+  outputTokens: number;
+  hadRefusal: boolean;
+}
+
+/**
+ * Create initial stream state
+ */
+export function createStreamState(): CompletionsStreamState {
+  return {
+    id: '',
+    model: '',
+    text: '',
+    toolCalls: new Map(),
+    finishReason: null,
+    inputTokens: 0,
+    outputTokens: 0,
+    hadRefusal: false,
+  };
+}
+
+/**
+ * Transform xAI stream chunk to UPP StreamEvent
+ * Returns array since one chunk may produce multiple events
+ */
+export function transformStreamEvent(
+  chunk: XAICompletionsStreamChunk,
+  state: CompletionsStreamState
+): StreamEvent[] {
+  const events: StreamEvent[] = [];
+
+  // Update state with basic info
+  if (chunk.id && !state.id) {
+    state.id = chunk.id;
+    events.push({ type: 'message_start', index: 0, delta: {} });
+  }
+  if (chunk.model) {
+    state.model = chunk.model;
+  }
+
+  // Process choices
+  const choice = chunk.choices[0];
+  if (choice) {
+    // Text delta
+    if (choice.delta.content) {
+      state.text += choice.delta.content;
+      events.push({
+        type: 'text_delta',
+        index: 0,
+        delta: { text: choice.delta.content },
+      });
+    }
+    if (choice.delta.refusal) {
+      state.hadRefusal = true;
+      state.text += choice.delta.refusal;
+      events.push({
+        type: 'text_delta',
+        index: 0,
+        delta: { text: choice.delta.refusal },
+      });
+    }
+
+    // Tool call deltas
+    if (choice.delta.tool_calls) {
+      for (const toolCallDelta of choice.delta.tool_calls) {
+        const index = toolCallDelta.index;
+        let toolCall = state.toolCalls.get(index);
+
+        if (!toolCall) {
+          toolCall = { id: '', name: '', arguments: '' };
+          state.toolCalls.set(index, toolCall);
+        }
+
+        if (toolCallDelta.id) {
+          toolCall.id = toolCallDelta.id;
+        }
+        if (toolCallDelta.function?.name) {
+          toolCall.name = toolCallDelta.function.name;
+        }
+        if (toolCallDelta.function?.arguments) {
+          toolCall.arguments += toolCallDelta.function.arguments;
+          events.push({
+            type: 'tool_call_delta',
+            index: index,
+            delta: {
+              toolCallId: toolCall.id,
+              toolName: toolCall.name,
+              argumentsJson: toolCallDelta.function.arguments,
+            },
+          });
+        }
+      }
+    }
+
+    // Finish reason
+    if (choice.finish_reason) {
+      state.finishReason = choice.finish_reason;
+      events.push({ type: 'message_stop', index: 0, delta: {} });
+    }
+  }
+
+  // Usage info (usually comes at the end with stream_options.include_usage)
+  if (chunk.usage) {
+    state.inputTokens = chunk.usage.prompt_tokens;
+    state.outputTokens = chunk.usage.completion_tokens;
+  }
+
+  return events;
+}
+
+/**
+ * Build LLMResponse from accumulated stream state
+ */
+export function buildResponseFromState(state: CompletionsStreamState): LLMResponse {
+  const textContent: TextBlock[] = [];
+  let structuredData: unknown;
+  if (state.text) {
+    textContent.push({ type: 'text', text: state.text });
+    // Try to parse as JSON for structured output (native JSON mode)
+    try {
+      structuredData = JSON.parse(state.text);
+    } catch {
+      // Not valid JSON - that's fine, might not be structured output
+    }
+  }
+
+  const toolCalls: ToolCall[] = [];
+  for (const [, toolCall] of state.toolCalls) {
+    let args: Record<string, unknown> = {};
+    if (toolCall.arguments) {
+      try {
+        args = JSON.parse(toolCall.arguments);
+      } catch {
+        // Invalid JSON - use empty object
+      }
+    }
+    toolCalls.push({
+      toolCallId: toolCall.id,
+      toolName: toolCall.name,
+      arguments: args,
+    });
+  }
+
+  const message = new AssistantMessage(
+    textContent,
+    toolCalls.length > 0 ? toolCalls : undefined,
+    {
+      id: state.id,
+      metadata: {
+        xai: {
+          model: state.model,
+          finish_reason: state.finishReason,
+        },
+      },
+    }
+  );
+
+  const usage: TokenUsage = {
+    inputTokens: state.inputTokens,
+    outputTokens: state.outputTokens,
+    totalTokens: state.inputTokens + state.outputTokens,
+  };
+
+  // Map finish reason to stop reason
+  let stopReason = 'end_turn';
+  switch (state.finishReason) {
+    case 'stop':
+      stopReason = 'end_turn';
+      break;
+    case 'length':
+      stopReason = 'max_tokens';
+      break;
+    case 'tool_calls':
+      stopReason = 'tool_use';
+      break;
+    case 'content_filter':
+      stopReason = 'content_filter';
+      break;
+  }
+  if (state.hadRefusal && stopReason !== 'content_filter') {
+    stopReason = 'content_filter';
+  }
+
+  return {
+    message,
+    usage,
+    stopReason,
+    data: structuredData,
+  };
+}
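
The exported helpers in this file pair into two call paths: transformRequest/transformResponse for a single completion, and createStreamState/transformStreamEvent/buildResponseFromState for streaming. The sketch below shows how a caller might wire them together. It is illustrative only and not part of the package diff: the endpoint URL, headers, model id, and import placement are assumptions, and the package's own llm.completions.ts (added in this release but not shown in this hunk) performs the real HTTP and SSE handling.

// Minimal illustrative sketch (not package code). Endpoint, headers, and model id
// below are assumptions; the provider's llm.completions.ts owns the real wiring.
import {
  transformRequest,
  transformResponse,
  createStreamState,
  transformStreamEvent,
  buildResponseFromState,
} from './transform.completions.ts';
import type { LLMRequest } from '../../types/llm.ts';
import type {
  XAICompletionsParams,
  XAICompletionsResponse,
  XAICompletionsStreamChunk,
} from './types.ts';

// One-shot completion: build the xAI body, POST it, map the response back to UPP.
export async function completeOnce(
  request: LLMRequest<XAICompletionsParams>,
  apiKey: string
) {
  const body = transformRequest(request, 'grok-3'); // model id is a placeholder
  const res = await fetch('https://api.x.ai/v1/chat/completions', { // assumed endpoint
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify(body),
  });
  const data = (await res.json()) as XAICompletionsResponse;
  return transformResponse(data);
}

// Streaming: feed each parsed SSE chunk through transformStreamEvent, emit the
// resulting StreamEvents, then assemble the final LLMResponse from the state.
export function consumeStream(chunks: Iterable<XAICompletionsStreamChunk>) {
  const state = createStreamState();
  for (const chunk of chunks) {
    for (const event of transformStreamEvent(chunk, state)) {
      void event; // e.g. forward text_delta / tool_call_delta to the caller
    }
  }
  return buildResponseFromState(state);
}

The streaming path mirrors the one-shot path by accumulating deltas into CompletionsStreamState and only materialising the AssistantMessage once, in buildResponseFromState.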