@providerprotocol/ai 0.0.3 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,467 @@
1
+ import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
2
+ import type { Message } from '../../types/messages.ts';
3
+ import type { StreamEvent } from '../../types/stream.ts';
4
+ import type { Tool, ToolCall } from '../../types/tool.ts';
5
+ import type { TokenUsage } from '../../types/turn.ts';
6
+ import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
7
+ import {
8
+ AssistantMessage,
9
+ isUserMessage,
10
+ isAssistantMessage,
11
+ isToolResultMessage,
12
+ } from '../../types/messages.ts';
13
+ import type {
14
+ XAILLMParams,
15
+ XAIMessagesRequest,
16
+ XAIMessagesMessage,
17
+ XAIMessagesContent,
18
+ XAIMessagesTool,
19
+ XAIMessagesResponse,
20
+ XAIMessagesStreamEvent,
21
+ XAIMessagesContentBlockDeltaEvent,
22
+ } from './types.ts';
23
+
24
+ /**
25
+ * Transform UPP request to xAI Messages API format (Anthropic-compatible)
26
+ */
27
+ export function transformRequest<TParams extends XAILLMParams>(
28
+ request: LLMRequest<TParams>,
29
+ modelId: string
30
+ ): XAIMessagesRequest {
31
+ const params = (request.params ?? {}) as XAILLMParams;
32
+
33
+ const xaiRequest: XAIMessagesRequest = {
34
+ model: modelId,
35
+ messages: request.messages.map(transformMessage),
36
+ };
37
+
38
+ // Only include max_tokens if provided - let API enforce its requirement
39
+ if (params.max_tokens !== undefined) {
40
+ xaiRequest.max_tokens = params.max_tokens;
41
+ }
42
+
43
+ // System prompt (top-level in Messages API)
44
+ if (request.system) {
45
+ xaiRequest.system = request.system;
46
+ }
47
+
48
+ // Model parameters
49
+ if (params.temperature !== undefined) {
50
+ xaiRequest.temperature = params.temperature;
51
+ }
52
+ if (params.top_p !== undefined) {
53
+ xaiRequest.top_p = params.top_p;
54
+ }
55
+ if (params.top_k !== undefined) {
56
+ xaiRequest.top_k = params.top_k;
57
+ }
58
+ if (params.stop_sequences) {
59
+ xaiRequest.stop_sequences = params.stop_sequences;
60
+ }
61
+ if (params.messages_metadata) {
62
+ xaiRequest.metadata = params.messages_metadata;
63
+ }
64
+ if (params.thinking) {
65
+ xaiRequest.thinking = params.thinking;
66
+ }
67
+
68
+ // Tools
69
+ if (request.tools && request.tools.length > 0) {
70
+ xaiRequest.tools = request.tools.map(transformTool);
71
+ xaiRequest.tool_choice = { type: 'auto' };
72
+ }
73
+
74
+ // Structured output via tool-based approach
75
+ // xAI Messages API (like Anthropic) doesn't have native structured output,
76
+ // so we use a tool to enforce the schema
77
+ if (request.structure) {
78
+ const structuredTool: XAIMessagesTool = {
79
+ name: 'json_response',
80
+ description: 'Return the response in the specified JSON format. You MUST use this tool to provide your response.',
81
+ input_schema: {
82
+ type: 'object',
83
+ properties: request.structure.properties,
84
+ required: request.structure.required,
85
+ },
86
+ };
87
+
88
+ // Add the structured output tool (may coexist with user tools)
89
+ xaiRequest.tools = [...(xaiRequest.tools ?? []), structuredTool];
90
+ // Force the model to use the json_response tool
91
+ xaiRequest.tool_choice = { type: 'tool', name: 'json_response' };
92
+ }
93
+
94
+ return xaiRequest;
95
+ }
96
+
97
+ /**
98
+ * Filter to only valid content blocks with a type property
99
+ */
100
+ function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
101
+ return content.filter((c) => c && typeof c.type === 'string');
102
+ }
103
+
104
+ /**
105
+ * Transform a UPP Message to xAI Messages API format
106
+ */
107
+ function transformMessage(message: Message): XAIMessagesMessage {
108
+ if (isUserMessage(message)) {
109
+ const validContent = filterValidContent(message.content);
110
+ return {
111
+ role: 'user',
112
+ content: validContent.map(transformContentBlock),
113
+ };
114
+ }
115
+
116
+ if (isAssistantMessage(message)) {
117
+ const validContent = filterValidContent(message.content);
118
+ const content: XAIMessagesContent[] = validContent.map(transformContentBlock);
119
+
120
+ // Add tool calls as tool_use content blocks
121
+ if (message.toolCalls) {
122
+ for (const call of message.toolCalls) {
123
+ content.push({
124
+ type: 'tool_use',
125
+ id: call.toolCallId,
126
+ name: call.toolName,
127
+ input: call.arguments,
128
+ });
129
+ }
130
+ }
131
+
132
+ // Ensure content is not empty (xAI Messages API requires at least one content block)
133
+ if (content.length === 0) {
134
+ content.push({ type: 'text', text: '' });
135
+ }
136
+
137
+ return {
138
+ role: 'assistant',
139
+ content,
140
+ };
141
+ }
142
+
143
+ if (isToolResultMessage(message)) {
144
+ // Tool results are sent as user messages with tool_result content
145
+ return {
146
+ role: 'user',
147
+ content: message.results.map((result) => ({
148
+ type: 'tool_result' as const,
149
+ tool_use_id: result.toolCallId,
150
+ content:
151
+ typeof result.result === 'string'
152
+ ? result.result
153
+ : JSON.stringify(result.result),
154
+ is_error: result.isError,
155
+ })),
156
+ };
157
+ }
158
+
159
+ throw new Error(`Unknown message type: ${message.type}`);
160
+ }
161
+
162
+ /**
163
+ * Transform a content block to xAI Messages API format
164
+ */
165
+ function transformContentBlock(block: ContentBlock): XAIMessagesContent {
166
+ switch (block.type) {
167
+ case 'text':
168
+ return { type: 'text', text: block.text };
169
+
170
+ case 'image': {
171
+ const imageBlock = block as ImageBlock;
172
+ if (imageBlock.source.type === 'base64') {
173
+ return {
174
+ type: 'image',
175
+ source: {
176
+ type: 'base64',
177
+ media_type: imageBlock.mimeType,
178
+ data: imageBlock.source.data,
179
+ },
180
+ };
181
+ }
182
+ if (imageBlock.source.type === 'url') {
183
+ return {
184
+ type: 'image',
185
+ source: {
186
+ type: 'url',
187
+ url: imageBlock.source.url,
188
+ },
189
+ };
190
+ }
191
+ if (imageBlock.source.type === 'bytes') {
192
+ // Convert bytes to base64
193
+ const base64 = btoa(
194
+ Array.from(imageBlock.source.data)
195
+ .map((b) => String.fromCharCode(b))
196
+ .join('')
197
+ );
198
+ return {
199
+ type: 'image',
200
+ source: {
201
+ type: 'base64',
202
+ media_type: imageBlock.mimeType,
203
+ data: base64,
204
+ },
205
+ };
206
+ }
207
+ throw new Error(`Unknown image source type`);
208
+ }
209
+
210
+ default:
211
+ throw new Error(`Unsupported content type: ${block.type}`);
212
+ }
213
+ }
214
+
215
+ /**
216
+ * Transform a UPP Tool to xAI Messages API format
217
+ */
218
+ function transformTool(tool: Tool): XAIMessagesTool {
219
+ return {
220
+ name: tool.name,
221
+ description: tool.description,
222
+ input_schema: {
223
+ type: 'object',
224
+ properties: tool.parameters.properties,
225
+ required: tool.parameters.required,
226
+ },
227
+ };
228
+ }
229
+
230
+ /**
231
+ * Transform xAI Messages API response to UPP LLMResponse
232
+ */
233
+ export function transformResponse(data: XAIMessagesResponse): LLMResponse {
234
+ // Extract text content
235
+ const textContent: TextBlock[] = [];
236
+ const toolCalls: ToolCall[] = [];
237
+ let structuredData: unknown;
238
+
239
+ for (const block of data.content) {
240
+ if (block.type === 'text') {
241
+ textContent.push({ type: 'text', text: block.text });
242
+ } else if (block.type === 'tool_use') {
243
+ // Check if this is the json_response tool (structured output)
244
+ if (block.name === 'json_response') {
245
+ // Extract structured data from tool arguments
246
+ structuredData = block.input;
247
+ }
248
+ toolCalls.push({
249
+ toolCallId: block.id,
250
+ toolName: block.name,
251
+ arguments: block.input,
252
+ });
253
+ }
254
+ // Skip thinking blocks for now
255
+ }
256
+
257
+ const message = new AssistantMessage(
258
+ textContent,
259
+ toolCalls.length > 0 ? toolCalls : undefined,
260
+ {
261
+ id: data.id,
262
+ metadata: {
263
+ xai: {
264
+ stop_reason: data.stop_reason,
265
+ stop_sequence: data.stop_sequence,
266
+ model: data.model,
267
+ },
268
+ },
269
+ }
270
+ );
271
+
272
+ const usage: TokenUsage = {
273
+ inputTokens: data.usage.input_tokens,
274
+ outputTokens: data.usage.output_tokens,
275
+ totalTokens: data.usage.input_tokens + data.usage.output_tokens,
276
+ };
277
+
278
+ return {
279
+ message,
280
+ usage,
281
+ stopReason: data.stop_reason ?? 'end_turn',
282
+ data: structuredData,
283
+ };
284
+ }
285
+
286
/**
 * Mutable state accumulated while consuming an xAI Messages API event stream;
 * populated incrementally by transformStreamEvent and turned into an
 * LLMResponse by buildResponseFromState.
 */
export interface MessagesStreamState {
  /** Message id captured from the message_start event ('' until seen). */
  messageId: string;
  /** Model name captured from the message_start event ('' until seen). */
  model: string;
  /** Accumulated content blocks, indexed by stream block index; tool_use `input` buffers raw partial JSON text. */
  content: Array<{ type: string; text?: string; id?: string; name?: string; input?: string }>;
  /** stop_reason delivered by the message_delta event, or null if none received. */
  stopReason: string | null;
  /** input_tokens reported in the message_start usage. */
  inputTokens: number;
  /** output_tokens reported in the message_delta usage. */
  outputTokens: number;
  /** Track current content block index for delta events that don't include index */
  currentIndex: number;
}
299
+
300
+ /**
301
+ * Create initial stream state
302
+ */
303
+ export function createStreamState(): MessagesStreamState {
304
+ return {
305
+ messageId: '',
306
+ model: '',
307
+ content: [],
308
+ stopReason: null,
309
+ inputTokens: 0,
310
+ outputTokens: 0,
311
+ currentIndex: 0,
312
+ };
313
+ }
314
+
315
+ /**
316
+ * Transform xAI Messages API stream event to UPP StreamEvent
317
+ * Returns null for events that don't produce UPP events
318
+ */
319
+ export function transformStreamEvent(
320
+ event: XAIMessagesStreamEvent,
321
+ state: MessagesStreamState
322
+ ): StreamEvent | null {
323
+ switch (event.type) {
324
+ case 'message_start':
325
+ state.messageId = event.message.id;
326
+ state.model = event.message.model;
327
+ state.inputTokens = event.message.usage.input_tokens;
328
+ return { type: 'message_start', index: 0, delta: {} };
329
+
330
+ case 'content_block_start':
331
+ // Track current index and initialize content block
332
+ state.currentIndex = event.index;
333
+ if (event.content_block.type === 'text') {
334
+ state.content[event.index] = { type: 'text', text: '' };
335
+ } else if (event.content_block.type === 'tool_use') {
336
+ state.content[event.index] = {
337
+ type: 'tool_use',
338
+ id: event.content_block.id,
339
+ name: event.content_block.name,
340
+ input: '',
341
+ };
342
+ }
343
+ return { type: 'content_block_start', index: event.index, delta: {} };
344
+
345
+ case 'content_block_delta': {
346
+ const delta = event.delta;
347
+ // xAI delta events may not include index, use tracked currentIndex
348
+ const index = event.index ?? state.currentIndex;
349
+ if (delta.type === 'text_delta') {
350
+ // Initialize content block if not already done (in case content_block_start was missed)
351
+ if (!state.content[index]) {
352
+ state.content[index] = { type: 'text', text: '' };
353
+ }
354
+ state.content[index]!.text =
355
+ (state.content[index]!.text ?? '') + delta.text;
356
+ return {
357
+ type: 'text_delta',
358
+ index: index,
359
+ delta: { text: delta.text },
360
+ };
361
+ }
362
+ if (delta.type === 'input_json_delta') {
363
+ // Initialize content block if not already done
364
+ if (!state.content[index]) {
365
+ state.content[index] = { type: 'tool_use', id: '', name: '', input: '' };
366
+ }
367
+ state.content[index]!.input =
368
+ (state.content[index]!.input ?? '') + delta.partial_json;
369
+ return {
370
+ type: 'tool_call_delta',
371
+ index: index,
372
+ delta: {
373
+ argumentsJson: delta.partial_json,
374
+ toolCallId: state.content[index]?.id,
375
+ toolName: state.content[index]?.name,
376
+ },
377
+ };
378
+ }
379
+ if (delta.type === 'thinking_delta') {
380
+ return {
381
+ type: 'reasoning_delta',
382
+ index: index,
383
+ delta: { text: delta.thinking },
384
+ };
385
+ }
386
+ return null;
387
+ }
388
+
389
+ case 'content_block_stop':
390
+ return { type: 'content_block_stop', index: event.index ?? state.currentIndex, delta: {} };
391
+
392
+ case 'message_delta':
393
+ state.stopReason = event.delta.stop_reason;
394
+ state.outputTokens = event.usage.output_tokens;
395
+ return null;
396
+
397
+ case 'message_stop':
398
+ return { type: 'message_stop', index: 0, delta: {} };
399
+
400
+ case 'ping':
401
+ case 'error':
402
+ return null;
403
+
404
+ default:
405
+ return null;
406
+ }
407
+ }
408
+
409
+ /**
410
+ * Build LLMResponse from accumulated stream state
411
+ */
412
+ export function buildResponseFromState(state: MessagesStreamState): LLMResponse {
413
+ const textContent: TextBlock[] = [];
414
+ const toolCalls: ToolCall[] = [];
415
+ let structuredData: unknown;
416
+
417
+ for (const block of state.content) {
418
+ if (block.type === 'text' && block.text) {
419
+ textContent.push({ type: 'text', text: block.text });
420
+ } else if (block.type === 'tool_use' && block.id && block.name) {
421
+ let args: Record<string, unknown> = {};
422
+ if (block.input) {
423
+ try {
424
+ args = JSON.parse(block.input);
425
+ } catch {
426
+ // Invalid JSON - use empty object
427
+ }
428
+ }
429
+ // Check if this is the json_response tool (structured output)
430
+ if (block.name === 'json_response') {
431
+ structuredData = args;
432
+ }
433
+ toolCalls.push({
434
+ toolCallId: block.id,
435
+ toolName: block.name,
436
+ arguments: args,
437
+ });
438
+ }
439
+ }
440
+
441
+ const message = new AssistantMessage(
442
+ textContent,
443
+ toolCalls.length > 0 ? toolCalls : undefined,
444
+ {
445
+ id: state.messageId,
446
+ metadata: {
447
+ xai: {
448
+ stop_reason: state.stopReason,
449
+ model: state.model,
450
+ },
451
+ },
452
+ }
453
+ );
454
+
455
+ const usage: TokenUsage = {
456
+ inputTokens: state.inputTokens,
457
+ outputTokens: state.outputTokens,
458
+ totalTokens: state.inputTokens + state.outputTokens,
459
+ };
460
+
461
+ return {
462
+ message,
463
+ usage,
464
+ stopReason: state.stopReason ?? 'end_turn',
465
+ data: structuredData,
466
+ };
467
+ }