@providerprotocol/ai 0.0.11 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/dist/index.js +3 -3
  2. package/dist/index.js.map +1 -1
  3. package/package.json +1 -10
  4. package/src/anthropic/index.ts +0 -3
  5. package/src/core/image.ts +0 -188
  6. package/src/core/llm.ts +0 -650
  7. package/src/core/provider.ts +0 -92
  8. package/src/google/index.ts +0 -3
  9. package/src/http/errors.ts +0 -112
  10. package/src/http/fetch.ts +0 -210
  11. package/src/http/index.ts +0 -31
  12. package/src/http/keys.ts +0 -136
  13. package/src/http/retry.ts +0 -205
  14. package/src/http/sse.ts +0 -136
  15. package/src/index.ts +0 -32
  16. package/src/ollama/index.ts +0 -3
  17. package/src/openai/index.ts +0 -39
  18. package/src/openrouter/index.ts +0 -11
  19. package/src/providers/anthropic/index.ts +0 -17
  20. package/src/providers/anthropic/llm.ts +0 -196
  21. package/src/providers/anthropic/transform.ts +0 -434
  22. package/src/providers/anthropic/types.ts +0 -213
  23. package/src/providers/google/index.ts +0 -17
  24. package/src/providers/google/llm.ts +0 -203
  25. package/src/providers/google/transform.ts +0 -447
  26. package/src/providers/google/types.ts +0 -214
  27. package/src/providers/ollama/index.ts +0 -43
  28. package/src/providers/ollama/llm.ts +0 -272
  29. package/src/providers/ollama/transform.ts +0 -434
  30. package/src/providers/ollama/types.ts +0 -260
  31. package/src/providers/openai/index.ts +0 -186
  32. package/src/providers/openai/llm.completions.ts +0 -201
  33. package/src/providers/openai/llm.responses.ts +0 -211
  34. package/src/providers/openai/transform.completions.ts +0 -561
  35. package/src/providers/openai/transform.responses.ts +0 -708
  36. package/src/providers/openai/types.ts +0 -1249
  37. package/src/providers/openrouter/index.ts +0 -177
  38. package/src/providers/openrouter/llm.completions.ts +0 -201
  39. package/src/providers/openrouter/llm.responses.ts +0 -211
  40. package/src/providers/openrouter/transform.completions.ts +0 -538
  41. package/src/providers/openrouter/transform.responses.ts +0 -742
  42. package/src/providers/openrouter/types.ts +0 -717
  43. package/src/providers/xai/index.ts +0 -223
  44. package/src/providers/xai/llm.completions.ts +0 -201
  45. package/src/providers/xai/llm.messages.ts +0 -195
  46. package/src/providers/xai/llm.responses.ts +0 -211
  47. package/src/providers/xai/transform.completions.ts +0 -565
  48. package/src/providers/xai/transform.messages.ts +0 -448
  49. package/src/providers/xai/transform.responses.ts +0 -678
  50. package/src/providers/xai/types.ts +0 -938
  51. package/src/types/content.ts +0 -133
  52. package/src/types/errors.ts +0 -85
  53. package/src/types/index.ts +0 -105
  54. package/src/types/llm.ts +0 -211
  55. package/src/types/messages.ts +0 -205
  56. package/src/types/provider.ts +0 -195
  57. package/src/types/schema.ts +0 -58
  58. package/src/types/stream.ts +0 -188
  59. package/src/types/thread.ts +0 -226
  60. package/src/types/tool.ts +0 -88
  61. package/src/types/turn.ts +0 -118
  62. package/src/utils/id.ts +0 -28
  63. package/src/xai/index.ts +0 -41
package/src/providers/xai/transform.messages.ts
@@ -1,448 +0,0 @@
- import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
- import type { Message } from '../../types/messages.ts';
- import type { StreamEvent } from '../../types/stream.ts';
- import type { Tool, ToolCall } from '../../types/tool.ts';
- import type { TokenUsage } from '../../types/turn.ts';
- import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
- import {
-   AssistantMessage,
-   isUserMessage,
-   isAssistantMessage,
-   isToolResultMessage,
- } from '../../types/messages.ts';
- import type {
-   XAIMessagesParams,
-   XAIMessagesRequest,
-   XAIMessagesMessage,
-   XAIMessagesContent,
-   XAIMessagesTool,
-   XAIMessagesResponse,
-   XAIMessagesStreamEvent,
-   XAIMessagesContentBlockDeltaEvent,
- } from './types.ts';
-
- /**
-  * Transform UPP request to xAI Messages API format (Anthropic-compatible)
-  *
-  * Params are spread directly to allow pass-through of any xAI API fields,
-  * even those not explicitly defined in our type. This enables developers to
-  * use new API features without waiting for library updates.
-  */
- export function transformRequest(
-   request: LLMRequest<XAIMessagesParams>,
-   modelId: string
- ): XAIMessagesRequest {
-   const params = request.params ?? ({} as XAIMessagesParams);
-
-   // Spread params to pass through all fields, then set required fields
-   const xaiRequest: XAIMessagesRequest = {
-     ...params,
-     model: modelId,
-     messages: request.messages.map(transformMessage),
-   };
-
-   // System prompt (top-level in Messages API)
-   if (request.system) {
-     xaiRequest.system = request.system;
-   }
-
-   // Tools come from request, not params
-   if (request.tools && request.tools.length > 0) {
-     xaiRequest.tools = request.tools.map(transformTool);
-     xaiRequest.tool_choice = { type: 'auto' };
-   }
-
-   // Structured output via tool-based approach
-   // xAI Messages API (like Anthropic) doesn't have native structured output,
-   // so we use a tool to enforce the schema
-   if (request.structure) {
-     const structuredTool: XAIMessagesTool = {
-       name: 'json_response',
-       description: 'Return the response in the specified JSON format. You MUST use this tool to provide your response.',
-       input_schema: {
-         type: 'object',
-         properties: request.structure.properties,
-         required: request.structure.required,
-       },
-     };
-
-     // Add the structured output tool (may coexist with user tools)
-     xaiRequest.tools = [...(xaiRequest.tools ?? []), structuredTool];
-     // Force the model to use the json_response tool
-     xaiRequest.tool_choice = { type: 'tool', name: 'json_response' };
-   }
-
-   return xaiRequest;
- }
-
- /**
-  * Filter to only valid content blocks with a type property
-  */
- function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
-   return content.filter((c) => c && typeof c.type === 'string');
- }
-
- /**
-  * Transform a UPP Message to xAI Messages API format
-  */
- function transformMessage(message: Message): XAIMessagesMessage {
-   if (isUserMessage(message)) {
-     const validContent = filterValidContent(message.content);
-     return {
-       role: 'user',
-       content: validContent.map(transformContentBlock),
-     };
-   }
-
-   if (isAssistantMessage(message)) {
-     const validContent = filterValidContent(message.content);
-     const content: XAIMessagesContent[] = validContent.map(transformContentBlock);
-
-     // Add tool calls as tool_use content blocks
-     if (message.toolCalls) {
-       for (const call of message.toolCalls) {
-         content.push({
-           type: 'tool_use',
-           id: call.toolCallId,
-           name: call.toolName,
-           input: call.arguments,
-         });
-       }
-     }
-
-     // Ensure content is not empty (xAI Messages API requires at least one content block)
-     if (content.length === 0) {
-       content.push({ type: 'text', text: '' });
-     }
-
-     return {
-       role: 'assistant',
-       content,
-     };
-   }
-
-   if (isToolResultMessage(message)) {
-     // Tool results are sent as user messages with tool_result content
-     return {
-       role: 'user',
-       content: message.results.map((result) => ({
-         type: 'tool_result' as const,
-         tool_use_id: result.toolCallId,
-         content:
-           typeof result.result === 'string'
-             ? result.result
-             : JSON.stringify(result.result),
-         is_error: result.isError,
-       })),
-     };
-   }
-
-   throw new Error(`Unknown message type: ${message.type}`);
- }
-
- /**
-  * Transform a content block to xAI Messages API format
-  */
- function transformContentBlock(block: ContentBlock): XAIMessagesContent {
-   switch (block.type) {
-     case 'text':
-       return { type: 'text', text: block.text };
-
-     case 'image': {
-       const imageBlock = block as ImageBlock;
-       if (imageBlock.source.type === 'base64') {
-         return {
-           type: 'image',
-           source: {
-             type: 'base64',
-             media_type: imageBlock.mimeType,
-             data: imageBlock.source.data,
-           },
-         };
-       }
-       if (imageBlock.source.type === 'url') {
-         return {
-           type: 'image',
-           source: {
-             type: 'url',
-             url: imageBlock.source.url,
-           },
-         };
-       }
-       if (imageBlock.source.type === 'bytes') {
-         // Convert bytes to base64
-         const base64 = btoa(
-           Array.from(imageBlock.source.data)
-             .map((b) => String.fromCharCode(b))
-             .join('')
-         );
-         return {
-           type: 'image',
-           source: {
-             type: 'base64',
-             media_type: imageBlock.mimeType,
-             data: base64,
-           },
-         };
-       }
-       throw new Error(`Unknown image source type`);
-     }
-
-     default:
-       throw new Error(`Unsupported content type: ${block.type}`);
-   }
- }
-
- /**
-  * Transform a UPP Tool to xAI Messages API format
-  */
- function transformTool(tool: Tool): XAIMessagesTool {
-   return {
-     name: tool.name,
-     description: tool.description,
-     input_schema: {
-       type: 'object',
-       properties: tool.parameters.properties,
-       required: tool.parameters.required,
-     },
-   };
- }
-
- /**
-  * Transform xAI Messages API response to UPP LLMResponse
-  */
- export function transformResponse(data: XAIMessagesResponse): LLMResponse {
-   // Extract text content
-   const textContent: TextBlock[] = [];
-   const toolCalls: ToolCall[] = [];
-   let structuredData: unknown;
-
-   for (const block of data.content) {
-     if (block.type === 'text') {
-       textContent.push({ type: 'text', text: block.text });
-     } else if (block.type === 'tool_use') {
-       // Check if this is the json_response tool (structured output)
-       if (block.name === 'json_response') {
-         // Extract structured data from tool arguments
-         structuredData = block.input;
-       }
-       toolCalls.push({
-         toolCallId: block.id,
-         toolName: block.name,
-         arguments: block.input,
-       });
-     }
-     // Skip thinking blocks for now
-   }
-
-   const message = new AssistantMessage(
-     textContent,
-     toolCalls.length > 0 ? toolCalls : undefined,
-     {
-       id: data.id,
-       metadata: {
-         xai: {
-           stop_reason: data.stop_reason,
-           stop_sequence: data.stop_sequence,
-           model: data.model,
-         },
-       },
-     }
-   );
-
-   const usage: TokenUsage = {
-     inputTokens: data.usage.input_tokens,
-     outputTokens: data.usage.output_tokens,
-     totalTokens: data.usage.input_tokens + data.usage.output_tokens,
-   };
-
-   return {
-     message,
-     usage,
-     stopReason: data.stop_reason ?? 'end_turn',
-     data: structuredData,
-   };
- }
-
- /**
-  * State for accumulating streaming response
-  */
- export interface MessagesStreamState {
-   messageId: string;
-   model: string;
-   content: Array<{ type: string; text?: string; id?: string; name?: string; input?: string }>;
-   stopReason: string | null;
-   inputTokens: number;
-   outputTokens: number;
-   /** Track current content block index for delta events that don't include index */
-   currentIndex: number;
- }
-
- /**
-  * Create initial stream state
-  */
- export function createStreamState(): MessagesStreamState {
-   return {
-     messageId: '',
-     model: '',
-     content: [],
-     stopReason: null,
-     inputTokens: 0,
-     outputTokens: 0,
-     currentIndex: 0,
-   };
- }
-
- /**
-  * Transform xAI Messages API stream event to UPP StreamEvent
-  * Returns null for events that don't produce UPP events
-  */
- export function transformStreamEvent(
-   event: XAIMessagesStreamEvent,
-   state: MessagesStreamState
- ): StreamEvent | null {
-   switch (event.type) {
-     case 'message_start':
-       state.messageId = event.message.id;
-       state.model = event.message.model;
-       state.inputTokens = event.message.usage.input_tokens;
-       return { type: 'message_start', index: 0, delta: {} };
-
-     case 'content_block_start':
-       // Track current index and initialize content block
-       state.currentIndex = event.index;
-       if (event.content_block.type === 'text') {
-         state.content[event.index] = { type: 'text', text: '' };
-       } else if (event.content_block.type === 'tool_use') {
-         state.content[event.index] = {
-           type: 'tool_use',
-           id: event.content_block.id,
-           name: event.content_block.name,
-           input: '',
-         };
-       }
-       return { type: 'content_block_start', index: event.index, delta: {} };
-
-     case 'content_block_delta': {
-       const delta = event.delta;
-       // xAI delta events may not include index, use tracked currentIndex
-       const index = event.index ?? state.currentIndex;
-       if (delta.type === 'text_delta') {
-         // Initialize content block if not already done (in case content_block_start was missed)
-         if (!state.content[index]) {
-           state.content[index] = { type: 'text', text: '' };
-         }
-         state.content[index]!.text =
-           (state.content[index]!.text ?? '') + delta.text;
-         return {
-           type: 'text_delta',
-           index: index,
-           delta: { text: delta.text },
-         };
-       }
-       if (delta.type === 'input_json_delta') {
-         // Initialize content block if not already done
-         if (!state.content[index]) {
-           state.content[index] = { type: 'tool_use', id: '', name: '', input: '' };
-         }
-         state.content[index]!.input =
-           (state.content[index]!.input ?? '') + delta.partial_json;
-         return {
-           type: 'tool_call_delta',
-           index: index,
-           delta: {
-             argumentsJson: delta.partial_json,
-             toolCallId: state.content[index]?.id,
-             toolName: state.content[index]?.name,
-           },
-         };
-       }
-       if (delta.type === 'thinking_delta') {
-         return {
-           type: 'reasoning_delta',
-           index: index,
-           delta: { text: delta.thinking },
-         };
-       }
-       return null;
-     }
-
-     case 'content_block_stop':
-       return { type: 'content_block_stop', index: event.index ?? state.currentIndex, delta: {} };
-
-     case 'message_delta':
-       state.stopReason = event.delta.stop_reason;
-       state.outputTokens = event.usage.output_tokens;
-       return null;
-
-     case 'message_stop':
-       return { type: 'message_stop', index: 0, delta: {} };
-
-     case 'ping':
-     case 'error':
-       return null;
-
-     default:
-       return null;
-   }
- }
-
- /**
-  * Build LLMResponse from accumulated stream state
-  */
- export function buildResponseFromState(state: MessagesStreamState): LLMResponse {
-   const textContent: TextBlock[] = [];
-   const toolCalls: ToolCall[] = [];
-   let structuredData: unknown;
-
-   for (const block of state.content) {
-     if (block.type === 'text' && block.text) {
-       textContent.push({ type: 'text', text: block.text });
-     } else if (block.type === 'tool_use' && block.id && block.name) {
-       let args: Record<string, unknown> = {};
-       if (block.input) {
-         try {
-           args = JSON.parse(block.input);
-         } catch {
-           // Invalid JSON - use empty object
-         }
-       }
-       // Check if this is the json_response tool (structured output)
-       if (block.name === 'json_response') {
-         structuredData = args;
-       }
-       toolCalls.push({
-         toolCallId: block.id,
-         toolName: block.name,
-         arguments: args,
-       });
-     }
-   }
-
-   const message = new AssistantMessage(
-     textContent,
-     toolCalls.length > 0 ? toolCalls : undefined,
-     {
-       id: state.messageId,
-       metadata: {
-         xai: {
-           stop_reason: state.stopReason,
-           model: state.model,
-         },
-       },
-     }
-   );
-
-   const usage: TokenUsage = {
-     inputTokens: state.inputTokens,
-     outputTokens: state.outputTokens,
-     totalTokens: state.inputTokens + state.outputTokens,
-   };
-
-   return {
-     message,
-     usage,
-     stopReason: state.stopReason ?? 'end_turn',
-     data: structuredData,
-   };
- }
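
Two conventions in the removed transform are easy to miss when reading it as a raw diff: request params are spread straight onto the wire body so unrecognized xAI fields pass through, and structured output is emulated by injecting a forced json_response tool, since the Messages API (like Anthropic's) has no native JSON mode. The sketch below is illustrative only; it assumes the 0.0.11 sources are still available to import from, and the request literal (the prompt, max_tokens, and the 'grok-3' model id) is invented for the example rather than taken from the package.

// Illustrative sketch (not part of the package): drives the deleted
// transformRequest() to show the wire body it produced for structured output.
// The request literal and 'grok-3' model id are invented for this example, and
// the exact LLMRequest/Schema field shapes may differ from the 0.0.11 types.
import { transformRequest } from './transform.messages.ts';
import type { LLMRequest } from '../../types/llm.ts';
import type { XAIMessagesParams } from './types.ts';

const request: LLMRequest<XAIMessagesParams> = {
  messages: [],                          // UPP messages would normally go here
  system: 'Answer with a single JSON object.',
  structure: {                           // triggers the forced json_response tool
    properties: { answer: { type: 'string' } },
    required: ['answer'],
  },
  params: { max_tokens: 256 },           // spread through to the xAI body as-is
};

const body = transformRequest(request, 'grok-3');
// body.tools now contains the injected json_response tool and body.tool_choice
// is { type: 'tool', name: 'json_response' }, forcing schema-shaped output.
console.log(JSON.stringify(body, null, 2));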
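
The streaming half of the deleted file follows an accumulate-then-build flow: createStreamState() starts an empty accumulator, transformStreamEvent() folds each SSE event into it while emitting UPP stream events, and buildResponseFromState() turns the accumulated state into a final LLMResponse. Below is a minimal sketch of that loop under the same assumption that the 0.0.11 sources are importable; the event literals are hand-written to mimic only the fields the code reads, are not captured traffic, and the concrete types in types.ts may require more fields than shown.

// Illustrative sketch (not part of the package): replays a short, hand-written
// event sequence through the deleted streaming helpers. Real events would come
// from the SSE parser in package/src/http/sse.ts.
import {
  createStreamState,
  transformStreamEvent,
  buildResponseFromState,
} from './transform.messages.ts';
import type { XAIMessagesStreamEvent } from './types.ts';

const events = [
  { type: 'message_start', message: { id: 'msg_1', model: 'grok-3', usage: { input_tokens: 12 } } },
  { type: 'content_block_start', index: 0, content_block: { type: 'text', text: '' } },
  { type: 'content_block_delta', index: 0, delta: { type: 'text_delta', text: 'Hello' } },
  { type: 'content_block_stop', index: 0 },
  { type: 'message_delta', delta: { stop_reason: 'end_turn' }, usage: { output_tokens: 3 } },
  { type: 'message_stop' },
] as XAIMessagesStreamEvent[];

const state = createStreamState();
for (const event of events) {
  const uppEvent = transformStreamEvent(event, state);
  if (uppEvent) console.log(uppEvent.type); // message_start, content_block_start, text_delta, ...
}

const response = buildResponseFromState(state);
// The state accumulated the 'Hello' text block and both usage counters, so:
// response.usage -> { inputTokens: 12, outputTokens: 3, totalTokens: 15 }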