@providerprotocol/ai 0.0.11 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/dist/anthropic/index.d.ts +51 -15
  2. package/dist/anthropic/index.js +54 -19
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
  5. package/dist/chunk-MOU4U3PO.js.map +1 -0
  6. package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
  7. package/dist/chunk-MSR5P65T.js.map +1 -0
  8. package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
  9. package/dist/chunk-SVYROCLD.js.map +1 -0
  10. package/dist/chunk-U4JJC2YX.js +234 -0
  11. package/dist/chunk-U4JJC2YX.js.map +1 -0
  12. package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
  13. package/dist/chunk-Z7RBRCRN.js.map +1 -0
  14. package/dist/google/index.d.ts +376 -7
  15. package/dist/google/index.js +127 -15
  16. package/dist/google/index.js.map +1 -1
  17. package/dist/http/index.d.ts +222 -25
  18. package/dist/http/index.js +3 -3
  19. package/dist/index.d.ts +1482 -198
  20. package/dist/index.js +233 -49
  21. package/dist/index.js.map +1 -1
  22. package/dist/ollama/index.d.ts +92 -20
  23. package/dist/ollama/index.js +17 -7
  24. package/dist/ollama/index.js.map +1 -1
  25. package/dist/openai/index.d.ts +340 -61
  26. package/dist/openai/index.js +57 -15
  27. package/dist/openai/index.js.map +1 -1
  28. package/dist/openrouter/index.d.ts +107 -51
  29. package/dist/openrouter/index.js +36 -8
  30. package/dist/openrouter/index.js.map +1 -1
  31. package/dist/provider-mKkz7Q9U.d.ts +488 -0
  32. package/dist/retry-Dh70lgr0.d.ts +508 -0
  33. package/dist/xai/index.d.ts +97 -22
  34. package/dist/xai/index.js +55 -19
  35. package/dist/xai/index.js.map +1 -1
  36. package/package.json +8 -12
  37. package/dist/chunk-CUCRF5W6.js +0 -136
  38. package/dist/chunk-CUCRF5W6.js.map +0 -1
  39. package/dist/chunk-SUNYWHTH.js.map +0 -1
  40. package/dist/chunk-W4BB4BG2.js.map +0 -1
  41. package/dist/chunk-X5G4EHL7.js.map +0 -1
  42. package/dist/chunk-Y6Q7JCNP.js.map +0 -1
  43. package/dist/provider-CUJWjgNl.d.ts +0 -192
  44. package/dist/retry-I2661_rv.d.ts +0 -118
  45. package/src/anthropic/index.ts +0 -3
  46. package/src/core/image.ts +0 -188
  47. package/src/core/llm.ts +0 -650
  48. package/src/core/provider.ts +0 -92
  49. package/src/google/index.ts +0 -3
  50. package/src/http/errors.ts +0 -112
  51. package/src/http/fetch.ts +0 -210
  52. package/src/http/index.ts +0 -31
  53. package/src/http/keys.ts +0 -136
  54. package/src/http/retry.ts +0 -205
  55. package/src/http/sse.ts +0 -136
  56. package/src/index.ts +0 -32
  57. package/src/ollama/index.ts +0 -3
  58. package/src/openai/index.ts +0 -39
  59. package/src/openrouter/index.ts +0 -11
  60. package/src/providers/anthropic/index.ts +0 -17
  61. package/src/providers/anthropic/llm.ts +0 -196
  62. package/src/providers/anthropic/transform.ts +0 -434
  63. package/src/providers/anthropic/types.ts +0 -213
  64. package/src/providers/google/index.ts +0 -17
  65. package/src/providers/google/llm.ts +0 -203
  66. package/src/providers/google/transform.ts +0 -447
  67. package/src/providers/google/types.ts +0 -214
  68. package/src/providers/ollama/index.ts +0 -43
  69. package/src/providers/ollama/llm.ts +0 -272
  70. package/src/providers/ollama/transform.ts +0 -434
  71. package/src/providers/ollama/types.ts +0 -260
  72. package/src/providers/openai/index.ts +0 -186
  73. package/src/providers/openai/llm.completions.ts +0 -201
  74. package/src/providers/openai/llm.responses.ts +0 -211
  75. package/src/providers/openai/transform.completions.ts +0 -561
  76. package/src/providers/openai/transform.responses.ts +0 -708
  77. package/src/providers/openai/types.ts +0 -1249
  78. package/src/providers/openrouter/index.ts +0 -177
  79. package/src/providers/openrouter/llm.completions.ts +0 -201
  80. package/src/providers/openrouter/llm.responses.ts +0 -211
  81. package/src/providers/openrouter/transform.completions.ts +0 -538
  82. package/src/providers/openrouter/transform.responses.ts +0 -742
  83. package/src/providers/openrouter/types.ts +0 -717
  84. package/src/providers/xai/index.ts +0 -223
  85. package/src/providers/xai/llm.completions.ts +0 -201
  86. package/src/providers/xai/llm.messages.ts +0 -195
  87. package/src/providers/xai/llm.responses.ts +0 -211
  88. package/src/providers/xai/transform.completions.ts +0 -565
  89. package/src/providers/xai/transform.messages.ts +0 -448
  90. package/src/providers/xai/transform.responses.ts +0 -678
  91. package/src/providers/xai/types.ts +0 -938
  92. package/src/types/content.ts +0 -133
  93. package/src/types/errors.ts +0 -85
  94. package/src/types/index.ts +0 -105
  95. package/src/types/llm.ts +0 -211
  96. package/src/types/messages.ts +0 -205
  97. package/src/types/provider.ts +0 -195
  98. package/src/types/schema.ts +0 -58
  99. package/src/types/stream.ts +0 -188
  100. package/src/types/thread.ts +0 -226
  101. package/src/types/tool.ts +0 -88
  102. package/src/types/turn.ts +0 -118
  103. package/src/utils/id.ts +0 -28
  104. package/src/xai/index.ts +0 -41
@@ -1,561 +0,0 @@
1
- import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
2
- import type { Message } from '../../types/messages.ts';
3
- import type { StreamEvent } from '../../types/stream.ts';
4
- import type { Tool, ToolCall } from '../../types/tool.ts';
5
- import type { TokenUsage } from '../../types/turn.ts';
6
- import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
7
- import {
8
- AssistantMessage,
9
- isUserMessage,
10
- isAssistantMessage,
11
- isToolResultMessage,
12
- } from '../../types/messages.ts';
13
- import type {
14
- OpenAICompletionsParams,
15
- OpenAICompletionsRequest,
16
- OpenAICompletionsMessage,
17
- OpenAIUserContent,
18
- OpenAICompletionsTool,
19
- OpenAICompletionsResponse,
20
- OpenAICompletionsStreamChunk,
21
- OpenAIToolCall,
22
- } from './types.ts';
23
-
24
- /**
25
- * Transform UPP request to OpenAI Chat Completions format
26
- *
27
- * Params are spread directly to allow pass-through of any OpenAI API fields,
28
- * even those not explicitly defined in our type. This enables developers to
29
- * use new API features without waiting for library updates.
30
- */
31
- export function transformRequest(
32
- request: LLMRequest<OpenAICompletionsParams>,
33
- modelId: string
34
- ): OpenAICompletionsRequest {
35
- const params = request.params ?? ({} as OpenAICompletionsParams);
36
-
37
- // Spread params to pass through all fields, then set required fields
38
- const openaiRequest: OpenAICompletionsRequest = {
39
- ...params,
40
- model: modelId,
41
- messages: transformMessages(request.messages, request.system),
42
- };
43
-
44
- // Tools come from request, not params
45
- if (request.tools && request.tools.length > 0) {
46
- openaiRequest.tools = request.tools.map(transformTool);
47
- }
48
-
49
- // Structured output via response_format (overrides params.response_format if set)
50
- if (request.structure) {
51
- const schema: Record<string, unknown> = {
52
- type: 'object',
53
- properties: request.structure.properties,
54
- required: request.structure.required,
55
- ...(request.structure.additionalProperties !== undefined
56
- ? { additionalProperties: request.structure.additionalProperties }
57
- : { additionalProperties: false }),
58
- };
59
- if (request.structure.description) {
60
- schema.description = request.structure.description;
61
- }
62
-
63
- openaiRequest.response_format = {
64
- type: 'json_schema',
65
- json_schema: {
66
- name: 'json_response',
67
- description: request.structure.description,
68
- schema,
69
- strict: true,
70
- },
71
- };
72
- }
73
-
74
- return openaiRequest;
75
- }
76
-
77
- /**
78
- * Transform messages including system prompt
79
- */
80
- function transformMessages(
81
- messages: Message[],
82
- system?: string
83
- ): OpenAICompletionsMessage[] {
84
- const result: OpenAICompletionsMessage[] = [];
85
-
86
- // Add system message first if present
87
- if (system) {
88
- result.push({
89
- role: 'system',
90
- content: system,
91
- });
92
- }
93
-
94
- // Transform each message
95
- for (const message of messages) {
96
- // Handle tool result messages specially - they need to produce multiple messages
97
- if (isToolResultMessage(message)) {
98
- const toolMessages = transformToolResults(message);
99
- result.push(...toolMessages);
100
- } else {
101
- const transformed = transformMessage(message);
102
- if (transformed) {
103
- result.push(transformed);
104
- }
105
- }
106
- }
107
-
108
- return result;
109
- }
110
-
111
- /**
112
- * Filter to only valid content blocks with a type property
113
- */
114
- function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
115
- return content.filter((c) => c && typeof c.type === 'string');
116
- }
117
-
118
- /**
119
- * Transform a UPP Message to OpenAI format
120
- */
121
- function transformMessage(message: Message): OpenAICompletionsMessage | null {
122
- if (isUserMessage(message)) {
123
- const validContent = filterValidContent(message.content);
124
- // Check if we can use simple string content
125
- if (validContent.length === 1 && validContent[0]?.type === 'text') {
126
- return {
127
- role: 'user',
128
- content: (validContent[0] as TextBlock).text,
129
- };
130
- }
131
- return {
132
- role: 'user',
133
- content: validContent.map(transformContentBlock),
134
- };
135
- }
136
-
137
- if (isAssistantMessage(message)) {
138
- const validContent = filterValidContent(message.content);
139
- // Extract text content
140
- const textContent = validContent
141
- .filter((c): c is TextBlock => c.type === 'text')
142
- .map((c) => c.text)
143
- .join('');
144
-
145
- const assistantMessage: OpenAICompletionsMessage = {
146
- role: 'assistant',
147
- content: textContent || null,
148
- };
149
-
150
- // Add tool calls if present
151
- if (message.toolCalls && message.toolCalls.length > 0) {
152
- (assistantMessage as { tool_calls?: OpenAIToolCall[] }).tool_calls =
153
- message.toolCalls.map((call) => ({
154
- id: call.toolCallId,
155
- type: 'function' as const,
156
- function: {
157
- name: call.toolName,
158
- arguments: JSON.stringify(call.arguments),
159
- },
160
- }));
161
- }
162
-
163
- return assistantMessage;
164
- }
165
-
166
- if (isToolResultMessage(message)) {
167
- // Tool results are sent as individual tool messages
168
- // Return the first one and handle multiple in a different way
169
- // Actually, we need to return multiple messages for multiple tool results
170
- // This is handled by the caller - transform each result to a message
171
- const results = message.results.map((result) => ({
172
- role: 'tool' as const,
173
- tool_call_id: result.toolCallId,
174
- content:
175
- typeof result.result === 'string'
176
- ? result.result
177
- : JSON.stringify(result.result),
178
- }));
179
-
180
- // For now, return the first result - caller should handle multiple
181
- return results[0] ?? null;
182
- }
183
-
184
- return null;
185
- }
186
-
187
- /**
188
- * Transform multiple tool results to messages
189
- */
190
- export function transformToolResults(
191
- message: Message
192
- ): OpenAICompletionsMessage[] {
193
- if (!isToolResultMessage(message)) {
194
- const single = transformMessage(message);
195
- return single ? [single] : [];
196
- }
197
-
198
- return message.results.map((result) => ({
199
- role: 'tool' as const,
200
- tool_call_id: result.toolCallId,
201
- content:
202
- typeof result.result === 'string'
203
- ? result.result
204
- : JSON.stringify(result.result),
205
- }));
206
- }
207
-
208
- /**
209
- * Transform a content block to OpenAI format
210
- */
211
- function transformContentBlock(block: ContentBlock): OpenAIUserContent {
212
- switch (block.type) {
213
- case 'text':
214
- return { type: 'text', text: block.text };
215
-
216
- case 'image': {
217
- const imageBlock = block as ImageBlock;
218
- let url: string;
219
-
220
- if (imageBlock.source.type === 'base64') {
221
- url = `data:${imageBlock.mimeType};base64,${imageBlock.source.data}`;
222
- } else if (imageBlock.source.type === 'url') {
223
- url = imageBlock.source.url;
224
- } else if (imageBlock.source.type === 'bytes') {
225
- // Convert bytes to base64
226
- const base64 = btoa(
227
- Array.from(imageBlock.source.data)
228
- .map((b) => String.fromCharCode(b))
229
- .join('')
230
- );
231
- url = `data:${imageBlock.mimeType};base64,${base64}`;
232
- } else {
233
- throw new Error('Unknown image source type');
234
- }
235
-
236
- return {
237
- type: 'image_url',
238
- image_url: { url },
239
- };
240
- }
241
-
242
- default:
243
- throw new Error(`Unsupported content type: ${block.type}`);
244
- }
245
- }
246
-
247
- /**
248
- * Transform a UPP Tool to OpenAI format
249
- */
250
- function transformTool(tool: Tool): OpenAICompletionsTool {
251
- return {
252
- type: 'function',
253
- function: {
254
- name: tool.name,
255
- description: tool.description,
256
- parameters: {
257
- type: 'object',
258
- properties: tool.parameters.properties,
259
- required: tool.parameters.required,
260
- ...(tool.parameters.additionalProperties !== undefined
261
- ? { additionalProperties: tool.parameters.additionalProperties }
262
- : {}),
263
- },
264
- },
265
- };
266
- }
267
-
268
- /**
269
- * Transform OpenAI response to UPP LLMResponse
270
- */
271
- export function transformResponse(data: OpenAICompletionsResponse): LLMResponse {
272
- const choice = data.choices[0];
273
- if (!choice) {
274
- throw new Error('No choices in OpenAI response');
275
- }
276
-
277
- // Extract text content
278
- const textContent: TextBlock[] = [];
279
- let structuredData: unknown;
280
- if (choice.message.content) {
281
- textContent.push({ type: 'text', text: choice.message.content });
282
- // Try to parse as JSON for structured output (native JSON mode)
283
- try {
284
- structuredData = JSON.parse(choice.message.content);
285
- } catch {
286
- // Not valid JSON - that's fine, might not be structured output
287
- }
288
- }
289
- let hadRefusal = false;
290
- if (choice.message.refusal) {
291
- textContent.push({ type: 'text', text: choice.message.refusal });
292
- hadRefusal = true;
293
- }
294
-
295
- // Extract tool calls
296
- const toolCalls: ToolCall[] = [];
297
- if (choice.message.tool_calls) {
298
- for (const call of choice.message.tool_calls) {
299
- let args: Record<string, unknown> = {};
300
- try {
301
- args = JSON.parse(call.function.arguments);
302
- } catch {
303
- // Invalid JSON - use empty object
304
- }
305
- toolCalls.push({
306
- toolCallId: call.id,
307
- toolName: call.function.name,
308
- arguments: args,
309
- });
310
- }
311
- }
312
-
313
- const message = new AssistantMessage(
314
- textContent,
315
- toolCalls.length > 0 ? toolCalls : undefined,
316
- {
317
- id: data.id,
318
- metadata: {
319
- openai: {
320
- model: data.model,
321
- finish_reason: choice.finish_reason,
322
- system_fingerprint: data.system_fingerprint,
323
- service_tier: data.service_tier,
324
- },
325
- },
326
- }
327
- );
328
-
329
- const usage: TokenUsage = {
330
- inputTokens: data.usage.prompt_tokens,
331
- outputTokens: data.usage.completion_tokens,
332
- totalTokens: data.usage.total_tokens,
333
- };
334
-
335
- // Map finish reason to stop reason
336
- let stopReason = 'end_turn';
337
- switch (choice.finish_reason) {
338
- case 'stop':
339
- stopReason = 'end_turn';
340
- break;
341
- case 'length':
342
- stopReason = 'max_tokens';
343
- break;
344
- case 'tool_calls':
345
- stopReason = 'tool_use';
346
- break;
347
- case 'content_filter':
348
- stopReason = 'content_filter';
349
- break;
350
- }
351
- if (hadRefusal && stopReason !== 'content_filter') {
352
- stopReason = 'content_filter';
353
- }
354
-
355
- return {
356
- message,
357
- usage,
358
- stopReason,
359
- data: structuredData,
360
- };
361
- }
362
-
363
- /**
364
- * State for accumulating streaming response
365
- */
366
- export interface CompletionsStreamState {
367
- id: string;
368
- model: string;
369
- text: string;
370
- toolCalls: Map<number, { id: string; name: string; arguments: string }>;
371
- finishReason: string | null;
372
- inputTokens: number;
373
- outputTokens: number;
374
- hadRefusal: boolean;
375
- }
376
-
377
- /**
378
- * Create initial stream state
379
- */
380
- export function createStreamState(): CompletionsStreamState {
381
- return {
382
- id: '',
383
- model: '',
384
- text: '',
385
- toolCalls: new Map(),
386
- finishReason: null,
387
- inputTokens: 0,
388
- outputTokens: 0,
389
- hadRefusal: false,
390
- };
391
- }
392
-
393
- /**
394
- * Transform OpenAI stream chunk to UPP StreamEvent
395
- * Returns array since one chunk may produce multiple events
396
- */
397
- export function transformStreamEvent(
398
- chunk: OpenAICompletionsStreamChunk,
399
- state: CompletionsStreamState
400
- ): StreamEvent[] {
401
- const events: StreamEvent[] = [];
402
-
403
- // Update state with basic info
404
- if (chunk.id && !state.id) {
405
- state.id = chunk.id;
406
- events.push({ type: 'message_start', index: 0, delta: {} });
407
- }
408
- if (chunk.model) {
409
- state.model = chunk.model;
410
- }
411
-
412
- // Process choices
413
- const choice = chunk.choices[0];
414
- if (choice) {
415
- // Text delta
416
- if (choice.delta.content) {
417
- state.text += choice.delta.content;
418
- events.push({
419
- type: 'text_delta',
420
- index: 0,
421
- delta: { text: choice.delta.content },
422
- });
423
- }
424
- if (choice.delta.refusal) {
425
- state.hadRefusal = true;
426
- state.text += choice.delta.refusal;
427
- events.push({
428
- type: 'text_delta',
429
- index: 0,
430
- delta: { text: choice.delta.refusal },
431
- });
432
- }
433
-
434
- // Tool call deltas
435
- if (choice.delta.tool_calls) {
436
- for (const toolCallDelta of choice.delta.tool_calls) {
437
- const index = toolCallDelta.index;
438
- let toolCall = state.toolCalls.get(index);
439
-
440
- if (!toolCall) {
441
- toolCall = { id: '', name: '', arguments: '' };
442
- state.toolCalls.set(index, toolCall);
443
- }
444
-
445
- if (toolCallDelta.id) {
446
- toolCall.id = toolCallDelta.id;
447
- }
448
- if (toolCallDelta.function?.name) {
449
- toolCall.name = toolCallDelta.function.name;
450
- }
451
- if (toolCallDelta.function?.arguments) {
452
- toolCall.arguments += toolCallDelta.function.arguments;
453
- events.push({
454
- type: 'tool_call_delta',
455
- index: index,
456
- delta: {
457
- toolCallId: toolCall.id,
458
- toolName: toolCall.name,
459
- argumentsJson: toolCallDelta.function.arguments,
460
- },
461
- });
462
- }
463
- }
464
- }
465
-
466
- // Finish reason
467
- if (choice.finish_reason) {
468
- state.finishReason = choice.finish_reason;
469
- events.push({ type: 'message_stop', index: 0, delta: {} });
470
- }
471
- }
472
-
473
- // Usage info (usually comes at the end with stream_options.include_usage)
474
- if (chunk.usage) {
475
- state.inputTokens = chunk.usage.prompt_tokens;
476
- state.outputTokens = chunk.usage.completion_tokens;
477
- }
478
-
479
- return events;
480
- }
481
-
482
- /**
483
- * Build LLMResponse from accumulated stream state
484
- */
485
- export function buildResponseFromState(state: CompletionsStreamState): LLMResponse {
486
- const textContent: TextBlock[] = [];
487
- let structuredData: unknown;
488
- if (state.text) {
489
- textContent.push({ type: 'text', text: state.text });
490
- // Try to parse as JSON for structured output (native JSON mode)
491
- try {
492
- structuredData = JSON.parse(state.text);
493
- } catch {
494
- // Not valid JSON - that's fine, might not be structured output
495
- }
496
- }
497
-
498
- const toolCalls: ToolCall[] = [];
499
- for (const [, toolCall] of state.toolCalls) {
500
- let args: Record<string, unknown> = {};
501
- if (toolCall.arguments) {
502
- try {
503
- args = JSON.parse(toolCall.arguments);
504
- } catch {
505
- // Invalid JSON - use empty object
506
- }
507
- }
508
- toolCalls.push({
509
- toolCallId: toolCall.id,
510
- toolName: toolCall.name,
511
- arguments: args,
512
- });
513
- }
514
-
515
- const message = new AssistantMessage(
516
- textContent,
517
- toolCalls.length > 0 ? toolCalls : undefined,
518
- {
519
- id: state.id,
520
- metadata: {
521
- openai: {
522
- model: state.model,
523
- finish_reason: state.finishReason,
524
- },
525
- },
526
- }
527
- );
528
-
529
- const usage: TokenUsage = {
530
- inputTokens: state.inputTokens,
531
- outputTokens: state.outputTokens,
532
- totalTokens: state.inputTokens + state.outputTokens,
533
- };
534
-
535
- // Map finish reason to stop reason
536
- let stopReason = 'end_turn';
537
- switch (state.finishReason) {
538
- case 'stop':
539
- stopReason = 'end_turn';
540
- break;
541
- case 'length':
542
- stopReason = 'max_tokens';
543
- break;
544
- case 'tool_calls':
545
- stopReason = 'tool_use';
546
- break;
547
- case 'content_filter':
548
- stopReason = 'content_filter';
549
- break;
550
- }
551
- if (state.hadRefusal && stopReason !== 'content_filter') {
552
- stopReason = 'content_filter';
553
- }
554
-
555
- return {
556
- message,
557
- usage,
558
- stopReason,
559
- data: structuredData,
560
- };
561
- }