@providerprotocol/ai 0.0.11 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/dist/anthropic/index.d.ts +51 -15
  2. package/dist/anthropic/index.js +54 -19
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
  5. package/dist/chunk-MOU4U3PO.js.map +1 -0
  6. package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
  7. package/dist/chunk-MSR5P65T.js.map +1 -0
  8. package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
  9. package/dist/chunk-SVYROCLD.js.map +1 -0
  10. package/dist/chunk-U4JJC2YX.js +234 -0
  11. package/dist/chunk-U4JJC2YX.js.map +1 -0
  12. package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
  13. package/dist/chunk-Z7RBRCRN.js.map +1 -0
  14. package/dist/google/index.d.ts +376 -7
  15. package/dist/google/index.js +127 -15
  16. package/dist/google/index.js.map +1 -1
  17. package/dist/http/index.d.ts +222 -25
  18. package/dist/http/index.js +3 -3
  19. package/dist/index.d.ts +1482 -198
  20. package/dist/index.js +233 -49
  21. package/dist/index.js.map +1 -1
  22. package/dist/ollama/index.d.ts +92 -20
  23. package/dist/ollama/index.js +17 -7
  24. package/dist/ollama/index.js.map +1 -1
  25. package/dist/openai/index.d.ts +340 -61
  26. package/dist/openai/index.js +57 -15
  27. package/dist/openai/index.js.map +1 -1
  28. package/dist/openrouter/index.d.ts +107 -51
  29. package/dist/openrouter/index.js +36 -8
  30. package/dist/openrouter/index.js.map +1 -1
  31. package/dist/provider-mKkz7Q9U.d.ts +488 -0
  32. package/dist/retry-Dh70lgr0.d.ts +508 -0
  33. package/dist/xai/index.d.ts +97 -22
  34. package/dist/xai/index.js +55 -19
  35. package/dist/xai/index.js.map +1 -1
  36. package/package.json +8 -12
  37. package/dist/chunk-CUCRF5W6.js +0 -136
  38. package/dist/chunk-CUCRF5W6.js.map +0 -1
  39. package/dist/chunk-SUNYWHTH.js.map +0 -1
  40. package/dist/chunk-W4BB4BG2.js.map +0 -1
  41. package/dist/chunk-X5G4EHL7.js.map +0 -1
  42. package/dist/chunk-Y6Q7JCNP.js.map +0 -1
  43. package/dist/provider-CUJWjgNl.d.ts +0 -192
  44. package/dist/retry-I2661_rv.d.ts +0 -118
  45. package/src/anthropic/index.ts +0 -3
  46. package/src/core/image.ts +0 -188
  47. package/src/core/llm.ts +0 -650
  48. package/src/core/provider.ts +0 -92
  49. package/src/google/index.ts +0 -3
  50. package/src/http/errors.ts +0 -112
  51. package/src/http/fetch.ts +0 -210
  52. package/src/http/index.ts +0 -31
  53. package/src/http/keys.ts +0 -136
  54. package/src/http/retry.ts +0 -205
  55. package/src/http/sse.ts +0 -136
  56. package/src/index.ts +0 -32
  57. package/src/ollama/index.ts +0 -3
  58. package/src/openai/index.ts +0 -39
  59. package/src/openrouter/index.ts +0 -11
  60. package/src/providers/anthropic/index.ts +0 -17
  61. package/src/providers/anthropic/llm.ts +0 -196
  62. package/src/providers/anthropic/transform.ts +0 -434
  63. package/src/providers/anthropic/types.ts +0 -213
  64. package/src/providers/google/index.ts +0 -17
  65. package/src/providers/google/llm.ts +0 -203
  66. package/src/providers/google/transform.ts +0 -447
  67. package/src/providers/google/types.ts +0 -214
  68. package/src/providers/ollama/index.ts +0 -43
  69. package/src/providers/ollama/llm.ts +0 -272
  70. package/src/providers/ollama/transform.ts +0 -434
  71. package/src/providers/ollama/types.ts +0 -260
  72. package/src/providers/openai/index.ts +0 -186
  73. package/src/providers/openai/llm.completions.ts +0 -201
  74. package/src/providers/openai/llm.responses.ts +0 -211
  75. package/src/providers/openai/transform.completions.ts +0 -561
  76. package/src/providers/openai/transform.responses.ts +0 -708
  77. package/src/providers/openai/types.ts +0 -1249
  78. package/src/providers/openrouter/index.ts +0 -177
  79. package/src/providers/openrouter/llm.completions.ts +0 -201
  80. package/src/providers/openrouter/llm.responses.ts +0 -211
  81. package/src/providers/openrouter/transform.completions.ts +0 -538
  82. package/src/providers/openrouter/transform.responses.ts +0 -742
  83. package/src/providers/openrouter/types.ts +0 -717
  84. package/src/providers/xai/index.ts +0 -223
  85. package/src/providers/xai/llm.completions.ts +0 -201
  86. package/src/providers/xai/llm.messages.ts +0 -195
  87. package/src/providers/xai/llm.responses.ts +0 -211
  88. package/src/providers/xai/transform.completions.ts +0 -565
  89. package/src/providers/xai/transform.messages.ts +0 -448
  90. package/src/providers/xai/transform.responses.ts +0 -678
  91. package/src/providers/xai/types.ts +0 -938
  92. package/src/types/content.ts +0 -133
  93. package/src/types/errors.ts +0 -85
  94. package/src/types/index.ts +0 -105
  95. package/src/types/llm.ts +0 -211
  96. package/src/types/messages.ts +0 -205
  97. package/src/types/provider.ts +0 -195
  98. package/src/types/schema.ts +0 -58
  99. package/src/types/stream.ts +0 -188
  100. package/src/types/thread.ts +0 -226
  101. package/src/types/tool.ts +0 -88
  102. package/src/types/turn.ts +0 -118
  103. package/src/utils/id.ts +0 -28
  104. package/src/xai/index.ts +0 -41
package/src/providers/openai/transform.responses.ts (deleted)
@@ -1,708 +0,0 @@
- import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
- import type { Message } from '../../types/messages.ts';
- import type { StreamEvent } from '../../types/stream.ts';
- import type { Tool, ToolCall } from '../../types/tool.ts';
- import type { TokenUsage } from '../../types/turn.ts';
- import type { ContentBlock, TextBlock, ImageBlock, AssistantContent } from '../../types/content.ts';
- import {
-   AssistantMessage,
-   isUserMessage,
-   isAssistantMessage,
-   isToolResultMessage,
- } from '../../types/messages.ts';
- import type {
-   OpenAIResponsesParams,
-   OpenAIResponsesRequest,
-   OpenAIResponsesInputItem,
-   OpenAIResponsesContentPart,
-   OpenAIResponsesTool,
-   OpenAIResponsesToolUnion,
-   OpenAIResponsesResponse,
-   OpenAIResponsesStreamEvent,
-   OpenAIResponsesOutputItem,
-   OpenAIResponsesMessageOutput,
-   OpenAIResponsesFunctionCallOutput,
-   OpenAIResponsesImageGenerationOutput,
- } from './types.ts';
-
- /**
-  * Transform UPP request to OpenAI Responses API format
-  *
-  * Params are spread directly to allow pass-through of any OpenAI API fields,
-  * even those not explicitly defined in our type. This enables developers to
-  * use new API features without waiting for library updates.
-  */
- export function transformRequest(
-   request: LLMRequest<OpenAIResponsesParams>,
-   modelId: string
- ): OpenAIResponsesRequest {
-   const params = request.params ?? ({} as OpenAIResponsesParams);
-
-   // Extract built-in tools from params before spreading
-   const builtInTools = params.tools as OpenAIResponsesToolUnion[] | undefined;
-   const { tools: _paramsTools, ...restParams } = params;
-
-   // Spread params to pass through all fields, then set required fields
-   const openaiRequest: OpenAIResponsesRequest = {
-     ...restParams,
-     model: modelId,
-     input: transformInputItems(request.messages, request.system),
-   };
-
-   // Merge tools: UPP function tools from request + built-in tools from params
-   const functionTools: OpenAIResponsesToolUnion[] = request.tools?.map(transformTool) ?? [];
-   const allTools: OpenAIResponsesToolUnion[] = [...functionTools, ...(builtInTools ?? [])];
-
-   if (allTools.length > 0) {
-     openaiRequest.tools = allTools;
-   }
-
-   // Structured output via text.format (overrides params.text if set)
-   if (request.structure) {
-     const schema: Record<string, unknown> = {
-       type: 'object',
-       properties: request.structure.properties,
-       required: request.structure.required,
-       ...(request.structure.additionalProperties !== undefined
-         ? { additionalProperties: request.structure.additionalProperties }
-         : { additionalProperties: false }),
-     };
-     if (request.structure.description) {
-       schema.description = request.structure.description;
-     }
-
-     openaiRequest.text = {
-       format: {
-         type: 'json_schema',
-         name: 'json_response',
-         description: request.structure.description,
-         schema,
-         strict: true,
-       },
-     };
-   }
-
-   return openaiRequest;
- }
-
- /**
-  * Transform messages to Responses API input items
-  */
- function transformInputItems(
-   messages: Message[],
-   system?: string
- ): OpenAIResponsesInputItem[] | string {
-   const result: OpenAIResponsesInputItem[] = [];
-
-   if (system) {
-     result.push({
-       type: 'message',
-       role: 'system',
-       content: system,
-     });
-   }
-
-   for (const message of messages) {
-     const items = transformMessage(message);
-     result.push(...items);
-   }
-
-   // If there's only one user message with simple text, return as string
-   if (result.length === 1 && result[0]?.type === 'message') {
-     const item = result[0] as { role?: string; content?: string | unknown[] };
-     if (item.role === 'user' && typeof item.content === 'string') {
-       return item.content;
-     }
-   }
-
-   return result;
- }
-
- /**
-  * Filter to only valid content blocks with a type property
-  */
- function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
-   return content.filter((c) => c && typeof c.type === 'string');
- }
-
- /**
-  * Transform a UPP Message to OpenAI Responses API input items
-  */
- function transformMessage(message: Message): OpenAIResponsesInputItem[] {
-   if (isUserMessage(message)) {
-     const validContent = filterValidContent(message.content);
-     // Check if we can use simple string content
-     if (validContent.length === 1 && validContent[0]?.type === 'text') {
-       return [
-         {
-           type: 'message',
-           role: 'user',
-           content: (validContent[0] as TextBlock).text,
-         },
-       ];
-     }
-     return [
-       {
-         type: 'message',
-         role: 'user',
-         content: validContent.map(transformContentPart),
-       },
-     ];
-   }
-
-   if (isAssistantMessage(message)) {
-     const validContent = filterValidContent(message.content);
-     const items: OpenAIResponsesInputItem[] = [];
-
-     // Add message content - only text parts for assistant messages
-     const contentParts: OpenAIResponsesContentPart[] = validContent
-       .filter((c): c is TextBlock => c.type === 'text')
-       .map((c): OpenAIResponsesContentPart => ({
-         type: 'output_text',
-         text: c.text,
-       }));
-
-     // Add assistant message if we have text content
-     if (contentParts.length > 0) {
-       items.push({
-         type: 'message',
-         role: 'assistant',
-         content: contentParts,
-       });
-     }
-
-     // Add function_call items for each tool call (must precede function_call_output)
-     const openaiMeta = message.metadata?.openai as
-       | { functionCallItems?: Array<{ id: string; call_id: string; name: string; arguments: string }> }
-       | undefined;
-     const functionCallItems = openaiMeta?.functionCallItems;
-
-     if (functionCallItems && functionCallItems.length > 0) {
-       for (const fc of functionCallItems) {
-         items.push({
-           type: 'function_call',
-           id: fc.id,
-           call_id: fc.call_id,
-           name: fc.name,
-           arguments: fc.arguments,
-         });
-       }
-     } else if (message.toolCalls && message.toolCalls.length > 0) {
-       for (const call of message.toolCalls) {
-         items.push({
-           type: 'function_call',
-           id: `fc_${call.toolCallId}`,
-           call_id: call.toolCallId,
-           name: call.toolName,
-           arguments: JSON.stringify(call.arguments),
-         });
-       }
-     }
-
-     return items;
-   }
-
-   if (isToolResultMessage(message)) {
-     // Tool results are function_call_output items
-     return message.results.map((result) => ({
-       type: 'function_call_output' as const,
-       call_id: result.toolCallId,
-       output:
-         typeof result.result === 'string'
-           ? result.result
-           : JSON.stringify(result.result),
-     }));
-   }
-
-   return [];
- }
-
- /**
-  * Transform a content block to Responses API format
-  */
- function transformContentPart(block: ContentBlock): OpenAIResponsesContentPart {
-   switch (block.type) {
-     case 'text':
-       return { type: 'input_text', text: block.text };
-
-     case 'image': {
-       const imageBlock = block as ImageBlock;
-       if (imageBlock.source.type === 'base64') {
-         return {
-           type: 'input_image',
-           image_url: `data:${imageBlock.mimeType};base64,${imageBlock.source.data}`,
-         };
-       }
-
-       if (imageBlock.source.type === 'url') {
-         return {
-           type: 'input_image',
-           image_url: imageBlock.source.url,
-         };
-       }
-
-       if (imageBlock.source.type === 'bytes') {
-         // Convert bytes to base64
-         const base64 = btoa(
-           Array.from(imageBlock.source.data)
-             .map((b) => String.fromCharCode(b))
-             .join('')
-         );
-         return {
-           type: 'input_image',
-           image_url: `data:${imageBlock.mimeType};base64,${base64}`,
-         };
-       }
-
-       throw new Error('Unknown image source type');
-     }
-
-     default:
-       throw new Error(`Unsupported content type: ${block.type}`);
-   }
- }
-
- /**
-  * Transform a UPP Tool to Responses API format
-  */
- function transformTool(tool: Tool): OpenAIResponsesTool {
-   return {
-     type: 'function',
-     name: tool.name,
-     description: tool.description,
-     parameters: {
-       type: 'object',
-       properties: tool.parameters.properties,
-       required: tool.parameters.required,
-       ...(tool.parameters.additionalProperties !== undefined
-         ? { additionalProperties: tool.parameters.additionalProperties }
-         : {}),
-     },
-   };
- }
-
- /**
-  * Transform OpenAI Responses API response to UPP LLMResponse
-  */
- export function transformResponse(data: OpenAIResponsesResponse): LLMResponse {
-   // Extract content and tool calls from output items
-   const content: AssistantContent[] = [];
-   const toolCalls: ToolCall[] = [];
-   const functionCallItems: Array<{
-     id: string;
-     call_id: string;
-     name: string;
-     arguments: string;
-   }> = [];
-   let hadRefusal = false;
-   let structuredData: unknown;
-
-   for (const item of data.output) {
-     if (item.type === 'message') {
-       const messageItem = item as OpenAIResponsesMessageOutput;
-       for (const part of messageItem.content) {
-         if (part.type === 'output_text') {
-           content.push({ type: 'text', text: part.text });
-           // Try to parse as JSON for structured output (native JSON mode)
-           if (structuredData === undefined) {
-             try {
-               structuredData = JSON.parse(part.text);
-             } catch {
-               // Not valid JSON - that's fine, might not be structured output
-             }
-           }
-         } else if (part.type === 'refusal') {
-           content.push({ type: 'text', text: part.refusal });
-           hadRefusal = true;
-         }
-       }
-     } else if (item.type === 'function_call') {
-       const functionCall = item as OpenAIResponsesFunctionCallOutput;
-       let args: Record<string, unknown> = {};
-       try {
-         args = JSON.parse(functionCall.arguments);
-       } catch {
-         // Invalid JSON - use empty object
-       }
-       toolCalls.push({
-         toolCallId: functionCall.call_id,
-         toolName: functionCall.name,
-         arguments: args,
-       });
-       functionCallItems.push({
-         id: functionCall.id,
-         call_id: functionCall.call_id,
-         name: functionCall.name,
-         arguments: functionCall.arguments,
-       });
-     } else if (item.type === 'image_generation_call') {
-       const imageGen = item as OpenAIResponsesImageGenerationOutput;
-       if (imageGen.result) {
-         content.push({
-           type: 'image',
-           mimeType: 'image/png',
-           source: { type: 'base64', data: imageGen.result },
-         } as ImageBlock);
-       }
-     }
-   }
-
-   const message = new AssistantMessage(
-     content,
-     toolCalls.length > 0 ? toolCalls : undefined,
-     {
-       id: data.id,
-       metadata: {
-         openai: {
-           model: data.model,
-           status: data.status,
-           response_id: data.id,
-           functionCallItems:
-             functionCallItems.length > 0 ? functionCallItems : undefined,
-         },
-       },
-     }
-   );
-
-   const usage: TokenUsage = {
-     inputTokens: data.usage.input_tokens,
-     outputTokens: data.usage.output_tokens,
-     totalTokens: data.usage.total_tokens,
-   };
-
-   // Map status to stop reason
-   let stopReason = 'end_turn';
-   if (data.status === 'completed') {
-     stopReason = toolCalls.length > 0 ? 'tool_use' : 'end_turn';
-   } else if (data.status === 'incomplete') {
-     stopReason = data.incomplete_details?.reason === 'max_output_tokens'
-       ? 'max_tokens'
-       : 'end_turn';
-   } else if (data.status === 'failed') {
-     stopReason = 'error';
-   }
-   if (hadRefusal && stopReason !== 'error') {
-     stopReason = 'content_filter';
-   }
-
-   return {
-     message,
-     usage,
-     stopReason,
-     data: structuredData,
-   };
- }
-
- /**
-  * State for accumulating streaming response
-  */
- export interface ResponsesStreamState {
-   id: string;
-   model: string;
-   textByIndex: Map<number, string>;
-   toolCalls: Map<
-     number,
-     { itemId?: string; callId?: string; name?: string; arguments: string }
-   >;
-   images: string[]; // Base64 image data from image_generation_call outputs
-   status: string;
-   inputTokens: number;
-   outputTokens: number;
-   hadRefusal: boolean;
- }
-
- /**
-  * Create initial stream state
-  */
- export function createStreamState(): ResponsesStreamState {
-   return {
-     id: '',
-     model: '',
-     textByIndex: new Map(),
-     toolCalls: new Map(),
-     images: [],
-     status: 'in_progress',
-     inputTokens: 0,
-     outputTokens: 0,
-     hadRefusal: false,
-   };
- }
-
- /**
-  * Transform OpenAI Responses API stream event to UPP StreamEvent
-  * Returns array since one event may produce multiple UPP events
-  */
- export function transformStreamEvent(
-   event: OpenAIResponsesStreamEvent,
-   state: ResponsesStreamState
- ): StreamEvent[] {
-   const events: StreamEvent[] = [];
-
-   switch (event.type) {
-     case 'response.created':
-       state.id = event.response.id;
-       state.model = event.response.model;
-       events.push({ type: 'message_start', index: 0, delta: {} });
-       break;
-
-     case 'response.in_progress':
-       state.status = 'in_progress';
-       break;
-
-     case 'response.completed':
-       state.status = 'completed';
-       if (event.response.usage) {
-         state.inputTokens = event.response.usage.input_tokens;
-         state.outputTokens = event.response.usage.output_tokens;
-       }
-       events.push({ type: 'message_stop', index: 0, delta: {} });
-       break;
-
-     case 'response.failed':
-       state.status = 'failed';
-       events.push({ type: 'message_stop', index: 0, delta: {} });
-       break;
-
-     case 'response.output_item.added':
-       if (event.item.type === 'function_call') {
-         const functionCall = event.item as OpenAIResponsesFunctionCallOutput;
-         const existing = state.toolCalls.get(event.output_index) ?? {
-           arguments: '',
-         };
-         existing.itemId = functionCall.id;
-         existing.callId = functionCall.call_id;
-         existing.name = functionCall.name;
-         if (functionCall.arguments) {
-           existing.arguments = functionCall.arguments;
-         }
-         state.toolCalls.set(event.output_index, existing);
-       }
-       events.push({
-         type: 'content_block_start',
-         index: event.output_index,
-         delta: {},
-       });
-       break;
-
-     case 'response.output_item.done':
-       if (event.item.type === 'function_call') {
-         const functionCall = event.item as OpenAIResponsesFunctionCallOutput;
-         const existing = state.toolCalls.get(event.output_index) ?? {
-           arguments: '',
-         };
-         existing.itemId = functionCall.id;
-         existing.callId = functionCall.call_id;
-         existing.name = functionCall.name;
-         if (functionCall.arguments) {
-           existing.arguments = functionCall.arguments;
-         }
-         state.toolCalls.set(event.output_index, existing);
-       } else if (event.item.type === 'image_generation_call') {
-         const imageGen = event.item as OpenAIResponsesImageGenerationOutput;
-         if (imageGen.result) {
-           state.images.push(imageGen.result);
-         }
-       }
-       events.push({
-         type: 'content_block_stop',
-         index: event.output_index,
-         delta: {},
-       });
-       break;
-
-     case 'response.output_text.delta':
-       // Accumulate text
-       const currentText = state.textByIndex.get(event.output_index) ?? '';
-       state.textByIndex.set(event.output_index, currentText + event.delta);
-       events.push({
-         type: 'text_delta',
-         index: event.output_index,
-         delta: { text: event.delta },
-       });
-       break;
-
-     case 'response.output_text.done':
-       state.textByIndex.set(event.output_index, event.text);
-       break;
-
-     case 'response.refusal.delta': {
-       state.hadRefusal = true;
-       const currentRefusal = state.textByIndex.get(event.output_index) ?? '';
-       state.textByIndex.set(event.output_index, currentRefusal + event.delta);
-       events.push({
-         type: 'text_delta',
-         index: event.output_index,
-         delta: { text: event.delta },
-       });
-       break;
-     }
-
-     case 'response.refusal.done':
-       state.hadRefusal = true;
-       state.textByIndex.set(event.output_index, event.refusal);
-       break;
-
-     case 'response.function_call_arguments.delta': {
-       // Accumulate function call arguments
-       let toolCall = state.toolCalls.get(event.output_index);
-       if (!toolCall) {
-         toolCall = { arguments: '' };
-         state.toolCalls.set(event.output_index, toolCall);
-       }
-       if (event.item_id && !toolCall.itemId) {
-         toolCall.itemId = event.item_id;
-       }
-       if (event.call_id && !toolCall.callId) {
-         toolCall.callId = event.call_id;
-       }
-       toolCall.arguments += event.delta;
-       events.push({
-         type: 'tool_call_delta',
-         index: event.output_index,
-         delta: {
-           toolCallId: toolCall.callId ?? toolCall.itemId ?? '',
-           toolName: toolCall.name,
-           argumentsJson: event.delta,
-         },
-       });
-       break;
-     }
-
-     case 'response.function_call_arguments.done': {
-       // Finalize function call
-       let toolCall = state.toolCalls.get(event.output_index);
-       if (!toolCall) {
-         toolCall = { arguments: '' };
-         state.toolCalls.set(event.output_index, toolCall);
-       }
-       if (event.item_id) {
-         toolCall.itemId = event.item_id;
-       }
-       if (event.call_id) {
-         toolCall.callId = event.call_id;
-       }
-       toolCall.name = event.name;
-       toolCall.arguments = event.arguments;
-       break;
-     }
-
-     case 'error':
-       // Error events are handled at the handler level
-       break;
-
-     default:
-       // Ignore other events
-       break;
-   }
-
-   return events;
- }
-
- /**
-  * Build LLMResponse from accumulated stream state
-  */
- export function buildResponseFromState(state: ResponsesStreamState): LLMResponse {
-   const content: AssistantContent[] = [];
-   let structuredData: unknown;
-
-   // Combine all text content
-   for (const [, text] of state.textByIndex) {
-     if (text) {
-       content.push({ type: 'text', text });
-       // Try to parse as JSON for structured output (native JSON mode)
-       if (structuredData === undefined) {
-         try {
-           structuredData = JSON.parse(text);
-         } catch {
-           // Not valid JSON - that's fine, might not be structured output
-         }
-       }
-     }
-   }
-
-   // Add any generated images
-   for (const imageData of state.images) {
-     content.push({
-       type: 'image',
-       mimeType: 'image/png',
-       source: { type: 'base64', data: imageData },
-     } as ImageBlock);
-   }
-
-   const toolCalls: ToolCall[] = [];
-   const functionCallItems: Array<{
-     id: string;
-     call_id: string;
-     name: string;
-     arguments: string;
-   }> = [];
-   for (const [, toolCall] of state.toolCalls) {
-     let args: Record<string, unknown> = {};
-     if (toolCall.arguments) {
-       try {
-         args = JSON.parse(toolCall.arguments);
-       } catch {
-         // Invalid JSON - use empty object
-       }
-     }
-     const itemId = toolCall.itemId ?? '';
-     const callId = toolCall.callId ?? toolCall.itemId ?? '';
-     const name = toolCall.name ?? '';
-     toolCalls.push({
-       toolCallId: callId,
-       toolName: name,
-       arguments: args,
-     });
-
-     if (itemId && callId && name) {
-       functionCallItems.push({
-         id: itemId,
-         call_id: callId,
-         name,
-         arguments: toolCall.arguments,
-       });
-     }
-   }
-
-   const message = new AssistantMessage(
-     content,
-     toolCalls.length > 0 ? toolCalls : undefined,
-     {
-       id: state.id,
-       metadata: {
-         openai: {
-           model: state.model,
-           status: state.status,
-           // Store response_id for multi-turn tool calling
-           response_id: state.id,
-           functionCallItems:
-             functionCallItems.length > 0 ? functionCallItems : undefined,
-         },
-       },
-     }
-   );
-
-   const usage: TokenUsage = {
-     inputTokens: state.inputTokens,
-     outputTokens: state.outputTokens,
-     totalTokens: state.inputTokens + state.outputTokens,
-   };
-
-   // Map status to stop reason
-   let stopReason = 'end_turn';
-   if (state.status === 'completed') {
-     stopReason = toolCalls.length > 0 ? 'tool_use' : 'end_turn';
-   } else if (state.status === 'failed') {
-     stopReason = 'error';
-   }
-   if (state.hadRefusal && stopReason !== 'error') {
-     stopReason = 'content_filter';
-   }
-
-   return {
-     message,
-     usage,
-     stopReason,
-     data: structuredData,
-   };
- }
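
For orientation, here is a sketch of the request shape the deleted transformRequest produced. It is derived by reading the transform logic above rather than captured from the library; the model id, the tool definition, and the built-in tool passed via params.tools are illustrative assumptions:

    // Hypothetical input: one text-only user message, one UPP function tool,
    // and pass-through params { temperature: 0.2, tools: [{ type: 'web_search_preview' }] }.
    const body = {
      model: 'gpt-4.1', // modelId argument (assumed value)
      // A single text-only user message with no system prompt collapses to a string.
      input: 'What is the weather in Paris?',
      tools: [
        {
          type: 'function', // UPP tool mapped by transformTool
          name: 'get_weather',
          description: 'Look up current weather',
          parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
          },
        },
        { type: 'web_search_preview' }, // built-in tool forwarded from params.tools
      ],
      temperature: 0.2, // unrecognized param spread straight through
    };

Note the ordering: function tools from the UPP request come first, then any built-in tools extracted from params, matching the merge in transformRequest.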
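
The streaming path is accumulate-then-build: transformStreamEvent forwards incremental UPP events while folding text, tool-call arguments, images, and usage into ResponsesStreamState, and buildResponseFromState assembles the final LLMResponse once the stream ends. A minimal consumption sketch, assuming an already-parsed SSE event source; the import paths mirror the deleted source tree:

    import type { StreamEvent } from '../../types/stream.ts';
    import type { OpenAIResponsesStreamEvent } from './types.ts';
    import {
      createStreamState,
      transformStreamEvent,
      buildResponseFromState,
    } from './transform.responses.ts';

    async function collect(
      events: AsyncIterable<OpenAIResponsesStreamEvent>,
      onEvent: (e: StreamEvent) => void
    ) {
      const state = createStreamState();
      for await (const event of events) {
        for (const uppEvent of transformStreamEvent(event, state)) {
          onEvent(uppEvent); // message_start, text_delta, tool_call_delta, ...
        }
      }
      // Everything needed for the final response now lives in state.
      return buildResponseFromState(state);
    }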
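
One subtlety the deleted code preserves: when replaying history, assistant tool calls must appear as function_call input items before their matching function_call_output items. transformMessage prefers the verbatim items stored in message.metadata.openai.functionCallItems (as written by transformResponse and buildResponseFromState) and otherwise synthesizes them from toolCalls with a fabricated fc_-prefixed id. A hypothetical input sequence for one completed tool round trip, with illustrative ids:

    const inputItems = [
      { type: 'message', role: 'user', content: 'Weather in Paris?' },
      {
        type: 'function_call',
        id: 'fc_abc123', // item id preserved from the prior response
        call_id: 'call_abc123',
        name: 'get_weather',
        arguments: '{"city":"Paris"}',
      },
      {
        type: 'function_call_output',
        call_id: 'call_abc123', // must reference the function_call above
        output: '{"tempC":18}',
      },
    ];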