@providerprotocol/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +84 -0
  3. package/dist/anthropic/index.d.ts +41 -0
  4. package/dist/anthropic/index.js +500 -0
  5. package/dist/anthropic/index.js.map +1 -0
  6. package/dist/chunk-CUCRF5W6.js +136 -0
  7. package/dist/chunk-CUCRF5W6.js.map +1 -0
  8. package/dist/chunk-FTFX2VET.js +424 -0
  9. package/dist/chunk-FTFX2VET.js.map +1 -0
  10. package/dist/chunk-QUUX4G7U.js +117 -0
  11. package/dist/chunk-QUUX4G7U.js.map +1 -0
  12. package/dist/chunk-Y6Q7JCNP.js +39 -0
  13. package/dist/chunk-Y6Q7JCNP.js.map +1 -0
  14. package/dist/google/index.d.ts +69 -0
  15. package/dist/google/index.js +517 -0
  16. package/dist/google/index.js.map +1 -0
  17. package/dist/http/index.d.ts +61 -0
  18. package/dist/http/index.js +43 -0
  19. package/dist/http/index.js.map +1 -0
  20. package/dist/index.d.ts +792 -0
  21. package/dist/index.js +898 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/openai/index.d.ts +204 -0
  24. package/dist/openai/index.js +1340 -0
  25. package/dist/openai/index.js.map +1 -0
  26. package/dist/provider-CUJWjgNl.d.ts +192 -0
  27. package/dist/retry-I2661_rv.d.ts +118 -0
  28. package/package.json +88 -0
  29. package/src/anthropic/index.ts +3 -0
  30. package/src/core/image.ts +188 -0
  31. package/src/core/llm.ts +619 -0
  32. package/src/core/provider.ts +92 -0
  33. package/src/google/index.ts +3 -0
  34. package/src/http/errors.ts +112 -0
  35. package/src/http/fetch.ts +210 -0
  36. package/src/http/index.ts +31 -0
  37. package/src/http/keys.ts +136 -0
  38. package/src/http/retry.ts +205 -0
  39. package/src/http/sse.ts +136 -0
  40. package/src/index.ts +32 -0
  41. package/src/openai/index.ts +9 -0
  42. package/src/providers/anthropic/index.ts +17 -0
  43. package/src/providers/anthropic/llm.ts +196 -0
  44. package/src/providers/anthropic/transform.ts +452 -0
  45. package/src/providers/anthropic/types.ts +213 -0
  46. package/src/providers/google/index.ts +17 -0
  47. package/src/providers/google/llm.ts +203 -0
  48. package/src/providers/google/transform.ts +487 -0
  49. package/src/providers/google/types.ts +214 -0
  50. package/src/providers/openai/index.ts +151 -0
  51. package/src/providers/openai/llm.completions.ts +201 -0
  52. package/src/providers/openai/llm.responses.ts +211 -0
  53. package/src/providers/openai/transform.completions.ts +628 -0
  54. package/src/providers/openai/transform.responses.ts +718 -0
  55. package/src/providers/openai/types.ts +711 -0
  56. package/src/types/content.ts +133 -0
  57. package/src/types/errors.ts +85 -0
  58. package/src/types/index.ts +105 -0
  59. package/src/types/llm.ts +211 -0
  60. package/src/types/messages.ts +182 -0
  61. package/src/types/provider.ts +195 -0
  62. package/src/types/schema.ts +58 -0
  63. package/src/types/stream.ts +146 -0
  64. package/src/types/thread.ts +226 -0
  65. package/src/types/tool.ts +88 -0
  66. package/src/types/turn.ts +118 -0
  67. package/src/utils/id.ts +28 -0
@@ -0,0 +1,718 @@
1
+ import type { LLMRequest, LLMResponse } from '../../types/llm.ts';
2
+ import type { Message } from '../../types/messages.ts';
3
+ import type { StreamEvent } from '../../types/stream.ts';
4
+ import type { Tool, ToolCall } from '../../types/tool.ts';
5
+ import type { TokenUsage } from '../../types/turn.ts';
6
+ import type { ContentBlock, TextBlock, ImageBlock } from '../../types/content.ts';
7
+ import {
8
+ AssistantMessage,
9
+ isUserMessage,
10
+ isAssistantMessage,
11
+ isToolResultMessage,
12
+ } from '../../types/messages.ts';
13
+ import type {
14
+ OpenAILLMParams,
15
+ OpenAIResponsesRequest,
16
+ OpenAIResponsesInputItem,
17
+ OpenAIResponsesContentPart,
18
+ OpenAIResponsesTool,
19
+ OpenAIResponsesResponse,
20
+ OpenAIResponsesStreamEvent,
21
+ OpenAIResponsesOutputItem,
22
+ OpenAIResponsesMessageOutput,
23
+ OpenAIResponsesFunctionCallOutput,
24
+ } from './types.ts';
25
+
26
+ /**
27
+ * Transform UPP request to OpenAI Responses API format
28
+ */
29
+ export function transformRequest<TParams extends OpenAILLMParams>(
30
+ request: LLMRequest<TParams>,
31
+ modelId: string
32
+ ): OpenAIResponsesRequest {
33
+ const params: OpenAILLMParams = request.params ?? {};
34
+
35
+ const openaiRequest: OpenAIResponsesRequest = {
36
+ model: modelId,
37
+ input: transformInputItems(request.messages, request.system),
38
+ };
39
+
40
+ // Model parameters
41
+ if (params.temperature !== undefined) {
42
+ openaiRequest.temperature = params.temperature;
43
+ }
44
+ if (params.top_p !== undefined) {
45
+ openaiRequest.top_p = params.top_p;
46
+ }
47
+ if (params.max_output_tokens !== undefined) {
48
+ openaiRequest.max_output_tokens = params.max_output_tokens;
49
+ } else if (params.max_completion_tokens !== undefined) {
50
+ openaiRequest.max_output_tokens = params.max_completion_tokens;
51
+ } else if (params.max_tokens !== undefined) {
52
+ openaiRequest.max_output_tokens = params.max_tokens;
53
+ }
54
+ if (params.service_tier !== undefined) {
55
+ openaiRequest.service_tier = params.service_tier;
56
+ }
57
+ if (params.store !== undefined) {
58
+ openaiRequest.store = params.store;
59
+ }
60
+ if (params.metadata !== undefined) {
61
+ openaiRequest.metadata = params.metadata;
62
+ }
63
+ if (params.truncation !== undefined) {
64
+ openaiRequest.truncation = params.truncation;
65
+ }
66
+ if (params.include !== undefined) {
67
+ openaiRequest.include = params.include;
68
+ }
69
+ if (params.background !== undefined) {
70
+ openaiRequest.background = params.background;
71
+ }
72
+ if (params.previous_response_id !== undefined) {
73
+ openaiRequest.previous_response_id = params.previous_response_id;
74
+ }
75
+ if (params.reasoning !== undefined) {
76
+ openaiRequest.reasoning = { ...params.reasoning };
77
+ }
78
+ if (params.reasoning_effort !== undefined) {
79
+ openaiRequest.reasoning = {
80
+ ...(openaiRequest.reasoning ?? {}),
81
+ effort: params.reasoning_effort,
82
+ };
83
+ }
84
+
85
+ // Tools
86
+ if (request.tools && request.tools.length > 0) {
87
+ openaiRequest.tools = request.tools.map(transformTool);
88
+ if (params.parallel_tool_calls !== undefined) {
89
+ openaiRequest.parallel_tool_calls = params.parallel_tool_calls;
90
+ }
91
+ }
92
+
93
+ // Structured output via text.format
94
+ if (request.structure) {
95
+ const schema: Record<string, unknown> = {
96
+ type: 'object',
97
+ properties: request.structure.properties,
98
+ required: request.structure.required,
99
+ ...(request.structure.additionalProperties !== undefined
100
+ ? { additionalProperties: request.structure.additionalProperties }
101
+ : { additionalProperties: false }),
102
+ };
103
+ if (request.structure.description) {
104
+ schema.description = request.structure.description;
105
+ }
106
+
107
+ openaiRequest.text = {
108
+ format: {
109
+ type: 'json_schema',
110
+ name: 'json_response',
111
+ description: request.structure.description,
112
+ schema,
113
+ strict: true,
114
+ },
115
+ };
116
+ }
117
+
118
+ return openaiRequest;
119
+ }
120
+
121
+ /**
122
+ * Transform messages to Responses API input items
123
+ */
124
+ function transformInputItems(
125
+ messages: Message[],
126
+ system?: string
127
+ ): OpenAIResponsesInputItem[] | string {
128
+ const result: OpenAIResponsesInputItem[] = [];
129
+
130
+ if (system) {
131
+ result.push({
132
+ type: 'message',
133
+ role: 'system',
134
+ content: system,
135
+ });
136
+ }
137
+
138
+ for (const message of messages) {
139
+ const items = transformMessage(message);
140
+ result.push(...items);
141
+ }
142
+
143
+ // If there's only one user message with simple text, return as string
144
+ if (result.length === 1 && result[0]?.type === 'message') {
145
+ const item = result[0] as { role?: string; content?: string | unknown[] };
146
+ if (item.role === 'user' && typeof item.content === 'string') {
147
+ return item.content;
148
+ }
149
+ }
150
+
151
+ return result;
152
+ }
153
+
154
+ /**
155
+ * Filter to only valid content blocks with a type property
156
+ */
157
+ function filterValidContent<T extends { type?: string }>(content: T[]): T[] {
158
+ return content.filter((c) => c && typeof c.type === 'string');
159
+ }
160
+
161
/**
 * Transform a UPP Message to OpenAI Responses API input items.
 *
 * Mapping:
 * - User messages → one `message` item (plain string content when the
 *   message is a single text block, content parts otherwise).
 * - Assistant messages → an optional `message` item carrying only the text
 *   blocks, followed by one `function_call` item per tool call.
 * - Tool-result messages → `function_call_output` items.
 * - Any other message kind → no items.
 */
function transformMessage(message: Message): OpenAIResponsesInputItem[] {
  if (isUserMessage(message)) {
    const validContent = filterValidContent(message.content);
    // Check if we can use simple string content
    if (validContent.length === 1 && validContent[0]?.type === 'text') {
      return [
        {
          type: 'message',
          role: 'user',
          content: (validContent[0] as TextBlock).text,
        },
      ];
    }
    return [
      {
        type: 'message',
        role: 'user',
        content: validContent.map(transformContentPart),
      },
    ];
  }

  if (isAssistantMessage(message)) {
    const validContent = filterValidContent(message.content);
    const items: OpenAIResponsesInputItem[] = [];

    // Add message content - only text parts for assistant messages
    const contentParts: OpenAIResponsesContentPart[] = validContent
      .filter((c): c is TextBlock => c.type === 'text')
      .map((c): OpenAIResponsesContentPart => ({
        type: 'output_text',
        text: c.text,
      }));

    // Add assistant message if we have text content
    if (contentParts.length > 0) {
      items.push({
        type: 'message',
        role: 'assistant',
        content: contentParts,
      });
    }

    // Add function_call items for each tool call (must precede function_call_output)
    // Prefer the provider-issued items stashed in metadata.openai by
    // transformResponse / buildResponseFromState so ids round-trip exactly.
    const openaiMeta = message.metadata?.openai as
      | { functionCallItems?: Array<{ id: string; call_id: string; name: string; arguments: string }> }
      | undefined;
    const functionCallItems = openaiMeta?.functionCallItems;

    if (functionCallItems && functionCallItems.length > 0) {
      for (const fc of functionCallItems) {
        items.push({
          type: 'function_call',
          id: fc.id,
          call_id: fc.call_id,
          name: fc.name,
          arguments: fc.arguments,
        });
      }
    } else if (message.toolCalls && message.toolCalls.length > 0) {
      // No stored items (e.g. message constructed by the caller): synthesize
      // them, deriving the item id from the call id with an `fc_` prefix.
      for (const call of message.toolCalls) {
        items.push({
          type: 'function_call',
          id: `fc_${call.toolCallId}`,
          call_id: call.toolCallId,
          name: call.toolName,
          arguments: JSON.stringify(call.arguments),
        });
      }
    }

    return items;
  }

  if (isToolResultMessage(message)) {
    // Tool results are function_call_output items
    return message.results.map((result) => ({
      type: 'function_call_output' as const,
      call_id: result.toolCallId,
      output:
        typeof result.result === 'string'
          ? result.result
          : JSON.stringify(result.result),
    }));
  }

  return [];
}

+ /**
254
+ * Transform a content block to Responses API format
255
+ */
256
+ function transformContentPart(block: ContentBlock): OpenAIResponsesContentPart {
257
+ switch (block.type) {
258
+ case 'text':
259
+ return { type: 'input_text', text: block.text };
260
+
261
+ case 'image': {
262
+ const imageBlock = block as ImageBlock;
263
+ if (imageBlock.source.type === 'base64') {
264
+ return {
265
+ type: 'input_image',
266
+ image_url: `data:${imageBlock.mimeType};base64,${imageBlock.source.data}`,
267
+ };
268
+ }
269
+
270
+ if (imageBlock.source.type === 'url') {
271
+ return {
272
+ type: 'input_image',
273
+ image_url: imageBlock.source.url,
274
+ };
275
+ }
276
+
277
+ if (imageBlock.source.type === 'bytes') {
278
+ // Convert bytes to base64
279
+ const base64 = btoa(
280
+ Array.from(imageBlock.source.data)
281
+ .map((b) => String.fromCharCode(b))
282
+ .join('')
283
+ );
284
+ return {
285
+ type: 'input_image',
286
+ image_url: `data:${imageBlock.mimeType};base64,${base64}`,
287
+ };
288
+ }
289
+
290
+ throw new Error('Unknown image source type');
291
+ }
292
+
293
+ default:
294
+ throw new Error(`Unsupported content type: ${block.type}`);
295
+ }
296
+ }
297
+
298
+ /**
299
+ * Transform a UPP Tool to Responses API format
300
+ */
301
+ function transformTool(tool: Tool): OpenAIResponsesTool {
302
+ return {
303
+ type: 'function',
304
+ name: tool.name,
305
+ description: tool.description,
306
+ parameters: {
307
+ type: 'object',
308
+ properties: tool.parameters.properties,
309
+ required: tool.parameters.required,
310
+ ...(tool.parameters.additionalProperties !== undefined
311
+ ? { additionalProperties: tool.parameters.additionalProperties }
312
+ : {}),
313
+ },
314
+ };
315
+ }
316
+
317
/**
 * Transform OpenAI Responses API response to UPP LLMResponse.
 *
 * Walks every output item: `message` items contribute text blocks (the first
 * JSON-parsable text also becomes the structured `data` payload); each
 * `function_call` item contributes a tool call. The raw function_call items
 * are additionally stashed in `metadata.openai.functionCallItems` so a
 * follow-up turn can replay them verbatim (consumed by transformMessage).
 */
export function transformResponse(data: OpenAIResponsesResponse): LLMResponse {
  // Extract text content and tool calls from output items
  const textContent: TextBlock[] = [];
  const toolCalls: ToolCall[] = [];
  const functionCallItems: Array<{
    id: string;
    call_id: string;
    name: string;
    arguments: string;
  }> = [];
  let hadRefusal = false;
  let structuredData: unknown;

  for (const item of data.output) {
    if (item.type === 'message') {
      const messageItem = item as OpenAIResponsesMessageOutput;
      for (const content of messageItem.content) {
        if (content.type === 'output_text') {
          textContent.push({ type: 'text', text: content.text });
          // Try to parse as JSON for structured output (native JSON mode)
          // Only set data if text is valid JSON
          if (structuredData === undefined) {
            try {
              structuredData = JSON.parse(content.text);
            } catch {
              // Not valid JSON - that's fine, might not be structured output
            }
          }
        } else if (content.type === 'refusal') {
          // Surface refusal text as a normal text block; the flag below
          // turns the stop reason into 'content_filter'.
          textContent.push({ type: 'text', text: content.refusal });
          hadRefusal = true;
        }
      }
    } else if (item.type === 'function_call') {
      const functionCall = item as OpenAIResponsesFunctionCallOutput;
      let args: Record<string, unknown> = {};
      try {
        args = JSON.parse(functionCall.arguments);
      } catch {
        // Invalid JSON - use empty object
      }
      toolCalls.push({
        toolCallId: functionCall.call_id,
        toolName: functionCall.name,
        arguments: args,
      });
      functionCallItems.push({
        id: functionCall.id,
        call_id: functionCall.call_id,
        name: functionCall.name,
        arguments: functionCall.arguments,
      });
    }
  }

  const message = new AssistantMessage(
    textContent,
    toolCalls.length > 0 ? toolCalls : undefined,
    {
      id: data.id,
      metadata: {
        openai: {
          model: data.model,
          status: data.status,
          // Store response_id for multi-turn tool calling
          response_id: data.id,
          functionCallItems:
            functionCallItems.length > 0 ? functionCallItems : undefined,
        },
      },
    }
  );

  const usage: TokenUsage = {
    inputTokens: data.usage.input_tokens,
    outputTokens: data.usage.output_tokens,
    totalTokens: data.usage.total_tokens,
  };

  // Map status to stop reason
  let stopReason = 'end_turn';
  if (data.status === 'completed') {
    stopReason = toolCalls.length > 0 ? 'tool_use' : 'end_turn';
  } else if (data.status === 'incomplete') {
    // 'incomplete' with max_output_tokens means the model was truncated.
    stopReason = data.incomplete_details?.reason === 'max_output_tokens'
      ? 'max_tokens'
      : 'end_turn';
  } else if (data.status === 'failed') {
    stopReason = 'error';
  }
  if (hadRefusal && stopReason !== 'error') {
    stopReason = 'content_filter';
  }

  return {
    message,
    usage,
    stopReason,
    data: structuredData,
  };
}

/**
 * State for accumulating streaming response.
 * Mutated by transformStreamEvent; consumed by buildResponseFromState.
 */
export interface ResponsesStreamState {
  /** Response id, set by `response.created`. */
  id: string;
  /** Model name, set by `response.created`. */
  model: string;
  /** Accumulated text (or refusal text) keyed by output index. */
  textByIndex: Map<number, string>;
  /** In-flight function calls keyed by output index; fields fill in as events arrive. */
  toolCalls: Map<
    number,
    { itemId?: string; callId?: string; name?: string; arguments: string }
  >;
  /** Last known response status (starts 'in_progress'). */
  status: string;
  /** Prompt token count from the final usage payload; 0 until seen. */
  inputTokens: number;
  /** Completion token count from the final usage payload; 0 until seen. */
  outputTokens: number;
  /** True once any refusal delta/done event has been observed. */
  hadRefusal: boolean;
}

+ /**
440
+ * Create initial stream state
441
+ */
442
+ export function createStreamState(): ResponsesStreamState {
443
+ return {
444
+ id: '',
445
+ model: '',
446
+ textByIndex: new Map(),
447
+ toolCalls: new Map(),
448
+ status: 'in_progress',
449
+ inputTokens: 0,
450
+ outputTokens: 0,
451
+ hadRefusal: false,
452
+ };
453
+ }
454
+
455
+ /**
456
+ * Transform OpenAI Responses API stream event to UPP StreamEvent
457
+ * Returns array since one event may produce multiple UPP events
458
+ */
459
+ export function transformStreamEvent(
460
+ event: OpenAIResponsesStreamEvent,
461
+ state: ResponsesStreamState
462
+ ): StreamEvent[] {
463
+ const events: StreamEvent[] = [];
464
+
465
+ switch (event.type) {
466
+ case 'response.created':
467
+ state.id = event.response.id;
468
+ state.model = event.response.model;
469
+ events.push({ type: 'message_start', index: 0, delta: {} });
470
+ break;
471
+
472
+ case 'response.in_progress':
473
+ state.status = 'in_progress';
474
+ break;
475
+
476
+ case 'response.completed':
477
+ state.status = 'completed';
478
+ if (event.response.usage) {
479
+ state.inputTokens = event.response.usage.input_tokens;
480
+ state.outputTokens = event.response.usage.output_tokens;
481
+ }
482
+ events.push({ type: 'message_stop', index: 0, delta: {} });
483
+ break;
484
+
485
+ case 'response.failed':
486
+ state.status = 'failed';
487
+ events.push({ type: 'message_stop', index: 0, delta: {} });
488
+ break;
489
+
490
+ case 'response.output_item.added':
491
+ if (event.item.type === 'function_call') {
492
+ const functionCall = event.item as OpenAIResponsesFunctionCallOutput;
493
+ const existing = state.toolCalls.get(event.output_index) ?? {
494
+ arguments: '',
495
+ };
496
+ existing.itemId = functionCall.id;
497
+ existing.callId = functionCall.call_id;
498
+ existing.name = functionCall.name;
499
+ if (functionCall.arguments) {
500
+ existing.arguments = functionCall.arguments;
501
+ }
502
+ state.toolCalls.set(event.output_index, existing);
503
+ }
504
+ events.push({
505
+ type: 'content_block_start',
506
+ index: event.output_index,
507
+ delta: {},
508
+ });
509
+ break;
510
+
511
+ case 'response.output_item.done':
512
+ if (event.item.type === 'function_call') {
513
+ const functionCall = event.item as OpenAIResponsesFunctionCallOutput;
514
+ const existing = state.toolCalls.get(event.output_index) ?? {
515
+ arguments: '',
516
+ };
517
+ existing.itemId = functionCall.id;
518
+ existing.callId = functionCall.call_id;
519
+ existing.name = functionCall.name;
520
+ if (functionCall.arguments) {
521
+ existing.arguments = functionCall.arguments;
522
+ }
523
+ state.toolCalls.set(event.output_index, existing);
524
+ }
525
+ events.push({
526
+ type: 'content_block_stop',
527
+ index: event.output_index,
528
+ delta: {},
529
+ });
530
+ break;
531
+
532
+ case 'response.output_text.delta':
533
+ // Accumulate text
534
+ const currentText = state.textByIndex.get(event.output_index) ?? '';
535
+ state.textByIndex.set(event.output_index, currentText + event.delta);
536
+ events.push({
537
+ type: 'text_delta',
538
+ index: event.output_index,
539
+ delta: { text: event.delta },
540
+ });
541
+ break;
542
+
543
+ case 'response.output_text.done':
544
+ state.textByIndex.set(event.output_index, event.text);
545
+ break;
546
+
547
+ case 'response.refusal.delta': {
548
+ state.hadRefusal = true;
549
+ const currentRefusal = state.textByIndex.get(event.output_index) ?? '';
550
+ state.textByIndex.set(event.output_index, currentRefusal + event.delta);
551
+ events.push({
552
+ type: 'text_delta',
553
+ index: event.output_index,
554
+ delta: { text: event.delta },
555
+ });
556
+ break;
557
+ }
558
+
559
+ case 'response.refusal.done':
560
+ state.hadRefusal = true;
561
+ state.textByIndex.set(event.output_index, event.refusal);
562
+ break;
563
+
564
+ case 'response.function_call_arguments.delta': {
565
+ // Accumulate function call arguments
566
+ let toolCall = state.toolCalls.get(event.output_index);
567
+ if (!toolCall) {
568
+ toolCall = { arguments: '' };
569
+ state.toolCalls.set(event.output_index, toolCall);
570
+ }
571
+ if (event.item_id && !toolCall.itemId) {
572
+ toolCall.itemId = event.item_id;
573
+ }
574
+ if (event.call_id && !toolCall.callId) {
575
+ toolCall.callId = event.call_id;
576
+ }
577
+ toolCall.arguments += event.delta;
578
+ events.push({
579
+ type: 'tool_call_delta',
580
+ index: event.output_index,
581
+ delta: {
582
+ toolCallId: toolCall.callId ?? toolCall.itemId ?? '',
583
+ toolName: toolCall.name,
584
+ argumentsJson: event.delta,
585
+ },
586
+ });
587
+ break;
588
+ }
589
+
590
+ case 'response.function_call_arguments.done': {
591
+ // Finalize function call
592
+ let toolCall = state.toolCalls.get(event.output_index);
593
+ if (!toolCall) {
594
+ toolCall = { arguments: '' };
595
+ state.toolCalls.set(event.output_index, toolCall);
596
+ }
597
+ if (event.item_id) {
598
+ toolCall.itemId = event.item_id;
599
+ }
600
+ if (event.call_id) {
601
+ toolCall.callId = event.call_id;
602
+ }
603
+ toolCall.name = event.name;
604
+ toolCall.arguments = event.arguments;
605
+ break;
606
+ }
607
+
608
+ case 'error':
609
+ // Error events are handled at the handler level
610
+ break;
611
+
612
+ default:
613
+ // Ignore other events
614
+ break;
615
+ }
616
+
617
+ return events;
618
+ }
619
+
620
/**
 * Build LLMResponse from accumulated stream state.
 *
 * Mirrors transformResponse for the non-streaming path: text blocks come
 * from the per-index accumulator (the first JSON-parsable one also becomes
 * the structured `data` payload), tool calls from the tool-call accumulator,
 * usage from the counters, and the stop reason from the final status and
 * refusal flag.
 */
export function buildResponseFromState(state: ResponsesStreamState): LLMResponse {
  const textContent: TextBlock[] = [];
  let structuredData: unknown;

  // Combine all text content
  for (const [, text] of state.textByIndex) {
    if (text) {
      textContent.push({ type: 'text', text });
      // Try to parse as JSON for structured output (native JSON mode)
      if (structuredData === undefined) {
        try {
          structuredData = JSON.parse(text);
        } catch {
          // Not valid JSON - that's fine, might not be structured output
        }
      }
    }
  }

  const toolCalls: ToolCall[] = [];
  const functionCallItems: Array<{
    id: string;
    call_id: string;
    name: string;
    arguments: string;
  }> = [];
  for (const [, toolCall] of state.toolCalls) {
    let args: Record<string, unknown> = {};
    if (toolCall.arguments) {
      try {
        args = JSON.parse(toolCall.arguments);
      } catch {
        // Invalid JSON - use empty object
      }
    }
    // Fall back from call_id to item_id so a tool call that never reported
    // a call_id still gets a usable (if provider-internal) identifier.
    const itemId = toolCall.itemId ?? '';
    const callId = toolCall.callId ?? toolCall.itemId ?? '';
    const name = toolCall.name ?? '';
    toolCalls.push({
      toolCallId: callId,
      toolName: name,
      arguments: args,
    });

    // Only fully-identified calls are stored for replay on the next turn
    // (transformMessage reads these from metadata.openai.functionCallItems).
    if (itemId && callId && name) {
      functionCallItems.push({
        id: itemId,
        call_id: callId,
        name,
        arguments: toolCall.arguments,
      });
    }
  }

  const message = new AssistantMessage(
    textContent,
    toolCalls.length > 0 ? toolCalls : undefined,
    {
      id: state.id,
      metadata: {
        openai: {
          model: state.model,
          status: state.status,
          // Store response_id for multi-turn tool calling
          response_id: state.id,
          functionCallItems:
            functionCallItems.length > 0 ? functionCallItems : undefined,
        },
      },
    }
  );

  const usage: TokenUsage = {
    inputTokens: state.inputTokens,
    outputTokens: state.outputTokens,
    totalTokens: state.inputTokens + state.outputTokens,
  };

  // Map status to stop reason
  let stopReason = 'end_turn';
  if (state.status === 'completed') {
    stopReason = toolCalls.length > 0 ? 'tool_use' : 'end_turn';
  } else if (state.status === 'failed') {
    stopReason = 'error';
  }
  if (state.hadRefusal && stopReason !== 'error') {
    stopReason = 'content_filter';
  }

  return {
    message,
    usage,
    stopReason,
    data: structuredData,
  };
}
