@providerprotocol/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +21 -0
  2. package/README.md +84 -0
  3. package/dist/anthropic/index.d.ts +41 -0
  4. package/dist/anthropic/index.js +500 -0
  5. package/dist/anthropic/index.js.map +1 -0
  6. package/dist/chunk-CUCRF5W6.js +136 -0
  7. package/dist/chunk-CUCRF5W6.js.map +1 -0
  8. package/dist/chunk-FTFX2VET.js +424 -0
  9. package/dist/chunk-FTFX2VET.js.map +1 -0
  10. package/dist/chunk-QUUX4G7U.js +117 -0
  11. package/dist/chunk-QUUX4G7U.js.map +1 -0
  12. package/dist/chunk-Y6Q7JCNP.js +39 -0
  13. package/dist/chunk-Y6Q7JCNP.js.map +1 -0
  14. package/dist/google/index.d.ts +69 -0
  15. package/dist/google/index.js +517 -0
  16. package/dist/google/index.js.map +1 -0
  17. package/dist/http/index.d.ts +61 -0
  18. package/dist/http/index.js +43 -0
  19. package/dist/http/index.js.map +1 -0
  20. package/dist/index.d.ts +792 -0
  21. package/dist/index.js +898 -0
  22. package/dist/index.js.map +1 -0
  23. package/dist/openai/index.d.ts +204 -0
  24. package/dist/openai/index.js +1340 -0
  25. package/dist/openai/index.js.map +1 -0
  26. package/dist/provider-CUJWjgNl.d.ts +192 -0
  27. package/dist/retry-I2661_rv.d.ts +118 -0
  28. package/package.json +88 -0
  29. package/src/anthropic/index.ts +3 -0
  30. package/src/core/image.ts +188 -0
  31. package/src/core/llm.ts +619 -0
  32. package/src/core/provider.ts +92 -0
  33. package/src/google/index.ts +3 -0
  34. package/src/http/errors.ts +112 -0
  35. package/src/http/fetch.ts +210 -0
  36. package/src/http/index.ts +31 -0
  37. package/src/http/keys.ts +136 -0
  38. package/src/http/retry.ts +205 -0
  39. package/src/http/sse.ts +136 -0
  40. package/src/index.ts +32 -0
  41. package/src/openai/index.ts +9 -0
  42. package/src/providers/anthropic/index.ts +17 -0
  43. package/src/providers/anthropic/llm.ts +196 -0
  44. package/src/providers/anthropic/transform.ts +452 -0
  45. package/src/providers/anthropic/types.ts +213 -0
  46. package/src/providers/google/index.ts +17 -0
  47. package/src/providers/google/llm.ts +203 -0
  48. package/src/providers/google/transform.ts +487 -0
  49. package/src/providers/google/types.ts +214 -0
  50. package/src/providers/openai/index.ts +151 -0
  51. package/src/providers/openai/llm.completions.ts +201 -0
  52. package/src/providers/openai/llm.responses.ts +211 -0
  53. package/src/providers/openai/transform.completions.ts +628 -0
  54. package/src/providers/openai/transform.responses.ts +718 -0
  55. package/src/providers/openai/types.ts +711 -0
  56. package/src/types/content.ts +133 -0
  57. package/src/types/errors.ts +85 -0
  58. package/src/types/index.ts +105 -0
  59. package/src/types/llm.ts +211 -0
  60. package/src/types/messages.ts +182 -0
  61. package/src/types/provider.ts +195 -0
  62. package/src/types/schema.ts +58 -0
  63. package/src/types/stream.ts +146 -0
  64. package/src/types/thread.ts +226 -0
  65. package/src/types/tool.ts +88 -0
  66. package/src/types/turn.ts +118 -0
  67. package/src/utils/id.ts +28 -0
@@ -0,0 +1,1340 @@
1
+ import {
2
+ AssistantMessage,
3
+ isAssistantMessage,
4
+ isToolResultMessage,
5
+ isUserMessage
6
+ } from "../chunk-QUUX4G7U.js";
7
+ import {
8
+ UPPError,
9
+ doFetch,
10
+ doStreamFetch,
11
+ normalizeHttpError,
12
+ parseSSEStream,
13
+ resolveApiKey
14
+ } from "../chunk-FTFX2VET.js";
15
+
16
+ // src/providers/openai/transform.completions.ts
17
/**
 * Build an OpenAI Chat Completions request body from a provider-agnostic
 * request.
 *
 * @param {object} request - Normalized request (messages, system, params,
 *   tools, structure).
 * @param {string} modelId - OpenAI model identifier to target.
 * @returns {object} JSON-serializable Chat Completions request body.
 */
function transformRequest(request, modelId) {
  const params = request.params ?? {};
  const openaiRequest = {
    model: modelId,
    messages: transformMessages(request.messages, request.system)
  };
  // Params copied verbatim when the caller set them. Checked with
  // `!== void 0` (not truthiness) so 0 / false / "" survive.
  const passthroughKeys = [
    "temperature",
    "top_p",
    "frequency_penalty",
    "presence_penalty",
    "stop",
    "n",
    "logprobs",
    "top_logprobs",
    "seed",
    "user",
    "logit_bias",
    "reasoning_effort",
    "verbosity",
    "service_tier",
    "store",
    "metadata",
    "prediction",
    "prompt_cache_key",
    "prompt_cache_retention",
    "safety_identifier"
  ];
  for (const key of passthroughKeys) {
    if (params[key] !== void 0) {
      openaiRequest[key] = params[key];
    }
  }
  // Token limit: prefer the modern max_completion_tokens; fall back to the
  // legacy max_tokens only when the modern name is absent.
  if (params.max_completion_tokens !== void 0) {
    openaiRequest.max_completion_tokens = params.max_completion_tokens;
  } else if (params.max_tokens !== void 0) {
    openaiRequest.max_tokens = params.max_tokens;
  }
  if (request.tools && request.tools.length > 0) {
    openaiRequest.tools = request.tools.map(transformTool);
    // parallel_tool_calls is only meaningful when tools are present.
    if (params.parallel_tool_calls !== void 0) {
      openaiRequest.parallel_tool_calls = params.parallel_tool_calls;
    }
  }
  if (request.structure) {
    // Structured output: wrap the caller's schema in a strict json_schema
    // response format. OpenAI strict mode requires additionalProperties to
    // be set, so it defaults to false when the caller didn't specify it.
    const schema = {
      type: "object",
      properties: request.structure.properties,
      required: request.structure.required,
      ...request.structure.additionalProperties !== void 0 ? { additionalProperties: request.structure.additionalProperties } : { additionalProperties: false }
    };
    if (request.structure.description) {
      schema.description = request.structure.description;
    }
    openaiRequest.response_format = {
      type: "json_schema",
      json_schema: {
        name: "json_response",
        description: request.structure.description,
        schema,
        strict: true
      }
    };
  } else if (params.response_format !== void 0) {
    // No structure requested: honor an explicit response_format param.
    openaiRequest.response_format = params.response_format;
  }
  return openaiRequest;
}
118
/**
 * Flatten UPP messages (plus an optional system prompt) into the OpenAI
 * Chat Completions message array.
 *
 * @param {Array<object>} messages - UPP conversation messages.
 * @param {string} [system] - Optional system prompt, emitted first.
 * @returns {Array<object>} Chat Completions `messages` array.
 */
function transformMessages(messages, system) {
  const out = [];
  if (system) {
    out.push({ role: "system", content: system });
  }
  for (const msg of messages) {
    if (isToolResultMessage(msg)) {
      // One tool-result message may fan out into several `tool` entries.
      out.push(...transformToolResults(msg));
      continue;
    }
    const converted = transformMessage(msg);
    if (converted) {
      out.push(converted);
    }
  }
  return out;
}
139
/** Drop malformed content blocks: keep only truthy objects with a string `type`. */
function filterValidContent(content) {
  const isValid = (block) => Boolean(block) && typeof block.type === "string";
  return content.filter(isValid);
}
142
/**
 * Convert one UPP message into a single OpenAI chat message, or null when
 * the message kind is unrecognized.
 *
 * NOTE: for tool-result messages only the first result is returned here;
 * transformMessages routes those through transformToolResults instead,
 * which preserves every result.
 */
function transformMessage(message) {
  if (isUserMessage(message)) {
    const content = filterValidContent(message.content);
    const [only] = content;
    // A lone text block collapses to a bare string (smaller payload).
    if (content.length === 1 && only?.type === "text") {
      return { role: "user", content: only.text };
    }
    return { role: "user", content: content.map(transformContentBlock) };
  }
  if (isAssistantMessage(message)) {
    const content = filterValidContent(message.content);
    let text = "";
    for (const block of content) {
      if (block.type === "text") {
        text += block.text;
      }
    }
    const assistantMessage = {
      role: "assistant",
      // OpenAI expects null (not "") when there is no assistant text.
      content: text || null
    };
    const calls = message.toolCalls ?? [];
    if (calls.length > 0) {
      assistantMessage.tool_calls = calls.map((call) => ({
        id: call.toolCallId,
        type: "function",
        function: {
          name: call.toolName,
          arguments: JSON.stringify(call.arguments)
        }
      }));
    }
    return assistantMessage;
  }
  if (isToolResultMessage(message)) {
    const first = message.results[0];
    if (!first) {
      return null;
    }
    return {
      role: "tool",
      tool_call_id: first.toolCallId,
      content: typeof first.result === "string" ? first.result : JSON.stringify(first.result)
    };
  }
  return null;
}
185
/**
 * Expand a tool-result message into one OpenAI `tool` message per result.
 * Anything that is not a tool-result message falls back to transformMessage.
 */
function transformToolResults(message) {
  if (!isToolResultMessage(message)) {
    const fallback = transformMessage(message);
    return fallback ? [fallback] : [];
  }
  const toToolMessage = ({ toolCallId, result }) => ({
    role: "tool",
    tool_call_id: toolCallId,
    content: typeof result === "string" ? result : JSON.stringify(result)
  });
  return message.results.map(toToolMessage);
}
196
/**
 * Convert a UPP user-content block into Chat Completions format.
 * Text passes through; images become image_url parts (a data: URL for
 * base64/bytes sources). Throws on unknown image sources or block types.
 */
function transformContentBlock(block) {
  if (block.type === "text") {
    return { type: "text", text: block.text };
  }
  if (block.type === "image") {
    const { source, mimeType } = block;
    let url;
    switch (source.type) {
      case "base64":
        url = `data:${mimeType};base64,${source.data}`;
        break;
      case "url":
        url = source.url;
        break;
      case "bytes": {
        // btoa needs a binary string, so map each byte to a char first.
        let binary = "";
        for (const byte of source.data) {
          binary += String.fromCharCode(byte);
        }
        url = `data:${mimeType};base64,${btoa(binary)}`;
        break;
      }
      default:
        throw new Error("Unknown image source type");
    }
    return { type: "image_url", image_url: { url } };
  }
  throw new Error(`Unsupported content type: ${block.type}`);
}
224
/**
 * Convert a UPP tool definition into an OpenAI Chat Completions tool.
 * additionalProperties is forwarded only when the caller explicitly set it.
 */
function transformTool(tool) {
  const parameters = {
    type: "object",
    properties: tool.parameters.properties,
    required: tool.parameters.required
  };
  if (tool.parameters.additionalProperties !== void 0) {
    parameters.additionalProperties = tool.parameters.additionalProperties;
  }
  return {
    type: "function",
    function: {
      name: tool.name,
      description: tool.description,
      parameters
    }
  };
}
239
/**
 * Convert a non-streaming Chat Completions response into the UPP result
 * shape: { message, usage, stopReason, data }.
 *
 * Only the first choice is consumed. Throws when the response carries no
 * choices at all.
 */
function transformResponse(data) {
  const choice = data.choices[0];
  if (!choice) {
    throw new Error("No choices in OpenAI response");
  }
  const textContent = [];
  let structuredData;
  if (choice.message.content) {
    textContent.push({ type: "text", text: choice.message.content });
    // Opportunistically parse the text as JSON for structured-output
    // callers; non-JSON text simply leaves `data` undefined.
    try {
      structuredData = JSON.parse(choice.message.content);
    } catch {
    }
  }
  let hadRefusal = false;
  if (choice.message.refusal) {
    // Refusal text is surfaced as ordinary text; the flag later forces the
    // stop reason to content_filter.
    textContent.push({ type: "text", text: choice.message.refusal });
    hadRefusal = true;
  }
  const toolCalls = [];
  if (choice.message.tool_calls) {
    for (const call of choice.message.tool_calls) {
      // Arguments arrive as a JSON string; unparseable arguments degrade
      // to an empty object rather than failing the whole response.
      let args = {};
      try {
        args = JSON.parse(call.function.arguments);
      } catch {
      }
      toolCalls.push({
        toolCallId: call.id,
        toolName: call.function.name,
        arguments: args
      });
    }
  }
  const message = new AssistantMessage(
    textContent,
    toolCalls.length > 0 ? toolCalls : void 0,
    {
      id: data.id,
      metadata: {
        openai: {
          model: data.model,
          finish_reason: choice.finish_reason,
          system_fingerprint: data.system_fingerprint,
          service_tier: data.service_tier
        }
      }
    }
  );
  // NOTE(review): assumes `data.usage` is always present on non-streaming
  // responses — confirm against the API; a missing usage object would throw.
  const usage = {
    inputTokens: data.usage.prompt_tokens,
    outputTokens: data.usage.completion_tokens,
    totalTokens: data.usage.total_tokens
  };
  // Map OpenAI finish reasons onto UPP stop reasons; unknown reasons
  // fall through as "end_turn".
  let stopReason = "end_turn";
  switch (choice.finish_reason) {
    case "stop":
      stopReason = "end_turn";
      break;
    case "length":
      stopReason = "max_tokens";
      break;
    case "tool_calls":
      stopReason = "tool_use";
      break;
    case "content_filter":
      stopReason = "content_filter";
      break;
  }
  if (hadRefusal && stopReason !== "content_filter") {
    stopReason = "content_filter";
  }
  return {
    message,
    usage,
    stopReason,
    data: structuredData
  };
}
318
/**
 * Fresh accumulator for a Chat Completions SSE stream: message id/model,
 * concatenated text, partial tool calls keyed by tool-call index, plus
 * finish reason, usage counters and the refusal flag.
 */
function createStreamState() {
  const toolCalls = new Map();
  return {
    id: "",
    model: "",
    text: "",
    toolCalls,
    finishReason: null,
    inputTokens: 0,
    outputTokens: 0,
    hadRefusal: false
  };
}
330
/**
 * Fold one Chat Completions SSE chunk into the mutable stream state and
 * return the provider-agnostic stream events it produced.
 *
 * @param {object} chunk - Parsed SSE data object from OpenAI.
 * @param {object} state - Accumulator from createStreamState (mutated).
 * @returns {Array<object>} Zero or more UPP stream events.
 */
function transformStreamEvent(chunk, state) {
  const events = [];
  // The first chunk that carries an id opens the message.
  if (chunk.id && !state.id) {
    state.id = chunk.id;
    events.push({ type: "message_start", index: 0, delta: {} });
  }
  if (chunk.model) {
    state.model = chunk.model;
  }
  const [choice] = chunk.choices;
  if (choice) {
    const { content, refusal, tool_calls: toolCallDeltas } = choice.delta;
    if (content) {
      state.text += content;
      events.push({ type: "text_delta", index: 0, delta: { text: content } });
    }
    if (refusal) {
      // Refusal text streams as ordinary text; the flag lets the final
      // response map the stop reason to content_filter.
      state.hadRefusal = true;
      state.text += refusal;
      events.push({ type: "text_delta", index: 0, delta: { text: refusal } });
    }
    if (toolCallDeltas) {
      for (const deltaItem of toolCallDeltas) {
        const { index } = deltaItem;
        let entry = state.toolCalls.get(index);
        if (!entry) {
          entry = { id: "", name: "", arguments: "" };
          state.toolCalls.set(index, entry);
        }
        if (deltaItem.id) {
          entry.id = deltaItem.id;
        }
        if (deltaItem.function?.name) {
          entry.name = deltaItem.function.name;
        }
        if (deltaItem.function?.arguments) {
          entry.arguments += deltaItem.function.arguments;
          events.push({
            type: "tool_call_delta",
            index,
            delta: {
              toolCallId: entry.id,
              toolName: entry.name,
              argumentsJson: deltaItem.function.arguments
            }
          });
        }
      }
    }
    if (choice.finish_reason) {
      state.finishReason = choice.finish_reason;
      events.push({ type: "message_stop", index: 0, delta: {} });
    }
  }
  // Usage arrives on the final chunk when stream_options.include_usage is set.
  if (chunk.usage) {
    state.inputTokens = chunk.usage.prompt_tokens;
    state.outputTokens = chunk.usage.completion_tokens;
  }
  return events;
}
397
/**
 * Assemble the final UPP result { message, usage, stopReason, data } from
 * the accumulated stream state once the SSE stream has ended.
 */
function buildResponseFromState(state) {
  const textContent = [];
  let structuredData;
  if (state.text) {
    textContent.push({ type: "text", text: state.text });
    // Best-effort JSON parse for structured-output callers; plain text
    // leaves `data` undefined.
    try {
      structuredData = JSON.parse(state.text);
    } catch {
    }
  }
  const toolCalls = [];
  for (const [, toolCall] of state.toolCalls) {
    // Arguments were accumulated as a JSON string; unparseable arguments
    // degrade to an empty object.
    let args = {};
    if (toolCall.arguments) {
      try {
        args = JSON.parse(toolCall.arguments);
      } catch {
      }
    }
    toolCalls.push({
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      arguments: args
    });
  }
  const message = new AssistantMessage(
    textContent,
    toolCalls.length > 0 ? toolCalls : void 0,
    {
      id: state.id,
      metadata: {
        openai: {
          model: state.model,
          finish_reason: state.finishReason
        }
      }
    }
  );
  // Usage totals come from the final chunk (stream_options.include_usage);
  // they remain 0 when the provider sent no usage.
  const usage = {
    inputTokens: state.inputTokens,
    outputTokens: state.outputTokens,
    totalTokens: state.inputTokens + state.outputTokens
  };
  // Map OpenAI finish reasons onto UPP stop reasons; unknown reasons
  // fall through as "end_turn".
  let stopReason = "end_turn";
  switch (state.finishReason) {
    case "stop":
      stopReason = "end_turn";
      break;
    case "length":
      stopReason = "max_tokens";
      break;
    case "tool_calls":
      stopReason = "tool_use";
      break;
    case "content_filter":
      stopReason = "content_filter";
      break;
  }
  if (state.hadRefusal && stopReason !== "content_filter") {
    stopReason = "content_filter";
  }
  return {
    message,
    usage,
    stopReason,
    data: structuredData
  };
}
465
+
466
+ // src/providers/openai/llm.completions.ts
467
// Default Chat Completions endpoint; overridable per request via config.baseUrl.
var OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
// Feature set advertised by models bound through the completions handler.
var OPENAI_CAPABILITIES = {
  streaming: true,
  tools: true,
  structuredOutput: true,
  imageInput: true,
  videoInput: false,
  audioInput: false
};
476
/**
 * Create the OpenAI Chat Completions LLM handler.
 *
 * The returned handler is wired to a provider via `_setProvider` (done by
 * createProvider) and exposes `bind(modelId)`, which yields a model object
 * with `complete` (request/response) and `stream` (SSE) entry points.
 */
function createCompletionsLLMHandler() {
  let providerRef = null;
  return {
    // Back-reference installed by createProvider; required before bind().
    _setProvider(provider) {
      providerRef = provider;
    },
    bind(modelId) {
      if (!providerRef) {
        throw new UPPError(
          "Provider reference not set. Handler must be used with createProvider() or have _setProvider called.",
          "INVALID_REQUEST",
          "openai",
          "llm"
        );
      }
      const model = {
        modelId,
        capabilities: OPENAI_CAPABILITIES,
        // Getter so the model always sees the current provider reference.
        get provider() {
          return providerRef;
        },
        /**
         * Non-streaming completion: resolve the API key, POST the
         * transformed request, and map the JSON body to a UPP response.
         */
        async complete(request) {
          const apiKey = await resolveApiKey(
            request.config,
            "OPENAI_API_KEY",
            "openai",
            "llm"
          );
          const baseUrl = request.config.baseUrl ?? OPENAI_API_URL;
          const body = transformRequest(request, modelId);
          const response = await doFetch(
            baseUrl,
            {
              method: "POST",
              headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${apiKey}`
              },
              body: JSON.stringify(body),
              signal: request.signal
            },
            request.config,
            "openai",
            "llm"
          );
          const data = await response.json();
          return transformResponse(data);
        },
        /**
         * Streaming completion. Returns an async-iterable of UPP stream
         * events plus a `response` promise that settles with the final
         * aggregated response once the stream completes (or rejects on
         * error). The deferred resolve/reject pair bridges the generator
         * and the promise.
         *
         * NOTE(review): if a caller iterates events but never awaits
         * `.response`, a rejection of responsePromise may surface as an
         * unhandled rejection — confirm consumers always observe it.
         */
        stream(request) {
          const state = createStreamState();
          let responseResolve;
          let responseReject;
          const responsePromise = new Promise((resolve, reject) => {
            responseResolve = resolve;
            responseReject = reject;
          });
          async function* generateEvents() {
            try {
              const apiKey = await resolveApiKey(
                request.config,
                "OPENAI_API_KEY",
                "openai",
                "llm"
              );
              const baseUrl = request.config.baseUrl ?? OPENAI_API_URL;
              const body = transformRequest(request, modelId);
              // Ask OpenAI to stream and to append usage on the last chunk.
              body.stream = true;
              body.stream_options = { include_usage: true };
              const response = await doStreamFetch(
                baseUrl,
                {
                  method: "POST",
                  headers: {
                    "Content-Type": "application/json",
                    Authorization: `Bearer ${apiKey}`
                  },
                  body: JSON.stringify(body),
                  signal: request.signal
                },
                request.config,
                "openai",
                "llm"
              );
              if (!response.ok) {
                const error = await normalizeHttpError(response, "openai", "llm");
                responseReject(error);
                throw error;
              }
              if (!response.body) {
                const error = new UPPError(
                  "No response body for streaming request",
                  "PROVIDER_ERROR",
                  "openai",
                  "llm"
                );
                responseReject(error);
                throw error;
              }
              for await (const data of parseSSEStream(response.body)) {
                // OpenAI terminates the stream with a literal "[DONE]" sentinel.
                if (data === "[DONE]") {
                  continue;
                }
                if (typeof data === "object" && data !== null) {
                  const chunk = data;
                  // In-band error payloads abort the stream.
                  if ("error" in chunk && chunk.error) {
                    const errorData = chunk.error;
                    const error = new UPPError(
                      errorData.message ?? "Unknown error",
                      "PROVIDER_ERROR",
                      "openai",
                      "llm"
                    );
                    responseReject(error);
                    throw error;
                  }
                  const uppEvents = transformStreamEvent(chunk, state);
                  for (const event of uppEvents) {
                    yield event;
                  }
                }
              }
              responseResolve(buildResponseFromState(state));
            } catch (error) {
              // May be a second reject for errors already rejected above;
              // that second call is a no-op on an already-settled promise.
              responseReject(error);
              throw error;
            }
          }
          return {
            // Each iteration starts a fresh generator (and a fresh request).
            [Symbol.asyncIterator]() {
              return generateEvents();
            },
            response: responsePromise
          };
        }
      };
      return model;
    }
  };
}
615
+
616
+ // src/providers/openai/transform.responses.ts
617
/**
 * Build an OpenAI Responses API request body from a provider-agnostic
 * request.
 *
 * @param {object} request - Normalized request (messages, system, params,
 *   tools, structure).
 * @param {string} modelId - OpenAI model identifier to target.
 * @returns {object} JSON-serializable Responses API request body.
 */
function transformRequest2(request, modelId) {
  const params = request.params ?? {};
  const openaiRequest = {
    model: modelId,
    input: transformInputItems(request.messages, request.system)
  };
  // Params copied verbatim when present (`!== void 0` keeps 0/false/"").
  const passthroughKeys = [
    "temperature",
    "top_p",
    "service_tier",
    "store",
    "metadata",
    "truncation",
    "include",
    "background",
    "previous_response_id"
  ];
  for (const key of passthroughKeys) {
    if (params[key] !== void 0) {
      openaiRequest[key] = params[key];
    }
  }
  // Token limit: prefer the Responses-native max_output_tokens, then fall
  // back to the Chat Completions aliases.
  if (params.max_output_tokens !== void 0) {
    openaiRequest.max_output_tokens = params.max_output_tokens;
  } else if (params.max_completion_tokens !== void 0) {
    openaiRequest.max_output_tokens = params.max_completion_tokens;
  } else if (params.max_tokens !== void 0) {
    openaiRequest.max_output_tokens = params.max_tokens;
  }
  if (params.reasoning !== void 0) {
    openaiRequest.reasoning = { ...params.reasoning };
  }
  if (params.reasoning_effort !== void 0) {
    // The reasoning_effort shorthand merges into (and overrides)
    // reasoning.effort.
    openaiRequest.reasoning = {
      ...openaiRequest.reasoning ?? {},
      effort: params.reasoning_effort
    };
  }
  if (request.tools && request.tools.length > 0) {
    openaiRequest.tools = request.tools.map(transformTool2);
    // parallel_tool_calls is only meaningful when tools are present.
    if (params.parallel_tool_calls !== void 0) {
      openaiRequest.parallel_tool_calls = params.parallel_tool_calls;
    }
  }
  if (request.structure) {
    // Structured output via text.format json_schema; strict mode requires
    // additionalProperties, so it defaults to false when unset.
    const schema = {
      type: "object",
      properties: request.structure.properties,
      required: request.structure.required,
      ...request.structure.additionalProperties !== void 0 ? { additionalProperties: request.structure.additionalProperties } : { additionalProperties: false }
    };
    if (request.structure.description) {
      schema.description = request.structure.description;
    }
    openaiRequest.text = {
      format: {
        type: "json_schema",
        name: "json_response",
        description: request.structure.description,
        schema,
        strict: true
      }
    };
  }
  return openaiRequest;
}
694
/**
 * Convert UPP messages (plus an optional system prompt) into Responses API
 * input items. A single plain-text user message collapses to a bare string,
 * the API's shorthand form.
 */
function transformInputItems(messages, system) {
  const items = [];
  if (system) {
    items.push({ type: "message", role: "system", content: system });
  }
  for (const message of messages) {
    items.push(...transformMessage2(message));
  }
  if (items.length === 1) {
    const [only] = items;
    if (only?.type === "message" && only.role === "user" && typeof only.content === "string") {
      return only.content;
    }
  }
  return items;
}
715
/** Keep only well-formed content blocks: truthy values with a string `type`. */
function filterValidContent2(content) {
  return content.filter((block) => Boolean(block) && typeof block.type === "string");
}
718
/**
 * Convert one UPP message into zero or more Responses API input items.
 *
 * - User messages become a single `message` item (bare string content when
 *   there is exactly one text block).
 * - Assistant messages may expand into a `message` item plus one
 *   `function_call` item per tool call. Original function_call items
 *   preserved in message.metadata.openai take precedence over the generic
 *   toolCalls so round-tripped ids match what OpenAI issued.
 * - Tool-result messages become one `function_call_output` item per result.
 * - Anything else yields an empty array.
 */
function transformMessage2(message) {
  if (isUserMessage(message)) {
    const validContent = filterValidContent2(message.content);
    if (validContent.length === 1 && validContent[0]?.type === "text") {
      return [
        {
          type: "message",
          role: "user",
          content: validContent[0].text
        }
      ];
    }
    return [
      {
        type: "message",
        role: "user",
        content: validContent.map(transformContentPart)
      }
    ];
  }
  if (isAssistantMessage(message)) {
    const validContent = filterValidContent2(message.content);
    const items = [];
    const contentParts = validContent.filter((c) => c.type === "text").map((c) => ({
      type: "output_text",
      text: c.text
    }));
    if (contentParts.length > 0) {
      items.push({
        type: "message",
        role: "assistant",
        content: contentParts
      });
    }
    const openaiMeta = message.metadata?.openai;
    const functionCallItems = openaiMeta?.functionCallItems;
    if (functionCallItems && functionCallItems.length > 0) {
      // Replay the exact function_call items OpenAI produced earlier.
      for (const fc of functionCallItems) {
        items.push({
          type: "function_call",
          id: fc.id,
          call_id: fc.call_id,
          name: fc.name,
          arguments: fc.arguments
        });
      }
    } else if (message.toolCalls && message.toolCalls.length > 0) {
      // Fallback: synthesize function_call items from generic tool calls,
      // deriving an item id from the call id.
      for (const call of message.toolCalls) {
        items.push({
          type: "function_call",
          id: `fc_${call.toolCallId}`,
          call_id: call.toolCallId,
          name: call.toolName,
          arguments: JSON.stringify(call.arguments)
        });
      }
    }
    return items;
  }
  if (isToolResultMessage(message)) {
    return message.results.map((result) => ({
      type: "function_call_output",
      call_id: result.toolCallId,
      output: typeof result.result === "string" ? result.result : JSON.stringify(result.result)
    }));
  }
  return [];
}
786
/**
 * Convert a UPP user-content block into a Responses API input part.
 * Text → input_text; images → input_image carrying a URL or data: URL.
 * Throws for unknown image sources and unsupported block types.
 */
function transformContentPart(block) {
  if (block.type === "text") {
    return { type: "input_text", text: block.text };
  }
  if (block.type === "image") {
    const { source, mimeType } = block;
    switch (source.type) {
      case "base64":
        return {
          type: "input_image",
          image_url: `data:${mimeType};base64,${source.data}`
        };
      case "url":
        return { type: "input_image", image_url: source.url };
      case "bytes": {
        // btoa needs a binary string, so map each byte to a char first.
        let binary = "";
        for (const byte of source.data) {
          binary += String.fromCharCode(byte);
        }
        return {
          type: "input_image",
          image_url: `data:${mimeType};base64,${btoa(binary)}`
        };
      }
      default:
        throw new Error("Unknown image source type");
    }
  }
  throw new Error(`Unsupported content type: ${block.type}`);
}
819
/**
 * Convert a UPP tool definition into a Responses API function tool
 * (flat shape: name/description at the top level, unlike Chat Completions).
 * additionalProperties is forwarded only when explicitly set.
 */
function transformTool2(tool) {
  const parameters = {
    type: "object",
    properties: tool.parameters.properties,
    required: tool.parameters.required
  };
  if (tool.parameters.additionalProperties !== void 0) {
    parameters.additionalProperties = tool.parameters.additionalProperties;
  }
  return {
    type: "function",
    name: tool.name,
    description: tool.description,
    parameters
  };
}
832
/**
 * Convert a non-streaming Responses API response into the UPP result
 * shape: { message, usage, stopReason, data }.
 *
 * Walks data.output collecting text/refusal content and function calls.
 * Raw function_call items are preserved in message metadata so later turns
 * can replay them with OpenAI's original ids.
 */
function transformResponse2(data) {
  const textContent = [];
  const toolCalls = [];
  const functionCallItems = [];
  let hadRefusal = false;
  let structuredData;
  for (const item of data.output) {
    if (item.type === "message") {
      const messageItem = item;
      for (const content of messageItem.content) {
        if (content.type === "output_text") {
          textContent.push({ type: "text", text: content.text });
          // Only the first parseable text part populates `data`.
          if (structuredData === void 0) {
            try {
              structuredData = JSON.parse(content.text);
            } catch {
            }
          }
        } else if (content.type === "refusal") {
          // Refusal text is surfaced as ordinary text; the flag later
          // forces the stop reason to content_filter.
          textContent.push({ type: "text", text: content.refusal });
          hadRefusal = true;
        }
      }
    } else if (item.type === "function_call") {
      const functionCall = item;
      // Arguments arrive as a JSON string; unparseable arguments degrade
      // to an empty object.
      let args = {};
      try {
        args = JSON.parse(functionCall.arguments);
      } catch {
      }
      toolCalls.push({
        toolCallId: functionCall.call_id,
        toolName: functionCall.name,
        arguments: args
      });
      functionCallItems.push({
        id: functionCall.id,
        call_id: functionCall.call_id,
        name: functionCall.name,
        arguments: functionCall.arguments
      });
    }
  }
  const message = new AssistantMessage(
    textContent,
    toolCalls.length > 0 ? toolCalls : void 0,
    {
      id: data.id,
      metadata: {
        openai: {
          model: data.model,
          status: data.status,
          // Store response_id for multi-turn tool calling
          response_id: data.id,
          functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
        }
      }
    }
  );
  // NOTE(review): assumes `data.usage` is always present on completed
  // responses — confirm against the API; a missing usage object would throw.
  const usage = {
    inputTokens: data.usage.input_tokens,
    outputTokens: data.usage.output_tokens,
    totalTokens: data.usage.total_tokens
  };
  // Derive the UPP stop reason from response status (the Responses API has
  // no per-choice finish_reason).
  let stopReason = "end_turn";
  if (data.status === "completed") {
    stopReason = toolCalls.length > 0 ? "tool_use" : "end_turn";
  } else if (data.status === "incomplete") {
    stopReason = data.incomplete_details?.reason === "max_output_tokens" ? "max_tokens" : "end_turn";
  } else if (data.status === "failed") {
    stopReason = "error";
  }
  if (hadRefusal && stopReason !== "error") {
    stopReason = "content_filter";
  }
  return {
    message,
    usage,
    stopReason,
    data: structuredData
  };
}
914
/**
 * Fresh accumulator for a Responses API SSE stream: output text and partial
 * tool calls keyed by output index, plus response status, usage counters
 * and the refusal flag.
 */
function createStreamState2() {
  const textByIndex = new Map();
  const toolCalls = new Map();
  return {
    id: "",
    model: "",
    textByIndex,
    toolCalls,
    status: "in_progress",
    inputTokens: 0,
    outputTokens: 0,
    hadRefusal: false
  };
}
926
/**
 * Translates one raw OpenAI Responses SSE event into zero or more unified
 * stream events, accumulating text, tool-call, and usage data on `state`.
 *
 * Returns an array of unified events (possibly empty); unknown event types
 * and provider "error" events produce no unified events.
 */
function transformStreamEvent2(event, state) {
  // Merge a completed/announced function_call output item into the
  // per-index tool-call record.
  const recordFunctionCall = (index, item) => {
    const record = state.toolCalls.get(index) ?? { arguments: "" };
    record.itemId = item.id;
    record.callId = item.call_id;
    record.name = item.name;
    if (item.arguments) {
      record.arguments = item.arguments;
    }
    state.toolCalls.set(index, record);
  };
  // Fetch (or lazily create) the tool-call record for an output index.
  const toolCallAt = (index) => {
    let record = state.toolCalls.get(index);
    if (!record) {
      record = { arguments: "" };
      state.toolCalls.set(index, record);
    }
    return record;
  };
  if (event.type === "response.created") {
    state.id = event.response.id;
    state.model = event.response.model;
    return [{ type: "message_start", index: 0, delta: {} }];
  }
  if (event.type === "response.in_progress") {
    state.status = "in_progress";
    return [];
  }
  if (event.type === "response.completed") {
    state.status = "completed";
    const usage = event.response.usage;
    if (usage) {
      state.inputTokens = usage.input_tokens;
      state.outputTokens = usage.output_tokens;
    }
    return [{ type: "message_stop", index: 0, delta: {} }];
  }
  if (event.type === "response.failed") {
    state.status = "failed";
    return [{ type: "message_stop", index: 0, delta: {} }];
  }
  if (
    event.type === "response.output_item.added" ||
    event.type === "response.output_item.done"
  ) {
    if (event.item.type === "function_call") {
      recordFunctionCall(event.output_index, event.item);
    }
    // A block-start/stop marker is emitted for every item type, not just
    // function calls.
    const marker =
      event.type === "response.output_item.added"
        ? "content_block_start"
        : "content_block_stop";
    return [{ type: marker, index: event.output_index, delta: {} }];
  }
  if (
    event.type === "response.output_text.delta" ||
    event.type === "response.refusal.delta"
  ) {
    // Refusal text accumulates into the same per-index buffer as normal text.
    if (event.type === "response.refusal.delta") {
      state.hadRefusal = true;
    }
    const accumulated = state.textByIndex.get(event.output_index) ?? "";
    state.textByIndex.set(event.output_index, accumulated + event.delta);
    return [
      {
        type: "text_delta",
        index: event.output_index,
        delta: { text: event.delta }
      }
    ];
  }
  if (event.type === "response.output_text.done") {
    state.textByIndex.set(event.output_index, event.text);
    return [];
  }
  if (event.type === "response.refusal.done") {
    state.hadRefusal = true;
    state.textByIndex.set(event.output_index, event.refusal);
    return [];
  }
  if (event.type === "response.function_call_arguments.delta") {
    const record = toolCallAt(event.output_index);
    // Ids are only adopted from deltas when not already known.
    if (event.item_id && !record.itemId) {
      record.itemId = event.item_id;
    }
    if (event.call_id && !record.callId) {
      record.callId = event.call_id;
    }
    record.arguments += event.delta;
    return [
      {
        type: "tool_call_delta",
        index: event.output_index,
        delta: {
          toolCallId: record.callId ?? record.itemId ?? "",
          toolName: record.name,
          argumentsJson: event.delta
        }
      }
    ];
  }
  if (event.type === "response.function_call_arguments.done") {
    const record = toolCallAt(event.output_index);
    // The "done" event is authoritative: ids, name, and the full argument
    // string overwrite whatever the deltas accumulated.
    if (event.item_id) {
      record.itemId = event.item_id;
    }
    if (event.call_id) {
      record.callId = event.call_id;
    }
    record.name = event.name;
    record.arguments = event.arguments;
    return [];
  }
  // "error" events and unrecognized types: nothing to emit.
  return [];
}
1063
/**
 * Assembles the final unified response — message, usage, stop reason, and
 * optional structured data — from the accumulated stream state.
 */
function buildResponseFromState2(state) {
  const textContent = [];
  let structuredData;
  for (const text of state.textByIndex.values()) {
    if (!text) {
      continue;
    }
    textContent.push({ type: "text", text });
    // The first text block that parses as JSON doubles as structured output.
    if (structuredData === void 0) {
      try {
        structuredData = JSON.parse(text);
      } catch {
        // not JSON — leave structuredData unset
      }
    }
  }
  const toolCalls = [];
  const functionCallItems = [];
  for (const record of state.toolCalls.values()) {
    let parsedArguments = {};
    if (record.arguments) {
      try {
        parsedArguments = JSON.parse(record.arguments);
      } catch {
        // malformed argument JSON falls back to an empty object
      }
    }
    const itemId = record.itemId ?? "";
    const callId = record.callId ?? record.itemId ?? "";
    const name = record.name ?? "";
    toolCalls.push({
      toolCallId: callId,
      toolName: name,
      arguments: parsedArguments
    });
    // Only fully-identified calls are echoed back for multi-turn tool use.
    if (itemId && callId && name) {
      functionCallItems.push({
        id: itemId,
        call_id: callId,
        name,
        arguments: record.arguments
      });
    }
  }
  const message = new AssistantMessage(
    textContent,
    toolCalls.length > 0 ? toolCalls : void 0,
    {
      id: state.id,
      metadata: {
        openai: {
          model: state.model,
          status: state.status,
          // Store response_id for multi-turn tool calling
          response_id: state.id,
          functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
        }
      }
    }
  );
  const usage = {
    inputTokens: state.inputTokens,
    outputTokens: state.outputTokens,
    totalTokens: state.inputTokens + state.outputTokens
  };
  // Stop reason: failure wins, then tool use, then plain end-of-turn; a
  // refusal overrides everything except an outright error.
  let stopReason;
  if (state.status === "failed") {
    stopReason = "error";
  } else if (state.status === "completed" && toolCalls.length > 0) {
    stopReason = "tool_use";
  } else {
    stopReason = "end_turn";
  }
  if (state.hadRefusal && stopReason !== "error") {
    stopReason = "content_filter";
  }
  return {
    message,
    usage,
    stopReason,
    data: structuredData
  };
}
1141
+
1142
// src/providers/openai/llm.responses.ts
// Default endpoint for the OpenAI Responses API (used unless the request
// config supplies a baseUrl).
var OPENAI_RESPONSES_API_URL = "https://api.openai.com/v1/responses";
// Capability flags advertised by models bound through the Responses handler.
var OPENAI_CAPABILITIES2 = {
  streaming: true,
  tools: true,
  structuredOutput: true,
  imageInput: true,
  videoInput: false,
  audioInput: false
};
1152
/**
 * Creates the LLM handler backed by the OpenAI Responses API.
 *
 * The handler must be wired to its owning provider via `_setProvider` (done
 * by `createOpenAIProvider`) before `bind` is called. `bind(modelId)` returns
 * a model object exposing:
 *   - `complete(request)` — unary request/response call;
 *   - `stream(request)`  — async-iterable of unified stream events plus a
 *     `response` promise that settles with the final accumulated response.
 *
 * Fix over the previous version: the `response` promise returned by
 * `stream()` is given a no-op rejection handler. Previously, a consumer that
 * only iterated the event stream (and handled the error thrown from the
 * iterator) left `response`'s rejection unobserved, triggering an
 * unhandled-promise-rejection warning (and, in Node, a potential process
 * crash). Awaiting `response` still rejects with the original error.
 */
function createResponsesLLMHandler() {
  let providerRef = null;
  return {
    _setProvider(provider) {
      providerRef = provider;
    },
    bind(modelId) {
      if (!providerRef) {
        throw new UPPError(
          "Provider reference not set. Handler must be used with createProvider() or have _setProvider called.",
          "INVALID_REQUEST",
          "openai",
          "llm"
        );
      }
      const model = {
        modelId,
        capabilities: OPENAI_CAPABILITIES2,
        get provider() {
          return providerRef;
        },
        /**
         * Non-streaming completion: resolves the API key, POSTs the
         * transformed request, surfaces provider-reported failures as
         * UPPError, and maps the payload to the unified response shape.
         */
        async complete(request) {
          const apiKey = await resolveApiKey(
            request.config,
            "OPENAI_API_KEY",
            "openai",
            "llm"
          );
          const baseUrl = request.config.baseUrl ?? OPENAI_RESPONSES_API_URL;
          const body = transformRequest2(request, modelId);
          const response = await doFetch(
            baseUrl,
            {
              method: "POST",
              headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${apiKey}`
              },
              body: JSON.stringify(body),
              signal: request.signal
            },
            request.config,
            "openai",
            "llm"
          );
          const data = await response.json();
          if (data.status === "failed" && data.error) {
            throw new UPPError(
              data.error.message,
              "PROVIDER_ERROR",
              "openai",
              "llm"
            );
          }
          return transformResponse2(data);
        },
        /**
         * Streaming completion: yields unified stream events as SSE data
         * arrives; `response` resolves with the accumulated final response
         * when the stream ends, or rejects with the same error the iterator
         * throws.
         */
        stream(request) {
          const state = createStreamState2();
          let responseResolve;
          let responseReject;
          const responsePromise = new Promise((resolve, reject) => {
            responseResolve = resolve;
            responseReject = reject;
          });
          // Mark the promise as handled so consumers that only iterate the
          // event stream do not cause an unhandled-rejection warning; callers
          // that await `response` still observe the rejection.
          responsePromise.catch(() => {});
          async function* generateEvents() {
            try {
              const apiKey = await resolveApiKey(
                request.config,
                "OPENAI_API_KEY",
                "openai",
                "llm"
              );
              const baseUrl = request.config.baseUrl ?? OPENAI_RESPONSES_API_URL;
              const body = transformRequest2(request, modelId);
              body.stream = true;
              const response = await doStreamFetch(
                baseUrl,
                {
                  method: "POST",
                  headers: {
                    "Content-Type": "application/json",
                    Authorization: `Bearer ${apiKey}`
                  },
                  body: JSON.stringify(body),
                  signal: request.signal
                },
                request.config,
                "openai",
                "llm"
              );
              if (!response.ok) {
                const error = await normalizeHttpError(response, "openai", "llm");
                responseReject(error);
                throw error;
              }
              if (!response.body) {
                const error = new UPPError(
                  "No response body for streaming request",
                  "PROVIDER_ERROR",
                  "openai",
                  "llm"
                );
                responseReject(error);
                throw error;
              }
              for await (const data of parseSSEStream(response.body)) {
                if (data === "[DONE]") {
                  continue;
                }
                if (typeof data === "object" && data !== null) {
                  const event = data;
                  // Provider-level error events abort the stream.
                  if (event.type === "error") {
                    const errorEvent = event;
                    const error = new UPPError(
                      errorEvent.error.message,
                      "PROVIDER_ERROR",
                      "openai",
                      "llm"
                    );
                    responseReject(error);
                    throw error;
                  }
                  const uppEvents = transformStreamEvent2(event, state);
                  for (const uppEvent of uppEvents) {
                    yield uppEvent;
                  }
                }
              }
              responseResolve(buildResponseFromState2(state));
            } catch (error) {
              // Safe even when already rejected above: a settled promise
              // ignores further reject calls.
              responseReject(error);
              throw error;
            }
          }
          return {
            [Symbol.asyncIterator]() {
              return generateEvents();
            },
            response: responsePromise
          };
        }
      };
      return model;
    }
  };
}
1298
+
1299
// src/providers/openai/index.ts
// Builds the callable `openai` provider: a function
// (modelId, options) => { modelId, provider } carrying non-writable `name`,
// `version`, and `modalities` properties, wired back into both LLM handlers.
function createOpenAIProvider() {
  // Which API surface `modalities.llm` resolves to; overwritten on every
  // provider call.
  let currentApiMode = "responses";
  const responsesHandler = createResponsesLLMHandler();
  const completionsHandler = createCompletionsLLMHandler();
  const fn = function(modelId, options) {
    const apiMode = options?.api ?? "responses";
    // NOTE(review): `currentApiMode` is state shared by all models created
    // from this provider — a later call with a different `api` option changes
    // which handler `modalities.llm` returns for earlier models too. Confirm
    // mixed-mode usage is intended before relying on it.
    currentApiMode = apiMode;
    return { modelId, provider };
  };
  const modalities = {
    // Resolved lazily so it reflects the most recent api mode.
    get llm() {
      return currentApiMode === "completions" ? completionsHandler : responsesHandler;
    }
  };
  Object.defineProperties(fn, {
    name: {
      value: "openai",
      writable: false,
      configurable: true
    },
    version: {
      value: "1.0.0",
      writable: false,
      configurable: true
    },
    modalities: {
      value: modalities,
      writable: false,
      configurable: true
    }
  });
  const provider = fn;
  // Hand each handler a reference to the finished provider so bind() works.
  responsesHandler._setProvider?.(provider);
  completionsHandler._setProvider?.(provider);
  return provider;
}
1336
// Shared default provider instance; uses the Responses API unless a call
// passes { api: "completions" }.
var openai = createOpenAIProvider();
export {
  openai
};
//# sourceMappingURL=index.js.map