@providerprotocol/ai 0.0.20 → 0.0.22

Files changed (54)
  1. package/dist/anthropic/index.d.ts +184 -14
  2. package/dist/anthropic/index.js +306 -107
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-P5IRTEM5.js → chunk-7WYBJPJJ.js} +2 -2
  5. package/dist/chunk-I2VHCGQE.js +49 -0
  6. package/dist/chunk-I2VHCGQE.js.map +1 -0
  7. package/dist/{chunk-UMKWXGO3.js → chunk-M4BMM5IB.js} +86 -2
  8. package/dist/chunk-M4BMM5IB.js.map +1 -0
  9. package/dist/{chunk-SKY2JLA7.js → chunk-MKDLXV4O.js} +1 -1
  10. package/dist/chunk-MKDLXV4O.js.map +1 -0
  11. package/dist/{chunk-Z7RBRCRN.js → chunk-NWS5IKNR.js} +37 -11
  12. package/dist/chunk-NWS5IKNR.js.map +1 -0
  13. package/dist/{chunk-U3FZWV4U.js → chunk-RFWLEFAB.js} +100 -43
  14. package/dist/chunk-RFWLEFAB.js.map +1 -0
  15. package/dist/{chunk-U4JJC2YX.js → chunk-RS7C25LS.js} +36 -11
  16. package/dist/chunk-RS7C25LS.js.map +1 -0
  17. package/dist/google/index.d.ts +35 -24
  18. package/dist/google/index.js +273 -99
  19. package/dist/google/index.js.map +1 -1
  20. package/dist/http/index.d.ts +3 -3
  21. package/dist/http/index.js +4 -4
  22. package/dist/index.d.ts +103 -38
  23. package/dist/index.js +346 -153
  24. package/dist/index.js.map +1 -1
  25. package/dist/ollama/index.d.ts +14 -16
  26. package/dist/ollama/index.js +68 -16
  27. package/dist/ollama/index.js.map +1 -1
  28. package/dist/openai/index.d.ts +25 -133
  29. package/dist/openai/index.js +208 -122
  30. package/dist/openai/index.js.map +1 -1
  31. package/dist/openrouter/index.d.ts +28 -53
  32. package/dist/openrouter/index.js +179 -72
  33. package/dist/openrouter/index.js.map +1 -1
  34. package/dist/provider-DWEAzeM5.d.ts +1329 -0
  35. package/dist/proxy/index.d.ts +2 -3
  36. package/dist/proxy/index.js +174 -17
  37. package/dist/proxy/index.js.map +1 -1
  38. package/dist/{retry-DR7YRJDz.d.ts → retry-DmPmqZL6.d.ts} +12 -3
  39. package/dist/{stream-DRHy6q1a.d.ts → stream-DbkLOIbJ.d.ts} +15 -5
  40. package/dist/xai/index.d.ts +16 -88
  41. package/dist/xai/index.js +167 -86
  42. package/dist/xai/index.js.map +1 -1
  43. package/package.json +4 -1
  44. package/dist/chunk-MSR5P65T.js +0 -39
  45. package/dist/chunk-MSR5P65T.js.map +0 -1
  46. package/dist/chunk-SKY2JLA7.js.map +0 -1
  47. package/dist/chunk-U3FZWV4U.js.map +0 -1
  48. package/dist/chunk-U4JJC2YX.js.map +0 -1
  49. package/dist/chunk-UMKWXGO3.js.map +0 -1
  50. package/dist/chunk-Z7RBRCRN.js.map +0 -1
  51. package/dist/content-DEl3z_W2.d.ts +0 -276
  52. package/dist/image-Dhq-Yuq4.d.ts +0 -456
  53. package/dist/provider-BBMBZuGn.d.ts +0 -570
  54. package/dist/{chunk-P5IRTEM5.js.map → chunk-7WYBJPJJ.js.map} +0 -0
package/dist/openai/index.js
@@ -1,24 +1,30 @@
  import {
  Image
  } from "../chunk-WAKD3OO5.js";
+ import {
+ parseJsonResponse
+ } from "../chunk-I2VHCGQE.js";
  import {
  AssistantMessage,
+ createProvider,
+ generateId,
  isAssistantMessage,
  isToolResultMessage,
  isUserMessage
- } from "../chunk-UMKWXGO3.js";
+ } from "../chunk-M4BMM5IB.js";
  import {
  parseSSEStream
- } from "../chunk-Z7RBRCRN.js";
+ } from "../chunk-NWS5IKNR.js";
  import {
  resolveApiKey
- } from "../chunk-P5IRTEM5.js";
+ } from "../chunk-7WYBJPJJ.js";
  import {
  UPPError,
  doFetch,
  doStreamFetch,
- normalizeHttpError
- } from "../chunk-U3FZWV4U.js";
+ normalizeHttpError,
+ toError
+ } from "../chunk-RFWLEFAB.js";

  // src/providers/openai/transform.completions.ts
  function transformRequest(request, modelId) {
@@ -54,9 +60,40 @@ function transformRequest(request, modelId) {
  return openaiRequest;
  }
  function normalizeSystem(system) {
- if (!system) return void 0;
+ if (system === void 0 || system === null) return void 0;
  if (typeof system === "string") return system;
- return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+ if (!Array.isArray(system)) {
+ throw new UPPError(
+ "System prompt must be a string or an array of text blocks",
+ "INVALID_REQUEST",
+ "openai",
+ "llm"
+ );
+ }
+ const texts = [];
+ for (const block of system) {
+ if (!block || typeof block !== "object" || !("text" in block)) {
+ throw new UPPError(
+ "System prompt array must contain objects with a text field",
+ "INVALID_REQUEST",
+ "openai",
+ "llm"
+ );
+ }
+ const textValue = block.text;
+ if (typeof textValue !== "string") {
+ throw new UPPError(
+ "System prompt text must be a string",
+ "INVALID_REQUEST",
+ "openai",
+ "llm"
+ );
+ }
+ if (textValue.length > 0) {
+ texts.push(textValue);
+ }
+ }
+ return texts.length > 0 ? texts.join("\n\n") : void 0;
  }
  function transformMessages(messages, system) {
  const result = [];
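Note: normalizeSystem (and its duplicate normalizeSystem2 in the responses transform further down) previously coerced malformed blocks with `block.text ?? ""`, so bad input degraded silently into empty strings. The rewrite validates the shape and throws. Illustrative calls against the function exactly as diffed:

    // Accepted:
    normalizeSystem("Be terse.");                              // => "Be terse."
    normalizeSystem([{ text: "Rule 1" }, { text: "Rule 2" }]); // => "Rule 1\n\nRule 2"
    normalizeSystem([{ text: "" }]);                           // => undefined (empty texts dropped)

    // Previously coerced to empty strings, now throw a UPPError
    // with code "INVALID_REQUEST":
    normalizeSystem({ text: "not an array" });
    normalizeSystem([{ text: 42 }]);
    normalizeSystem(["bare string inside array"]);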
@@ -224,7 +261,7 @@ function transformResponse(data) {
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: data.id,
+ id: data.id || generateId(),
  metadata: {
  openai: {
  model: data.model,
@@ -373,11 +410,12 @@ function buildResponseFromState(state) {
  arguments: args
  });
  }
+ const messageId = state.id || generateId();
  const message = new AssistantMessage(
  textContent,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: state.id,
+ id: messageId,
  metadata: {
  openai: {
  model: state.model,
@@ -482,7 +520,7 @@ function createCompletionsLLMHandler() {
  "openai",
  "llm"
  );
- const data = await response.json();
+ const data = await parseJsonResponse(response, "openai", "llm");
  return transformResponse(data);
  },
  stream(request) {
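Note: every bare `response.json()` in this file (completions, responses, embedding, and image paths) is replaced by `parseJsonResponse(response, provider, modality)`, imported from the new chunk-I2VHCGQE.js (+49 lines, not shown in this diff). A plausible minimal sketch of such a helper, assuming its job is to surface malformed JSON bodies as UPPErrors instead of bare SyntaxErrors; the error code here is an assumption:

    // Hypothetical reconstruction -- the real implementation lives in chunk-I2VHCGQE.js.
    async function parseJsonResponse(response, provider, modality) {
      const raw = await response.text();
      try {
        return JSON.parse(raw);
      } catch {
        throw new UPPError(
          `Failed to parse response body as JSON: ${raw.slice(0, 200)}`, // truncated body for context
          "PARSE_ERROR", // assumed code; only the call sites appear in this diff
          provider,
          modality
        );
      }
    }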
@@ -507,7 +545,8 @@ function createCompletionsLLMHandler() {
  body.stream_options = { include_usage: true };
  const headers = {
  "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
+ Authorization: `Bearer ${apiKey}`,
+ Accept: "text/event-stream"
  };
  if (request.config.headers) {
  for (const [key, value] of Object.entries(request.config.headers)) {
@@ -568,8 +607,9 @@ function createCompletionsLLMHandler() {
  }
  responseResolve(buildResponseFromState(state));
  } catch (error) {
- responseReject(error);
- throw error;
+ const err = toError(error);
+ responseReject(err);
+ throw err;
  }
  }
  return {
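Note: both streaming handlers now pass caught values through `toError` before rejecting the deferred response promise, so consumers always receive a real Error even when something throws a string or a plain object. `toError` ships in chunk-RFWLEFAB.js and its body is not in this diff; a minimal sketch of the usual shape of such a normalizer:

    // Hypothetical sketch -- the actual toError is in chunk-RFWLEFAB.js.
    function toError(value) {
      if (value instanceof Error) return value;
      if (typeof value === "string") return new Error(value);
      try {
        return new Error(JSON.stringify(value));
      } catch {
        return new Error(String(value)); // fallback for circular structures
      }
    }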
@@ -588,15 +628,17 @@ function createCompletionsLLMHandler() {

  // src/providers/openai/transform.responses.ts
  function transformRequest2(request, modelId) {
  const params = request.params ?? {};
- const builtInTools = params.tools;
- const { tools: _paramsTools, ...restParams } = params;
+ const { tools: builtInTools, ...restParams } = params;
  const openaiRequest = {
  ...restParams,
  model: modelId,
  input: transformInputItems(request.messages, request.system)
  };
  const functionTools = request.tools?.map(transformTool2) ?? [];
- const allTools = [...functionTools, ...builtInTools ?? []];
+ const allTools = [
+ ...functionTools,
+ ...builtInTools ?? []
+ ];
  if (allTools.length > 0) {
  openaiRequest.tools = allTools;
  }
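Note: the old pair of lines read `params.tools` and then threw the same property away under the name `_paramsTools`; the rewrite makes one destructuring serve both purposes. Function tools from `request.tools` and provider built-in tools carried in `params.tools` are merged into a single `tools` array. An illustrative request; both tool shapes are assumptions, not taken from this diff:

    const request = {
      // transformed by transformTool2 into an OpenAI function tool:
      tools: [{ name: "get_weather", description: "Look up weather", parameters: {} }],
      // passed through verbatim as a built-in tool (shape assumed):
      params: { tools: [{ type: "web_search" }] }
    };
    // transformRequest2 then yields:
    //   openaiRequest.tools = [<transformed get_weather>, { type: "web_search" }]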
@@ -623,9 +665,40 @@ function transformRequest2(request, modelId) {
  return openaiRequest;
  }
  function normalizeSystem2(system) {
- if (!system) return void 0;
+ if (system === void 0 || system === null) return void 0;
  if (typeof system === "string") return system;
- return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+ if (!Array.isArray(system)) {
+ throw new UPPError(
+ "System prompt must be a string or an array of text blocks",
+ "INVALID_REQUEST",
+ "openai",
+ "llm"
+ );
+ }
+ const texts = [];
+ for (const block of system) {
+ if (!block || typeof block !== "object" || !("text" in block)) {
+ throw new UPPError(
+ "System prompt array must contain objects with a text field",
+ "INVALID_REQUEST",
+ "openai",
+ "llm"
+ );
+ }
+ const textValue = block.text;
+ if (typeof textValue !== "string") {
+ throw new UPPError(
+ "System prompt text must be a string",
+ "INVALID_REQUEST",
+ "openai",
+ "llm"
+ );
+ }
+ if (textValue.length > 0) {
+ texts.push(textValue);
+ }
+ }
+ return texts.length > 0 ? texts.join("\n\n") : void 0;
  }
  function transformInputItems(messages, system) {
  const result = [];
@@ -816,24 +889,26 @@ function transformResponse2(data) {
  } else if (item.type === "image_generation_call") {
  const imageGen = item;
  if (imageGen.result) {
+ const mimeType = imageGen.mime_type ?? "image/png";
  content.push({
  type: "image",
- mimeType: "image/png",
+ mimeType,
  source: { type: "base64", data: imageGen.result }
  });
  }
  }
  }
+ const responseId = data.id || generateId();
  const message = new AssistantMessage(
  content,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: data.id,
+ id: responseId,
  metadata: {
  openai: {
  model: data.model,
  status: data.status,
- response_id: data.id,
+ response_id: responseId,
  functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
  }
  }
@@ -872,6 +947,7 @@ function createStreamState2() {
  toolCalls: /* @__PURE__ */ new Map(),
  images: [],
  status: "in_progress",
+ incompleteReason: void 0,
  inputTokens: 0,
  outputTokens: 0,
  cacheReadTokens: 0,
@@ -880,26 +956,35 @@
  }
  function transformStreamEvent2(event, state) {
  const events = [];
+ const updateFromResponse = (response) => {
+ state.id = response.id || state.id;
+ state.model = response.model || state.model;
+ state.status = response.status;
+ if (response.incomplete_details?.reason) {
+ state.incompleteReason = response.incomplete_details.reason;
+ } else if (response.status !== "incomplete") {
+ state.incompleteReason = void 0;
+ }
+ if (response.usage) {
+ state.inputTokens = response.usage.input_tokens;
+ state.outputTokens = response.usage.output_tokens;
+ state.cacheReadTokens = response.usage.input_tokens_details?.cached_tokens ?? 0;
+ }
+ };
  switch (event.type) {
  case "response.created":
- state.id = event.response.id;
- state.model = event.response.model;
+ updateFromResponse(event.response);
  events.push({ type: "message_start", index: 0, delta: {} });
  break;
  case "response.in_progress":
- state.status = "in_progress";
+ updateFromResponse(event.response);
  break;
  case "response.completed":
- state.status = "completed";
- if (event.response.usage) {
- state.inputTokens = event.response.usage.input_tokens;
- state.outputTokens = event.response.usage.output_tokens;
- state.cacheReadTokens = event.response.usage.input_tokens_details?.cached_tokens ?? 0;
- }
+ updateFromResponse(event.response);
  events.push({ type: "message_stop", index: 0, delta: {} });
  break;
  case "response.failed":
- state.status = "failed";
+ updateFromResponse(event.response);
  events.push({ type: "message_stop", index: 0, delta: {} });
  break;
  case "response.output_item.added":
@@ -938,7 +1023,10 @@ function transformStreamEvent2(event, state) {
  } else if (event.item.type === "image_generation_call") {
  const imageGen = event.item;
  if (imageGen.result) {
- state.images.push(imageGen.result);
+ state.images.push({
+ data: imageGen.result,
+ mimeType: imageGen.mime_type ?? "image/png"
+ });
  }
  }
  events.push({
@@ -1025,7 +1113,10 @@
  function buildResponseFromState2(state) {
  const content = [];
  let structuredData;
- for (const [, text] of state.textByIndex) {
+ const orderedTextEntries = [...state.textByIndex.entries()].sort(
+ ([leftIndex], [rightIndex]) => leftIndex - rightIndex
+ );
+ for (const [, text] of orderedTextEntries) {
  if (text) {
  content.push({ type: "text", text });
  if (structuredData === void 0) {
@@ -1039,13 +1130,16 @@ function buildResponseFromState2(state) {
  for (const imageData of state.images) {
  content.push({
  type: "image",
- mimeType: "image/png",
- source: { type: "base64", data: imageData }
+ mimeType: imageData.mimeType,
+ source: { type: "base64", data: imageData.data }
  });
  }
  const toolCalls = [];
  const functionCallItems = [];
- for (const [, toolCall] of state.toolCalls) {
+ const orderedToolEntries = [...state.toolCalls.entries()].sort(
+ ([leftIndex], [rightIndex]) => leftIndex - rightIndex
+ );
+ for (const [, toolCall] of orderedToolEntries) {
  let args = {};
  if (toolCall.arguments) {
  try {
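Note: textByIndex and toolCalls are Maps keyed by output index, and JavaScript Maps iterate in insertion order, so a response whose stream events arrived out of index order could previously be assembled out of sequence. Sorting the entries by numeric key makes assembly deterministic:

    // Insertion order is 2, 0, 1; sorting by key restores output order.
    const byIndex = new Map([[2, "c"], [0, "a"], [1, "b"]]);
    const ordered = [...byIndex.entries()].sort(([l], [r]) => l - r);
    console.log(ordered.map(([, text]) => text)); // ["a", "b", "c"]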
@@ -1056,6 +1150,9 @@ function buildResponseFromState2(state) {
  const itemId = toolCall.itemId ?? "";
  const callId = toolCall.callId ?? toolCall.itemId ?? "";
  const name = toolCall.name ?? "";
+ if (!name || !callId) {
+ continue;
+ }
  toolCalls.push({
  toolCallId: callId,
  toolName: name,
@@ -1070,16 +1167,17 @@ function buildResponseFromState2(state) {
  });
  }
  }
+ const responseId = state.id || generateId();
  const message = new AssistantMessage(
  content,
  toolCalls.length > 0 ? toolCalls : void 0,
  {
- id: state.id,
+ id: responseId,
  metadata: {
  openai: {
  model: state.model,
  status: state.status,
- response_id: state.id,
+ response_id: responseId,
  functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
  }
  }
@@ -1095,6 +1193,8 @@ function buildResponseFromState2(state) {
  let stopReason = "end_turn";
  if (state.status === "completed") {
  stopReason = toolCalls.length > 0 ? "tool_use" : "end_turn";
+ } else if (state.status === "incomplete") {
+ stopReason = state.incompleteReason === "max_output_tokens" ? "max_tokens" : "end_turn";
  } else if (state.status === "failed") {
  stopReason = "error";
  }
@@ -1172,7 +1272,7 @@ function createResponsesLLMHandler() {
  "openai",
  "llm"
  );
- const data = await response.json();
+ const data = await parseJsonResponse(response, "openai", "llm");
  if (data.status === "failed" && data.error) {
  throw new UPPError(
  data.error.message,
@@ -1204,7 +1304,8 @@ function createResponsesLLMHandler() {
  body.stream = true;
  const headers = {
  "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
+ Authorization: `Bearer ${apiKey}`,
+ Accept: "text/event-stream"
  };
  if (request.config.headers) {
  for (const [key, value] of Object.entries(request.config.headers)) {
@@ -1265,8 +1366,9 @@ function createResponsesLLMHandler() {
  }
  responseResolve(buildResponseFromState2(state));
  } catch (error) {
- responseReject(error);
- throw error;
+ const err = toError(error);
+ responseReject(err);
+ throw err;
  }
  }
  return {
@@ -1340,18 +1442,10 @@ function createEmbeddingHandler() {
  );
  });
  const body = {
+ ...request.params,
  model: modelId,
  input: inputTexts
  };
- if (request.params?.dimensions !== void 0) {
- body.dimensions = request.params.dimensions;
- }
- if (request.params?.encoding_format !== void 0) {
- body.encoding_format = request.params.encoding_format;
- }
- if (request.params?.user !== void 0) {
- body.user = request.params.user;
- }
  const headers = {
  "Content-Type": "application/json",
  Authorization: `Bearer ${apiKey}`
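Note: the field-by-field allowlist is gone; `request.params` is spread first, so any provider parameter now passes through. Because later properties win in an object literal, the canonical fields written after the spread cannot be clobbered. The image generate and stream bodies below get the same treatment:

    const modelId = "text-embedding-3-small";   // example value
    const inputTexts = ["hello"];
    const params = { model: "attempted-override", dimensions: 256 };
    const body = { ...params, model: modelId, input: inputTexts };
    // body.model === "text-embedding-3-small"; body.dimensions === 256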
@@ -1369,7 +1463,7 @@ function createEmbeddingHandler() {
  body: JSON.stringify(body),
  signal: request.signal
  }, request.config, "openai", "embedding");
- const data = await response.json();
+ const data = await parseJsonResponse(response, "openai", "embedding");
  return {
  embeddings: data.data.map((d) => ({
  vector: d.embedding,
@@ -1444,22 +1538,10 @@ async function executeGenerate(modelId, request) {
  );
  const baseUrl = request.config.baseUrl ? `${request.config.baseUrl.replace(/\/$/, "")}/v1/images/generations` : OPENAI_IMAGES_API_URL;
  const body = {
+ ...request.params,
  model: modelId,
  prompt: request.prompt
  };
- if (request.params) {
- const { n, size, quality, style, background, output_format, output_compression, response_format, moderation, user } = request.params;
- if (n !== void 0) body.n = n;
- if (size !== void 0) body.size = size;
- if (quality !== void 0) body.quality = quality;
- if (style !== void 0) body.style = style;
- if (background !== void 0) body.background = background;
- if (output_format !== void 0) body.output_format = output_format;
- if (output_compression !== void 0) body.output_compression = output_compression;
- if (response_format !== void 0) body.response_format = response_format;
- if (moderation !== void 0) body.moderation = moderation;
- if (user !== void 0) body.user = user;
- }
  const headers = {
  "Content-Type": "application/json",
  Authorization: `Bearer ${apiKey}`
@@ -1477,7 +1559,7 @@ async function executeGenerate(modelId, request) {
  body: JSON.stringify(body),
  signal: request.signal
  }, request.config, "openai", "image");
- const data = await response.json();
+ const data = await parseJsonResponse(response, "openai", "image");
  return transformResponse3(data);
  }
  async function executeEdit(modelId, request) {
@@ -1522,7 +1604,7 @@ async function executeEdit(modelId, request) {
  body: formData,
  signal: request.signal
  }, request.config, "openai", "image");
- const data = await response.json();
+ const data = await parseJsonResponse(response, "openai", "image");
  return transformResponse3(data);
  }
  function executeStream(modelId, request) {
@@ -1543,21 +1625,11 @@ function executeStream(modelId, request) {
  );
  const baseUrl = request.config.baseUrl ? `${request.config.baseUrl.replace(/\/$/, "")}/v1/images/generations` : OPENAI_IMAGES_API_URL;
  const body = {
+ ...request.params,
  model: modelId,
  prompt: request.prompt,
  stream: true
  };
- if (request.params) {
- const { n, size, quality, background, output_format, partial_images, moderation, user } = request.params;
- if (n !== void 0) body.n = n;
- if (size !== void 0) body.size = size;
- if (quality !== void 0) body.quality = quality;
- if (background !== void 0) body.background = background;
- if (output_format !== void 0) body.output_format = output_format;
- if (partial_images !== void 0) body.partial_images = partial_images;
- if (moderation !== void 0) body.moderation = moderation;
- if (user !== void 0) body.user = user;
- }
  const headers = {
  "Content-Type": "application/json",
  Authorization: `Bearer ${apiKey}`,
@@ -1631,6 +1703,46 @@ function executeStream(modelId, request) {
  }
  }
  }
+ const remaining = decoder.decode();
+ if (remaining) {
+ buffer += remaining;
+ const lines = buffer.split("\n");
+ buffer = lines.pop() ?? "";
+ for (const line of lines) {
+ if (line.startsWith("data: ")) {
+ const data = line.slice(6);
+ if (data === "[DONE]") {
+ continue;
+ }
+ try {
+ const chunk = JSON.parse(data);
+ if (chunk.type === "image_generation.partial_image" && chunk.data?.b64_json) {
+ const previewImage = Image.fromBase64(chunk.data.b64_json, "image/png");
+ yield {
+ type: "preview",
+ image: previewImage,
+ index: chunk.index ?? 0
+ };
+ } else if (chunk.type === "image_generation.completed" && chunk.data) {
+ const image = chunk.data.b64_json ? Image.fromBase64(chunk.data.b64_json, "image/png") : Image.fromUrl(chunk.data.url ?? "", "image/png");
+ const genImage = {
+ image,
+ metadata: chunk.data.revised_prompt ? { revised_prompt: chunk.data.revised_prompt } : void 0
+ };
+ generatedImages.push(genImage);
+ yield {
+ type: "complete",
+ image: genImage,
+ index: chunk.index ?? generatedImages.length - 1
+ };
+ } else if (chunk.type === "response.done") {
+ responseMetadata = chunk.data;
+ }
+ } catch {
+ }
+ }
+ }
+ }
  resolveResponse({
  images: generatedImages,
  metadata: responseMetadata,
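Note: this added block runs after the reader loop ends. Calling `decoder.decode()` with no argument flushes a TextDecoder that has been fed chunks with `{ stream: true }`, emitting any code points that were split across the final network chunk; the flushed text is then run through the same SSE line handling so trailing events are no longer dropped. The underlying pattern:

    // Streaming text decode: feed chunks with { stream: true }, flush once at EOF.
    const decoder = new TextDecoder();
    let buffer = "";
    for await (const chunk of byteChunks) {      // byteChunks: any async iterable of Uint8Array (assumed)
      buffer += decoder.decode(chunk, { stream: true });
      // ...split buffer on "\n", process complete lines, keep the partial tail...
    }
    buffer += decoder.decode();                  // flush: emits buffered partial code points
    // ...process any complete lines that remain in buffer...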
@@ -1639,8 +1751,9 @@ function executeStream(modelId, request) {
  }
  });
  } catch (error) {
- rejectResponse(error);
- throw error;
+ const err = toError(error);
+ rejectResponse(err);
+ throw err;
  }
  }
  const generator = generateStream();
@@ -1749,49 +1862,22 @@ var tools = {
  };

  // src/providers/openai/index.ts
- function createOpenAIProvider() {
- let currentApiMode = "responses";
- const responsesHandler = createResponsesLLMHandler();
- const completionsHandler = createCompletionsLLMHandler();
- const embeddingHandler = createEmbeddingHandler();
- const imageHandler = createImageHandler();
- const fn = function(modelId, options) {
- const apiMode = options?.api ?? "responses";
- currentApiMode = apiMode;
- return { modelId, provider };
- };
- const modalities = {
- get llm() {
- return currentApiMode === "completions" ? completionsHandler : responsesHandler;
- },
- embedding: embeddingHandler,
- image: imageHandler
- };
- Object.defineProperties(fn, {
- name: {
- value: "openai",
- writable: false,
- configurable: true
- },
- version: {
- value: "1.0.0",
- writable: false,
- configurable: true
+ var openai = createProvider({
+ name: "openai",
+ version: "1.0.0",
+ handlers: {
+ llm: {
+ handlers: {
+ responses: createResponsesLLMHandler(),
+ completions: createCompletionsLLMHandler()
+ },
+ defaultMode: "responses",
+ getMode: (options) => options?.api ?? "responses"
  },
- modalities: {
- value: modalities,
- writable: false,
- configurable: true
- }
- });
- const provider = fn;
- responsesHandler._setProvider?.(provider);
- completionsHandler._setProvider?.(provider);
- embeddingHandler._setProvider?.(provider);
- imageHandler._setProvider?.(provider);
- return provider;
- }
- var openai = createOpenAIProvider();
+ embedding: createEmbeddingHandler(),
+ image: createImageHandler()
+ }
+ });
  export {
  codeInterpreterTool,
  computerTool,
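Note: this final hunk is the most significant change in the file. The old factory kept `currentApiMode` in a closure shared by every model handle, so `openai("a", { api: "completions" })` followed by `openai("b")` flipped the first handle's `llm` modality back to the responses handler; it also had to wire `_setProvider` into each handler by hand. The shared `createProvider` (imported from chunk-M4BMM5IB.js above) instead selects the LLM handler per call via `getMode`, with no mutable mode state. Presumed call-side usage, inferred from the old factory's signature; the import path and model name are placeholders:

    import { openai } from "@providerprotocol/ai/openai"; // path assumed from the dist layout

    const viaResponses = openai("gpt-4o-mini");                            // defaultMode "responses"
    const viaCompletions = openai("gpt-4o-mini", { api: "completions" });  // per-call, no shared state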