@providerprotocol/ai 0.0.21 → 0.0.22

This diff compares the publicly released contents of the two package versions as published to their registry. It is provided for informational purposes only.
Files changed (46)
  1. package/dist/anthropic/index.d.ts +1 -1
  2. package/dist/anthropic/index.js +100 -29
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-Y3GBJNA2.js → chunk-7WYBJPJJ.js} +2 -2
  5. package/dist/chunk-I2VHCGQE.js +49 -0
  6. package/dist/chunk-I2VHCGQE.js.map +1 -0
  7. package/dist/{chunk-SKY2JLA7.js → chunk-MKDLXV4O.js} +1 -1
  8. package/dist/chunk-MKDLXV4O.js.map +1 -0
  9. package/dist/{chunk-Z7RBRCRN.js → chunk-NWS5IKNR.js} +37 -11
  10. package/dist/chunk-NWS5IKNR.js.map +1 -0
  11. package/dist/{chunk-EDENPF3E.js → chunk-RFWLEFAB.js} +96 -42
  12. package/dist/chunk-RFWLEFAB.js.map +1 -0
  13. package/dist/{chunk-Z4ILICF5.js → chunk-RS7C25LS.js} +35 -10
  14. package/dist/chunk-RS7C25LS.js.map +1 -0
  15. package/dist/google/index.d.ts +20 -6
  16. package/dist/google/index.js +261 -65
  17. package/dist/google/index.js.map +1 -1
  18. package/dist/http/index.d.ts +3 -3
  19. package/dist/http/index.js +4 -4
  20. package/dist/index.d.ts +7 -5
  21. package/dist/index.js +286 -119
  22. package/dist/index.js.map +1 -1
  23. package/dist/ollama/index.d.ts +1 -1
  24. package/dist/ollama/index.js +66 -12
  25. package/dist/ollama/index.js.map +1 -1
  26. package/dist/openai/index.d.ts +1 -1
  27. package/dist/openai/index.js +183 -43
  28. package/dist/openai/index.js.map +1 -1
  29. package/dist/openrouter/index.d.ts +1 -1
  30. package/dist/openrouter/index.js +161 -31
  31. package/dist/openrouter/index.js.map +1 -1
  32. package/dist/{provider-DGQHYE6I.d.ts → provider-DWEAzeM5.d.ts} +11 -1
  33. package/dist/proxy/index.d.ts +2 -2
  34. package/dist/proxy/index.js +171 -12
  35. package/dist/proxy/index.js.map +1 -1
  36. package/dist/{retry-Pcs3hnbu.d.ts → retry-DmPmqZL6.d.ts} +11 -2
  37. package/dist/{stream-Di9acos2.d.ts → stream-DbkLOIbJ.d.ts} +15 -5
  38. package/dist/xai/index.d.ts +1 -1
  39. package/dist/xai/index.js +139 -30
  40. package/dist/xai/index.js.map +1 -1
  41. package/package.json +1 -1
  42. package/dist/chunk-EDENPF3E.js.map +0 -1
  43. package/dist/chunk-SKY2JLA7.js.map +0 -1
  44. package/dist/chunk-Z4ILICF5.js.map +0 -1
  45. package/dist/chunk-Z7RBRCRN.js.map +0 -1
  46. package/dist/{chunk-Y3GBJNA2.js.map → chunk-7WYBJPJJ.js.map} +0 -0
package/dist/openai/index.js
@@ -1,25 +1,30 @@
 import {
   Image
 } from "../chunk-WAKD3OO5.js";
+import {
+  parseJsonResponse
+} from "../chunk-I2VHCGQE.js";
 import {
   AssistantMessage,
   createProvider,
+  generateId,
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
 } from "../chunk-M4BMM5IB.js";
 import {
   parseSSEStream
-} from "../chunk-Z7RBRCRN.js";
+} from "../chunk-NWS5IKNR.js";
 import {
   resolveApiKey
-} from "../chunk-Y3GBJNA2.js";
+} from "../chunk-7WYBJPJJ.js";
 import {
   UPPError,
   doFetch,
   doStreamFetch,
-  normalizeHttpError
-} from "../chunk-EDENPF3E.js";
+  normalizeHttpError,
+  toError
+} from "../chunk-RFWLEFAB.js";
 
 // src/providers/openai/transform.completions.ts
 function transformRequest(request, modelId) {
@@ -55,9 +60,40 @@ function transformRequest(request, modelId) {
   return openaiRequest;
 }
 function normalizeSystem(system) {
-  if (!system) return void 0;
+  if (system === void 0 || system === null) return void 0;
   if (typeof system === "string") return system;
-  return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+  if (!Array.isArray(system)) {
+    throw new UPPError(
+      "System prompt must be a string or an array of text blocks",
+      "INVALID_REQUEST",
+      "openai",
+      "llm"
+    );
+  }
+  const texts = [];
+  for (const block of system) {
+    if (!block || typeof block !== "object" || !("text" in block)) {
+      throw new UPPError(
+        "System prompt array must contain objects with a text field",
+        "INVALID_REQUEST",
+        "openai",
+        "llm"
+      );
+    }
+    const textValue = block.text;
+    if (typeof textValue !== "string") {
+      throw new UPPError(
+        "System prompt text must be a string",
+        "INVALID_REQUEST",
+        "openai",
+        "llm"
+      );
+    }
+    if (textValue.length > 0) {
+      texts.push(textValue);
+    }
+  }
+  return texts.length > 0 ? texts.join("\n\n") : void 0;
 }
 function transformMessages(messages, system) {
   const result = [];
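
For reference, an illustrative sketch of how the stricter normalizeSystem behaves. The function is bundle-internal, so these calls are hypothetical; the before/after comments are inferred from the old and new bodies above:

    // Illustrative only — behavior implied by the diff above.
    normalizeSystem(undefined);                    // undefined, as before
    normalizeSystem("Be concise.");                // "Be concise.", as before
    normalizeSystem([{ text: "A" }, { text: "" }, { text: "B" }]);
    //                            // "A\n\nB": empty blocks dropped, as before
    normalizeSystem([{ text: "" }]);               // now undefined (0.0.21 returned "")
    normalizeSystem(42);                           // now a typed UPPError (0.0.21 threw a raw TypeError on .map)
    normalizeSystem([{ text: 7 }]);                // now a typed UPPError (0.0.21 silently dropped the block)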
@@ -225,7 +261,7 @@ function transformResponse(data) {
     textContent,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
-      id: data.id,
+      id: data.id || generateId(),
       metadata: {
         openai: {
           model: data.model,
@@ -374,11 +410,12 @@ function buildResponseFromState(state) {
       arguments: args
     });
   }
+  const messageId = state.id || generateId();
   const message = new AssistantMessage(
     textContent,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
-      id: state.id,
+      id: messageId,
       metadata: {
         openai: {
           model: state.model,
@@ -483,7 +520,7 @@ function createCompletionsLLMHandler() {
         "openai",
         "llm"
       );
-      const data = await response.json();
+      const data = await parseJsonResponse(response, "openai", "llm");
       return transformResponse(data);
     },
     stream(request) {
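
parseJsonResponse comes from the new chunk-I2VHCGQE.js (+49 lines), whose body is not part of this diff. A plausible sketch of what such a helper does — the error message and code below are assumptions, not the package's actual strings:

    // Hypothetical sketch; the real implementation lives in chunk-I2VHCGQE.js.
    async function parseJsonResponse(response, provider, modality) {
      const raw = await response.text();
      try {
        return JSON.parse(raw);
      } catch {
        // Surface a typed provider error instead of an opaque SyntaxError.
        throw new UPPError(
          `Failed to parse JSON response: ${raw.slice(0, 200)}`, // assumed message
          "API_ERROR",                                           // assumed code
          provider,
          modality
        );
      }
    }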
@@ -508,7 +545,8 @@ function createCompletionsLLMHandler() {
       body.stream_options = { include_usage: true };
       const headers = {
         "Content-Type": "application/json",
-        Authorization: `Bearer ${apiKey}`
+        Authorization: `Bearer ${apiKey}`,
+        Accept: "text/event-stream"
       };
       if (request.config.headers) {
         for (const [key, value] of Object.entries(request.config.headers)) {
@@ -569,8 +607,9 @@ function createCompletionsLLMHandler() {
         }
         responseResolve(buildResponseFromState(state));
       } catch (error) {
-        responseReject(error);
-        throw error;
+        const err = toError(error);
+        responseReject(err);
+        throw err;
       }
     }
     return {
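
toError is likewise imported from a shared chunk (chunk-RFWLEFAB.js) and its body is not shown here; the catch blocks now normalize whatever was thrown before rejecting the pending response promise. A plausible sketch of the pattern, under that assumption:

    // Hypothetical sketch; the real toError lives in chunk-RFWLEFAB.js.
    function toError(value) {
      if (value instanceof Error) return value;
      return new Error(typeof value === "string" ? value : JSON.stringify(value));
    }

The practical difference: the rejection and the rethrow now carry the same Error instance, so non-Error throw values (strings, plain objects) no longer reach callers unwrapped.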
@@ -626,9 +665,40 @@ function transformRequest2(request, modelId) {
   return openaiRequest;
 }
 function normalizeSystem2(system) {
-  if (!system) return void 0;
+  if (system === void 0 || system === null) return void 0;
   if (typeof system === "string") return system;
-  return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+  if (!Array.isArray(system)) {
+    throw new UPPError(
+      "System prompt must be a string or an array of text blocks",
+      "INVALID_REQUEST",
+      "openai",
+      "llm"
+    );
+  }
+  const texts = [];
+  for (const block of system) {
+    if (!block || typeof block !== "object" || !("text" in block)) {
+      throw new UPPError(
+        "System prompt array must contain objects with a text field",
+        "INVALID_REQUEST",
+        "openai",
+        "llm"
+      );
+    }
+    const textValue = block.text;
+    if (typeof textValue !== "string") {
+      throw new UPPError(
+        "System prompt text must be a string",
+        "INVALID_REQUEST",
+        "openai",
+        "llm"
+      );
+    }
+    if (textValue.length > 0) {
+      texts.push(textValue);
+    }
+  }
+  return texts.length > 0 ? texts.join("\n\n") : void 0;
 }
 function transformInputItems(messages, system) {
   const result = [];
@@ -819,24 +889,26 @@ function transformResponse2(data) {
     } else if (item.type === "image_generation_call") {
       const imageGen = item;
       if (imageGen.result) {
+        const mimeType = imageGen.mime_type ?? "image/png";
         content.push({
           type: "image",
-          mimeType: "image/png",
+          mimeType,
           source: { type: "base64", data: imageGen.result }
         });
       }
     }
   }
+  const responseId = data.id || generateId();
   const message = new AssistantMessage(
     content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
-      id: data.id,
+      id: responseId,
       metadata: {
         openai: {
           model: data.model,
           status: data.status,
-          response_id: data.id,
+          response_id: responseId,
           functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
         }
       }
@@ -875,6 +947,7 @@ function createStreamState2() {
     toolCalls: /* @__PURE__ */ new Map(),
     images: [],
     status: "in_progress",
+    incompleteReason: void 0,
     inputTokens: 0,
     outputTokens: 0,
     cacheReadTokens: 0,
@@ -883,26 +956,35 @@ function createStreamState2() {
 }
 function transformStreamEvent2(event, state) {
   const events = [];
+  const updateFromResponse = (response) => {
+    state.id = response.id || state.id;
+    state.model = response.model || state.model;
+    state.status = response.status;
+    if (response.incomplete_details?.reason) {
+      state.incompleteReason = response.incomplete_details.reason;
+    } else if (response.status !== "incomplete") {
+      state.incompleteReason = void 0;
+    }
+    if (response.usage) {
+      state.inputTokens = response.usage.input_tokens;
+      state.outputTokens = response.usage.output_tokens;
+      state.cacheReadTokens = response.usage.input_tokens_details?.cached_tokens ?? 0;
+    }
+  };
   switch (event.type) {
     case "response.created":
-      state.id = event.response.id;
-      state.model = event.response.model;
+      updateFromResponse(event.response);
       events.push({ type: "message_start", index: 0, delta: {} });
       break;
     case "response.in_progress":
-      state.status = "in_progress";
+      updateFromResponse(event.response);
       break;
     case "response.completed":
-      state.status = "completed";
-      if (event.response.usage) {
-        state.inputTokens = event.response.usage.input_tokens;
-        state.outputTokens = event.response.usage.output_tokens;
-        state.cacheReadTokens = event.response.usage.input_tokens_details?.cached_tokens ?? 0;
-      }
+      updateFromResponse(event.response);
       events.push({ type: "message_stop", index: 0, delta: {} });
       break;
     case "response.failed":
-      state.status = "failed";
+      updateFromResponse(event.response);
       events.push({ type: "message_stop", index: 0, delta: {} });
       break;
     case "response.output_item.added":
  case "response.output_item.added":
@@ -941,7 +1023,10 @@ function transformStreamEvent2(event, state) {
941
1023
  } else if (event.item.type === "image_generation_call") {
942
1024
  const imageGen = event.item;
943
1025
  if (imageGen.result) {
944
- state.images.push(imageGen.result);
1026
+ state.images.push({
1027
+ data: imageGen.result,
1028
+ mimeType: imageGen.mime_type ?? "image/png"
1029
+ });
945
1030
  }
946
1031
  }
947
1032
  events.push({
@@ -1028,7 +1113,10 @@ function transformStreamEvent2(event, state) {
 function buildResponseFromState2(state) {
   const content = [];
   let structuredData;
-  for (const [, text] of state.textByIndex) {
+  const orderedTextEntries = [...state.textByIndex.entries()].sort(
+    ([leftIndex], [rightIndex]) => leftIndex - rightIndex
+  );
+  for (const [, text] of orderedTextEntries) {
     if (text) {
       content.push({ type: "text", text });
       if (structuredData === void 0) {
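
The sort is needed because a Map iterates in insertion order, not key order, and streamed output items can arrive out of index order. A minimal illustration:

    // Maps iterate in insertion order, so out-of-order stream indices
    // would otherwise yield out-of-order text blocks.
    const textByIndex = new Map();
    textByIndex.set(2, "world");
    textByIndex.set(0, "hello ");
    [...textByIndex.entries()];                           // [[2, "world"], [0, "hello "]]
    [...textByIndex.entries()].sort(([a], [b]) => a - b); // [[0, "hello "], [2, "world"]]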
@@ -1042,13 +1130,16 @@ function buildResponseFromState2(state) {
   for (const imageData of state.images) {
     content.push({
       type: "image",
-      mimeType: "image/png",
-      source: { type: "base64", data: imageData }
+      mimeType: imageData.mimeType,
+      source: { type: "base64", data: imageData.data }
     });
   }
   const toolCalls = [];
   const functionCallItems = [];
-  for (const [, toolCall] of state.toolCalls) {
+  const orderedToolEntries = [...state.toolCalls.entries()].sort(
+    ([leftIndex], [rightIndex]) => leftIndex - rightIndex
+  );
+  for (const [, toolCall] of orderedToolEntries) {
     let args = {};
     if (toolCall.arguments) {
       try {
@@ -1059,6 +1150,9 @@ function buildResponseFromState2(state) {
     const itemId = toolCall.itemId ?? "";
     const callId = toolCall.callId ?? toolCall.itemId ?? "";
     const name = toolCall.name ?? "";
+    if (!name || !callId) {
+      continue;
+    }
     toolCalls.push({
       toolCallId: callId,
       toolName: name,
@@ -1073,16 +1167,17 @@ function buildResponseFromState2(state) {
       });
     }
   }
+  const responseId = state.id || generateId();
   const message = new AssistantMessage(
     content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
-      id: state.id,
+      id: responseId,
       metadata: {
         openai: {
           model: state.model,
           status: state.status,
-          response_id: state.id,
+          response_id: responseId,
           functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
         }
       }
@@ -1098,6 +1193,8 @@ function buildResponseFromState2(state) {
   let stopReason = "end_turn";
   if (state.status === "completed") {
     stopReason = toolCalls.length > 0 ? "tool_use" : "end_turn";
+  } else if (state.status === "incomplete") {
+    stopReason = state.incompleteReason === "max_output_tokens" ? "max_tokens" : "end_turn";
   } else if (state.status === "failed") {
     stopReason = "error";
   }
@@ -1175,7 +1272,7 @@ function createResponsesLLMHandler() {
         "openai",
         "llm"
       );
-      const data = await response.json();
+      const data = await parseJsonResponse(response, "openai", "llm");
       if (data.status === "failed" && data.error) {
         throw new UPPError(
           data.error.message,
@@ -1207,7 +1304,8 @@ function createResponsesLLMHandler() {
       body.stream = true;
       const headers = {
         "Content-Type": "application/json",
-        Authorization: `Bearer ${apiKey}`
+        Authorization: `Bearer ${apiKey}`,
+        Accept: "text/event-stream"
       };
       if (request.config.headers) {
         for (const [key, value] of Object.entries(request.config.headers)) {
@@ -1268,8 +1366,9 @@ function createResponsesLLMHandler() {
         }
         responseResolve(buildResponseFromState2(state));
       } catch (error) {
-        responseReject(error);
-        throw error;
+        const err = toError(error);
+        responseReject(err);
+        throw err;
       }
     }
     return {
  return {
@@ -1364,7 +1463,7 @@ function createEmbeddingHandler() {
1364
1463
  body: JSON.stringify(body),
1365
1464
  signal: request.signal
1366
1465
  }, request.config, "openai", "embedding");
1367
- const data = await response.json();
1466
+ const data = await parseJsonResponse(response, "openai", "embedding");
1368
1467
  return {
1369
1468
  embeddings: data.data.map((d) => ({
1370
1469
  vector: d.embedding,
@@ -1460,7 +1559,7 @@ async function executeGenerate(modelId, request) {
     body: JSON.stringify(body),
     signal: request.signal
   }, request.config, "openai", "image");
-  const data = await response.json();
+  const data = await parseJsonResponse(response, "openai", "image");
   return transformResponse3(data);
 }
 async function executeEdit(modelId, request) {
@@ -1505,7 +1604,7 @@ async function executeEdit(modelId, request) {
     body: formData,
     signal: request.signal
   }, request.config, "openai", "image");
-  const data = await response.json();
+  const data = await parseJsonResponse(response, "openai", "image");
   return transformResponse3(data);
 }
 function executeStream(modelId, request) {
@@ -1604,6 +1703,46 @@ function executeStream(modelId, request) {
           }
         }
       }
+      const remaining = decoder.decode();
+      if (remaining) {
+        buffer += remaining;
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          if (line.startsWith("data: ")) {
+            const data = line.slice(6);
+            if (data === "[DONE]") {
+              continue;
+            }
+            try {
+              const chunk = JSON.parse(data);
+              if (chunk.type === "image_generation.partial_image" && chunk.data?.b64_json) {
+                const previewImage = Image.fromBase64(chunk.data.b64_json, "image/png");
+                yield {
+                  type: "preview",
+                  image: previewImage,
+                  index: chunk.index ?? 0
+                };
+              } else if (chunk.type === "image_generation.completed" && chunk.data) {
+                const image = chunk.data.b64_json ? Image.fromBase64(chunk.data.b64_json, "image/png") : Image.fromUrl(chunk.data.url ?? "", "image/png");
+                const genImage = {
+                  image,
+                  metadata: chunk.data.revised_prompt ? { revised_prompt: chunk.data.revised_prompt } : void 0
+                };
+                generatedImages.push(genImage);
+                yield {
+                  type: "complete",
+                  image: genImage,
+                  index: chunk.index ?? generatedImages.length - 1
+                };
+              } else if (chunk.type === "response.done") {
+                responseMetadata = chunk.data;
+              }
+            } catch {
+            }
+          }
+        }
+      }
       resolveResponse({
         images: generatedImages,
         metadata: responseMetadata,
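
The added block handles bytes the TextDecoder was still buffering when the stream ended (calling decode() with no arguments flushes them) and re-runs the same SSE line parsing on that tail. The flush idiom in isolation, as a minimal sketch (the readAll wrapper is illustrative, not package code):

    // Standard WHATWG TextDecoder streaming idiom (not package-specific).
    async function readAll(byteChunks) {           // byteChunks: any AsyncIterable of Uint8Array
      const decoder = new TextDecoder();
      let text = "";
      for await (const bytes of byteChunks) {
        text += decoder.decode(bytes, { stream: true }); // may withhold a split multi-byte sequence
      }
      return text + decoder.decode();                    // flush whatever is still buffered
    }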
@@ -1612,8 +1751,9 @@ function executeStream(modelId, request) {
         }
       });
     } catch (error) {
-      rejectResponse(error);
-      throw error;
+      const err = toError(error);
+      rejectResponse(err);
+      throw err;
     }
   }
   const generator = generateStream();