@providerprotocol/ai 0.0.7 → 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@ import {
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-QUUX4G7U.js";
+} from "../chunk-W4BB4BG2.js";
 import {
   parseSSEStream
 } from "../chunk-X5G4EHL7.js";
@@ -549,13 +549,17 @@ function createCompletionsLLMHandler() {
 // src/providers/openai/transform.responses.ts
 function transformRequest2(request, modelId) {
   const params = request.params ?? {};
+  const builtInTools = params.tools;
+  const { tools: _paramsTools, ...restParams } = params;
   const openaiRequest = {
-    ...params,
+    ...restParams,
     model: modelId,
     input: transformInputItems(request.messages, request.system)
   };
-  if (request.tools && request.tools.length > 0) {
-    openaiRequest.tools = request.tools.map(transformTool2);
+  const functionTools = request.tools?.map(transformTool2) ?? [];
+  const allTools = [...functionTools, ...builtInTools ?? []];
+  if (allTools.length > 0) {
+    openaiRequest.tools = allTools;
   }
   if (request.structure) {
     const schema = {
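The hunk above reworks tool handling in transformRequest2: function tools declared on request.tools are still mapped through transformTool2, while provider built-in tools can now be passed verbatim under params.tools, and the two lists are merged. A minimal sketch of the resulting request, assuming the request shape visible in the hunk (myFunctionTool and the model id are placeholders, not APIs from this package):

// Sketch only; `myFunctionTool` is a hypothetical function-tool definition.
const request = {
  messages: [],
  tools: [myFunctionTool],                      // mapped through transformTool2
  params: { tools: [{ type: "web_search" }] }   // appended as-is after the mapped tools
};
// transformRequest2(request, "gpt-4o") would produce
// openaiRequest.tools === [transformTool2(myFunctionTool), { type: "web_search" }]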
@@ -718,7 +722,7 @@ function transformTool2(tool) {
   };
 }
 function transformResponse2(data) {
-  const textContent = [];
+  const content = [];
   const toolCalls = [];
   const functionCallItems = [];
   let hadRefusal = false;
@@ -726,17 +730,17 @@ function transformResponse2(data) {
   for (const item of data.output) {
     if (item.type === "message") {
       const messageItem = item;
-      for (const content of messageItem.content) {
-        if (content.type === "output_text") {
-          textContent.push({ type: "text", text: content.text });
+      for (const part of messageItem.content) {
+        if (part.type === "output_text") {
+          content.push({ type: "text", text: part.text });
           if (structuredData === void 0) {
             try {
-              structuredData = JSON.parse(content.text);
+              structuredData = JSON.parse(part.text);
             } catch {
             }
           }
-        } else if (content.type === "refusal") {
-          textContent.push({ type: "text", text: content.refusal });
+        } else if (part.type === "refusal") {
+          content.push({ type: "text", text: part.refusal });
           hadRefusal = true;
         }
       }
@@ -758,10 +762,19 @@ function transformResponse2(data) {
         name: functionCall.name,
         arguments: functionCall.arguments
       });
+    } else if (item.type === "image_generation_call") {
+      const imageGen = item;
+      if (imageGen.result) {
+        content.push({
+          type: "image",
+          mimeType: "image/png",
+          source: { type: "base64", data: imageGen.result }
+        });
+      }
     }
   }
   const message = new AssistantMessage(
-    textContent,
+    content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
       id: data.id,
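With this hunk, completed image_generation_call output items are surfaced as image content blocks on the assistant message instead of being dropped. A hedged sketch of consuming them; the block shape mirrors the hunk, while the surrounding I/O (and the `message` variable) is illustrative:

import { writeFile } from "node:fs/promises";

for (const block of message.content) {
  if (block.type === "image" && block.source.type === "base64") {
    // mimeType is hard-coded to "image/png" by the transform above
    await writeFile("generated.png", Buffer.from(block.source.data, "base64"));
  }
}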
@@ -769,7 +782,6 @@ function transformResponse2(data) {
       openai: {
         model: data.model,
         status: data.status,
-        // Store response_id for multi-turn tool calling
         response_id: data.id,
         functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
       }
@@ -805,6 +817,7 @@ function createStreamState2() {
     model: "",
     textByIndex: /* @__PURE__ */ new Map(),
     toolCalls: /* @__PURE__ */ new Map(),
+    images: [],
     status: "in_progress",
     inputTokens: 0,
     outputTokens: 0,
@@ -867,6 +880,11 @@ function transformStreamEvent2(event, state) {
        existing.arguments = functionCall.arguments;
      }
      state.toolCalls.set(event.output_index, existing);
+  } else if (event.item.type === "image_generation_call") {
+    const imageGen = event.item;
+    if (imageGen.result) {
+      state.images.push(imageGen.result);
+    }
   }
   events.push({
     type: "content_block_stop",
@@ -949,11 +967,11 @@ function transformStreamEvent2(event, state) {
   return events;
 }
 function buildResponseFromState2(state) {
-  const textContent = [];
+  const content = [];
   let structuredData;
   for (const [, text] of state.textByIndex) {
     if (text) {
-      textContent.push({ type: "text", text });
+      content.push({ type: "text", text });
       if (structuredData === void 0) {
         try {
           structuredData = JSON.parse(text);
@@ -962,6 +980,13 @@ function buildResponseFromState2(state) {
       }
     }
   }
+  for (const imageData of state.images) {
+    content.push({
+      type: "image",
+      mimeType: "image/png",
+      source: { type: "base64", data: imageData }
+    });
+  }
   const toolCalls = [];
   const functionCallItems = [];
   for (const [, toolCall] of state.toolCalls) {
@@ -990,7 +1015,7 @@ function buildResponseFromState2(state) {
     }
   }
   const message = new AssistantMessage(
-    textContent,
+    content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
       id: state.id,
@@ -1184,6 +1209,67 @@ function createResponsesLLMHandler() {
   };
 }
 
+// src/providers/openai/types.ts
+function webSearchTool(options) {
+  if (options) {
+    return {
+      type: "web_search",
+      ...options
+    };
+  }
+  return { type: "web_search" };
+}
+function fileSearchTool(options) {
+  return {
+    type: "file_search",
+    file_search: options
+  };
+}
+function codeInterpreterTool(options) {
+  return {
+    type: "code_interpreter",
+    ...options?.container && { code_interpreter: { container: options.container } }
+  };
+}
+function computerTool(options) {
+  return {
+    type: "computer",
+    computer: options
+  };
+}
+function imageGenerationTool(options) {
+  if (options) {
+    return {
+      type: "image_generation",
+      ...options
+    };
+  }
+  return { type: "image_generation" };
+}
+function mcpTool(options) {
+  const { url, name, allowed_tools, headers, require_approval } = options;
+  return {
+    type: "mcp",
+    mcp: {
+      server: {
+        url,
+        name,
+        ...allowed_tools && { tool_configuration: { allowed_tools } },
+        headers,
+        require_approval
+      }
+    }
+  };
+}
+var tools = {
+  webSearch: webSearchTool,
+  fileSearch: fileSearchTool,
+  codeInterpreter: codeInterpreterTool,
+  computer: computerTool,
+  imageGeneration: imageGenerationTool,
+  mcp: mcpTool
+};
+
 // src/providers/openai/index.ts
 function createOpenAIProvider() {
   let currentApiMode = "responses";
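The new factories are thin wrappers that return plain objects destined for params.tools. A sketch based only on the shapes visible above; option keys such as `size` are illustrative and not verified against the OpenAI Responses API:

const builtIns = [
  tools.webSearch(),                            // { type: "web_search" }
  tools.imageGeneration({ size: "1024x1024" }), // options are spread onto the tool object
  tools.mcp({ url: "https://example.com/mcp", name: "docs" })
  // mcp wraps its options as { type: "mcp", mcp: { server: { url, name, ... } } }
];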
@@ -1223,6 +1309,13 @@ function createOpenAIProvider() {
 }
 var openai = createOpenAIProvider();
 export {
-  openai
+  codeInterpreterTool,
+  computerTool,
+  fileSearchTool,
+  imageGenerationTool,
+  mcpTool,
+  openai,
+  tools,
+  webSearchTool
 };
 //# sourceMappingURL=index.js.map
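In sum, 0.0.9 widens the public surface from a single `openai` export to the individual tool factories plus the grouped `tools` namespace, so the factories can be imported directly or through the namespace. The subpath below is assumed from the source layout (src/providers/openai/index.ts), not confirmed by the diff:

import { openai, tools, webSearchTool } from "@providerprotocol/ai/openai";
// tools.webSearch and webSearchTool refer to the same function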