@providerprotocol/ai 0.0.6 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,7 +3,7 @@ import {
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-QUUX4G7U.js";
+} from "../chunk-W4BB4BG2.js";
 import {
   parseSSEStream
 } from "../chunk-X5G4EHL7.js";
@@ -549,13 +549,17 @@ function createCompletionsLLMHandler() {
 // src/providers/openai/transform.responses.ts
 function transformRequest2(request, modelId) {
   const params = request.params ?? {};
+  const builtInTools = params.tools;
+  const { tools: _paramsTools, ...restParams } = params;
   const openaiRequest = {
-    ...params,
+    ...restParams,
     model: modelId,
     input: transformInputItems(request.messages, request.system)
   };
-  if (request.tools && request.tools.length > 0) {
-    openaiRequest.tools = request.tools.map(transformTool2);
+  const functionTools = request.tools?.map(transformTool2) ?? [];
+  const allTools = [...functionTools, ...builtInTools ?? []];
+  if (allTools.length > 0) {
+    openaiRequest.tools = allTools;
   }
   if (request.structure) {
     const schema = {
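
The change above stops spreading `params` wholesale into the Responses request: built-in tools supplied through `params.tools` are lifted out and appended after the function tools mapped from `request.tools`, so both kinds can coexist in one request. A rough sketch of a request object that exercises the merge; the field names come from this hunk, while the calling code that consumes the object is not shown in the diff:

// Sketch only: field names are those read by transformRequest2 above.
// The literal { type: "web_search" } matches what tools.webSearch()
// (added later in this diff) returns.
const request = {
  system: "You are a helpful assistant.",
  messages: [],                        // user/assistant message values in practice
  tools: [],                           // function tools, mapped through transformTool2
  params: {
    tools: [{ type: "web_search" }]    // built-in tools, appended after the mapped ones
  }
};
// transformRequest2(request, "gpt-4o") now yields roughly:
// { model: "gpt-4o", input: [...], tools: [{ type: "web_search" }] }
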
@@ -718,7 +722,7 @@ function transformTool2(tool) {
   };
 }
 function transformResponse2(data) {
-  const textContent = [];
+  const content = [];
   const toolCalls = [];
   const functionCallItems = [];
   let hadRefusal = false;
@@ -726,17 +730,17 @@ function transformResponse2(data) {
   for (const item of data.output) {
     if (item.type === "message") {
       const messageItem = item;
-      for (const content of messageItem.content) {
-        if (content.type === "output_text") {
-          textContent.push({ type: "text", text: content.text });
+      for (const part of messageItem.content) {
+        if (part.type === "output_text") {
+          content.push({ type: "text", text: part.text });
           if (structuredData === void 0) {
             try {
-              structuredData = JSON.parse(content.text);
+              structuredData = JSON.parse(part.text);
             } catch {
             }
           }
-        } else if (content.type === "refusal") {
-          textContent.push({ type: "text", text: content.refusal });
+        } else if (part.type === "refusal") {
+          content.push({ type: "text", text: part.refusal });
           hadRefusal = true;
         }
       }
@@ -758,10 +762,19 @@ function transformResponse2(data) {
         name: functionCall.name,
         arguments: functionCall.arguments
       });
+    } else if (item.type === "image_generation_call") {
+      const imageGen = item;
+      if (imageGen.result) {
+        content.push({
+          type: "image",
+          mimeType: "image/png",
+          source: { type: "base64", data: imageGen.result }
+        });
+      }
     }
   }
   const message = new AssistantMessage(
-    textContent,
+    content,
     toolCalls.length > 0 ? toolCalls : void 0,
     {
       id: data.id,
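
With the new `image_generation_call` branch, base64 image output from the image_generation built-in tool is pushed into the message content as an image part instead of being ignored. A sketch of consuming it in Node; it assumes the content array handed to the `AssistantMessage` constructor is reachable on the resulting message as `message.content`, which this diff does not show:

import { writeFile } from "node:fs/promises";

// `message` stands in for the AssistantMessage built above.
const imagePart = message.content.find((part) => part.type === "image");
if (imagePart && imagePart.source.type === "base64") {
  // The branch above always tags the data as image/png.
  await writeFile("generated.png", Buffer.from(imagePart.source.data, "base64"));
}
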
@@ -769,7 +782,6 @@ function transformResponse2(data) {
       openai: {
         model: data.model,
         status: data.status,
-        // Store response_id for multi-turn tool calling
         response_id: data.id,
         functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
       }
@@ -1184,6 +1196,67 @@ function createResponsesLLMHandler() {
   };
 }

+// src/providers/openai/types.ts
+function webSearchTool(options) {
+  if (options) {
+    return {
+      type: "web_search",
+      ...options
+    };
+  }
+  return { type: "web_search" };
+}
+function fileSearchTool(options) {
+  return {
+    type: "file_search",
+    file_search: options
+  };
+}
+function codeInterpreterTool(options) {
+  return {
+    type: "code_interpreter",
+    ...options?.container && { code_interpreter: { container: options.container } }
+  };
+}
+function computerTool(options) {
+  return {
+    type: "computer",
+    computer: options
+  };
+}
+function imageGenerationTool(options) {
+  if (options) {
+    return {
+      type: "image_generation",
+      ...options
+    };
+  }
+  return { type: "image_generation" };
+}
+function mcpTool(options) {
+  const { url, name, allowed_tools, headers, require_approval } = options;
+  return {
+    type: "mcp",
+    mcp: {
+      server: {
+        url,
+        name,
+        ...allowed_tools && { tool_configuration: { allowed_tools } },
+        headers,
+        require_approval
+      }
+    }
+  };
+}
+var tools = {
+  webSearch: webSearchTool,
+  fileSearch: fileSearchTool,
+  codeInterpreter: codeInterpreterTool,
+  computer: computerTool,
+  imageGeneration: imageGenerationTool,
+  mcp: mcpTool
+};
+
 // src/providers/openai/index.ts
 function createOpenAIProvider() {
   let currentApiMode = "responses";
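
The factories added in this hunk build plain descriptor objects and nothing else, so their return values can be read directly off the code. Worked out for a few of them, with illustrative arguments (this says nothing about which options the Responses API itself accepts):

// Return values follow from the factory bodies above.
tools.webSearch();
// => { type: "web_search" }
tools.codeInterpreter({ container: "default" });
// => { type: "code_interpreter", code_interpreter: { container: "default" } }
tools.mcp({ url: "https://example.com/mcp", name: "docs", allowed_tools: ["search"] });
// => { type: "mcp", mcp: { server: { url, name,
//      tool_configuration: { allowed_tools }, headers: undefined, require_approval: undefined } } }
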
@@ -1223,6 +1296,13 @@ function createOpenAIProvider() {
 }
 var openai = createOpenAIProvider();
 export {
-  openai
+  codeInterpreterTool,
+  computerTool,
+  fileSearchTool,
+  imageGenerationTool,
+  mcpTool,
+  openai,
+  tools,
+  webSearchTool
 };
 //# sourceMappingURL=index.js.map
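
The export list now exposes each factory individually as well as the aggregate `tools` object. A minimal import sketch; the subpath is a guess based on this chunk's location and should be checked against the package's exports map:

// Import specifier is assumed, not confirmed by this diff.
import { openai, tools, webSearchTool } from "@providerprotocol/ai/openai";

// tools.webSearch and webSearchTool are the same function.
const builtIn = tools.webSearch();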