@zenning/openai 3.0.17 → 3.0.19

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
@@ -2491,14 +2491,12 @@ async function convertToOpenAIResponsesInput({
2491
2491
  hasShellTool = false,
2492
2492
  hasApplyPatchTool = false,
2493
2493
  compactionInput,
2494
- providerOptions
2494
+ previousResponseId
2495
2495
  }) {
2496
2496
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2497
2497
  const input = [];
2498
2498
  const warnings = [];
2499
2499
  const processedApprovalIds = /* @__PURE__ */ new Set();
2500
- const skipItemReferences = (providerOptions == null ? void 0 : providerOptions.skipItemReferencesForApproval) === true;
2501
- console.log("[OpenAI Provider] Skip item references flag:", skipItemReferences, "providerOptions:", providerOptions);
2502
2500
  if (compactionInput && compactionInput.length > 0) {
2503
2501
  input.push(...compactionInput);
2504
2502
  }
@@ -2580,10 +2578,13 @@ async function convertToOpenAIResponsesInput({
2580
2578
  switch (part.type) {
2581
2579
  case "text": {
2582
2580
  const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a[providerOptionsName]) == null ? void 0 : _b.itemId;
2583
- if (store && id != null) {
2581
+ if (store && id != null && !previousResponseId) {
2584
2582
  input.push({ type: "item_reference", id });
2585
2583
  break;
2586
2584
  }
2585
+ if (store && id != null && previousResponseId) {
2586
+ break;
2587
+ }
2587
2588
  input.push({
2588
2589
  role: "assistant",
2589
2590
  content: [{ type: "output_text", text: part.text }],
@@ -2593,29 +2594,17 @@ async function convertToOpenAIResponsesInput({
2593
2594
  }
2594
2595
  case "tool-call": {
2595
2596
  const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c[providerOptionsName]) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e[providerOptionsName]) == null ? void 0 : _f.itemId;
2596
- console.log("[OpenAI Provider] Processing tool-call:", {
2597
- toolCallId: part.toolCallId,
2598
- toolName: part.toolName,
2599
- id,
2600
- providerExecuted: part.providerExecuted,
2601
- store,
2602
- skipItemReferences,
2603
- willCreateItemReference: store && id != null && !skipItemReferences
2604
- });
2605
2597
  if (part.providerExecuted) {
2606
- if (store && id != null && !skipItemReferences) {
2607
- console.log("[OpenAI Provider] Creating item_reference (providerExecuted)");
2598
+ if (store && id != null && !previousResponseId) {
2608
2599
  input.push({ type: "item_reference", id });
2609
2600
  }
2610
2601
  break;
2611
2602
  }
2612
- if (store && id != null && !skipItemReferences) {
2613
- console.log("[OpenAI Provider] Creating item_reference (non-providerExecuted)");
2603
+ if (store && id != null && !previousResponseId) {
2614
2604
  input.push({ type: "item_reference", id });
2615
2605
  break;
2616
2606
  }
2617
- if (skipItemReferences) {
2618
- console.log("[OpenAI Provider] Skipping function_call due to approval continuation flag");
2607
+ if (store && id != null && previousResponseId) {
2619
2608
  break;
2620
2609
  }
2621
2610
  const resolvedToolName = toolNameMapping.toProviderToolName(
@@ -2673,9 +2662,10 @@ async function convertToOpenAIResponsesInput({
2673
2662
  if (part.output.type === "execution-denied" || part.output.type === "json" && typeof part.output.value === "object" && part.output.value != null && "type" in part.output.value && part.output.value.type === "execution-denied") {
2674
2663
  break;
2675
2664
  }
2676
- if (store) {
2665
+ if (store && !previousResponseId) {
2677
2666
  const itemId = (_j = (_i = (_h = part.providerMetadata) == null ? void 0 : _h[providerOptionsName]) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
2678
2667
  input.push({ type: "item_reference", id: itemId });
2668
+ } else if (store) {
2679
2669
  } else {
2680
2670
  warnings.push({
2681
2671
  type: "other",
@@ -2685,15 +2675,15 @@ async function convertToOpenAIResponsesInput({
2685
2675
  break;
2686
2676
  }
2687
2677
  case "reasoning": {
2688
- const providerOptions2 = await parseProviderOptions6({
2678
+ const providerOptions = await parseProviderOptions6({
2689
2679
  provider: providerOptionsName,
2690
2680
  providerOptions: part.providerOptions,
2691
2681
  schema: openaiResponsesReasoningProviderOptionsSchema
2692
2682
  });
2693
- const reasoningId = providerOptions2 == null ? void 0 : providerOptions2.itemId;
2683
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2694
2684
  if (reasoningId != null) {
2695
2685
  const reasoningMessage = reasoningMessages[reasoningId];
2696
- if (store) {
2686
+ if (store && !previousResponseId) {
2697
2687
  if (reasoningMessage === void 0) {
2698
2688
  input.push({ type: "item_reference", id: reasoningId });
2699
2689
  reasoningMessages[reasoningId] = {
@@ -2702,6 +2692,14 @@ async function convertToOpenAIResponsesInput({
2702
2692
  summary: []
2703
2693
  };
2704
2694
  }
2695
+ } else if (store) {
2696
+ if (reasoningMessage === void 0) {
2697
+ reasoningMessages[reasoningId] = {
2698
+ type: "reasoning",
2699
+ id: reasoningId,
2700
+ summary: []
2701
+ };
2702
+ }
2705
2703
  } else {
2706
2704
  const summaryParts = [];
2707
2705
  if (part.text.length > 0) {
@@ -2719,14 +2717,14 @@ async function convertToOpenAIResponsesInput({
2719
2717
  reasoningMessages[reasoningId] = {
2720
2718
  type: "reasoning",
2721
2719
  id: reasoningId,
2722
- encrypted_content: providerOptions2 == null ? void 0 : providerOptions2.reasoningEncryptedContent,
2720
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2723
2721
  summary: summaryParts
2724
2722
  };
2725
2723
  input.push(reasoningMessages[reasoningId]);
2726
2724
  } else {
2727
2725
  reasoningMessage.summary.push(...summaryParts);
2728
- if ((providerOptions2 == null ? void 0 : providerOptions2.reasoningEncryptedContent) != null) {
2729
- reasoningMessage.encrypted_content = providerOptions2.reasoningEncryptedContent;
2726
+ if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
2727
+ reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
2730
2728
  }
2731
2729
  }
2732
2730
  }
@@ -2750,7 +2748,7 @@ async function convertToOpenAIResponsesInput({
2750
2748
  continue;
2751
2749
  }
2752
2750
  processedApprovalIds.add(approvalResponse.approvalId);
2753
- if (store) {
2751
+ if (store && !previousResponseId) {
2754
2752
  input.push({
2755
2753
  type: "item_reference",
2756
2754
  id: approvalResponse.approvalId
@@ -4370,7 +4368,7 @@ var OpenAIResponsesLanguageModel = class {
4370
4368
  toolChoice,
4371
4369
  responseFormat
4372
4370
  }) {
4373
- var _a, _b, _c, _d, _e, _f;
4371
+ var _a, _b, _c, _d, _e, _f, _g;
4374
4372
  const warnings = [];
4375
4373
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
4376
4374
  if (topK != null) {
@@ -4434,10 +4432,10 @@ var OpenAIResponsesLanguageModel = class {
4434
4432
  hasShellTool: hasOpenAITool("openai.shell"),
4435
4433
  hasApplyPatchTool: hasOpenAITool("openai.apply_patch"),
4436
4434
  compactionInput: openaiOptions == null ? void 0 : openaiOptions.compactionInput,
4437
- providerOptions: openaiOptions
4435
+ previousResponseId: (_d = openaiOptions == null ? void 0 : openaiOptions.previousResponseId) != null ? _d : void 0
4438
4436
  });
4439
4437
  warnings.push(...inputWarnings);
4440
- const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
4438
+ const strictJsonSchema = (_e = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _e : true;
4441
4439
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
4442
4440
  function addInclude(key) {
4443
4441
  if (include == null) {
@@ -4453,9 +4451,9 @@ var OpenAIResponsesLanguageModel = class {
4453
4451
  if (topLogprobs) {
4454
4452
  addInclude("message.output_text.logprobs");
4455
4453
  }
4456
- const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
4454
+ const webSearchToolName = (_f = tools == null ? void 0 : tools.find(
4457
4455
  (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
4458
- )) == null ? void 0 : _e.name;
4456
+ )) == null ? void 0 : _f.name;
4459
4457
  if (webSearchToolName) {
4460
4458
  addInclude("web_search_call.action.sources");
4461
4459
  }
@@ -4478,7 +4476,7 @@ var OpenAIResponsesLanguageModel = class {
4478
4476
  format: responseFormat.schema != null ? {
4479
4477
  type: "json_schema",
4480
4478
  strict: strictJsonSchema,
4481
- name: (_f = responseFormat.name) != null ? _f : "response",
4479
+ name: (_g = responseFormat.name) != null ? _g : "response",
4482
4480
  description: responseFormat.description,
4483
4481
  schema: responseFormat.schema
4484
4482
  } : { type: "json_object" }