@zenning/openai 3.0.17 → 3.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2491,14 +2491,12 @@ async function convertToOpenAIResponsesInput({
  hasShellTool = false,
  hasApplyPatchTool = false,
  compactionInput,
- providerOptions
+ previousResponseId
  }) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  const input = [];
  const warnings = [];
  const processedApprovalIds = /* @__PURE__ */ new Set();
- const skipItemReferences = (providerOptions == null ? void 0 : providerOptions.skipItemReferencesForApproval) === true;
- console.log("[OpenAI Provider] Skip item references flag:", skipItemReferences, "providerOptions:", providerOptions);
  if (compactionInput && compactionInput.length > 0) {
  input.push(...compactionInput);
  }
@@ -2580,7 +2578,7 @@ async function convertToOpenAIResponsesInput({
  switch (part.type) {
  case "text": {
  const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a[providerOptionsName]) == null ? void 0 : _b.itemId;
- if (store && id != null) {
+ if (store && id != null && !previousResponseId) {
  input.push({ type: "item_reference", id });
  break;
  }
@@ -2593,31 +2591,16 @@ async function convertToOpenAIResponsesInput({
  }
  case "tool-call": {
  const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c[providerOptionsName]) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e[providerOptionsName]) == null ? void 0 : _f.itemId;
- console.log("[OpenAI Provider] Processing tool-call:", {
- toolCallId: part.toolCallId,
- toolName: part.toolName,
- id,
- providerExecuted: part.providerExecuted,
- store,
- skipItemReferences,
- willCreateItemReference: store && id != null && !skipItemReferences
- });
  if (part.providerExecuted) {
- if (store && id != null && !skipItemReferences) {
- console.log("[OpenAI Provider] Creating item_reference (providerExecuted)");
+ if (store && id != null && !previousResponseId) {
  input.push({ type: "item_reference", id });
  }
  break;
  }
- if (store && id != null && !skipItemReferences) {
- console.log("[OpenAI Provider] Creating item_reference (non-providerExecuted)");
+ if (store && id != null && !previousResponseId) {
  input.push({ type: "item_reference", id });
  break;
  }
- if (skipItemReferences) {
- console.log("[OpenAI Provider] Skipping function_call due to approval continuation flag");
- break;
- }
  const resolvedToolName = toolNameMapping.toProviderToolName(
  part.toolName
  );
@@ -2673,9 +2656,10 @@ async function convertToOpenAIResponsesInput({
  if (part.output.type === "execution-denied" || part.output.type === "json" && typeof part.output.value === "object" && part.output.value != null && "type" in part.output.value && part.output.value.type === "execution-denied") {
  break;
  }
- if (store) {
+ if (store && !previousResponseId) {
  const itemId = (_j = (_i = (_h = part.providerMetadata) == null ? void 0 : _h[providerOptionsName]) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
  input.push({ type: "item_reference", id: itemId });
+ } else if (store) {
  } else {
  warnings.push({
  type: "other",
@@ -2685,15 +2669,15 @@ async function convertToOpenAIResponsesInput({
  break;
  }
  case "reasoning": {
- const providerOptions2 = await parseProviderOptions6({
+ const providerOptions = await parseProviderOptions6({
  provider: providerOptionsName,
  providerOptions: part.providerOptions,
  schema: openaiResponsesReasoningProviderOptionsSchema
  });
- const reasoningId = providerOptions2 == null ? void 0 : providerOptions2.itemId;
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
  if (reasoningId != null) {
  const reasoningMessage = reasoningMessages[reasoningId];
- if (store) {
+ if (store && !previousResponseId) {
  if (reasoningMessage === void 0) {
  input.push({ type: "item_reference", id: reasoningId });
  reasoningMessages[reasoningId] = {
@@ -2702,6 +2686,14 @@ async function convertToOpenAIResponsesInput({
  summary: []
  };
  }
+ } else if (store) {
+ if (reasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ summary: []
+ };
+ }
  } else {
  const summaryParts = [];
  if (part.text.length > 0) {
@@ -2719,14 +2711,14 @@ async function convertToOpenAIResponsesInput({
  reasoningMessages[reasoningId] = {
  type: "reasoning",
  id: reasoningId,
- encrypted_content: providerOptions2 == null ? void 0 : providerOptions2.reasoningEncryptedContent,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
  summary: summaryParts
  };
  input.push(reasoningMessages[reasoningId]);
  } else {
  reasoningMessage.summary.push(...summaryParts);
- if ((providerOptions2 == null ? void 0 : providerOptions2.reasoningEncryptedContent) != null) {
- reasoningMessage.encrypted_content = providerOptions2.reasoningEncryptedContent;
+ if ((providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent) != null) {
+ reasoningMessage.encrypted_content = providerOptions.reasoningEncryptedContent;
  }
  }
  }
@@ -2750,7 +2742,7 @@ async function convertToOpenAIResponsesInput({
  continue;
  }
  processedApprovalIds.add(approvalResponse.approvalId);
- if (store) {
+ if (store && !previousResponseId) {
  input.push({
  type: "item_reference",
  id: approvalResponse.approvalId
@@ -4370,7 +4362,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g;
  const warnings = [];
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
@@ -4434,10 +4426,10 @@ var OpenAIResponsesLanguageModel = class {
  hasShellTool: hasOpenAITool("openai.shell"),
  hasApplyPatchTool: hasOpenAITool("openai.apply_patch"),
  compactionInput: openaiOptions == null ? void 0 : openaiOptions.compactionInput,
- providerOptions: openaiOptions
+ previousResponseId: (_d = openaiOptions == null ? void 0 : openaiOptions.previousResponseId) != null ? _d : void 0
  });
  warnings.push(...inputWarnings);
- const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
+ const strictJsonSchema = (_e = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _e : true;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
  function addInclude(key) {
  if (include == null) {
@@ -4453,9 +4445,9 @@ var OpenAIResponsesLanguageModel = class {
  if (topLogprobs) {
  addInclude("message.output_text.logprobs");
  }
- const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
+ const webSearchToolName = (_f = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
- )) == null ? void 0 : _e.name;
+ )) == null ? void 0 : _f.name;
  if (webSearchToolName) {
  addInclude("web_search_call.action.sources");
  }
@@ -4478,7 +4470,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_f = responseFormat.name) != null ? _f : "response",
+ name: (_g = responseFormat.name) != null ? _g : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
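
The behavioral change visible in this diff is that item_reference entries are now emitted only when store is true and no previousResponseId was supplied, replacing the skipItemReferencesForApproval provider option and its debug logging from 3.0.17. Below is a minimal TypeScript sketch of that gating condition for readability; the helper name and parameter shape are illustrative only and are not part of the package's API, where the condition appears inline inside convertToOpenAIResponsesInput.

// Sketch only: mirrors the 3.0.18 condition `store && id != null && !previousResponseId`.
type ItemReference = { type: "item_reference"; id: string };

function maybeItemReference(
  store: boolean,
  itemId: string | undefined,
  previousResponseId: string | undefined
): ItemReference | undefined {
  // 3.0.17 consulted providerOptions.skipItemReferencesForApproval here;
  // 3.0.18 instead suppresses the reference whenever previousResponseId is set.
  if (store && itemId != null && !previousResponseId) {
    return { type: "item_reference", id: itemId };
  }
  return undefined;
}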