@zenning/ai 6.0.14 → 6.0.16

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
  # ai

+ ## 6.0.16
+
+ ### Patch Changes
+
+ - Fix tool approval validation error when using previousResponseId. Skip validation of approval-request existence when continuing from a previous response, since the approval-request is in the provider's context rather than the current messages.
+
+ ## 6.0.15
+
+ ### Patch Changes
+
+ - 35e1a30: Add support for OpenAI Responses API compaction feature via provider options for context window management
+ - Fix tool approval validation error when using previousResponseId. Skip validation of approval-request existence when continuing from a previous response, since the approval-request is in the provider's context rather than the current messages.
+ - Updated dependencies [35e1a30]
+   - @zenning/provider@3.0.5
+   - @zenning/provider-utils@4.0.7
+   - @zenning/gateway@3.0.12
+
  ## 6.0.14

  ### Patch Changes
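The fix that appears in both 6.0.15 and 6.0.16 concerns calls that continue an OpenAI Responses conversation through `providerOptions.openai.previousResponseId` while answering a pending tool approval. A minimal sketch of such a call follows; only `generateText`, the `previousResponseId` provider option, and `InvalidToolApprovalError` come from this diff, while the provider import, model id, and the shape of the approval-response part are assumptions for illustration.

```js
import { generateText } from "@zenning/ai";
// Assumed provider package and factory; they are not part of this diff.
import { createOpenAI } from "@zenning/openai";

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const result = await generateText({
  model: openai.responses("gpt-4o"), // assumed model id
  providerOptions: {
    // Earlier turns, including the tool approval-request, live in the provider's
    // stored context for this response id rather than in `messages`. Before 6.0.15
    // this call threw InvalidToolApprovalError; now approval validation is skipped.
    openai: { previousResponseId: "resp_123" }, // placeholder id
  },
  messages: [
    {
      role: "tool",
      content: [
        {
          type: "tool-approval-response", // part type assumed for illustration
          approvalId: "approval_1",
          approved: true,
        },
      ],
    },
  ],
});

console.log(result.text);
```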
package/dist/index.js CHANGED
@@ -1001,7 +1001,7 @@ var import_provider_utils3 = require("@zenning/provider-utils");
  var import_provider_utils4 = require("@zenning/provider-utils");

  // src/version.ts
- var VERSION = true ? "6.0.14" : "0.0.0-test";
+ var VERSION = true ? "6.0.15" : "0.0.0-test";

  // src/util/download/download.ts
  var download = async ({ url }) => {
@@ -2382,7 +2382,8 @@ function prepareRetries({

  // src/generate-text/collect-tool-approvals.ts
  function collectToolApprovals({
-   messages
+   messages,
+   skipValidation = false
  }) {
    const lastMessage = messages.at(-1);
    if ((lastMessage == null ? void 0 : lastMessage.role) != "tool") {
@@ -2427,19 +2428,25 @@ function collectToolApprovals({
    for (const approvalResponse of approvalResponses) {
      const approvalRequest = toolApprovalRequestsByApprovalId[approvalResponse.approvalId];
      if (approvalRequest == null) {
-       throw new InvalidToolApprovalError({
-         approvalId: approvalResponse.approvalId
-       });
+       if (!skipValidation) {
+         throw new InvalidToolApprovalError({
+           approvalId: approvalResponse.approvalId
+         });
+       }
+       continue;
      }
      if (toolResults[approvalRequest.toolCallId] != null) {
        continue;
      }
      const toolCall = toolCallsByToolCallId[approvalRequest.toolCallId];
      if (toolCall == null) {
-       throw new ToolCallNotFoundForApprovalError({
-         toolCallId: approvalRequest.toolCallId,
-         approvalId: approvalRequest.approvalId
-       });
+       if (!skipValidation) {
+         throw new ToolCallNotFoundForApprovalError({
+           toolCallId: approvalRequest.toolCallId,
+           approvalId: approvalRequest.approvalId
+         });
+       }
+       continue;
      }
      const approval = {
        approvalRequest,
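For reference, the shape of input that used to trip the removed throws is an approval response whose matching approval-request is not among the local messages (it lives in the provider's stored context when `previousResponseId` is used). A rough sketch follows, with the part type and field names assumed for illustration; `collectToolApprovals` is internal, so its calls are shown as comments.

```js
// The last message carries only the user's approval decision; the approval-request
// it refers to was produced in an earlier response held by the provider.
const messages = [
  {
    role: "tool",
    content: [
      // part shape assumed for illustration
      { type: "tool-approval-response", approvalId: "approval_1", approved: true },
    ],
  },
];

// Default behavior (unchanged): the orphaned approval response is an error.
// collectToolApprovals({ messages });                        // throws InvalidToolApprovalError
// Opt-in path used when previousResponseId is set: the orphan is skipped instead.
// collectToolApprovals({ messages, skipValidation: true });  // yields no local approvals
```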
@@ -3667,10 +3674,13 @@ async function generateText({
    }),
    tracer,
    fn: async (span) => {
-     var _a16, _b, _c, _d, _e, _f, _g, _h;
+     var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
      const initialMessages = initialPrompt.messages;
      const responseMessages = [];
-     const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
+     const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+       messages: initialMessages,
+       skipValidation: ((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId) != null
+     });
      const localApprovedToolApprovals = approvedToolApprovals.filter(
        (toolApproval) => !toolApproval.toolCall.providerExecuted
      );
@@ -3760,21 +3770,21 @@ async function generateText({
        experimental_context
      }));
      const stepModel = resolveLanguageModel(
-       (_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
+       (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
      );
      const promptMessages = await convertToLanguageModelPrompt({
        prompt: {
-         system: (_b = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _b : initialPrompt.system,
-         messages: (_c = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _c : stepInputMessages
+         system: (_c = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _c : initialPrompt.system,
+         messages: (_d = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _d : stepInputMessages
        },
        supportedUrls: await stepModel.supportedUrls,
        download: download2
      });
-     experimental_context = (_d = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _d : experimental_context;
+     experimental_context = (_e = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _e : experimental_context;
      const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
        tools,
-       toolChoice: (_e = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _e : toolChoice,
-       activeTools: (_f = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _f : activeTools
+       toolChoice: (_f = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _f : toolChoice,
+       activeTools: (_g = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _g : activeTools
      });
      currentModelResponse = await retry(
        () => {
@@ -3994,7 +4004,7 @@ async function generateText({
      usage: asLanguageModelUsage(currentModelResponse.usage),
      warnings: currentModelResponse.warnings,
      providerMetadata: currentModelResponse.providerMetadata,
-     request: (_g = currentModelResponse.request) != null ? _g : {},
+     request: (_h = currentModelResponse.request) != null ? _h : {},
      response: {
        ...currentModelResponse.response,
        // deep clone msgs to avoid mutating past messages in multi-step:
@@ -4002,7 +4012,7 @@ async function generateText({
        }
      });
      logWarnings({
-       warnings: (_h = currentModelResponse.warnings) != null ? _h : [],
+       warnings: (_i = currentModelResponse.warnings) != null ? _i : [],
        provider: stepModel.provider,
        model: stepModel.modelId
      });
@@ -6168,6 +6178,7 @@ var DefaultStreamTextResult = class {
      tracer,
      endWhenDone: false,
      fn: async (rootSpanArg) => {
+       var _a16;
        rootSpan = rootSpanArg;
        const initialPrompt = await standardizePrompt({
          system,
@@ -6176,7 +6187,10 @@ var DefaultStreamTextResult = class {
        });
        const initialMessages = initialPrompt.messages;
        const initialResponseMessages = [];
-       const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
+       const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+         messages: initialMessages,
+         skipValidation: ((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId) != null
+       });
        if (deniedToolApprovals.length > 0 || approvedToolApprovals.length > 0) {
          const providerExecutedToolApprovals = [
            ...approvedToolApprovals,
@@ -6286,7 +6300,7 @@ var DefaultStreamTextResult = class {
      responseMessages,
      usage
    }) {
-     var _a16, _b, _c, _d, _e, _f;
+     var _a17, _b, _c, _d, _e, _f;
      const includeRawChunks2 = self.includeRawChunks;
      stepFinish = new import_provider_utils19.DelayedPromise();
      const stepInputMessages = [...initialMessages, ...responseMessages];
@@ -6298,7 +6312,7 @@ var DefaultStreamTextResult = class {
        experimental_context
      }));
      const stepModel = resolveLanguageModel(
-       (_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
+       (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model
      );
      const promptMessages = await convertToLanguageModelPrompt({
        prompt: {
@@ -6411,7 +6425,7 @@ var DefaultStreamTextResult = class {
      streamWithToolResults.pipeThrough(
        new TransformStream({
          async transform(chunk, controller) {
-           var _a17, _b2, _c2, _d2, _e2;
+           var _a18, _b2, _c2, _d2, _e2;
            if (chunk.type === "stream-start") {
              warnings = chunk.warnings;
              return;
@@ -6484,7 +6498,7 @@ var DefaultStreamTextResult = class {
      }
      case "response-metadata": {
        stepResponse = {
-         id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
+         id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
          timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
          modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
        };
@@ -7478,7 +7492,7 @@ async function convertToModelMessages(messages, options) {
        });
      } else if (isToolUIPart(part)) {
        const toolName = getToolName(part);
-       if (part.state !== "input-streaming") {
+       if (part.state !== "input-streaming" && part.state !== "approval-responded" && part.state !== "output-denied") {
          content.push({
            type: "tool-call",
            toolCallId: part.toolCallId,
@@ -7494,7 +7508,7 @@ async function convertToModelMessages(messages, options) {
            toolCallId: part.toolCallId
          });
        }
-       if (part.providerExecuted === true && part.state !== "approval-responded" && (part.state === "output-available" || part.state === "output-error")) {
+       if (part.providerExecuted === true && (part.state === "output-available" || part.state === "output-error")) {
          content.push({
            type: "tool-result",
            toolCallId: part.toolCallId,
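The `convertToModelMessages` change means tool UI parts that are still mid-approval (`"approval-responded"`) or were denied (`"output-denied"`) no longer emit a `tool-call` content part, and provider-executed results are forwarded based purely on their output state. A minimal sketch, assuming `convertToModelMessages` is exported from the package; the message shape, tool name, and ids are placeholders for illustration.

```js
import { convertToModelMessages } from "@zenning/ai";

// An assistant message whose tool part has been answered by the user but has not
// executed yet. After this change it produces no premature tool-call content part.
const uiMessages = [
  {
    id: "msg_1", // message and part field names assumed for illustration
    role: "assistant",
    parts: [
      {
        type: "tool-getWeather", // hypothetical tool name
        toolCallId: "call_1",
        state: "approval-responded",
        input: { city: "Berlin" },
      },
    ],
  },
];

const modelMessages = await convertToModelMessages(uiMessages);
// Once the part reaches "output-available" or "output-error", the tool-call and,
// for provider-executed tools, the tool-result are emitted as before.
console.log(modelMessages);
```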