@zenning/ai 6.0.13 → 6.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
  # ai

+ ## 6.0.15
+
+ ### Patch Changes
+
+ - 35e1a30: Add support for OpenAI Responses API compaction feature via provider options for context window management
+ - Fix tool approval validation error when using previousResponseId. Skip validation of approval-request existence when continuing from a previous response, since the approval-request is in the provider's context rather than the current messages.
+ - Updated dependencies [35e1a30]
+   - @zenning/provider@3.0.5
+   - @zenning/provider-utils@4.0.7
+   - @zenning/gateway@3.0.12
+
+ ## 6.0.14
+
+ ### Patch Changes
+
+ - Add support for OpenAI Responses API compaction feature via provider options for context window management
+ - Updated dependencies
+   - @zenning/provider@3.0.4
+   - @zenning/provider-utils@4.0.6
+   - @zenning/gateway@3.0.11
+
  ## 6.0.13

  ### Patch Changes
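The previousResponseId fix in the 6.0.15 entry affects calls that continue from a stored OpenAI Responses API response: the approval-request for a pending tool call lives in the provider's server-side context rather than in the messages you pass in, so `collectToolApprovals` now skips its existence checks in that case. (The compaction feature mentioned above is configured through the same `providerOptions.openai` block; its option key is not shown in this diff.) A minimal sketch of the calling pattern, assuming an OpenAI provider import and using illustrative model and response ids:

```ts
import { generateText } from "@zenning/ai";
import { openai } from "@zenning/openai"; // assumed provider package name for this sketch

// Conversation so far. The last message carries the tool-approval response from
// the previous turn; the matching approval-request part is NOT in these messages,
// it lives in the provider-side context referenced by previousResponseId.
declare const messages: Parameters<typeof generateText>[0]["messages"];

const result = await generateText({
  model: openai("gpt-5"), // illustrative model id
  messages,
  providerOptions: {
    openai: {
      // Continue from a stored response. With 6.0.15, collectToolApprovals is
      // invoked with skipValidation: true in this case, so the missing
      // approval-request no longer throws InvalidToolApprovalError or
      // ToolCallNotFoundForApprovalError.
      previousResponseId: "resp_abc123", // illustrative id
    },
  },
});

console.log(result.text);
```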
package/dist/index.js CHANGED
@@ -1001,7 +1001,7 @@ var import_provider_utils3 = require("@zenning/provider-utils");
  var import_provider_utils4 = require("@zenning/provider-utils");

  // src/version.ts
- var VERSION = true ? "6.0.13" : "0.0.0-test";
+ var VERSION = true ? "6.0.14" : "0.0.0-test";

  // src/util/download/download.ts
  var download = async ({ url }) => {
@@ -1800,8 +1800,7 @@ var assistantModelMessageSchema = import_v45.z.object({
  reasoningPartSchema,
  toolCallPartSchema,
  toolResultPartSchema,
- toolApprovalRequestSchema,
- compactionPartSchema
+ toolApprovalRequestSchema
  ])
  )
  ]),
@@ -2383,7 +2382,8 @@ function prepareRetries({

  // src/generate-text/collect-tool-approvals.ts
  function collectToolApprovals({
- messages
+ messages,
+ skipValidation = false
  }) {
  const lastMessage = messages.at(-1);
  if ((lastMessage == null ? void 0 : lastMessage.role) != "tool") {
@@ -2428,19 +2428,25 @@ function collectToolApprovals({
  for (const approvalResponse of approvalResponses) {
  const approvalRequest = toolApprovalRequestsByApprovalId[approvalResponse.approvalId];
  if (approvalRequest == null) {
- throw new InvalidToolApprovalError({
- approvalId: approvalResponse.approvalId
- });
+ if (!skipValidation) {
+ throw new InvalidToolApprovalError({
+ approvalId: approvalResponse.approvalId
+ });
+ }
+ continue;
  }
  if (toolResults[approvalRequest.toolCallId] != null) {
  continue;
  }
  const toolCall = toolCallsByToolCallId[approvalRequest.toolCallId];
  if (toolCall == null) {
- throw new ToolCallNotFoundForApprovalError({
- toolCallId: approvalRequest.toolCallId,
- approvalId: approvalRequest.approvalId
- });
+ if (!skipValidation) {
+ throw new ToolCallNotFoundForApprovalError({
+ toolCallId: approvalRequest.toolCallId,
+ approvalId: approvalRequest.approvalId
+ });
+ }
+ continue;
  }
  const approval = {
  approvalRequest,
@@ -3668,10 +3674,13 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a16, _b, _c, _d, _e, _f, _g, _h;
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i;
  const initialMessages = initialPrompt.messages;
  const responseMessages = [];
- const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
+ const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+ messages: initialMessages,
+ skipValidation: ((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId) != null
+ });
  const localApprovedToolApprovals = approvedToolApprovals.filter(
  (toolApproval) => !toolApproval.toolCall.providerExecuted
  );
@@ -3761,21 +3770,21 @@ async function generateText({
  experimental_context
  }));
  const stepModel = resolveLanguageModel(
- (_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
+ (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
  );
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
- system: (_b = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _b : initialPrompt.system,
- messages: (_c = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _c : stepInputMessages
+ system: (_c = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _c : initialPrompt.system,
+ messages: (_d = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _d : stepInputMessages
  },
  supportedUrls: await stepModel.supportedUrls,
  download: download2
  });
- experimental_context = (_d = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _d : experimental_context;
+ experimental_context = (_e = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _e : experimental_context;
  const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
  tools,
- toolChoice: (_e = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _e : toolChoice,
- activeTools: (_f = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _f : activeTools
+ toolChoice: (_f = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _f : toolChoice,
+ activeTools: (_g = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _g : activeTools
  });
  currentModelResponse = await retry(
  () => {
@@ -3995,7 +4004,7 @@ async function generateText({
  usage: asLanguageModelUsage(currentModelResponse.usage),
  warnings: currentModelResponse.warnings,
  providerMetadata: currentModelResponse.providerMetadata,
- request: (_g = currentModelResponse.request) != null ? _g : {},
+ request: (_h = currentModelResponse.request) != null ? _h : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -4003,7 +4012,7 @@ async function generateText({
  }
  });
  logWarnings({
- warnings: (_h = currentModelResponse.warnings) != null ? _h : [],
+ warnings: (_i = currentModelResponse.warnings) != null ? _i : [],
  provider: stepModel.provider,
  model: stepModel.modelId
  });
@@ -6169,6 +6178,7 @@ var DefaultStreamTextResult = class {
  tracer,
  endWhenDone: false,
  fn: async (rootSpanArg) => {
+ var _a16;
  rootSpan = rootSpanArg;
  const initialPrompt = await standardizePrompt({
  system,
@@ -6177,7 +6187,10 @@ var DefaultStreamTextResult = class {
  });
  const initialMessages = initialPrompt.messages;
  const initialResponseMessages = [];
- const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({ messages: initialMessages });
+ const { approvedToolApprovals, deniedToolApprovals } = collectToolApprovals({
+ messages: initialMessages,
+ skipValidation: ((_a16 = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _a16.previousResponseId) != null
+ });
  if (deniedToolApprovals.length > 0 || approvedToolApprovals.length > 0) {
  const providerExecutedToolApprovals = [
  ...approvedToolApprovals,
@@ -6287,7 +6300,7 @@ var DefaultStreamTextResult = class {
  responseMessages,
  usage
  }) {
- var _a16, _b, _c, _d, _e, _f;
+ var _a17, _b, _c, _d, _e, _f;
  const includeRawChunks2 = self.includeRawChunks;
  stepFinish = new import_provider_utils19.DelayedPromise();
  const stepInputMessages = [...initialMessages, ...responseMessages];
@@ -6299,7 +6312,7 @@ var DefaultStreamTextResult = class {
  experimental_context
  }));
  const stepModel = resolveLanguageModel(
- (_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
+ (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model
  );
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
@@ -6412,7 +6425,7 @@ var DefaultStreamTextResult = class {
  streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a17, _b2, _c2, _d2, _e2;
+ var _a18, _b2, _c2, _d2, _e2;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -6485,7 +6498,7 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
  timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
  modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };