@zenning/openai 2.2.0 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -2438,6 +2438,11 @@ var localShellCallItem = import_v416.z.object({
  env: import_v416.z.record(import_v416.z.string(), import_v416.z.string()).optional()
  })
  });
+ var sourceExecutionFileCodeInterpreterItem = import_v416.z.object({
+ containerId: import_v416.z.string(),
+ fileId: import_v416.z.string(),
+ filename: import_v416.z.string()
+ });
  var imageGenerationCallItem = import_v416.z.object({
  type: import_v416.z.literal("image_generation_call"),
  id: import_v416.z.string(),
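The new sourceExecutionFileCodeInterpreterItem schema describes a file produced inside a code-interpreter container. A minimal standalone sketch of the shape it accepts, using a plain zod import in place of the bundled import_v416 alias (sample values are illustrative):

    import { z } from "zod";

    const sourceExecutionFileCodeInterpreterItem = z.object({
      containerId: z.string(), // code-interpreter container that produced the file
      fileId: z.string(),      // id of the generated file
      filename: z.string()     // e.g. "chart.png"
    });

    // Parses cleanly; a missing or non-string field throws a ZodError.
    sourceExecutionFileCodeInterpreterItem.parse({
      containerId: "cntr_123",
      fileId: "cfile_456",
      filename: "chart.png"
    });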
@@ -2663,7 +2668,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  const {
  args: body,
  warnings,
@@ -2673,6 +2678,7 @@ var OpenAIResponsesLanguageModel = class {
  path: "/responses",
  modelId: this.modelId
  });
+ const providerKey = this.config.provider.replace(".responses", "");
  const {
  responseHeaders,
  value: response,
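The new providerKey derives the provider-metadata key from the configured provider name by stripping the ".responses" suffix, so a provider registered as "openai.responses" writes its metadata under "openai". A one-line illustration:

    // "openai.responses" -> "openai"; names without the suffix pass through unchanged.
    const providerKey = "openai.responses".replace(".responses", ""); // "openai"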
@@ -2721,7 +2727,12 @@ var OpenAIResponsesLanguageModel = class {
  quote: import_v416.z.string().nullish()
  }),
  import_v416.z.object({
- type: import_v416.z.literal("container_file_citation")
+ type: import_v416.z.literal("container_file_citation"),
+ container_id: import_v416.z.string(),
+ end_index: import_v416.z.number(),
+ file_id: import_v416.z.string(),
+ filename: import_v416.z.string(),
+ start_index: import_v416.z.number()
  })
  ])
  )
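The container_file_citation branch of the annotation union previously validated only the type tag; it now captures the full payload. An annotation like the following (snake_case fields as returned by the OpenAI Responses API; values illustrative) is now preserved through parsing rather than stripped:

    const annotation = {
      type: "container_file_citation",
      container_id: "cntr_123",
      file_id: "cfile_456",
      filename: "report.csv",
      start_index: 10,
      end_index: 42
    };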
@@ -2843,7 +2854,8 @@ var OpenAIResponsesLanguageModel = class {
  text: contentPart.text,
  providerMetadata: {
  openai: {
- itemId: part.id
+ itemId: part.id,
+ annotations: contentPart.annotations
  }
  }
  });
@@ -2868,6 +2880,19 @@ var OpenAIResponsesLanguageModel = class {
  startIndex: (_m = annotation.start_index) != null ? _m : void 0,
  endIndex: (_n = annotation.end_index) != null ? _n : void 0
  });
+ } else if (annotation.type === "container_file_citation") {
+ content.push({
+ type: "source",
+ sourceType: "executionFile",
+ id: (_q = (_p = (_o = this.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : (0, import_provider_utils14.generateId)(),
+ providerMetadata: {
+ [providerKey]: {
+ containerId: annotation.container_id,
+ fileId: annotation.file_id,
+ filename: annotation.filename
+ }
+ }
+ });
  }
  }
  }
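With this branch, container_file_citation annotations surface from doGenerate as source content parts with sourceType "executionFile", keyed under the providerKey computed earlier. A hedged consumption sketch, assuming the AI SDK-style generateText API this provider plugs into and an openai provider instance (both names are assumptions, not shown in this diff):

    // Assumed imports (illustrative): generateText from the surrounding SDK,
    // openai from this provider package.
    const result = await generateText({
      model: openai.responses("gpt-4.1"), // model id illustrative
      prompt: "Run the analysis and cite the files you produce."
    });

    for (const part of result.content) {
      if (part.type === "source" && part.sourceType === "executionFile") {
        // Metadata is keyed by the provider name minus ".responses", e.g. "openai".
        const meta = part.providerMetadata?.openai;
        console.log(meta?.containerId, meta?.fileId, meta?.filename);
      }
    }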
@@ -2939,13 +2964,13 @@ var OpenAIResponsesLanguageModel = class {
  toolName: "file_search",
  result: {
  queries: part.queries,
- results: (_p = (_o = part.results) == null ? void 0 : _o.map((result) => ({
+ results: (_s = (_r = part.results) == null ? void 0 : _r.map((result) => ({
  attributes: result.attributes,
  fileId: result.file_id,
  filename: result.filename,
  score: result.score,
  text: result.text
- }))) != null ? _p : null
+ }))) != null ? _s : null
  },
  providerExecuted: true
  });
@@ -2987,15 +3012,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_q = response.incomplete_details) == null ? void 0 : _q.reason,
+ finishReason: (_t = response.incomplete_details) == null ? void 0 : _t.reason,
  hasFunctionCall
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_s = (_r = response.usage.output_tokens_details) == null ? void 0 : _r.reasoning_tokens) != null ? _s : void 0,
- cachedInputTokens: (_u = (_t = response.usage.input_tokens_details) == null ? void 0 : _t.cached_tokens) != null ? _u : void 0
+ reasoningTokens: (_v = (_u = response.usage.output_tokens_details) == null ? void 0 : _u.reasoning_tokens) != null ? _v : void 0,
+ cachedInputTokens: (_x = (_w = response.usage.input_tokens_details) == null ? void 0 : _w.cached_tokens) != null ? _x : void 0
  },
  request: { body },
  response: {
@@ -3042,6 +3067,7 @@ var OpenAIResponsesLanguageModel = class {
  const logprobs = [];
  let responseId = null;
  const ongoingToolCalls = {};
+ const ongoingAnnotations = [];
  let hasFunctionCall = false;
  const activeReasoning = {};
  let serviceTier;
@@ -3052,7 +3078,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3128,6 +3154,7 @@ var OpenAIResponsesLanguageModel = class {
  providerExecuted: true
  });
  } else if (value.item.type === "message") {
+ ongoingAnnotations.splice(0, ongoingAnnotations.length);
  controller.enqueue({
  type: "text-start",
  id: value.item.id,
@@ -3153,7 +3180,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  });
  }
- } else if (isResponseOutputItemDoneChunk(value)) {
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type !== "message") {
  if (value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
  hasFunctionCall = true;
@@ -3274,11 +3301,6 @@ var OpenAIResponsesLanguageModel = class {
  openai: { itemId: value.item.id }
  }
  });
- } else if (value.item.type === "message") {
- controller.enqueue({
- type: "text-end",
- id: value.item.id
- });
  } else if (isResponseOutputItemDoneReasoningChunk(value)) {
  const activeReasoningPart = activeReasoning[value.item.id];
  for (const summaryIndex of activeReasoningPart.summaryParts) {
@@ -3407,6 +3429,7 @@ var OpenAIResponsesLanguageModel = class {
  serviceTier = value.response.service_tier;
  }
  } else if (isResponseAnnotationAddedChunk(value)) {
+ ongoingAnnotations.push(value.annotation);
  if (value.annotation.type === "url_citation") {
  controller.enqueue({
  type: "source",
@@ -3427,7 +3450,31 @@ var OpenAIResponsesLanguageModel = class {
  startIndex: (_x = value.annotation.start_index) != null ? _x : void 0,
  endIndex: (_y = value.annotation.end_index) != null ? _y : void 0
  });
+ } else if (value.annotation.type === "container_file_citation") {
+ controller.enqueue({
+ type: "source",
+ sourceType: "executionFile",
+ id: (_B = (_A = (_z = self.config).generateId) == null ? void 0 : _A.call(_z)) != null ? _B : (0, import_provider_utils14.generateId)(),
+ providerMetadata: {
+ openai: {
+ containerId: value.annotation.container_id,
+ fileId: value.annotation.file_id,
+ filename: value.annotation.filename
+ }
+ }
+ });
  }
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ annotations: ongoingAnnotations
+ }
+ }
+ });
  } else if (isErrorChunk(value)) {
  controller.enqueue({ type: "error", error: value });
  }
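In the streaming path, container_file_citation annotations are likewise emitted as executionFile sources, and the "message" completion case moved out of the earlier isResponseOutputItemDoneChunk branch so that text-end can attach the annotations accumulated in ongoingAnnotations (cleared on each new message at text-start). A hedged sketch of reading both part types, assuming an AI SDK-style streamText call (the streamText and openai identifiers and the model id are assumptions):

    // Sketch only: streamText, openai, and the model id are illustrative.
    const stream = streamText({
      model: openai.responses("gpt-4.1"),
      prompt: "Generate a CSV and cite it."
    });

    for await (const part of stream.fullStream) {
      if (part.type === "source" && part.sourceType === "executionFile") {
        console.log("execution file:", part.providerMetadata?.openai?.filename);
      }
      if (part.type === "text-end") {
        // All annotations seen for this message, in arrival order.
        console.log("annotations:", part.providerMetadata?.openai?.annotations);
      }
    }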
@@ -3624,6 +3671,14 @@ var responseAnnotationAddedSchema = import_v416.z.object({
  start_index: import_v416.z.number().nullish(),
  end_index: import_v416.z.number().nullish(),
  quote: import_v416.z.string().nullish()
+ }),
+ import_v416.z.object({
+ type: import_v416.z.literal("container_file_citation"),
+ container_id: import_v416.z.string(),
+ end_index: import_v416.z.number(),
+ file_id: import_v416.z.string(),
+ filename: import_v416.z.string(),
+ start_index: import_v416.z.number()
  })
  ])
  });
@@ -3775,6 +3830,15 @@ var openaiResponsesProviderOptionsSchema = import_v416.z.object({
  textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
  user: import_v416.z.string().nullish()
  });
+ var openaiResponsesTextUIPartProviderMetadataSchema = import_v416.z.object({
+ openai: import_v416.z.object({
+ itemId: import_v416.z.string(),
+ annotations: import_v416.z.array(responseAnnotationAddedSchema.shape.annotation)
+ })
+ });
+ var openaiSourceExecutionFileProviderMetadataSchema = import_v416.z.object({
+ openai: sourceExecutionFileCodeInterpreterItem
+ });
 
  // src/speech/openai-speech-model.ts
  var import_provider_utils15 = require("@zenning/provider-utils");
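These two module-level schemas give names to the metadata shapes introduced above: text parts carry { itemId, annotations } and executionFile sources carry the sourceExecutionFileCodeInterpreterItem fields. Whether they are re-exported from the package is not visible in this diff; a standalone restatement of the text-part shape (zod alias expanded, annotation union abbreviated to z.unknown()):

    import { z } from "zod";

    const textPartProviderMetadata = z.object({
      openai: z.object({
        itemId: z.string(),
        annotations: z.array(z.unknown()) // annotation union in the dist file
      })
    });

    textPartProviderMetadata.parse({
      openai: { itemId: "msg_abc", annotations: [] }
    });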
@@ -4108,7 +4172,7 @@ var openaiTranscriptionResponseSchema = import_v419.z.object({
  });
 
  // src/version.ts
- var VERSION = true ? "2.2.0" : "0.0.0-test";
+ var VERSION = true ? "2.3.0" : "0.0.0-test";
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {