@zenning/openai 1.4.0 → 1.4.2

This diff shows the changes between package versions that have been publicly released to one of the supported registries. The information is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2162,6 +2162,7 @@ var OpenAIResponsesLanguageModel = class {
2162
2162
  // provider options:
2163
2163
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2164
2164
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2165
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
2165
2166
  previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2166
2167
  store: openaiOptions == null ? void 0 : openaiOptions.store,
2167
2168
  user: openaiOptions == null ? void 0 : openaiOptions.user,
@@ -2181,7 +2182,6 @@ var OpenAIResponsesLanguageModel = class {
2181
2182
  truncation: "auto"
2182
2183
  }
2183
2184
  };
2184
- console.log("baseArgs", JSON.stringify(baseArgs));
2185
2185
  if (modelConfig.isReasoningModel) {
2186
2186
  if (baseArgs.temperature != null) {
2187
2187
  baseArgs.temperature = void 0;
@@ -2336,6 +2336,10 @@ var OpenAIResponsesLanguageModel = class {
2336
2336
  args: output.arguments
2337
2337
  }));
2338
2338
  const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
2339
+ console.log(JSON.stringify({
2340
+ msg: "ai-sdk: content annotations",
2341
+ annotations: outputTextElements.flatMap((content) => content.annotations)
2342
+ }));
2339
2343
  return {
2340
2344
  text: outputTextElements.map((content) => content.text).join("\n"),
2341
2345
  sources: outputTextElements.flatMap(
@@ -2490,6 +2494,10 @@ var OpenAIResponsesLanguageModel = class {
2490
2494
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2491
2495
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2492
2496
  } else if (isResponseAnnotationAddedChunk(value)) {
2497
+ console.log(JSON.stringify({
2498
+ msg: "ai-sdk: source (stream)",
2499
+ source: value.annotation
2500
+ }));
2493
2501
  controller.enqueue({
2494
2502
  type: "source",
2495
2503
  source: {
@@ -2668,6 +2676,7 @@ function getResponsesModelConfig(modelId) {
2668
2676
  var openaiResponsesProviderOptionsSchema = z7.object({
2669
2677
  metadata: z7.any().nullish(),
2670
2678
  parallelToolCalls: z7.boolean().nullish(),
2679
+ include: z7.array(z7.string()).nullish(),
2671
2680
  previousResponseId: z7.string().nullish(),
2672
2681
  forceNoTemperature: z7.boolean().nullish(),
2673
2682
  store: z7.boolean().nullish(),