@ai-sdk/openai 0.0.64 → 0.0.66

This diff shows the published contents of these package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
 # @ai-sdk/openai
 
+## 0.0.66
+
+### Patch Changes
+
+- 3f29c10: feat (provider/openai): support metadata field for distillation
+
+## 0.0.65
+
+### Patch Changes
+
+- e8aed44: Add OpenAI cached prompt tokens to experimental_providerMetadata for generateText and streamText
+
 ## 0.0.64
 
 ### Patch Changes
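For context, a minimal sketch of how these two patch changes surface to SDK users, assuming the `ai` package version paired with this provider release accepts and returns `experimental_providerMetadata`; the model id, prompt, and metadata values are illustrative:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("gpt-4o"),
  prompt: "Hello!",
  // 0.0.66: forwarded to OpenAI's `metadata` request field (used for distillation).
  experimental_providerMetadata: {
    openai: { metadata: { run: "distillation-batch-1" } },
  },
});

// 0.0.65: cached prompt tokens, when the API reports them, appear here.
console.log(result.experimental_providerMetadata?.openai?.cachedPromptTokens);
```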
package/dist/index.js CHANGED
@@ -250,7 +250,7 @@ var OpenAIChatLanguageModel = class {
     seed,
     providerMetadata
   }) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g;
     const type = mode.type;
     const warnings = [];
     if (topK != null) {
@@ -297,6 +297,7 @@ var OpenAIChatLanguageModel = class {
       // openai specific settings:
       max_completion_tokens: (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.maxCompletionTokens) != null ? _b : void 0,
       store: (_d = (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store) != null ? _d : void 0,
+      metadata: (_f = (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.metadata) != null ? _f : void 0,
       // response format:
       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
       // messages:
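The `_e`/`_f` chains above are the bundler's down-leveled optional chaining; a sketch of the source-level pattern the new line presumably compiles from (the type alias is hypothetical, for illustration only):

```ts
// `x?.y ?? undefined` is what esbuild lowers into
// `(_f = (_e = x) == null ? void 0 : _e.y) != null ? _f : void 0`.
type OpenAIProviderMetadata = { openai?: { metadata?: Record<string, string> } };

function metadataField(providerMetadata?: OpenAIProviderMetadata) {
  return providerMetadata?.openai?.metadata ?? undefined;
}
```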
@@ -334,7 +335,7 @@ var OpenAIChatLanguageModel = class {
             json_schema: {
               schema: mode.schema,
               strict: true,
-              name: (_e = mode.name) != null ? _e : "response",
+              name: (_g = mode.name) != null ? _g : "response",
               description: mode.description
             }
           } : { type: "json_object" }
@@ -384,7 +385,7 @@ var OpenAIChatLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
     const { args, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
@@ -402,13 +403,18 @@ var OpenAIChatLanguageModel = class {
     });
     const { messages: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
-    const providerMetadata = ((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? {
-      openai: {
-        reasoningTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens
+    let providerMetadata;
+    if (((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null || ((_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens_details) == null ? void 0 : _d.cached_tokens) != null) {
+      providerMetadata = { openai: {} };
+      if (((_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null) {
+        providerMetadata.openai.reasoningTokens = (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens_details) == null ? void 0 : _h.reasoning_tokens;
       }
-    } : void 0;
+      if (((_j = (_i = response.usage) == null ? void 0 : _i.prompt_tokens_details) == null ? void 0 : _j.cached_tokens) != null) {
+        providerMetadata.openai.cachedPromptTokens = (_l = (_k = response.usage) == null ? void 0 : _k.prompt_tokens_details) == null ? void 0 : _l.cached_tokens;
+      }
+    }
     return {
-      text: (_m = choice.message.content) != null ? _m : void 0,
+      text: (_m = choice.message.content) != null ? _m : void 0,
       toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
         {
           toolCallType: "function",
@@ -416,7 +422,7 @@ var OpenAIChatLanguageModel = class {
           toolName: choice.message.function_call.name,
           args: choice.message.function_call.arguments
         }
-      ] : (_f = choice.message.tool_calls) == null ? void 0 : _f.map((toolCall) => {
+      ] : (_n = choice.message.tool_calls) == null ? void 0 : _n.map((toolCall) => {
         var _a2;
         return {
           toolCallType: "function",
@@ -427,8 +433,8 @@ var OpenAIChatLanguageModel = class {
       }),
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : NaN,
-        completionTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : NaN
+        promptTokens: (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens) != null ? _p : NaN,
+        completionTokens: (_r = (_q = response.usage) == null ? void 0 : _q.completion_tokens) != null ? _r : NaN
       },
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
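With this change, a non-streaming response whose usage block carries both detail objects yields provider metadata shaped as below; either key is simply omitted when its usage detail is absent, and `providerMetadata` stays undefined when neither is present. Token counts are illustrative:

```ts
// Shape of the `providerMetadata` value doGenerate now builds:
const providerMetadata = {
  openai: {
    reasoningTokens: 128,     // from usage.completion_tokens_details.reasoning_tokens
    cachedPromptTokens: 1024, // from usage.prompt_tokens_details.cached_tokens
  },
};
```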
@@ -505,11 +511,12 @@ var OpenAIChatLanguageModel = class {
     let logprobs;
     let isFirstChunk = true;
     const { useLegacyFunctionCalling } = this.settings;
+    let providerMetadata;
     return {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -533,6 +540,13 @@ var OpenAIChatLanguageModel = class {
                 promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
                 completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
               };
+              if (((_c = value.usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null) {
+                providerMetadata = {
+                  openai: {
+                    cachedPromptTokens: (_d = value.usage.prompt_tokens_details) == null ? void 0 : _d.cached_tokens
+                  }
+                };
+              }
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
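On the streaming path, the cached-token count is captured from the final usage chunk into the `providerMetadata` variable declared just before the TransformStream, then attached to the finish event via the conditional spread further down. A minimal consumer sketch, assuming the paired `ai` package exposes `experimental_providerMetadata` in streamText's `onFinish` callback (model id and prompt illustrative):

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

await streamText({
  model: openai("gpt-4o"),
  prompt: "Hello!",
  onFinish({ experimental_providerMetadata }) {
    // Populated from prompt_tokens_details.cached_tokens on the usage chunk.
    console.log(experimental_providerMetadata?.openai?.cachedPromptTokens);
  },
});
```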
@@ -579,7 +593,7 @@ var OpenAIChatLanguageModel = class {
                     message: `Expected 'id' to be a string.`
                   });
                 }
-                if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+                if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
                   throw new import_provider2.InvalidResponseDataError({
                     data: toolCallDelta,
                     message: `Expected 'function.name' to be a string.`
@@ -590,11 +604,11 @@ var OpenAIChatLanguageModel = class {
                   type: "function",
                   function: {
                     name: toolCallDelta.function.name,
-                    arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                    arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
                   }
                 };
                 const toolCall2 = toolCalls[index];
-                if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+                if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
                   if (toolCall2.function.arguments.length > 0) {
                     controller.enqueue({
                       type: "tool-call-delta",
@@ -608,7 +622,7 @@ var OpenAIChatLanguageModel = class {
                     controller.enqueue({
                       type: "tool-call",
                       toolCallType: "function",
-                      toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
+                      toolCallId: (_i = toolCall2.id) != null ? _i : (0, import_provider_utils3.generateId)(),
                       toolName: toolCall2.function.name,
                       args: toolCall2.function.arguments
                     });
@@ -617,21 +631,21 @@ var OpenAIChatLanguageModel = class {
                 continue;
               }
               const toolCall = toolCalls[index];
-              if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
-                toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+              if (((_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null) {
+                toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
               }
               controller.enqueue({
                 type: "tool-call-delta",
                 toolCallType: "function",
                 toolCallId: toolCall.id,
                 toolName: toolCall.function.name,
-                argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                argsTextDelta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
               });
-              if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+              if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
                 controller.enqueue({
                   type: "tool-call",
                   toolCallType: "function",
-                  toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
+                  toolCallId: (_p = toolCall.id) != null ? _p : (0, import_provider_utils3.generateId)(),
                   toolName: toolCall.function.name,
                   args: toolCall.function.arguments
                 });
@@ -648,7 +662,8 @@ var OpenAIChatLanguageModel = class {
             usage: {
               promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
               completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-            }
+            },
+            ...providerMetadata != null ? { providerMetadata } : {}
           });
         }
       })
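The conditional spread keeps the `providerMetadata` key off the finish event entirely when no usage details were seen, rather than emitting it with an `undefined` value. The same pattern in isolation (the `declare` is just to make the sketch self-contained):

```ts
// Include a key only when its value exists.
declare const providerMetadata: { openai: Record<string, unknown> } | undefined;

const finishEvent = {
  type: "finish" as const,
  ...(providerMetadata != null ? { providerMetadata } : {}),
};
```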
@@ -662,6 +677,9 @@ var OpenAIChatLanguageModel = class {
 var openAITokenUsageSchema = import_zod2.z.object({
   prompt_tokens: import_zod2.z.number().nullish(),
   completion_tokens: import_zod2.z.number().nullish(),
+  prompt_tokens_details: import_zod2.z.object({
+    cached_tokens: import_zod2.z.number().nullish()
+  }).nullish(),
   completion_tokens_details: import_zod2.z.object({
     reasoning_tokens: import_zod2.z.number().nullish()
   }).nullish()
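A standalone restatement of the widened schema and a payload it now parses (token counts illustrative). Because every detail block is `.nullish()`, older responses without `prompt_tokens_details` continue to validate:

```ts
import { z } from "zod";

// Same shape as openAITokenUsageSchema after this change.
const tokenUsage = z.object({
  prompt_tokens: z.number().nullish(),
  completion_tokens: z.number().nullish(),
  prompt_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
  completion_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish(),
});

tokenUsage.parse({
  prompt_tokens: 2048,
  completion_tokens: 256,
  prompt_tokens_details: { cached_tokens: 1024 },
  completion_tokens_details: { reasoning_tokens: 128 },
});
```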