ai 5.0.12 → 5.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
1
1
  # ai
2
2
 
3
+ ## 5.0.14
4
+
5
+ ### Patch Changes
6
+
7
+ - 7729e32: fix(ai): expand mp3 detection to support all mpeg frame headers
8
+
9
+ ## 5.0.13
10
+
11
+ ### Patch Changes
12
+
13
+ - a7b2e66: Added providerOptions to agent stream and generate calls
14
+ - 9bed210: ### `extractReasoningMiddleware()`: delay sending `text-start` chunk to prevent rendering final text before reasoning
15
+
16
+ When wrapping a text stream in `extractReasoningMiddleware()`, delay queueing the `text-start` chunk until either a `reasoning-start` chunk was queued or the first `text-delta` chunk is about to be queued, whichever comes first.
17
+
18
+ https://github.com/vercel/ai/pull/8036
19
+
3
20
  ## 5.0.12
4
21
 
5
22
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -2089,6 +2089,12 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
2089
2089
  results that can be fully encapsulated in the provider.
2090
2090
  */
2091
2091
  providerMetadata?: ProviderMetadata;
2092
+ /**
2093
+ Additional provider-specific metadata. They are passed through
2094
+ to the provider from the AI SDK and enable provider-specific
2095
+ functionality that can be fully encapsulated in the provider.
2096
+ */
2097
+ providerOptions?: ProviderOptions;
2092
2098
  }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2093
2099
  stream(options: Prompt & {
2094
2100
  /**
@@ -2097,6 +2103,12 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
2097
2103
  results that can be fully encapsulated in the provider.
2098
2104
  */
2099
2105
  providerMetadata?: ProviderMetadata;
2106
+ /**
2107
+ Additional provider-specific metadata. They are passed through
2108
+ to the provider from the AI SDK and enable provider-specific
2109
+ functionality that can be fully encapsulated in the provider.
2110
+ */
2111
+ providerOptions?: ProviderOptions;
2100
2112
  }): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
2101
2113
  }
2102
2114
 
package/dist/index.d.ts CHANGED
@@ -2089,6 +2089,12 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
2089
2089
  results that can be fully encapsulated in the provider.
2090
2090
  */
2091
2091
  providerMetadata?: ProviderMetadata;
2092
+ /**
2093
+ Additional provider-specific metadata. They are passed through
2094
+ to the provider from the AI SDK and enable provider-specific
2095
+ functionality that can be fully encapsulated in the provider.
2096
+ */
2097
+ providerOptions?: ProviderOptions;
2092
2098
  }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
2093
2099
  stream(options: Prompt & {
2094
2100
  /**
@@ -2097,6 +2103,12 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
2097
2103
  results that can be fully encapsulated in the provider.
2098
2104
  */
2099
2105
  providerMetadata?: ProviderMetadata;
2106
+ /**
2107
+ Additional provider-specific metadata. They are passed through
2108
+ to the provider from the AI SDK and enable provider-specific
2109
+ functionality that can be fully encapsulated in the provider.
2110
+ */
2111
+ providerOptions?: ProviderOptions;
2100
2112
  }): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
2101
2113
  }
2102
2114
 
package/dist/index.js CHANGED
@@ -600,6 +600,31 @@ var audioMediaTypeSignatures = [
600
600
  bytesPrefix: [255, 251],
601
601
  base64Prefix: "//s="
602
602
  },
603
+ {
604
+ mediaType: "audio/mpeg",
605
+ bytesPrefix: [255, 250],
606
+ base64Prefix: "//o="
607
+ },
608
+ {
609
+ mediaType: "audio/mpeg",
610
+ bytesPrefix: [255, 243],
611
+ base64Prefix: "//M="
612
+ },
613
+ {
614
+ mediaType: "audio/mpeg",
615
+ bytesPrefix: [255, 242],
616
+ base64Prefix: "//I="
617
+ },
618
+ {
619
+ mediaType: "audio/mpeg",
620
+ bytesPrefix: [255, 227],
621
+ base64Prefix: "/+M="
622
+ },
623
+ {
624
+ mediaType: "audio/mpeg",
625
+ bytesPrefix: [255, 226],
626
+ base64Prefix: "/+I="
627
+ },
603
628
  {
604
629
  mediaType: "audio/wav",
605
630
  bytesPrefix: [82, 73, 70, 70],
@@ -7730,10 +7755,19 @@ function extractReasoningMiddleware({
7730
7755
  wrapStream: async ({ doStream }) => {
7731
7756
  const { stream, ...rest } = await doStream();
7732
7757
  const reasoningExtractions = {};
7758
+ let delayedTextStart;
7733
7759
  return {
7734
7760
  stream: stream.pipeThrough(
7735
7761
  new TransformStream({
7736
7762
  transform: (chunk, controller) => {
7763
+ if (chunk.type === "text-start") {
7764
+ delayedTextStart = chunk;
7765
+ return;
7766
+ }
7767
+ if (chunk.type === "text-end" && delayedTextStart) {
7768
+ controller.enqueue(delayedTextStart);
7769
+ delayedTextStart = void 0;
7770
+ }
7737
7771
  if (chunk.type !== "text-delta") {
7738
7772
  controller.enqueue(chunk);
7739
7773
  return;
@@ -7760,17 +7794,23 @@ function extractReasoningMiddleware({
7760
7794
  id: `reasoning-${activeExtraction.idCounter}`
7761
7795
  });
7762
7796
  }
7763
- controller.enqueue(
7764
- activeExtraction.isReasoning ? {
7797
+ if (activeExtraction.isReasoning) {
7798
+ controller.enqueue({
7765
7799
  type: "reasoning-delta",
7766
7800
  delta: prefix + text2,
7767
7801
  id: `reasoning-${activeExtraction.idCounter}`
7768
- } : {
7802
+ });
7803
+ } else {
7804
+ if (delayedTextStart) {
7805
+ controller.enqueue(delayedTextStart);
7806
+ delayedTextStart = void 0;
7807
+ }
7808
+ controller.enqueue({
7769
7809
  type: "text-delta",
7770
7810
  delta: prefix + text2,
7771
7811
  id: activeExtraction.textId
7772
- }
7773
- );
7812
+ });
7813
+ }
7774
7814
  activeExtraction.afterSwitch = false;
7775
7815
  if (activeExtraction.isReasoning) {
7776
7816
  activeExtraction.isFirstReasoning = false;