ai 3.3.30 → 3.3.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # ai
 
+ ## 3.3.32
+
+ ### Patch Changes
+
+ - ce7a4af: feat (ai/core): support providerMetadata in functions
+
+ ## 3.3.31
+
+ ### Patch Changes
+
+ - 561fd7e: feat (ai/core): add output: enum to generateObject
+
  ## 3.3.30
 
  ### Patch Changes
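Taken together, these entries add an `output: 'enum'` mode to `generateObject` (3.3.31) and pass `experimental_providerMetadata` through the core functions (3.3.32). A minimal usage sketch of the enum mode follows; the provider import and model id are illustrative assumptions, not part of this diff:

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package, not part of this diff

// New in 3.3.31: constrain the generated value to a fixed list of strings.
const { object: sentiment } = await generateObject({
  model: openai('gpt-4o-mini'), // hypothetical model id
  output: 'enum',
  enum: ['positive', 'neutral', 'negative'],
  prompt: 'Classify the sentiment of: "This release is great!"',
});
// `sentiment` is narrowed to 'positive' | 'neutral' | 'negative'.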
package/dist/index.d.mts CHANGED
@@ -706,6 +706,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -762,6 +768,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -770,6 +782,56 @@ Optional telemetry configuration (experimental).
  };
  }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
  /**
+ Generate a value from an enum (limited list of string values) using a language model.
+
+ This function does not stream the output.
+
+ @return
+ A result object that contains the generated value, the finish reason, the token usage, and additional information.
+ */
+ declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'enum';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The enum values that the model should use.
+ */
+ enum: Array<ENUM>;
+ /**
+ The mode to use for object generation.
+
+ The schema is converted into a JSON schema and used in one of the following ways:
+
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+
+ Please note that most providers do not support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
+ };
+ }): Promise<GenerateObjectResult<ENUM>>;
+ /**
  Generate JSON with any schema for a given prompt using a language model.
 
  This function does not stream the output. If you want to stream the output, use `streamObject` instead.
@@ -792,6 +854,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -989,6 +1057,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<OBJECT>;
@@ -1050,6 +1124,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<Array<ELEMENT>>;
@@ -1085,6 +1165,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<JSONValue>;
@@ -1390,7 +1476,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, experimental_telemetry: telemetry, _internal: { generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1425,6 +1511,12 @@ By default, it's set to 0, which will disable the feature.
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -1688,7 +1780,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1719,6 +1811,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Enable streaming of tool call deltas as they are generated. Disabled by default.
  */
  experimental_toolCallStreaming?: boolean;
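The typings above add `experimental_providerMetadata?: ProviderMetadata` to every core call. A hedged sketch of forwarding provider-specific options from the caller side; the provider key and payload below are invented placeholders, since the metadata is interpreted only by the targeted provider:

import { generateText } from 'ai';
import type { LanguageModel } from 'ai';

async function run(model: LanguageModel) {
  const { text } = await generateText({
    model,
    prompt: 'Write a haiku about diffs.',
    // Outer key selects the provider; the inner object is opaque to the SDK.
    experimental_providerMetadata: {
      'example-provider': { cacheHint: 'aggressive' }, // hypothetical key and option
    },
  });
  return text;
}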
package/dist/index.d.ts CHANGED
@@ -706,6 +706,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -762,6 +768,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -770,6 +782,56 @@ Optional telemetry configuration (experimental).
  };
  }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
  /**
+ Generate a value from an enum (limited list of string values) using a language model.
+
+ This function does not stream the output.
+
+ @return
+ A result object that contains the generated value, the finish reason, the token usage, and additional information.
+ */
+ declare function generateObject<ENUM extends string>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'enum';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The enum values that the model should use.
+ */
+ enum: Array<ENUM>;
+ /**
+ The mode to use for object generation.
+
+ The schema is converted into a JSON schema and used in one of the following ways:
+
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+
+ Please note that most providers do not support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ * Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ generateId?: () => string;
+ currentDate?: () => Date;
+ };
+ }): Promise<GenerateObjectResult<ENUM>>;
+ /**
  Generate JSON with any schema for a given prompt using a language model.
 
  This function does not stream the output. If you want to stream the output, use `streamObject` instead.
@@ -792,6 +854,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -989,6 +1057,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<OBJECT>;
@@ -1050,6 +1124,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<Array<ELEMENT>>;
@@ -1085,6 +1165,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: OnFinishCallback<JSONValue>;
@@ -1390,7 +1476,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, experimental_telemetry: telemetry, _internal: { generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, _internal: { generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1425,6 +1511,12 @@ By default, it's set to 0, which will disable the feature.
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
@@ -1688,7 +1780,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing different stream types and additional information.
  */
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1719,6 +1811,12 @@ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
  /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
  Enable streaming of tool call deltas as they are generated. Disabled by default.
  */
  experimental_toolCallStreaming?: boolean;
package/dist/index.js CHANGED
@@ -1537,7 +1537,7 @@ var objectOutputStrategy = (schema) => ({
  var arrayOutputStrategy = (schema) => {
  const { $schema, ...itemSchema } = schema.jsonSchema;
  return {
- type: "array",
+ type: "enum",
  // wrap in object that contains array of elements, since most LLMs will not
  // be able to generate an array directly:
  // possible future optimization: use arrays directly when model supports grammar-guided generation
@@ -1641,15 +1641,64 @@ var arrayOutputStrategy = (schema) => {
  }
  };
  };
+ var enumOutputStrategy = (enumValues) => {
+ return {
+ type: "enum",
+ // wrap in object that contains result, since most LLMs will not
+ // be able to generate an enum value directly:
+ // possible future optimization: use enums directly when model supports top-level enums
+ jsonSchema: {
+ $schema: "http://json-schema.org/draft-07/schema#",
+ type: "object",
+ properties: {
+ result: { type: "string", enum: enumValues }
+ },
+ required: ["result"],
+ additionalProperties: false
+ },
+ validateFinalResult(value) {
+ if (!(0, import_provider9.isJSONObject)(value) || typeof value.result !== "string") {
+ return {
+ success: false,
+ error: new import_provider9.TypeValidationError({
+ value,
+ cause: 'value must be an object that contains a string in the "result" property.'
+ })
+ };
+ }
+ const result = value.result;
+ return enumValues.includes(result) ? { success: true, value: result } : {
+ success: false,
+ error: new import_provider9.TypeValidationError({
+ value,
+ cause: "value must be a string in the enum"
+ })
+ };
+ },
+ validatePartialResult() {
+ throw new import_provider9.UnsupportedFunctionalityError({
+ functionality: "partial results in enum mode"
+ });
+ },
+ createElementStream() {
+ throw new import_provider9.UnsupportedFunctionalityError({
+ functionality: "element streams in enum mode"
+ });
+ }
+ };
+ };
  function getOutputStrategy({
  output,
- schema
+ schema,
+ enumValues
  }) {
  switch (output) {
  case "object":
  return objectOutputStrategy((0, import_ui_utils.asSchema)(schema));
  case "array":
  return arrayOutputStrategy((0, import_ui_utils.asSchema)(schema));
+ case "enum":
+ return enumOutputStrategy(enumValues);
  case "no-schema":
  return noSchemaOutputStrategy;
  default: {
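Note how `enumOutputStrategy` never asks the model for a bare string: it wraps the allowed values in an object schema with a single `result` property and unwraps that property during validation. For `enumOutputStrategy(['yes', 'no'])`, the schema sent toward the model is equivalent to the following sketch:

// The wrapper schema built above, spelled out for ['yes', 'no']:
const wrappedSchema = {
  $schema: 'http://json-schema.org/draft-07/schema#',
  type: 'object',
  properties: {
    result: { type: 'string', enum: ['yes', 'no'] },
  },
  required: ['result'],
  additionalProperties: false,
} as const;
// The model produces {"result": "yes"}; validateFinalResult checks the
// wrapper shape and enum membership, then returns the unwrapped string.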
@@ -1665,9 +1714,10 @@ function validateObjectGenerationInput({
  mode,
  schema,
  schemaName,
- schemaDescription
+ schemaDescription,
+ enumValues
  }) {
- if (output != null && output !== "object" && output !== "array" && output !== "no-schema") {
+ if (output != null && output !== "object" && output !== "array" && output !== "enum" && output !== "no-schema") {
  throw new InvalidArgumentError({
  parameter: "output",
  value: output,
@@ -1703,6 +1753,13 @@ function validateObjectGenerationInput({
  message: "Schema name is not supported for no-schema output."
  });
  }
+ if (enumValues != null) {
+ throw new InvalidArgumentError({
+ parameter: "enumValues",
+ value: enumValues,
+ message: "Enum values are not supported for no-schema output."
+ });
+ }
  }
  if (output === "object") {
  if (schema == null) {
@@ -1712,6 +1769,13 @@ function validateObjectGenerationInput({
  message: "Schema is required for object output."
  });
  }
+ if (enumValues != null) {
+ throw new InvalidArgumentError({
+ parameter: "enumValues",
+ value: enumValues,
+ message: "Enum values are not supported for object output."
+ });
+ }
  }
  if (output === "array") {
  if (schema == null) {
@@ -1721,6 +1785,52 @@ function validateObjectGenerationInput({
  message: "Element schema is required for array output."
  });
  }
+ if (enumValues != null) {
+ throw new InvalidArgumentError({
+ parameter: "enumValues",
+ value: enumValues,
+ message: "Enum values are not supported for array output."
+ });
+ }
+ }
+ if (output === "enum") {
+ if (schema != null) {
+ throw new InvalidArgumentError({
+ parameter: "schema",
+ value: schema,
+ message: "Schema is not supported for enum output."
+ });
+ }
+ if (schemaDescription != null) {
+ throw new InvalidArgumentError({
+ parameter: "schemaDescription",
+ value: schemaDescription,
+ message: "Schema description is not supported for enum output."
+ });
+ }
+ if (schemaName != null) {
+ throw new InvalidArgumentError({
+ parameter: "schemaName",
+ value: schemaName,
+ message: "Schema name is not supported for enum output."
+ });
+ }
+ if (enumValues == null) {
+ throw new InvalidArgumentError({
+ parameter: "enumValues",
+ value: enumValues,
+ message: "Enum values are required for enum output."
+ });
+ }
+ for (const value of enumValues) {
+ if (typeof value !== "string") {
+ throw new InvalidArgumentError({
+ parameter: "enumValues",
+ value,
+ message: "Enum values must be strings."
+ });
+ }
+ }
  }
  }
 
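The guards above make the output modes mutually exclusive with unrelated inputs, so misconfigured calls fail before any provider request. A sketch of the one call shape that passes validation for enum output, per the checks above:

import { generateObject } from 'ai';
import type { LanguageModel } from 'ai';

async function classify(model: LanguageModel) {
  // Valid: output 'enum' requires `enum` and forbids schema, schemaName,
  // and schemaDescription.
  return generateObject({
    model,
    output: 'enum',
    enum: ['spam', 'not-spam'],
    prompt: 'Is this message spam? "WIN A FREE PHONE"',
  });
  // Adding `schema` here throws InvalidArgumentError ("Schema is not
  // supported for enum output."); omitting `enum` throws "Enum values
  // are required for enum output."
}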
@@ -1728,6 +1838,8 @@ function validateObjectGenerationInput({
  var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj-", length: 24 });
  async function generateObject({
  model,
+ enum: enumValues,
+ // rename bc enum is reserved by typescript
  schema: inputSchema,
  schemaName,
  schemaDescription,
@@ -1740,6 +1852,7 @@ async function generateObject({
  abortSignal,
  headers,
  experimental_telemetry: telemetry,
+ experimental_providerMetadata: providerMetadata,
  _internal: {
  generateId: generateId3 = originalGenerateId,
  currentDate = () => /* @__PURE__ */ new Date()
@@ -1752,9 +1865,14 @@ async function generateObject({
  mode,
  schema: inputSchema,
  schemaName,
- schemaDescription
+ schemaDescription,
+ enumValues
+ });
+ const outputStrategy = getOutputStrategy({
+ output,
+ schema: inputSchema,
+ enumValues
  });
- const outputStrategy = getOutputStrategy({ output, schema: inputSchema });
  if (outputStrategy.type === "no-schema" && mode === void 0) {
  mode = "json";
  }
@@ -1799,7 +1917,7 @@ async function generateObject({
  let rawResponse;
  let response;
  let logprobs;
- let providerMetadata;
+ let resultProviderMetadata;
  switch (mode) {
  case "json": {
  const validatedPrompt = validatePrompt({
@@ -1857,6 +1975,7 @@ async function generateObject({
  ...prepareCallSettings(settings),
  inputFormat,
  prompt: promptMessages,
+ providerMetadata,
  abortSignal,
  headers
  });
@@ -1901,7 +2020,7 @@ async function generateObject({
  warnings = generateResult.warnings;
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
- providerMetadata = generateResult.providerMetadata;
+ resultProviderMetadata = generateResult.providerMetadata;
  response = generateResult.responseData;
  break;
  }
@@ -1961,6 +2080,7 @@ async function generateObject({
  ...prepareCallSettings(settings),
  inputFormat,
  prompt: promptMessages,
+ providerMetadata,
  abortSignal,
  headers
  });
@@ -2006,7 +2126,7 @@ async function generateObject({
  warnings = generateResult.warnings;
  rawResponse = generateResult.rawResponse;
  logprobs = generateResult.logprobs;
- providerMetadata = generateResult.providerMetadata;
+ resultProviderMetadata = generateResult.providerMetadata;
  response = generateResult.responseData;
  break;
  }
@@ -2058,7 +2178,7 @@ async function generateObject({
  headers: rawResponse == null ? void 0 : rawResponse.headers
  },
  logprobs,
- providerMetadata
+ providerMetadata: resultProviderMetadata
  });
  }
  });
@@ -2216,6 +2336,7 @@ async function streamObject({
  abortSignal,
  headers,
  experimental_telemetry: telemetry,
+ experimental_providerMetadata: providerMetadata,
  onFinish,
  _internal: {
  generateId: generateId3 = originalGenerateId2,
@@ -2296,6 +2417,7 @@ async function streamObject({
  prompt: validatedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls
  }),
+ providerMetadata,
  abortSignal,
  headers
  };
@@ -2337,6 +2459,7 @@ async function streamObject({
  prompt: validatedPrompt,
  modelSupportsImageUrls: model.supportsImageUrls
  }),
+ providerMetadata,
  abortSignal,
  headers
  };
@@ -2899,6 +3022,7 @@ async function generateText({
  maxAutomaticRoundtrips = 0,
  maxToolRoundtrips = maxAutomaticRoundtrips,
  experimental_telemetry: telemetry,
+ experimental_providerMetadata: providerMetadata,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
  currentDate = () => /* @__PURE__ */ new Date()
@@ -2996,6 +3120,7 @@ async function generateText({
  ...callSettings,
  inputFormat: currentInputFormat,
  prompt: promptMessages,
+ providerMetadata,
  abortSignal,
  headers
  });
@@ -3567,6 +3692,7 @@ async function streamText({
  headers,
  maxToolRoundtrips = 0,
  experimental_telemetry: telemetry,
+ experimental_providerMetadata: providerMetadata,
  experimental_toolCallStreaming: toolCallStreaming = false,
  onChunk,
  onFinish,
@@ -3653,6 +3779,7 @@ async function streamText({
  ...prepareCallSettings(settings),
  inputFormat: promptType,
  prompt: promptMessages2,
+ providerMetadata,
  abortSignal,
  headers
  })
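Across all of these call sites the change is the same: `providerMetadata` is forwarded untouched in the options object handed to the model's `doGenerate`/`doStream`. A sketch of the receiving end in a custom provider; the metadata key and the option read from it are invented for illustration, and the exact options type depends on the installed `@ai-sdk/provider` version:

import type { LanguageModelV1CallOptions } from '@ai-sdk/provider';

// The metadata arrives namespaced under the provider's own key.
function readExampleProviderOptions(options: LanguageModelV1CallOptions) {
  const mine = options.providerMetadata?.['example-provider']; // hypothetical key
  return { cacheHint: mine?.cacheHint }; // hypothetical option
}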