ai 6.0.0-beta.130 → 6.0.0-beta.132

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
  # ai

+ ## 6.0.0-beta.132
+
+ ### Patch Changes
+
+ - af9dab3: fix(ai): remove unused mode setting from generateObject and streamObject
+
+ ## 6.0.0-beta.131
+
+ ### Patch Changes
+
+ - 3ed5519: chore: rename ToolCallOptions to ToolExecutionOptions
+ - Updated dependencies [3ed5519]
+   - @ai-sdk/provider-utils@4.0.0-beta.42
+   - @ai-sdk/gateway@2.0.0-beta.71
+
  ## 6.0.0-beta.130

  ### Patch Changes
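The beta.131 entry is a type-level rename: the options object passed as the second argument to a tool's `execute` function is now named `ToolExecutionOptions`, while `ToolCallOptions` still appears in the export list below (presumably kept as a compatibility alias). A minimal sketch of the updated annotation, assuming the v5-style `tool()` helper with `inputSchema`/`execute` and that the options shape (`toolCallId`, `messages`, `abortSignal`) is unchanged by the rename:

```ts
import { tool, type ToolExecutionOptions } from 'ai';
import { z } from 'zod';

// Hypothetical tool used only to illustrate the rename; the explicit type on
// the second `execute` parameter is the only line the rename touches
// (it was previously `ToolCallOptions`).
const weatherTool = tool({
  description: 'Get the current temperature for a city',
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }, options: ToolExecutionOptions) => {
    // Assumed unchanged by the rename: toolCallId, messages, abortSignal.
    console.log('executing tool call', options.toolCallId);
    return { city, temperatureCelsius: 21 };
  },
});
```

Code that left the second parameter untyped and relied on inference should not need any change.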
package/dist/index.d.mts CHANGED
@@ -2,7 +2,7 @@ import { GatewayModelId } from '@ai-sdk/gateway';
  export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
- export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
+ export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
  import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, LanguageModelV3Usage, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, JSONObject, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
@@ -4164,7 +4164,6 @@ declare function generateObject<SCHEMA extends FlexibleSchema<unknown> = Flexibl
  The enum values that the model should use.
  */
  enum: Array<RESULT>;
- mode?: 'json';
  output: 'enum';
  } : OUTPUT extends 'no-schema' ? {} : {
  /**
@@ -4183,20 +4182,6 @@ Used by some providers for additional LLM guidance, e.g.
  via tool or schema description.
  */
  schemaDescription?: string;
- /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
  }) & {
  output?: OUTPUT;
  /**
@@ -4510,7 +4495,6 @@ declare function streamObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleS
  The enum values that the model should use.
  */
  enum: Array<RESULT>;
- mode?: 'json';
  output: 'enum';
  } : OUTPUT extends 'no-schema' ? {} : {
  /**
@@ -4529,20 +4513,6 @@ Used by some providers for additional LLM guidance, e.g.
  via tool or schema description.
  */
  schemaDescription?: string;
- /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
  }) & {
  output?: OUTPUT;
  /**
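These type changes mirror the beta.132 entry: the `mode` setting (`'auto' | 'json' | 'tool'`) is removed from the `generateObject` and `streamObject` call settings, so object generation is configured only through `schema`, `output`, and the related description fields. A minimal sketch, assuming the rest of the `generateObject` call signature is unchanged (the model id and schema below are placeholders):

```ts
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: 'openai/gpt-4o-mini', // placeholder gateway model id
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
  prompt: 'Generate a simple lasagna recipe.',
  // mode: 'json', // removed in beta.132; no longer accepted by the types
});

console.log(object.name);
```

`streamObject` drops the same option, so an equivalent streaming call changes in the same way: delete any `mode` argument and leave the rest of the call as is.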
package/dist/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { GatewayModelId } from '@ai-sdk/gateway';
  export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
- export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
+ export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
  import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, LanguageModelV3Usage, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, JSONObject, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
@@ -4164,7 +4164,6 @@ declare function generateObject<SCHEMA extends FlexibleSchema<unknown> = Flexibl
  The enum values that the model should use.
  */
  enum: Array<RESULT>;
- mode?: 'json';
  output: 'enum';
  } : OUTPUT extends 'no-schema' ? {} : {
  /**
@@ -4183,20 +4182,6 @@ Used by some providers for additional LLM guidance, e.g.
  via tool or schema description.
  */
  schemaDescription?: string;
- /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
  }) & {
  output?: OUTPUT;
  /**
@@ -4510,7 +4495,6 @@ declare function streamObject<SCHEMA extends FlexibleSchema<unknown> = FlexibleS
  The enum values that the model should use.
  */
  enum: Array<RESULT>;
- mode?: 'json';
  output: 'enum';
  } : OUTPUT extends 'no-schema' ? {} : {
  /**
@@ -4529,20 +4513,6 @@ Used by some providers for additional LLM guidance, e.g.
  via tool or schema description.
  */
  schemaDescription?: string;
- /**
- The mode to use for object generation.
-
- The schema is converted into a JSON schema and used in one of the following ways
-
- - 'auto': The provider will choose the best mode for the model.
- - 'tool': A tool with the JSON schema as parameters is provided and the provider is instructed to use it.
- - 'json': The JSON schema and an instruction are injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
-
- Please note that most providers do not support all modes.
-
- Default and recommended: 'auto' (best mode for the model).
- */
- mode?: 'auto' | 'json' | 'tool';
  }) & {
  output?: OUTPUT;
  /**
package/dist/index.js CHANGED
@@ -908,7 +908,7 @@ function detectMediaType({
  var import_provider_utils2 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "6.0.0-beta.130" : "0.0.0-test";
+ var VERSION = true ? "6.0.0-beta.132" : "0.0.0-test";

  // src/util/download/download.ts
  var download = async ({ url }) => {