ai 6.0.0-beta.65 → 6.0.0-beta.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # ai
 
+ ## 6.0.0-beta.67
+
+ ### Patch Changes
+
+ - Updated dependencies [2b6a848]
+   - @ai-sdk/gateway@2.0.0-beta.37
+
+ ## 6.0.0-beta.66
+
+ ### Patch Changes
+
+ - fca786b: feat(agent): configurable call options
+ - Updated dependencies [fca786b]
+   - @ai-sdk/provider-utils@4.0.0-beta.20
+   - @ai-sdk/gateway@2.0.0-beta.36
+
  ## 6.0.0-beta.65
 
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1,6 +1,6 @@
  export { createGateway, gateway } from '@ai-sdk/gateway';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, FlexibleSchema, InferSchema, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
+ import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, FlexibleSchema, InferSchema, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, MaybePromiseLike, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
  export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
  import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV2, TranscriptionModelV3CallWarning, LanguageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV3, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
@@ -2380,7 +2380,11 @@ type TextStreamPart<TOOLS extends ToolSet> = {
  rawValue: unknown;
  };
 
- type AgentCallParameters = {
+ type AgentCallParameters<CALL_OPTIONS> = ([CALL_OPTIONS] extends [never] ? {
+ options?: never;
+ } : {
+ options: CALL_OPTIONS;
+ }) & ({
  /**
  * A prompt. It can be either a text prompt or a list of messages.
  *
@@ -2406,7 +2410,7 @@ type AgentCallParameters = {
  * You can either use `prompt` or `messages` but not both.
  */
  prompt?: never;
- };
+ });
  /**
  * An Agent receives a prompt (text or messages) and generates or streams an output
  * that consists of steps, tool calls, data parts, etc.
@@ -2414,7 +2418,7 @@ type AgentCallParameters = {
  * You can implement your own Agent by implementing the `Agent` interface,
  * or use the `ToolLoopAgent` class.
  */
- interface Agent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
+ interface Agent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
  /**
  * The specification version of the agent interface. This will enable
  * us to evolve the agent interface and retain backwards compatibility.
@@ -2431,11 +2435,11 @@ interface Agent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
  /**
  * Generates an output from the agent (non-streaming).
  */
- generate(options: AgentCallParameters): PromiseLike<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
+ generate(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
  /**
  * Streams an output from the agent (streaming).
  */
- stream(options: AgentCallParameters): StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>;
+ stream(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>>;
  }
 
  /**
@@ -2464,7 +2468,7 @@ type ToolLoopAgentOnStepFinishCallback<TOOLS extends ToolSet = {}> = (stepResult
  /**
  * Configuration options for an agent.
  */
- type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = CallSettings & {
+ type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = CallSettings & {
  /**
  * The id of the agent.
  */
@@ -2535,6 +2539,16 @@ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = n
  * @default undefined
  */
  experimental_context?: unknown;
+ /**
+ * The schema for the call options.
+ */
+ callOptionsSchema?: FlexibleSchema<CALL_OPTIONS>;
+ /**
+ * Prepare the parameters for the generateText or streamText call.
+ *
+ * You can use this to have templates based on call options.
+ */
+ prepareCall?: (options: AgentCallParameters<CALL_OPTIONS> & Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'>) => MaybePromiseLike<Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'> & Omit<Prompt, 'system'>>;
  };
 
  /**
@@ -2548,10 +2562,10 @@ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = n
  * - A tool call needs approval, or
  * - A stop condition is met (default stop condition is stepCountIs(20))
  */
- declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> implements Agent<TOOLS, OUTPUT> {
+ declare class ToolLoopAgent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> implements Agent<CALL_OPTIONS, TOOLS, OUTPUT> {
  readonly version = "agent-v1";
  private readonly settings;
- constructor(settings: ToolLoopAgentSettings<TOOLS, OUTPUT>);
+ constructor(settings: ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>);
  /**
  * The id of the agent.
  */
@@ -2560,20 +2574,21 @@ declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT extends Output =
  * The tools that the agent can use.
  */
  get tools(): TOOLS;
+ private prepareCall;
  /**
  * Generates an output from the agent (non-streaming).
  */
- generate(options: AgentCallParameters): Promise<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
+ generate(options: AgentCallParameters<CALL_OPTIONS>): Promise<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
  /**
  * Streams an output from the agent (streaming).
  */
- stream(options: AgentCallParameters): StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>;
+ stream(options: AgentCallParameters<CALL_OPTIONS>): Promise<StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>>;
  }
 
  /**
  * Infer the type of the tools of an agent.
  */
- type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, never> ? TOOLS : never;
+ type InferAgentTools<AGENT> = AGENT extends Agent<any, infer TOOLS, any> ? TOOLS : never;
 
  /**
  * Infer the UI message type of an agent.
@@ -2588,10 +2603,11 @@ type InferAgentUIMessage<AGENT> = UIMessage<never, never, InferUITools<InferAgen
  *
  * @returns The response object.
  */
- declare function createAgentUIStreamResponse<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ headers, status, statusText, consumeSseStream, ...options }: {
- agent: Agent<TOOLS, OUTPUT>;
+ declare function createAgentUIStreamResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ headers, status, statusText, consumeSseStream, ...options }: {
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
  messages: unknown[];
- } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<Response>;
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<Response>;
 
  declare const getOriginalFetch: () => typeof fetch;
  declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
@@ -3241,10 +3257,11 @@ declare const UI_MESSAGE_STREAM_HEADERS: {
  *
  * @returns The UI message stream.
  */
- declare function createAgentUIStream<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ agent, messages, ...uiMessageStreamOptions }: {
- agent: Agent<TOOLS, OUTPUT>;
+ declare function createAgentUIStream<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ agent, messages, options, ...uiMessageStreamOptions }: {
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
  messages: unknown[];
- } & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<never, never, InferUITools<TOOLS>>>>>;
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>>>;
 
  /**
  * Pipes the agent UI message stream to a Node.js ServerResponse object.
@@ -3252,11 +3269,12 @@ declare function createAgentUIStream<TOOLS extends ToolSet = {}, OUTPUT extends
  * @param agent - The agent to run.
  * @param messages - The input UI messages.
  */
- declare function pipeAgentUIStreamToResponse<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ response, headers, status, statusText, consumeSseStream, ...options }: {
+ declare function pipeAgentUIStreamToResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ response, headers, status, statusText, consumeSseStream, ...options }: {
  response: ServerResponse;
- agent: Agent<TOOLS, OUTPUT>;
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
  messages: unknown[];
- } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<void>;
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<void>;
 
  /**
  The result of an `embed` call.
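Taken together, the typings above introduce a `CALL_OPTIONS` type parameter that flows from `generate`/`stream` into the agent, plus `callOptionsSchema` and `prepareCall` settings for templating a call from those options (where `instructions` becomes the `system` prompt, per the bundled implementation further below). A minimal sketch of how this could be wired up, assuming `ToolLoopAgent` is exported from `ai` as in these typings; the Zod schema, model id, and prompt text are illustrative assumptions and do not appear in this diff:

```ts
import { ToolLoopAgent } from 'ai';
import { z } from 'zod';

// Hypothetical call options for illustration only.
const callOptionsSchema = z.object({
  tone: z.enum(['formal', 'casual']),
});

const agent = new ToolLoopAgent<z.infer<typeof callOptionsSchema>>({
  model: 'openai/gpt-4o', // assumed gateway model id
  callOptionsSchema,
  // Template the per-call settings from the caller-supplied options.
  prepareCall: ({ options, ...call }) => ({
    ...call,
    instructions: `Answer in a ${options.tone} tone.`,
  }),
});

// Because CALL_OPTIONS is not `never`, `options` is required on generate/stream.
const result = await agent.generate({
  prompt: 'Summarize the latest release notes.',
  options: { tone: 'casual' },
});
console.log(result.text);
```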
package/dist/index.d.ts CHANGED
@@ -1,6 +1,6 @@
  export { createGateway, gateway } from '@ai-sdk/gateway';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, FlexibleSchema, InferSchema, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
+ import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, FlexibleSchema, InferSchema, ModelMessage, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, MaybePromiseLike, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
  export { AssistantContent, AssistantModelMessage, DataContent, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
  import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV2, TranscriptionModelV3CallWarning, LanguageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV3, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
@@ -2380,7 +2380,11 @@ type TextStreamPart<TOOLS extends ToolSet> = {
  rawValue: unknown;
  };
 
- type AgentCallParameters = {
+ type AgentCallParameters<CALL_OPTIONS> = ([CALL_OPTIONS] extends [never] ? {
+ options?: never;
+ } : {
+ options: CALL_OPTIONS;
+ }) & ({
  /**
  * A prompt. It can be either a text prompt or a list of messages.
  *
@@ -2406,7 +2410,7 @@ type AgentCallParameters = {
  * You can either use `prompt` or `messages` but not both.
  */
  prompt?: never;
- };
+ });
  /**
  * An Agent receives a prompt (text or messages) and generates or streams an output
  * that consists of steps, tool calls, data parts, etc.
@@ -2414,7 +2418,7 @@ type AgentCallParameters = {
  * You can implement your own Agent by implementing the `Agent` interface,
  * or use the `ToolLoopAgent` class.
  */
- interface Agent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
+ interface Agent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
  /**
  * The specification version of the agent interface. This will enable
  * us to evolve the agent interface and retain backwards compatibility.
@@ -2431,11 +2435,11 @@ interface Agent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> {
  /**
  * Generates an output from the agent (non-streaming).
  */
- generate(options: AgentCallParameters): PromiseLike<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
+ generate(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
  /**
  * Streams an output from the agent (streaming).
  */
- stream(options: AgentCallParameters): StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>;
+ stream(options: AgentCallParameters<CALL_OPTIONS>): PromiseLike<StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>>;
  }
 
  /**
@@ -2464,7 +2468,7 @@ type ToolLoopAgentOnStepFinishCallback<TOOLS extends ToolSet = {}> = (stepResult
  /**
  * Configuration options for an agent.
  */
- type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = CallSettings & {
+ type ToolLoopAgentSettings<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> = CallSettings & {
  /**
  * The id of the agent.
  */
@@ -2535,6 +2539,16 @@ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = n
  * @default undefined
  */
  experimental_context?: unknown;
+ /**
+ * The schema for the call options.
+ */
+ callOptionsSchema?: FlexibleSchema<CALL_OPTIONS>;
+ /**
+ * Prepare the parameters for the generateText or streamText call.
+ *
+ * You can use this to have templates based on call options.
+ */
+ prepareCall?: (options: AgentCallParameters<CALL_OPTIONS> & Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'>) => MaybePromiseLike<Pick<ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>, 'model' | 'tools' | 'maxOutputTokens' | 'temperature' | 'topP' | 'topK' | 'presencePenalty' | 'frequencyPenalty' | 'stopSequences' | 'seed' | 'headers' | 'instructions' | 'stopWhen' | 'experimental_telemetry' | 'activeTools' | 'providerOptions' | 'experimental_context'> & Omit<Prompt, 'system'>>;
  };
 
  /**
@@ -2548,10 +2562,10 @@ type ToolLoopAgentSettings<TOOLS extends ToolSet = {}, OUTPUT extends Output = n
  * - A tool call needs approval, or
  * - A stop condition is met (default stop condition is stepCountIs(20))
  */
- declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT extends Output = never> implements Agent<TOOLS, OUTPUT> {
+ declare class ToolLoopAgent<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never> implements Agent<CALL_OPTIONS, TOOLS, OUTPUT> {
  readonly version = "agent-v1";
  private readonly settings;
- constructor(settings: ToolLoopAgentSettings<TOOLS, OUTPUT>);
+ constructor(settings: ToolLoopAgentSettings<CALL_OPTIONS, TOOLS, OUTPUT>);
  /**
  * The id of the agent.
  */
@@ -2560,20 +2574,21 @@ declare class ToolLoopAgent<TOOLS extends ToolSet = {}, OUTPUT extends Output =
  * The tools that the agent can use.
  */
  get tools(): TOOLS;
+ private prepareCall;
  /**
  * Generates an output from the agent (non-streaming).
  */
- generate(options: AgentCallParameters): Promise<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
+ generate(options: AgentCallParameters<CALL_OPTIONS>): Promise<GenerateTextResult<TOOLS, InferGenerateOutput<OUTPUT>>>;
  /**
  * Streams an output from the agent (streaming).
  */
- stream(options: AgentCallParameters): StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>;
+ stream(options: AgentCallParameters<CALL_OPTIONS>): Promise<StreamTextResult<TOOLS, InferStreamOutput<OUTPUT>>>;
  }
 
  /**
  * Infer the type of the tools of an agent.
  */
- type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, never> ? TOOLS : never;
+ type InferAgentTools<AGENT> = AGENT extends Agent<any, infer TOOLS, any> ? TOOLS : never;
 
  /**
  * Infer the UI message type of an agent.
@@ -2588,10 +2603,11 @@ type InferAgentUIMessage<AGENT> = UIMessage<never, never, InferUITools<InferAgen
  *
  * @returns The response object.
  */
- declare function createAgentUIStreamResponse<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ headers, status, statusText, consumeSseStream, ...options }: {
- agent: Agent<TOOLS, OUTPUT>;
+ declare function createAgentUIStreamResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ headers, status, statusText, consumeSseStream, ...options }: {
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
  messages: unknown[];
- } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<Response>;
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<Response>;
 
  declare const getOriginalFetch: () => typeof fetch;
  declare function callCompletionApi({ api, prompt, credentials, headers, body, streamProtocol, setCompletion, setLoading, setError, setAbortController, onFinish, onError, fetch, }: {
@@ -3241,10 +3257,11 @@ declare const UI_MESSAGE_STREAM_HEADERS: {
  *
  * @returns The UI message stream.
  */
- declare function createAgentUIStream<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ agent, messages, ...uiMessageStreamOptions }: {
- agent: Agent<TOOLS, OUTPUT>;
+ declare function createAgentUIStream<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ agent, messages, options, ...uiMessageStreamOptions }: {
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
  messages: unknown[];
- } & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<never, never, InferUITools<TOOLS>>>>>;
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<AsyncIterableStream<InferUIMessageChunk<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>>>;
 
  /**
  * Pipes the agent UI message stream to a Node.js ServerResponse object.
@@ -3252,11 +3269,12 @@ declare function createAgentUIStream<TOOLS extends ToolSet = {}, OUTPUT extends
  * @param agent - The agent to run.
  * @param messages - The input UI messages.
  */
- declare function pipeAgentUIStreamToResponse<TOOLS extends ToolSet = {}, OUTPUT extends Output = never>({ response, headers, status, statusText, consumeSseStream, ...options }: {
+ declare function pipeAgentUIStreamToResponse<CALL_OPTIONS = never, TOOLS extends ToolSet = {}, OUTPUT extends Output = never, MESSAGE_METADATA = unknown>({ response, headers, status, statusText, consumeSseStream, ...options }: {
  response: ServerResponse;
- agent: Agent<TOOLS, OUTPUT>;
+ agent: Agent<CALL_OPTIONS, TOOLS, OUTPUT>;
  messages: unknown[];
- } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<never, never, InferUITools<TOOLS>>>): Promise<void>;
+ options?: CALL_OPTIONS;
+ } & UIMessageStreamResponseInit & UIMessageStreamOptions<UIMessage<MESSAGE_METADATA, never, InferUITools<TOOLS>>>): Promise<void>;
 
  /**
  The result of an `embed` call.
package/dist/index.js CHANGED
@@ -873,7 +873,7 @@ function detectMediaType({
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
 
  // src/version.ts
- var VERSION = true ? "6.0.0-beta.65" : "0.0.0-test";
+ var VERSION = true ? "6.0.0-beta.67" : "0.0.0-test";
 
  // src/util/download/download.ts
  var download = async ({ url }) => {
@@ -6341,29 +6341,32 @@ var ToolLoopAgent = class {
  get tools() {
  return this.settings.tools;
  }
+ async prepareCall(options) {
+ var _a17, _b, _c, _d;
+ const baseCallArgs = {
+ ...this.settings,
+ stopWhen: (_a17 = this.settings.stopWhen) != null ? _a17 : stepCountIs(20),
+ ...options
+ };
+ const preparedCallArgs = (_d = await ((_c = (_b = this.settings).prepareCall) == null ? void 0 : _c.call(_b, baseCallArgs))) != null ? _d : baseCallArgs;
+ const { instructions, messages, prompt, ...callArgs } = preparedCallArgs;
+ return {
+ ...callArgs,
+ // restore prompt types
+ ...{ system: instructions, messages, prompt }
+ };
+ }
  /**
  * Generates an output from the agent (non-streaming).
  */
  async generate(options) {
- const { instructions, stopWhen, ...settings } = this.settings;
- return generateText({
- ...settings,
- system: instructions,
- stopWhen: stopWhen != null ? stopWhen : stepCountIs(20),
- ...options
- });
+ return generateText(await this.prepareCall(options));
  }
  /**
  * Streams an output from the agent (streaming).
  */
- stream(options) {
- const { instructions, stopWhen, ...settings } = this.settings;
- return streamText({
- ...settings,
- system: instructions,
- stopWhen: stopWhen != null ? stopWhen : stepCountIs(20),
- ...options
- });
+ async stream(options) {
+ return streamText(await this.prepareCall(options));
  }
  };
 
@@ -7086,6 +7089,7 @@ async function validateUIMessages({
  async function createAgentUIStream({
  agent,
  messages,
+ options,
  ...uiMessageStreamOptions
  }) {
  const validatedMessages = await validateUIMessages({
@@ -7095,7 +7099,10 @@ async function createAgentUIStream({
  const modelMessages = convertToModelMessages(validatedMessages, {
  tools: agent.tools
  });
- const result = agent.stream({ prompt: modelMessages });
+ const result = await agent.stream({
+ prompt: modelMessages,
+ options
+ });
  return result.toUIMessageStream(uiMessageStreamOptions);
  }
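On the runtime side, `ToolLoopAgent.stream` is now `async` (its return type changes from `StreamTextResult` to `Promise<StreamTextResult>`, so direct callers need an `await`), and `createAgentUIStream` forwards an `options` value into `agent.stream`. A hedged sketch of passing call options through `createAgentUIStreamResponse` in a fetch-style route handler; the route shape, request body fields, and the imported `agent` module are assumptions for illustration:

```ts
import { createAgentUIStreamResponse } from 'ai';
// Hypothetical module exporting a ToolLoopAgent configured with callOptionsSchema/prepareCall.
import { agent } from './agent';

export async function POST(req: Request) {
  // Assumed request body shape: UI messages plus a per-call option.
  const { messages, tone } = await req.json();

  // `options` is forwarded to agent.stream({ prompt, options }) inside createAgentUIStream,
  // as shown in the dist/index.js hunk above.
  return createAgentUIStreamResponse({
    agent,
    messages,
    options: { tone },
  });
}
```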