@ai-sdk/react 2.0.0-canary.8 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,7 +1,7 @@
  # AI SDK: React provider
 
- [React](https://react.dev/) UI components for the [AI SDK](https://sdk.vercel.ai/docs):
+ [React](https://react.dev/) UI components for the [AI SDK](https://ai-sdk.dev/docs):
 
- - [`useChat`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-chat) hook
- - [`useCompletion`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-completion) hook
- - [`useObject`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-object) hook
+ - [`useChat`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat) hook
+ - [`useCompletion`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-completion) hook
+ - [`useObject`](https://ai-sdk.dev/docs/reference/ai-sdk-ui/use-object) hook
package/dist/index.d.mts CHANGED
@@ -1,106 +1,44 @@
- import { UIMessage, Message, CreateMessage, ChatRequestOptions, JSONValue, UseChatOptions, RequestOptions, UseCompletionOptions, Schema, DeepPartial } from 'ai';
- export { CreateMessage, Message, UseChatOptions, UseCompletionOptions } from 'ai';
- import { FetchFunction } from '@ai-sdk/provider-utils';
- import z from 'zod';
+ import { UIMessage, AbstractChat, ChatInit, CompletionRequestOptions, UseCompletionOptions, Schema, DeepPartial } from 'ai';
+ export { CreateUIMessage, UIMessage, UseCompletionOptions } from 'ai';
+ import { FetchFunction, InferSchema } from '@ai-sdk/provider-utils';
+ import * as z3 from 'zod/v3';
+ import * as z4 from 'zod/v4';
 
- type UseChatHelpers = {
- /** Current messages in the chat */
- messages: UIMessage[];
- /** The error object of the API request */
- error: undefined | Error;
- /**
- * Append a user message to the chat list. This triggers the API call to fetch
- * the assistant's response.
- * @param message The message to append
- * @param options Additional options to pass to the API call
- */
- append: (message: Message | CreateMessage, chatRequestOptions?: ChatRequestOptions) => Promise<string | null | undefined>;
- /**
- * Reload the last AI chat response for the given chat history. If the last
- * message isn't from the assistant, it will request the API to generate a
- * new response.
- */
- reload: (chatRequestOptions?: ChatRequestOptions) => Promise<string | null | undefined>;
+ declare class Chat<UI_MESSAGE extends UIMessage> extends AbstractChat<UI_MESSAGE> {
+ #private;
+ constructor({ messages, ...init }: ChatInit<UI_MESSAGE>);
+ '~registerMessagesCallback': (onChange: () => void, throttleWaitMs?: number) => (() => void);
+ '~registerStatusCallback': (onChange: () => void) => (() => void);
+ '~registerErrorCallback': (onChange: () => void) => (() => void);
+ }
+
+ type UseChatHelpers<UI_MESSAGE extends UIMessage> = {
  /**
- * Abort the current request immediately, keep the generated tokens if any.
+ * The id of the chat.
  */
- stop: () => void;
+ readonly id: string;
  /**
  * Update the `messages` state locally. This is useful when you want to
  * edit the messages on the client, and then trigger the `reload` method
  * manually to regenerate the AI response.
  */
- setMessages: (messages: Message[] | ((messages: Message[]) => Message[])) => void;
- /** The current value of the input */
- input: string;
- /** setState-powered method to update the input value */
- setInput: React.Dispatch<React.SetStateAction<string>>;
- /** An input/textarea-ready onChange handler to control the value of the input */
- handleInputChange: (e: React.ChangeEvent<HTMLInputElement> | React.ChangeEvent<HTMLTextAreaElement>) => void;
- /** Form submission handler to automatically reset input and append a user message */
- handleSubmit: (event?: {
- preventDefault?: () => void;
- }, chatRequestOptions?: ChatRequestOptions) => void;
- metadata?: Object;
- /**
- * Whether the API request is in progress
- *
- * @deprecated use `status` instead
- */
- isLoading: boolean;
- /**
- * Hook status:
- *
- * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
- * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
- * - `ready`: The full response has been received and processed; a new user message can be submitted.
- * - `error`: An error occurred during the API request, preventing successful completion.
- */
- status: 'submitted' | 'streaming' | 'ready' | 'error';
- /** Additional data added on the server via StreamData. */
- data?: JSONValue[];
- /** Set the data of the chat. You can use this to transform or clear the chat data. */
- setData: (data: JSONValue[] | undefined | ((data: JSONValue[] | undefined) => JSONValue[] | undefined)) => void;
- /** The id of the chat */
- id: string;
- };
- declare function useChat({ api, id, initialMessages, initialInput, sendExtraMessageFields, onToolCall, experimental_prepareRequestBody, maxSteps, streamProtocol, onResponse, onFinish, onError, credentials, headers, body, generateId, fetch, keepLastMessageOnError, experimental_throttle: throttleWaitMs, }?: UseChatOptions & {
- key?: string;
- /**
- * Experimental (React only). When a function is provided, it will be used
- * to prepare the request body for the chat API. This can be useful for
- * customizing the request body based on the messages and data in the chat.
- *
- * @param messages The current messages in the chat.
- * @param requestData The data object passed in the chat request.
- * @param requestBody The request body object passed in the chat request.
- */
- experimental_prepareRequestBody?: (options: {
- id: string;
- messages: UIMessage[];
- requestData?: JSONValue;
- requestBody?: object;
- }) => unknown;
+ setMessages: (messages: UI_MESSAGE[] | ((messages: UI_MESSAGE[]) => UI_MESSAGE[])) => void;
+ error: Error | undefined;
+ } & Pick<AbstractChat<UI_MESSAGE>, 'sendMessage' | 'regenerate' | 'stop' | 'resumeStream' | 'addToolResult' | 'status' | 'messages' | 'clearError'>;
+ type UseChatOptions<UI_MESSAGE extends UIMessage> = ({
+ chat: Chat<UI_MESSAGE>;
+ } | ChatInit<UI_MESSAGE>) & {
  /**
  Custom throttle wait in ms for the chat messages and data updates.
  Default is undefined, which disables throttling.
  */
  experimental_throttle?: number;
  /**
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
- Must be at least 1.
-
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
-
- By default, it's set to 1, which means that only a single LLM call is made.
- */
- maxSteps?: number;
- }): UseChatHelpers & {
- addToolResult: ({ toolCallId, result, }: {
- toolCallId: string;
- result: any;
- }) => void;
+ * Whether to resume an ongoing chat generation stream.
+ */
+ resume?: boolean;
  };
+ declare function useChat<UI_MESSAGE extends UIMessage = UIMessage>({ experimental_throttle: throttleWaitMs, resume, ...options }?: UseChatOptions<UI_MESSAGE>): UseChatHelpers<UI_MESSAGE>;
 
  type UseCompletionHelpers = {
  /** The current completion result */
@@ -108,7 +46,7 @@ type UseCompletionHelpers = {
  /**
  * Send a new prompt to the API endpoint and update the completion state.
  */
- complete: (prompt: string, options?: RequestOptions) => Promise<string | null | undefined>;
+ complete: (prompt: string, options?: CompletionRequestOptions) => Promise<string | null | undefined>;
  /** The error object of the API request */
  error: undefined | Error;
  /**
@@ -145,10 +83,8 @@ type UseCompletionHelpers = {
  }) => void;
  /** Whether the API request is in progress */
  isLoading: boolean;
- /** Additional data added on the server via StreamData */
- data?: JSONValue[];
  };
- declare function useCompletion({ api, id, initialCompletion, initialInput, credentials, headers, body, streamProtocol, fetch, onResponse, onFinish, onError, experimental_throttle: throttleWaitMs, }?: UseCompletionOptions & {
+ declare function useCompletion({ api, id, initialCompletion, initialInput, credentials, headers, body, streamProtocol, fetch, onFinish, onError, experimental_throttle: throttleWaitMs, }?: UseCompletionOptions & {
  /**
  * Custom throttle wait in ms for the completion and data updates.
  * Default is undefined, which disables throttling.
@@ -156,7 +92,7 @@ declare function useCompletion({ api, id, initialCompletion, initialInput, crede
  experimental_throttle?: number;
  }): UseCompletionHelpers;
 
- type Experimental_UseObjectOptions<RESULT> = {
+ type Experimental_UseObjectOptions<SCHEMA extends z4.core.$ZodType | z3.Schema | Schema, RESULT> = {
  /**
  * The API endpoint. It should stream JSON that matches the schema as chunked text.
  */
@@ -164,7 +100,7 @@ type Experimental_UseObjectOptions<RESULT> = {
  /**
  * A Zod schema that defines the shape of the complete object.
  */
- schema: z.Schema<RESULT, z.ZodTypeDef, any> | Schema<RESULT>;
+ schema: SCHEMA;
  /**
  * An unique identifier. If not provided, a random one will be
  * generated. When provided, the `useObject` hook with the same `id` will
@@ -230,9 +166,13 @@ type Experimental_UseObjectHelpers<RESULT, INPUT> = {
  * Abort the current request immediately, keep the current partial object if any.
  */
  stop: () => void;
+ /**
+ * Clear the object state.
+ */
+ clear: () => void;
  };
- declare function useObject<RESULT, INPUT = any>({ api, id, schema, // required, in the future we will use it for validation
- initialValue, fetch, onError, onFinish, headers, credentials, }: Experimental_UseObjectOptions<RESULT>): Experimental_UseObjectHelpers<RESULT, INPUT>;
+ declare function useObject<SCHEMA extends z4.core.$ZodType | z3.Schema | Schema, RESULT = InferSchema<SCHEMA>, INPUT = any>({ api, id, schema, // required, in the future we will use it for validation
+ initialValue, fetch, onError, onFinish, headers, credentials, }: Experimental_UseObjectOptions<SCHEMA, RESULT>): Experimental_UseObjectHelpers<RESULT, INPUT>;
  declare const experimental_useObject: typeof useObject;
 
- export { Experimental_UseObjectHelpers, Experimental_UseObjectOptions, UseChatHelpers, UseCompletionHelpers, experimental_useObject, useChat, useCompletion };
+ export { Chat, Experimental_UseObjectHelpers, Experimental_UseObjectOptions, UseChatHelpers, UseChatOptions, UseCompletionHelpers, experimental_useObject, useChat, useCompletion };
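The declaration changes above drop the hook-managed `input` state and the `append`/`reload`/`handleSubmit` helpers in favor of a `Chat`-class-backed surface picked from `AbstractChat` (`sendMessage`, `regenerate`, `stop`, `resumeStream`, `addToolResult`, `status`, `messages`, `clearError`). A minimal sketch of client code against these typings follows; the `/api/chat` route, `DefaultChatTransport`, and the `{ text: ... }` argument to `sendMessage` are assumptions drawn from AI SDK 5 documentation, not from this diff:

```tsx
import { useState } from 'react';
import { DefaultChatTransport } from 'ai';
import { useChat } from '@ai-sdk/react';

export function ChatPanel() {
  // The hook no longer manages input state (`input`, `handleInputChange`,
  // `handleSubmit` were removed), so the component owns it.
  const [input, setInput] = useState('');

  const { messages, sendMessage, status, stop, regenerate, error } = useChat({
    // Assumed: ChatInit accepts a transport; DefaultChatTransport and the
    // `/api/chat` route are illustrative, not part of this diff.
    transport: new DefaultChatTransport({ api: '/api/chat' }),
  });

  return (
    <div>
      {messages.map(message => (
        <div key={message.id}>
          {message.role}:{' '}
          {message.parts.map((part, index) =>
            part.type === 'text' ? <span key={index}>{part.text}</span> : null,
          )}
        </div>
      ))}

      {status === 'streaming' && (
        <button type="button" onClick={() => stop()}>
          Stop
        </button>
      )}
      {error && (
        <button type="button" onClick={() => regenerate()}>
          Retry
        </button>
      )}

      <form
        onSubmit={event => {
          event.preventDefault();
          if (input.trim() === '') return;
          sendMessage({ text: input }); // assumed message-creation shape
          setInput('');
        }}
      >
        <input value={input} onChange={event => setInput(event.target.value)} />
      </form>
    </div>
  );
}
```

Per the new `UseChatOptions` union, a pre-constructed `Chat` instance can be passed instead (`useChat({ chat })`), which is what the newly exported `Chat` class is for.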
package/dist/index.d.ts CHANGED
@@ -1,106 +1,44 @@
- import { UIMessage, Message, CreateMessage, ChatRequestOptions, JSONValue, UseChatOptions, RequestOptions, UseCompletionOptions, Schema, DeepPartial } from 'ai';
- export { CreateMessage, Message, UseChatOptions, UseCompletionOptions } from 'ai';
- import { FetchFunction } from '@ai-sdk/provider-utils';
- import z from 'zod';
+ import { UIMessage, AbstractChat, ChatInit, CompletionRequestOptions, UseCompletionOptions, Schema, DeepPartial } from 'ai';
+ export { CreateUIMessage, UIMessage, UseCompletionOptions } from 'ai';
+ import { FetchFunction, InferSchema } from '@ai-sdk/provider-utils';
+ import * as z3 from 'zod/v3';
+ import * as z4 from 'zod/v4';
 
- type UseChatHelpers = {
- /** Current messages in the chat */
- messages: UIMessage[];
- /** The error object of the API request */
- error: undefined | Error;
- /**
- * Append a user message to the chat list. This triggers the API call to fetch
- * the assistant's response.
- * @param message The message to append
- * @param options Additional options to pass to the API call
- */
- append: (message: Message | CreateMessage, chatRequestOptions?: ChatRequestOptions) => Promise<string | null | undefined>;
- /**
- * Reload the last AI chat response for the given chat history. If the last
- * message isn't from the assistant, it will request the API to generate a
- * new response.
- */
- reload: (chatRequestOptions?: ChatRequestOptions) => Promise<string | null | undefined>;
+ declare class Chat<UI_MESSAGE extends UIMessage> extends AbstractChat<UI_MESSAGE> {
+ #private;
+ constructor({ messages, ...init }: ChatInit<UI_MESSAGE>);
+ '~registerMessagesCallback': (onChange: () => void, throttleWaitMs?: number) => (() => void);
+ '~registerStatusCallback': (onChange: () => void) => (() => void);
+ '~registerErrorCallback': (onChange: () => void) => (() => void);
+ }
+
+ type UseChatHelpers<UI_MESSAGE extends UIMessage> = {
  /**
- * Abort the current request immediately, keep the generated tokens if any.
+ * The id of the chat.
  */
- stop: () => void;
+ readonly id: string;
  /**
  * Update the `messages` state locally. This is useful when you want to
  * edit the messages on the client, and then trigger the `reload` method
  * manually to regenerate the AI response.
  */
- setMessages: (messages: Message[] | ((messages: Message[]) => Message[])) => void;
- /** The current value of the input */
- input: string;
- /** setState-powered method to update the input value */
- setInput: React.Dispatch<React.SetStateAction<string>>;
- /** An input/textarea-ready onChange handler to control the value of the input */
- handleInputChange: (e: React.ChangeEvent<HTMLInputElement> | React.ChangeEvent<HTMLTextAreaElement>) => void;
- /** Form submission handler to automatically reset input and append a user message */
- handleSubmit: (event?: {
- preventDefault?: () => void;
- }, chatRequestOptions?: ChatRequestOptions) => void;
- metadata?: Object;
- /**
- * Whether the API request is in progress
- *
- * @deprecated use `status` instead
- */
- isLoading: boolean;
- /**
- * Hook status:
- *
- * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
- * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
- * - `ready`: The full response has been received and processed; a new user message can be submitted.
- * - `error`: An error occurred during the API request, preventing successful completion.
- */
- status: 'submitted' | 'streaming' | 'ready' | 'error';
- /** Additional data added on the server via StreamData. */
- data?: JSONValue[];
- /** Set the data of the chat. You can use this to transform or clear the chat data. */
- setData: (data: JSONValue[] | undefined | ((data: JSONValue[] | undefined) => JSONValue[] | undefined)) => void;
- /** The id of the chat */
- id: string;
- };
- declare function useChat({ api, id, initialMessages, initialInput, sendExtraMessageFields, onToolCall, experimental_prepareRequestBody, maxSteps, streamProtocol, onResponse, onFinish, onError, credentials, headers, body, generateId, fetch, keepLastMessageOnError, experimental_throttle: throttleWaitMs, }?: UseChatOptions & {
- key?: string;
- /**
- * Experimental (React only). When a function is provided, it will be used
- * to prepare the request body for the chat API. This can be useful for
- * customizing the request body based on the messages and data in the chat.
- *
- * @param messages The current messages in the chat.
- * @param requestData The data object passed in the chat request.
- * @param requestBody The request body object passed in the chat request.
- */
- experimental_prepareRequestBody?: (options: {
- id: string;
- messages: UIMessage[];
- requestData?: JSONValue;
- requestBody?: object;
- }) => unknown;
+ setMessages: (messages: UI_MESSAGE[] | ((messages: UI_MESSAGE[]) => UI_MESSAGE[])) => void;
+ error: Error | undefined;
+ } & Pick<AbstractChat<UI_MESSAGE>, 'sendMessage' | 'regenerate' | 'stop' | 'resumeStream' | 'addToolResult' | 'status' | 'messages' | 'clearError'>;
+ type UseChatOptions<UI_MESSAGE extends UIMessage> = ({
+ chat: Chat<UI_MESSAGE>;
+ } | ChatInit<UI_MESSAGE>) & {
  /**
  Custom throttle wait in ms for the chat messages and data updates.
  Default is undefined, which disables throttling.
  */
  experimental_throttle?: number;
  /**
- Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
- Must be at least 1.
-
- A maximum number is required to prevent infinite loops in the case of misconfigured tools.
-
- By default, it's set to 1, which means that only a single LLM call is made.
- */
- maxSteps?: number;
- }): UseChatHelpers & {
- addToolResult: ({ toolCallId, result, }: {
- toolCallId: string;
- result: any;
- }) => void;
+ * Whether to resume an ongoing chat generation stream.
+ */
+ resume?: boolean;
  };
+ declare function useChat<UI_MESSAGE extends UIMessage = UIMessage>({ experimental_throttle: throttleWaitMs, resume, ...options }?: UseChatOptions<UI_MESSAGE>): UseChatHelpers<UI_MESSAGE>;
 
  type UseCompletionHelpers = {
  /** The current completion result */
@@ -108,7 +46,7 @@ type UseCompletionHelpers = {
  /**
  * Send a new prompt to the API endpoint and update the completion state.
  */
- complete: (prompt: string, options?: RequestOptions) => Promise<string | null | undefined>;
+ complete: (prompt: string, options?: CompletionRequestOptions) => Promise<string | null | undefined>;
  /** The error object of the API request */
  error: undefined | Error;
  /**
@@ -145,10 +83,8 @@ type UseCompletionHelpers = {
  }) => void;
  /** Whether the API request is in progress */
  isLoading: boolean;
- /** Additional data added on the server via StreamData */
- data?: JSONValue[];
  };
- declare function useCompletion({ api, id, initialCompletion, initialInput, credentials, headers, body, streamProtocol, fetch, onResponse, onFinish, onError, experimental_throttle: throttleWaitMs, }?: UseCompletionOptions & {
+ declare function useCompletion({ api, id, initialCompletion, initialInput, credentials, headers, body, streamProtocol, fetch, onFinish, onError, experimental_throttle: throttleWaitMs, }?: UseCompletionOptions & {
  /**
  * Custom throttle wait in ms for the completion and data updates.
  * Default is undefined, which disables throttling.
@@ -156,7 +92,7 @@ declare function useCompletion({ api, id, initialCompletion, initialInput, crede
  experimental_throttle?: number;
  }): UseCompletionHelpers;
 
- type Experimental_UseObjectOptions<RESULT> = {
+ type Experimental_UseObjectOptions<SCHEMA extends z4.core.$ZodType | z3.Schema | Schema, RESULT> = {
  /**
  * The API endpoint. It should stream JSON that matches the schema as chunked text.
  */
@@ -164,7 +100,7 @@ type Experimental_UseObjectOptions<RESULT> = {
  /**
  * A Zod schema that defines the shape of the complete object.
  */
- schema: z.Schema<RESULT, z.ZodTypeDef, any> | Schema<RESULT>;
+ schema: SCHEMA;
  /**
  * An unique identifier. If not provided, a random one will be
  * generated. When provided, the `useObject` hook with the same `id` will
@@ -230,9 +166,13 @@ type Experimental_UseObjectHelpers<RESULT, INPUT> = {
  * Abort the current request immediately, keep the current partial object if any.
  */
  stop: () => void;
+ /**
+ * Clear the object state.
+ */
+ clear: () => void;
  };
- declare function useObject<RESULT, INPUT = any>({ api, id, schema, // required, in the future we will use it for validation
- initialValue, fetch, onError, onFinish, headers, credentials, }: Experimental_UseObjectOptions<RESULT>): Experimental_UseObjectHelpers<RESULT, INPUT>;
+ declare function useObject<SCHEMA extends z4.core.$ZodType | z3.Schema | Schema, RESULT = InferSchema<SCHEMA>, INPUT = any>({ api, id, schema, // required, in the future we will use it for validation
+ initialValue, fetch, onError, onFinish, headers, credentials, }: Experimental_UseObjectOptions<SCHEMA, RESULT>): Experimental_UseObjectHelpers<RESULT, INPUT>;
  declare const experimental_useObject: typeof useObject;
 
- export { Experimental_UseObjectHelpers, Experimental_UseObjectOptions, UseChatHelpers, UseCompletionHelpers, experimental_useObject, useChat, useCompletion };
+ export { Chat, Experimental_UseObjectHelpers, Experimental_UseObjectOptions, UseChatHelpers, UseChatOptions, UseCompletionHelpers, experimental_useObject, useChat, useCompletion };
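The `index.d.ts` entry point carries the same changes as `index.d.mts`. On the `useObject` side, the new `SCHEMA` type parameter accepts zod v3 schemas, zod v4 schemas, or AI SDK `Schema` objects, `RESULT` defaults to `InferSchema<SCHEMA>`, and a `clear()` helper is added. A sketch under stated assumptions: the `/api/notifications` route and the notification schema are invented for illustration, and `submit`, `object`, and `isLoading` come from the existing `Experimental_UseObjectHelpers` surface that this diff only shows in part:

```tsx
import { experimental_useObject as useObject } from '@ai-sdk/react';
import { z } from 'zod/v4'; // zod v3 schemas are also accepted per the SCHEMA union

// Hypothetical schema, for illustration only.
const notificationSchema = z.object({
  notifications: z.array(
    z.object({ name: z.string(), message: z.string() }),
  ),
});

export function Notifications() {
  // RESULT defaults to InferSchema<SCHEMA>, so `object` is typed as a
  // DeepPartial of the schema output without an explicit type argument.
  const { object, submit, isLoading, stop, clear, error } = useObject({
    api: '/api/notifications', // assumed endpoint, not part of the diff
    schema: notificationSchema,
  });

  return (
    <div>
      <button onClick={() => submit('Messages during finals week.')}>Generate</button>
      <button onClick={() => stop()} disabled={!isLoading}>Stop</button>
      {/* `clear()` is new in 2.0.0 and resets the streamed object state. */}
      <button onClick={() => clear()}>Clear</button>
      {error && <p>Something went wrong.</p>}
      {object?.notifications?.map((notification, index) => (
        <p key={index}>
          {notification?.name}: {notification?.message}
        </p>
      ))}
    </div>
  );
}
```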