@assistant-ui/react-ai-sdk 0.10.14 → 0.10.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/dist/converters/fromLanguageModelTools.d.ts +7 -0
  2. package/dist/converters/fromLanguageModelTools.d.ts.map +1 -0
  3. package/dist/converters/fromLanguageModelTools.js +16 -0
  4. package/dist/converters/fromLanguageModelTools.js.map +1 -0
  5. package/dist/converters/index.d.ts +4 -0
  6. package/dist/converters/index.d.ts.map +1 -0
  7. package/dist/converters/index.js +10 -0
  8. package/dist/converters/index.js.map +1 -0
  9. package/dist/converters/toLanguageModelMessages.d.ts +9 -0
  10. package/dist/converters/toLanguageModelMessages.d.ts.map +1 -0
  11. package/dist/converters/toLanguageModelMessages.js +143 -0
  12. package/dist/converters/toLanguageModelMessages.js.map +1 -0
  13. package/dist/converters/toLanguageModelTools.d.ts +7 -0
  14. package/dist/converters/toLanguageModelTools.d.ts.map +1 -0
  15. package/dist/converters/toLanguageModelTools.js +15 -0
  16. package/dist/converters/toLanguageModelTools.js.map +1 -0
  17. package/dist/dangerous-in-browser/DangerousInBrowserAdapter.d.ts +9 -0
  18. package/dist/dangerous-in-browser/DangerousInBrowserAdapter.d.ts.map +1 -0
  19. package/dist/dangerous-in-browser/DangerousInBrowserAdapter.js +38 -0
  20. package/dist/dangerous-in-browser/DangerousInBrowserAdapter.js.map +1 -0
  21. package/dist/dangerous-in-browser/createEdgeRuntimeAPI.d.ts +56 -0
  22. package/dist/dangerous-in-browser/createEdgeRuntimeAPI.d.ts.map +1 -0
  23. package/dist/dangerous-in-browser/createEdgeRuntimeAPI.js +65 -0
  24. package/dist/dangerous-in-browser/createEdgeRuntimeAPI.js.map +1 -0
  25. package/dist/dangerous-in-browser/index.d.ts +2 -0
  26. package/dist/dangerous-in-browser/index.d.ts.map +1 -0
  27. package/dist/dangerous-in-browser/index.js +8 -0
  28. package/dist/dangerous-in-browser/index.js.map +1 -0
  29. package/dist/dangerous-in-browser/useDangerousInBrowserRuntime.d.ts +5 -0
  30. package/dist/dangerous-in-browser/useDangerousInBrowserRuntime.d.ts.map +1 -0
  31. package/dist/dangerous-in-browser/useDangerousInBrowserRuntime.js +21 -0
  32. package/dist/dangerous-in-browser/useDangerousInBrowserRuntime.js.map +1 -0
  33. package/dist/frontendTools.d.ts +1 -1
  34. package/dist/index.d.ts +2 -1
  35. package/dist/index.d.ts.map +1 -1
  36. package/dist/index.js +3 -13
  37. package/dist/index.js.map +1 -1
  38. package/dist/ui/utils/convertMessage.js.map +1 -1
  39. package/dist/ui/utils/sliceMessagesUntil.js +1 -1
  40. package/dist/ui/utils/sliceMessagesUntil.js.map +1 -1
  41. package/dist/useChatRuntime.d.ts +15 -3
  42. package/dist/useChatRuntime.d.ts.map +1 -1
  43. package/dist/useChatRuntime.js +99 -5
  44. package/dist/useChatRuntime.js.map +1 -1
  45. package/package.json +11 -7
  46. package/src/converters/fromLanguageModelTools.ts +19 -0
  47. package/src/converters/index.ts +3 -0
  48. package/src/converters/toLanguageModelMessages.ts +185 -0
  49. package/src/converters/toLanguageModelTools.ts +21 -0
  50. package/src/dangerous-in-browser/DangerousInBrowserAdapter.ts +41 -0
  51. package/src/dangerous-in-browser/createEdgeRuntimeAPI.ts +145 -0
  52. package/src/dangerous-in-browser/index.ts +4 -0
  53. package/src/dangerous-in-browser/useDangerousInBrowserRuntime.ts +26 -0
  54. package/src/index.ts +2 -8
  55. package/src/ui/utils/convertMessage.ts +12 -12
  56. package/src/ui/utils/sliceMessagesUntil.tsx +1 -1
  57. package/src/useChatRuntime.ts +156 -12
package/src/dangerous-in-browser/createEdgeRuntimeAPI.ts ADDED
@@ -0,0 +1,145 @@
+ import {
+   LanguageModelV1,
+   LanguageModelV1ToolChoice,
+   LanguageModelV1FunctionTool,
+   LanguageModelV1Prompt,
+   LanguageModelV1CallOptions,
+ } from "@ai-sdk/provider";
+ import { toLanguageModelMessages } from "../converters/toLanguageModelMessages";
+ import { toLanguageModelTools } from "../converters/toLanguageModelTools";
+ import { AssistantStreamChunk } from "assistant-stream";
+ import { LanguageModelV1StreamDecoder } from "assistant-stream/ai-sdk";
+ import { ThreadMessage, Tool } from "@assistant-ui/react";
+
+ export type LanguageModelV1CallSettings = {
+   maxTokens?: number;
+   temperature?: number;
+   topP?: number;
+   presencePenalty?: number;
+   frequencyPenalty?: number;
+   seed?: number;
+   headers?: Record<string, string>;
+ };
+
+ export type LanguageModelConfig = {
+   apiKey?: string;
+   baseUrl?: string;
+   modelName?: string;
+ };
+
+ export type EdgeRuntimeRequestOptions = {
+   system?: string | undefined;
+   messages: readonly ThreadMessage[];
+   runConfig?: {
+     custom?: Record<string, unknown>;
+   };
+   tools?: any[];
+   unstable_assistantMessageId?: string;
+   state?: unknown;
+ } & LanguageModelV1CallSettings &
+   LanguageModelConfig;
+
+ type LanguageModelCreator = (
+   config: LanguageModelConfig,
+ ) => Promise<LanguageModelV1> | LanguageModelV1;
+
+ type ThreadStep = {
+   readonly messageId?: string;
+   readonly usage?:
+     | {
+         readonly promptTokens: number;
+         readonly completionTokens: number;
+       }
+     | undefined;
+ };
+
+ type FinishResult = {
+   messages: readonly ThreadMessage[];
+   metadata: {
+     steps: readonly ThreadStep[];
+   };
+ };
+
+ export type CreateEdgeRuntimeAPIOptions = LanguageModelV1CallSettings & {
+   model: LanguageModelV1 | LanguageModelCreator;
+   system?: string;
+   tools?: Record<string, Tool<any, any>>;
+   toolChoice?: LanguageModelV1ToolChoice;
+   onFinish?: (result: FinishResult) => void;
+ };
+
+ type GetEdgeRuntimeStreamOptions = {
+   abortSignal: AbortSignal;
+   requestData: EdgeRuntimeRequestOptions;
+   options: CreateEdgeRuntimeAPIOptions;
+ };
+
+ export const getEdgeRuntimeStream = async ({
+   abortSignal,
+   requestData: request,
+   options: {
+     model: modelOrCreator,
+     system: serverSystem,
+     tools: serverTools = {},
+     toolChoice,
+     onFinish,
+     ...unsafeSettings
+   },
+ }: GetEdgeRuntimeStreamOptions): Promise<
+   ReadableStream<AssistantStreamChunk>
+ > => {
+   const model =
+     typeof modelOrCreator === "function"
+       ? await modelOrCreator(request)
+       : modelOrCreator;
+
+   const messages: LanguageModelV1Prompt = toLanguageModelMessages(
+     request.messages,
+   );
+
+   if (serverSystem || request.system) {
+     messages.unshift({
+       role: "system",
+       content: serverSystem || request.system!,
+     });
+   }
+
+   const allTools = serverTools || {};
+   const tools: LanguageModelV1FunctionTool[] = toLanguageModelTools(allTools);
+   const callOptions: LanguageModelV1CallOptions = {
+     inputFormat: "messages",
+     mode:
+       tools.length > 0
+         ? {
+             type: "regular",
+             tools,
+             ...(toolChoice && { toolChoice }),
+           }
+         : { type: "regular" },
+     prompt: messages,
+     ...(request.maxTokens !== undefined && { maxTokens: request.maxTokens }),
+     ...(request.temperature !== undefined && {
+       temperature: request.temperature,
+     }),
+     ...(request.topP !== undefined && { topP: request.topP }),
+     ...(request.presencePenalty !== undefined && {
+       presencePenalty: request.presencePenalty,
+     }),
+     ...(request.frequencyPenalty !== undefined && {
+       frequencyPenalty: request.frequencyPenalty,
+     }),
+     ...(request.seed !== undefined && { seed: request.seed }),
+     ...(request.headers && {
+       headers: Object.fromEntries(
+         Object.entries(request.headers).filter(([_, v]) => v !== undefined),
+       ) as Record<string, string>,
+     }),
+     abortSignal,
+     ...Object.fromEntries(
+       Object.entries(unsafeSettings).filter(([_, v]) => v !== undefined),
+     ),
+   };
+
+   const result = await model.doStream(callOptions);
+   return result.stream.pipeThrough(new LanguageModelV1StreamDecoder());
+ };
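For orientation, a minimal caller sketch for getEdgeRuntimeStream follows, based only on the types in the hunk above. The @ai-sdk/openai provider, the model name, and the relative import are assumptions for illustration; the package's public entry point to this code path is useDangerousInBrowserRuntime, not this function.

// Hypothetical caller of getEdgeRuntimeStream (not the published API surface).
import { openai } from "@ai-sdk/openai";
import {
  getEdgeRuntimeStream,
  type EdgeRuntimeRequestOptions,
} from "./createEdgeRuntimeAPI";

const runOnce = async (requestData: EdgeRuntimeRequestOptions) => {
  const controller = new AbortController();

  // Resolves the model, converts ThreadMessages to a LanguageModelV1Prompt,
  // and returns a ReadableStream<AssistantStreamChunk>.
  const stream = await getEdgeRuntimeStream({
    abortSignal: controller.signal,
    requestData,
    options: { model: openai("gpt-4o") }, // model choice is an assumption
  });

  // Drain the chunk stream; a real consumer would accumulate chunks into a message.
  const reader = stream.getReader();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    console.log(value);
  }
};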
package/src/dangerous-in-browser/index.ts ADDED
@@ -0,0 +1,4 @@
+ export {
+   useDangerousInBrowserRuntime,
+   type DangerousInBrowserRuntimeOptions,
+ } from "./useDangerousInBrowserRuntime";
package/src/dangerous-in-browser/useDangerousInBrowserRuntime.ts ADDED
@@ -0,0 +1,26 @@
+ "use client";
+
+ import { useState } from "react";
+ import {
+   DangerousInBrowserAdapter,
+   DangerousInBrowserAdapterOptions,
+ } from "./DangerousInBrowserAdapter";
+ import {
+   INTERNAL,
+   LocalRuntimeOptions,
+   useLocalRuntime,
+ } from "@assistant-ui/react";
+
+ const { splitLocalRuntimeOptions } = INTERNAL;
+
+ export type DangerousInBrowserRuntimeOptions =
+   DangerousInBrowserAdapterOptions & LocalRuntimeOptions;
+
+ export const useDangerousInBrowserRuntime = (
+   options: DangerousInBrowserRuntimeOptions,
+ ) => {
+   const { localRuntimeOptions, otherOptions } =
+     splitLocalRuntimeOptions(options);
+   const [adapter] = useState(() => new DangerousInBrowserAdapter(otherOptions));
+   return useLocalRuntime(adapter, localRuntimeOptions);
+ };
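Below is a hedged usage sketch of the new hook. DangerousInBrowserAdapterOptions is not shown in this diff, so the model option here is inferred from CreateEdgeRuntimeAPIOptions above and should be treated as an assumption; AssistantRuntimeProvider comes from @assistant-ui/react. As the directory name warns, this path runs model calls in the browser and exposes API credentials to the client.

"use client";

// Illustrative only: assumes the adapter accepts the same model option as
// CreateEdgeRuntimeAPIOptions; the provider and model name are placeholders.
import type { ReactNode } from "react";
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useDangerousInBrowserRuntime } from "@assistant-ui/react-ai-sdk";
import { openai } from "@ai-sdk/openai";

export const InBrowserChat = ({ children }: { children: ReactNode }) => {
  const runtime = useDangerousInBrowserRuntime({
    model: openai("gpt-4o"), // assumption: forwarded to getEdgeRuntimeStream
  });

  return (
    <AssistantRuntimeProvider runtime={runtime}>
      {children}
    </AssistantRuntimeProvider>
  );
};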
package/src/index.ts CHANGED
@@ -2,12 +2,6 @@ export * from "./rsc";
  export * from "./ui";
  export * from "./useChatRuntime";
  export * from "./useCloudRuntime";
-
- export {
-   toLanguageModelMessages,
-   toLanguageModelTools,
-   fromLanguageModelMessages,
-   fromLanguageModelTools,
-   useDangerousInBrowserRuntime,
- } from "@assistant-ui/react-edge";
+ export * from "./converters";
+ export * from "./dangerous-in-browser";
  export { frontendTools } from "./frontendTools";
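For consumers, the practical effect of this change is that the converter helpers and the in-browser runtime hook are now implemented and exported by this package directly instead of being re-exported from @assistant-ui/react-edge. A hedged import sketch, using only symbols present in the new source files listed above:

// These imports resolve via the new `export * from "./converters"` and
// `export * from "./dangerous-in-browser"` lines. fromLanguageModelMessages,
// which the old re-export also named, has no counterpart in the new file list.
import {
  toLanguageModelMessages,
  toLanguageModelTools,
  fromLanguageModelTools,
  useDangerousInBrowserRuntime,
} from "@assistant-ui/react-ai-sdk";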
package/src/ui/utils/convertMessage.ts CHANGED
@@ -1,12 +1,12 @@
  import { Message } from "@ai-sdk/ui-utils";
  import {
    unstable_createMessageConverter,
-   type ReasoningContentPart,
-   type ToolCallContentPart,
-   type TextContentPart,
+   type ReasoningMessagePart,
+   type ToolCallMessagePart,
+   type TextMessagePart,
    type CompleteAttachment,
-   type SourceContentPart,
-   type FileContentPart,
+   type SourceMessagePart,
+   type FileMessagePart,
  } from "@assistant-ui/react";

  const convertParts = (message: Message) => {
@@ -20,7 +20,7 @@ const convertParts = (message: Message) => {
          return {
            type: "text",
            text: part.text,
-         } satisfies TextContentPart;
+         } satisfies TextMessagePart;
        case "tool-invocation":
          return {
            type: "tool-call",
@@ -31,23 +31,23 @@ const convertParts = (message: Message) => {
            result:
              part.toolInvocation.state === "result" &&
              part.toolInvocation.result,
-         } satisfies ToolCallContentPart;
+         } satisfies ToolCallMessagePart;
        case "reasoning":
          return {
            type: "reasoning",
            text: part.reasoning,
-         } satisfies ReasoningContentPart;
+         } satisfies ReasoningMessagePart;
        case "source":
          return {
            type: "source",
            ...part.source,
-         } satisfies SourceContentPart;
+         } satisfies SourceMessagePart;
        case "file":
          return {
            type: "file",
            data: part.data,
            mimeType: part.mimeType,
-         } satisfies FileContentPart;
+         } satisfies FileMessagePart;
        default: {
          const _unsupported: never = type;
          throw new Error(
@@ -63,7 +63,7 @@ const convertParts = (message: Message) => {
          {
            type: "text",
            text: message.content,
-         } satisfies TextContentPart,
+         } satisfies TextMessagePart,
        ]
      : [];
  };
@@ -117,7 +117,7 @@ export const AISDKMessageConverter = unstable_createMessageConverter(
      case "data": {
        type MaybeSupportedDataMessage =
          | { type?: "unsafe_other" }
-         | ToolCallContentPart
+         | ToolCallMessagePart
          | {
              type: "tool-result";
              toolCallId: string;
package/src/ui/utils/sliceMessagesUntil.tsx CHANGED
@@ -9,7 +9,7 @@ export const sliceMessagesUntil = (
    let messageIdx = messages.findIndex((m) => m.id === messageId);
    if (messageIdx === -1)
      throw new Error(
-       "useVercelAIThreadState: Message not found. This is liekly an internal bug in assistant-ui.",
+       "useVercelAIThreadState: Message not found. This is likely an internal bug in assistant-ui.",
      );

    while (messages[messageIdx + 1]?.role === "assistant") {
package/src/useChatRuntime.ts CHANGED
@@ -1,13 +1,157 @@
- import { EdgeRuntimeOptions, useEdgeRuntime } from "@assistant-ui/react-edge";
-
- export type UseChatRuntimeOptions = Omit<
-   EdgeRuntimeOptions,
-   "unstable_AISDKInterop"
- >;
-
- export const useChatRuntime = (options: UseChatRuntimeOptions) => {
-   return useEdgeRuntime({
-     ...options,
-     unstable_AISDKInterop: "v2",
-   });
+ "use client";
+
+ import { toLanguageModelMessages } from "./converters";
+ import {
+   AssistantRuntime,
+   ChatModelAdapter,
+   ChatModelRunOptions,
+   INTERNAL,
+   LocalRuntimeOptions,
+   ThreadMessage,
+   Tool,
+   useLocalRuntime,
+ } from "@assistant-ui/react";
+ import { z } from "zod";
+ import zodToJsonSchema from "zod-to-json-schema";
+ import { JSONSchema7 } from "json-schema";
+ import {
+   AssistantMessageAccumulator,
+   DataStreamDecoder,
+   unstable_toolResultStream,
+ } from "assistant-stream";
+ import { asAsyncIterableStream } from "assistant-stream/utils";
+
+ const { splitLocalRuntimeOptions } = INTERNAL;
+
+ type HeadersValue = Record<string, string> | Headers;
+
+ export type UseChatRuntimeOptions = {
+   api: string;
+   onResponse?: (response: Response) => void | Promise<void>;
+   onFinish?: (message: ThreadMessage) => void;
+   onError?: (error: Error) => void;
+   onCancel?: () => void;
+   credentials?: RequestCredentials;
+   headers?: HeadersValue | (() => Promise<HeadersValue>);
+   body?: object;
+   sendExtraMessageFields?: boolean;
+ } & LocalRuntimeOptions;
+
+ type ChatRuntimeRequestOptions = {
+   messages: any[];
+   tools: any;
+   system?: string | undefined;
+   runConfig?: any;
+   unstable_assistantMessageId?: string;
+   state?: any;
+ };
+
+ const toAISDKTools = (tools: Record<string, Tool>) => {
+   return Object.fromEntries(
+     Object.entries(tools).map(([name, tool]) => [
+       name,
+       {
+         ...(tool.description ? { description: tool.description } : undefined),
+         parameters: (tool.parameters instanceof z.ZodType
+           ? zodToJsonSchema(tool.parameters)
+           : tool.parameters) as JSONSchema7,
+       },
+     ]),
+   );
+ };
+
+ const getEnabledTools = (tools: Record<string, Tool>) => {
+   return Object.fromEntries(
+     Object.entries(tools).filter(
+       ([, tool]) => !tool.disabled && tool.type !== "backend",
+     ),
+   );
+ };
+
+ class ChatRuntimeAdapter implements ChatModelAdapter {
+   constructor(
+     private options: Omit<UseChatRuntimeOptions, keyof LocalRuntimeOptions>,
+   ) {}
+
+   async *run({
+     messages,
+     runConfig,
+     abortSignal,
+     context,
+     unstable_assistantMessageId,
+     unstable_getMessage,
+   }: ChatModelRunOptions) {
+     const headersValue =
+       typeof this.options.headers === "function"
+         ? await this.options.headers()
+         : this.options.headers;
+
+     abortSignal.addEventListener(
+       "abort",
+       () => {
+         if (!abortSignal.reason?.detach) this.options.onCancel?.();
+       },
+       { once: true },
+     );
+
+     const headers = new Headers(headersValue);
+     headers.set("Content-Type", "application/json");
+
+     const result = await fetch(this.options.api, {
+       method: "POST",
+       headers,
+       credentials: this.options.credentials ?? "same-origin",
+       body: JSON.stringify({
+         system: context.system,
+         messages: toLanguageModelMessages(messages, {
+           unstable_includeId: this.options.sendExtraMessageFields,
+         }) as ChatRuntimeRequestOptions["messages"],
+         tools: toAISDKTools(
+           getEnabledTools(context.tools ?? {}),
+         ) as unknown as ChatRuntimeRequestOptions["tools"],
+         ...(unstable_assistantMessageId ? { unstable_assistantMessageId } : {}),
+         runConfig,
+         state: unstable_getMessage().metadata.unstable_state || undefined,
+         ...context.callSettings,
+         ...context.config,
+         ...this.options.body,
+       } satisfies ChatRuntimeRequestOptions),
+       signal: abortSignal,
+     });
+
+     await this.options.onResponse?.(result);
+
+     try {
+       if (!result.ok) {
+         throw new Error(`Status ${result.status}: ${await result.text()}`);
+       }
+       if (!result.body) {
+         throw new Error("Response body is null");
+       }
+
+       const stream = result.body
+         .pipeThrough(new DataStreamDecoder())
+         .pipeThrough(unstable_toolResultStream(context.tools, abortSignal))
+         .pipeThrough(new AssistantMessageAccumulator());
+
+       yield* asAsyncIterableStream(stream);
+
+       this.options.onFinish?.(unstable_getMessage());
+     } catch (error: unknown) {
+       this.options.onError?.(error as Error);
+       throw error;
+     }
+   }
+ }
+
+ export const useChatRuntime = (
+   options: UseChatRuntimeOptions,
+ ): AssistantRuntime => {
+   const { localRuntimeOptions, otherOptions } =
+     splitLocalRuntimeOptions(options);
+
+   return useLocalRuntime(
+     new ChatRuntimeAdapter(otherOptions),
+     localRuntimeOptions,
+   );
  };
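Finally, a brief usage sketch of the rewritten hook under its new option shape. The /api/chat path, the headers callback, and the bearer token are placeholders; the endpoint is expected to return an AI SDK data stream, which the adapter above decodes with DataStreamDecoder.

"use client";

// Usage sketch of the self-contained useChatRuntime (no longer delegating to
// @assistant-ui/react-edge). Endpoint path and auth header are placeholders.
import type { ReactNode } from "react";
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { useChatRuntime } from "@assistant-ui/react-ai-sdk";

export const ChatProvider = ({ children }: { children: ReactNode }) => {
  const runtime = useChatRuntime({
    api: "/api/chat",
    headers: async () => ({ Authorization: "Bearer <token>" }),
    onError: (error) => console.error("chat request failed", error),
    onFinish: (message) => console.log("assistant message complete", message.id),
  });

  return (
    <AssistantRuntimeProvider runtime={runtime}>
      {children}
    </AssistantRuntimeProvider>
  );
};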