@assistant-ui/mcp-docs-server 0.1.13 → 0.1.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/.docs/organized/code-examples/store-example.md +554 -0
  2. package/.docs/organized/code-examples/with-ag-ui.md +1639 -0
  3. package/.docs/organized/code-examples/with-ai-sdk-v5.md +555 -53
  4. package/.docs/organized/code-examples/with-assistant-transport.md +553 -52
  5. package/.docs/organized/code-examples/with-cloud.md +637 -42
  6. package/.docs/organized/code-examples/with-external-store.md +584 -34
  7. package/.docs/organized/code-examples/with-ffmpeg.md +586 -52
  8. package/.docs/organized/code-examples/with-langgraph.md +636 -53
  9. package/.docs/organized/code-examples/with-parent-id-grouping.md +584 -34
  10. package/.docs/organized/code-examples/with-react-hook-form.md +587 -75
  11. package/.docs/raw/blog/2024-07-29-hello/index.mdx +0 -1
  12. package/.docs/raw/docs/cli.mdx +396 -0
  13. package/.docs/raw/docs/cloud/authorization.mdx +2 -2
  14. package/.docs/raw/docs/getting-started.mdx +31 -37
  15. package/.docs/raw/docs/guides/context-api.mdx +5 -5
  16. package/.docs/raw/docs/migrations/v0-12.mdx +2 -2
  17. package/.docs/raw/docs/runtimes/assistant-transport.mdx +891 -0
  18. package/.docs/raw/docs/runtimes/custom/custom-thread-list.mdx +9 -0
  19. package/.docs/raw/docs/runtimes/custom/local.mdx +77 -4
  20. package/.docs/raw/docs/runtimes/langgraph/index.mdx +8 -5
  21. package/.docs/raw/docs/runtimes/mastra/full-stack-integration.mdx +12 -10
  22. package/.docs/raw/docs/runtimes/mastra/separate-server-integration.mdx +50 -31
  23. package/.docs/raw/docs/ui/Reasoning.mdx +174 -0
  24. package/dist/chunk-M2RKUM66.js +3 -3
  25. package/dist/chunk-NVNFQ5ZO.js +2 -2
  26. package/package.json +15 -7
@@ -109,6 +109,15 @@ When the hook mounts it calls `list()` on your adapter, hydrates existing thread
109
109
  async delete(remoteId) {
110
110
  await fetch(`/api/threads/${remoteId}`, { method: "DELETE" });
111
111
  },
112
+ async fetch(remoteId) {
113
+ const response = await fetch(`/api/threads/${remoteId}`);
114
+ const thread = await response.json();
115
+ return {
116
+ status: thread.is_archived ? "archived" : "regular",
117
+ remoteId: thread.id,
118
+ title: thread.title,
119
+ };
120
+ },
112
121
  async generateTitle(remoteId, messages) {
113
122
  return createAssistantStream(async (controller) => {
114
123
  const response = await fetch(`/api/threads/${remoteId}/title`, {
@@ -538,7 +538,12 @@ export function MyRuntimeProvider({ children }) {
538
538
  ```
539
539
 
540
540
  <Callout type="info" title="Returning a title from generateTitle">
541
- The `generateTitle` method must return an <code>AssistantStream</code> containing the title text. The easiest, type-safe way is to use <code>createAssistantStream</code> and call <code>controller.appendText(newTitle)</code> followed by <code>controller.close()</code>. Returning a raw <code>ReadableStream</code> won't update the thread list UI.
541
+ The `generateTitle` method must return an <code>AssistantStream</code>{" "}
542
+ containing the title text. The easiest, type-safe way is to use{" "}
543
+ <code>createAssistantStream</code> and call{" "}
544
+ <code>controller.appendText(newTitle)</code> followed by{" "}
545
+ <code>controller.close()</code>. Returning a raw <code>ReadableStream</code>{" "}
546
+ won't update the thread list UI.
542
547
  </Callout>
543
548
 
544
549
  #### Understanding the Architecture
@@ -854,9 +859,9 @@ function MyCustomRuntimeProvider({ children }) {
854
859
  </Callout>
855
860
 
856
861
  <Callout type="warning">
857
- **`useThreadRuntime` vs `useLocalThreadRuntime`:**
858
- - `useThreadRuntime` - Access the current thread's runtime from within components
859
- - `useLocalThreadRuntime` - Create a new single-thread runtime instance
862
+ **`useThreadRuntime` vs `useLocalThreadRuntime`:** - `useThreadRuntime` -
863
+ Access the current thread's runtime from within components -
864
+ `useLocalThreadRuntime` - Create a new single-thread runtime instance
860
865
  </Callout>
861
866
 
862
867
  ## Integration Examples
@@ -999,6 +1004,74 @@ async run({ messages }) { // ❌ Wrong for streaming
999
1004
  ```
1000
1005
  </Callout>
1001
1006
 
1007
+ ### Tool UI Flickers or Disappears During Streaming
1008
+
1009
+ A common issue when implementing a streaming `ChatModelAdapter` is seeing a tool's UI appear for a moment and then disappear. This is caused by failing to accumulate the `tool_calls` correctly across multiple stream chunks. State must be stored **outside** the streaming loop to persist.
1010
+
1011
+ **❌ Incorrect: Forgetting Previous Tool Calls**
1012
+
1013
+ This implementation incorrectly re-creates the `content` array for every chunk. If a later chunk contains only text, tool calls from previous chunks are lost, causing the UI to disappear.
1014
+
1015
+ ```tsx
1016
+ // This implementation incorrectly re-creates the `content` array for every chunk.
1017
+ // If a later chunk contains only text, tool calls from previous chunks are lost.
1018
+ async *run({ messages, abortSignal, context }) {
1019
+ const stream = await backendApi({ messages, abortSignal, context });
1020
+ let text = "";
1021
+
1022
+ for await (const chunk of stream) {
1023
+ // ❌ DON'T: This overwrites toolCalls with only the current chunk's data
1024
+ const toolCalls = chunk.tool_calls || [];
1025
+ const content = [{ type: "text", text }];
1026
+ for (const toolCall of toolCalls) {
1027
+ content.push({
1028
+ type: "tool-call",
1029
+ toolName: toolCall.name,
1030
+ toolCallId: toolCall.id,
1031
+ args: toolCall.args,
1032
+ });
1033
+ }
1034
+ yield { content }; // This yield might not contain the tool call anymore
1035
+ }
1036
+ }
1037
+ ```
1038
+
1039
+ **✅ Correct: Accumulating State**
1040
+
1041
+ This implementation uses a `Map` outside the loop to remember all tool calls.
1042
+
1043
+ ```tsx
1044
+ // This implementation uses a Map outside the loop to remember all tool calls.
1045
+ async *run({ messages, abortSignal, context }) {
1046
+ const stream = await backendApi({ messages, abortSignal, context });
1047
+ let text = "";
1048
+ // ✅ DO: Declare state outside the loop
1049
+ const toolCallsMap = new Map();
1050
+
1051
+ for await (const chunk of stream) {
1052
+ text += chunk.content || "";
1053
+
1054
+ // ✅ DO: Add/update tool calls in the persistent map
1055
+ for (const toolCall of chunk.tool_calls || []) {
1056
+ toolCallsMap.set(toolCall.toolCallId, {
1057
+ type: "tool-call",
1058
+ toolName: toolCall.name,
1059
+ toolCallId: toolCall.toolCallId,
1060
+ args: toolCall.args,
1061
+ });
1062
+ }
1063
+
1064
+ // ✅ DO: Build content from accumulated state
1065
+ const content = [
1066
+ ...(text ? [{ type: "text", text }] : []),
1067
+ ...Array.from(toolCallsMap.values()),
1068
+ ];
1069
+
1070
+ yield { content }; // Yield the complete, correct state every time
1071
+ }
1072
+ }
1073
+ ```
1074
+
1002
1075
  ### Debug Tips
1003
1076
 
1004
1077
  1. **Log adapter calls** to trace execution:
@@ -136,7 +136,7 @@ export const OPTIONS = () => {
136
136
 
137
137
  // ---cut---
138
138
  import { Client } from "@langchain/langgraph-sdk";
139
- import { LangChainMessage } from "@assistant-ui/react-langgraph";
139
+ import { LangChainMessage, LangGraphSendMessageConfig } from "@assistant-ui/react-langgraph";
140
140
 
141
141
  const createClient = () => {
142
142
  const apiUrl = process.env["NEXT_PUBLIC_LANGGRAPH_API_URL"] || "/api";
@@ -160,6 +160,7 @@ export const getThreadState = async (
160
160
  export const sendMessage = async (params: {
161
161
  threadId: string;
162
162
  messages: LangChainMessage;
163
+ config?: LangGraphSendMessageConfig;
163
164
  }) => {
164
165
  const client = createClient();
165
166
  return client.runs.stream(
@@ -170,6 +171,7 @@ export const sendMessage = async (params: {
170
171
  messages: params.messages,
171
172
  },
172
173
  streamMode: "messages",
174
+ ...params.config
173
175
  },
174
176
  );
175
177
  };
@@ -187,7 +189,7 @@ export const sendMessage = async (params: {
187
189
  // ---cut---
188
190
  "use client";
189
191
 
190
- import { Thread } from "@/components/assistant-ui";
192
+ import { Thread } from "@/components/assistant-ui/thread";
191
193
  import { AssistantRuntimeProvider } from "@assistant-ui/react";
192
194
  import { useLangGraphRuntime } from "@assistant-ui/react-langgraph";
193
195
 
@@ -195,12 +197,13 @@ import { createThread, getThreadState, sendMessage } from "@/lib/chatApi";
195
197
 
196
198
  export function MyAssistant() {
197
199
  const runtime = useLangGraphRuntime({
198
- stream: async (messages, { initialize }) => {
200
+ stream: async (messages, { initialize, config }) => {
199
201
  const { externalId } = await initialize();
200
202
  if (!externalId) throw new Error("Thread not found");
201
203
  return sendMessage({
202
204
  threadId: externalId,
203
205
  messages,
206
+ config
204
207
  });
205
208
  },
206
209
  create: async () => {
@@ -306,11 +309,11 @@ The `useLangGraphRuntime` hook now includes built-in thread management capabilit
306
309
 
307
310
  ```typescript
308
311
  const runtime = useLangGraphRuntime({
309
- stream: async (messages, { initialize }) => {
312
+ stream: async (messages, { initialize, config }) => {
310
313
  // initialize() creates or loads a thread and returns its IDs
311
314
  const { remoteId, externalId } = await initialize();
312
315
  // Use externalId (your backend's thread ID) for API calls
313
- return sendMessage({ threadId: externalId, messages });
316
+ return sendMessage({ threadId: externalId, messages, config });
314
317
  },
315
318
  create: async () => {
316
319
  // Called when creating a new thread
@@ -63,10 +63,10 @@ This default route uses the Vercel AI SDK directly with OpenAI. In the following
63
63
 
64
64
  ### Install Mastra Packages
65
65
 
66
- Add the Mastra core, memory, the AI SDK OpenAI provider packages to your project:
66
+ Add the `@mastra/core` package and its peer dependency `zod` (which you can use later inside tools for example). Also add `@mastra/ai-sdk` to convert Mastra's stream to an AI SDK-compatible format:
67
67
 
68
68
  ```bash npm2yarn
69
- npm install @mastra/core@latest @mastra/memory@latest @ai-sdk/openai
69
+ npm install @mastra/core@latest @mastra/ai-sdk@latest zod@latest
70
70
  ```
71
71
 
72
72
  </Step>
@@ -123,7 +123,6 @@ These files will be used in the next steps to define your Mastra agent and confi
123
123
  Now, let's define the behavior of our AI agent. Open the `mastra/agents/chefAgent.ts` file and add the following code:
124
124
 
125
125
  ```typescript title="mastra/agents/chefAgent.ts"
126
- import { openai } from "@ai-sdk/openai";
127
126
  import { Agent } from "@mastra/core/agent";
128
127
 
129
128
  export const chefAgent = new Agent({
@@ -131,14 +130,14 @@ export const chefAgent = new Agent({
131
130
  instructions:
132
131
  "You are Michel, a practical and experienced home chef. " +
133
132
  "You help people cook with whatever ingredients they have available.",
134
- model: openai("gpt-4o-mini"),
133
+ model: "openai/gpt-4o-mini",
135
134
  });
136
135
  ```
137
136
 
138
137
  This code creates a new Mastra `Agent` named `chef-agent`.
139
138
 
140
139
  - `instructions`: Defines the agent's persona and primary goal.
141
- - `model`: Specifies the language model the agent will use (in this case, OpenAI's GPT-4o Mini via the AI SDK).
140
+ - `model`: Specifies the language model the agent will use (in this case, OpenAI's GPT-4o Mini via Mastra's model router).
142
141
 
143
142
  Make sure you have set up your OpenAI API key as described in the [Getting Started guide](/docs/getting-started).
144
143
 
@@ -151,7 +150,6 @@ Next, register the agent with your Mastra instance. Open the `mastra/index.ts` f
151
150
 
152
151
  ```typescript title="mastra/index.ts"
153
152
  import { Mastra } from "@mastra/core";
154
-
155
153
  import { chefAgent } from "./agents/chefAgent";
156
154
 
157
155
  export const mastra = new Mastra({
@@ -169,6 +167,8 @@ This code initializes Mastra and makes the `chefAgent` available for use in your
169
167
  Now, update your API route (`app/api/chat/route.ts`) to use the Mastra agent you just configured. Replace the existing content with the following:
170
168
 
171
169
  ```typescript title="app/api/chat/route.ts"
170
+ import { createUIMessageStreamResponse } from "ai";
171
+ import { toAISdkFormat } from "@mastra/ai-sdk";
172
172
  import { mastra } from "@/mastra"; // Adjust the import path if necessary
173
173
 
174
174
  // Allow streaming responses up to 30 seconds
@@ -182,10 +182,12 @@ export async function POST(req: Request) {
182
182
  const agent = mastra.getAgent("chefAgent");
183
183
 
184
184
  // Stream the response using the agent
185
- const result = await agent.stream(messages);
185
+ const stream = await agent.stream(messages);
186
186
 
187
- // Return the result as a UI message stream response
188
- return result.toUIMessageStreamResponse();
187
+ // Create a Response that streams the UI message stream to the client
188
+ return createUIMessageStreamResponse({
189
+ stream: toAISdkFormat(stream, { from: "agent" }),
190
+ });
189
191
  }
190
192
  ```
191
193
 
@@ -194,7 +196,7 @@ Key changes:
194
196
  - We import the `mastra` instance created in `mastra/index.ts`. Make sure the import path (`@/mastra`) is correct for your project setup (you might need `~/mastra`, `../../../mastra`, etc., depending on your path aliases and project structure).
195
197
  - We retrieve the `chefAgent` using `mastra.getAgent("chefAgent")`.
196
198
  - Instead of calling the AI SDK's `streamText` directly, we call `agent.stream(messages)` to process the chat messages using the agent's configuration and model.
197
- - The result is still returned in a format compatible with assistant-ui using `toUIMessageStreamResponse()`.
199
+ - The result is still returned in a format compatible with assistant-ui using `createUIMessageStreamResponse()` and `toAISdkFormat()`.
198
200
 
199
201
  Your API route is now powered by Mastra!
200
202
 
@@ -29,6 +29,12 @@ Once the setup is complete, navigate into your new Mastra project directory (the
29
29
  cd your-mastra-server-directory # Replace with the actual directory name
30
30
  ```
31
31
 
32
+ In the next steps you'll need to use the `@mastra/ai-sdk` package. Add it to your Mastra project:
33
+
34
+ ```bash
35
+ npm install @mastra/ai-sdk@latest
36
+ ```
37
+
32
38
  You now have a basic Mastra server project ready.
33
39
 
34
40
  <Callout title="API Keys">
@@ -47,10 +53,9 @@ You now have a basic Mastra server project ready.
47
53
 
48
54
  Next, let's define an agent within your Mastra server project. We'll create a `chefAgent` similar to the one used in the full-stack guide.
49
55
 
50
- Open or create the agent file (e.g., `src/agents/chefAgent.ts` within your Mastra project) and add the following code:
56
+ Open or create the agent file (e.g., `src/mastra/agents/chefAgent.ts` within your Mastra project) and add the following code:
51
57
 
52
- ```typescript title="src/agents/chefAgent.ts"
53
- import { openai } from "@ai-sdk/openai";
58
+ ```typescript title="src/mastra/agents/chefAgent.ts"
54
59
  import { Agent } from "@mastra/core/agent";
55
60
 
56
61
  export const chefAgent = new Agent({
@@ -58,7 +63,7 @@ export const chefAgent = new Agent({
58
63
  instructions:
59
64
  "You are Michel, a practical and experienced home chef. " +
60
65
  "You help people cook with whatever ingredients they have available.",
61
- model: openai("gpt-4o-mini"),
66
+ model: "openai/gpt-4o-mini",
62
67
  });
63
68
  ```
64
69
 
@@ -70,11 +75,11 @@ This defines the agent's behavior, but it's not yet active in the Mastra server.
70
75
 
71
76
  ### Register the Agent
72
77
 
73
- Now, you need to register the `chefAgent` with your Mastra instance so the server knows about it. Open your main Mastra configuration file (this is often `src/index.ts` in projects created with `create-mastra`).
78
+ Now, you need to register the `chefAgent` with your Mastra instance so the server knows about it. Open your main Mastra configuration file (this is often `src/mastra/index.ts` in projects created with `create-mastra`).
74
79
 
75
80
  Import the `chefAgent` and add it to the `agents` object when initializing Mastra:
76
81
 
77
- ```typescript title="src/index.ts"
82
+ ```typescript title="src/mastra/index.ts"
78
83
  import { Mastra } from "@mastra/core";
79
84
  import { chefAgent } from "./agents/chefAgent"; // Adjust path if necessary
80
85
 
@@ -83,7 +88,34 @@ export const mastra = new Mastra({
83
88
  });
84
89
  ```
85
90
 
86
- Make sure you adapt this code to fit the existing structure of your `src/index.ts` file generated by `create-mastra`. The key is to import your agent and include it in the `agents` configuration object.
91
+ Make sure you adapt this code to fit the existing structure of your `src/mastra/index.ts` file generated by `create-mastra`. The key is to import your agent and include it in the `agents` configuration object.
92
+
93
+ </Step>
94
+
95
+ <Step>
96
+
97
+ ### Register the Chat Route
98
+
99
+ Still inside `src/mastra/index.ts`, register a chat route for the `chefAgent` now. You can do this by using `chatRoute()` from `@mastra/ai-sdk`. You need to place this inside `server.apiRoutes` of your Mastra configuration:
100
+
101
+ ```typescript title="src/mastra/index.ts" {3,7-13}
102
+ import { Mastra } from "@mastra/core";
103
+ import { chefAgent } from "./agents/chefAgent";
104
+ import { chatRoute } from "@mastra/ai-sdk";
105
+
106
+ export const mastra = new Mastra({
107
+ agents: { chefAgent },
108
+ server: {
109
+ apiRoutes: [
110
+ chatRoute({
111
+ path: "/chat/:agentId",
112
+ }),
113
+ ],
114
+ },
115
+ });
116
+ ```
117
+
118
+ Make sure you adapt this code to fit the existing structure of your `src/mastra/index.ts` file generated by `create-mastra`. This will make all agents available in AI SDK-compatible formats, including the `chefAgent` at the endpoint `/chat/chefAgent`.
87
119
 
88
120
  </Step>
89
121
 
@@ -97,7 +129,7 @@ With the agent defined and registered, start the Mastra development server:
97
129
  npm run dev
98
130
  ```
99
131
 
100
- By default, the Mastra server will run on `http://localhost:4111`. Your `chefAgent` should now be accessible via a POST request endpoint, typically `http://localhost:4111/api/agents/chefAgent/stream`. Keep this server running for the next steps where we'll set up the assistant-ui frontend to connect to it.
132
+ By default, the Mastra server will run on `http://localhost:4111`. Keep this server running for the next steps where we'll set up the assistant-ui frontend to connect to it.
101
133
 
102
134
  </Step>
103
135
 
@@ -135,38 +167,25 @@ In the next step, we will configure this frontend to communicate with the separa
135
167
 
136
168
  The default assistant-ui setup configures the chat runtime to use a local API route (`/api/chat`) within the Next.js project. Since our Mastra agent is running on a separate server, we need to update the frontend to point to that server's endpoint.
137
169
 
138
- Open the main page file in your assistant-ui frontend project (usually `app/page.tsx` or `src/app/page.tsx`). Find the `useChatRuntime` hook and change the `api` property to the full URL of your Mastra agent's stream endpoint:
170
+ Open the file in your assistant-ui frontend project that contains the `useChatRuntime` hook (usually `app/assistant.tsx` or `src/app/assistant.tsx`). Find the `useChatRuntime` hook and change the `api` property to the full URL of your Mastra agent's stream endpoint:
139
171
 
140
- ```tsx {10} title="app/page.tsx"
172
+ ```tsx {8} title="app/assistant.tsx"
141
173
  "use client";
142
- import { Thread } from "@/components/assistant-ui/thread";
143
- import {
144
- useChatRuntime,
145
- AssistantChatTransport,
146
- } from "@assistant-ui/react-ai-sdk";
147
- import { AssistantRuntimeProvider } from "@assistant-ui/react";
148
- import { ThreadList } from "@/components/assistant-ui/thread-list";
149
-
150
- export default function Home() {
151
- // Point the runtime to the Mastra server endpoint
174
+
175
+ // Rest of the imports...
176
+
177
+ export const Assistant = () => {
152
178
  const runtime = useChatRuntime({
153
179
  transport: new AssistantChatTransport({
154
- api: "MASTRA_ENDPOINT",
180
+ api: "http://localhost:4111/chat/chefAgent",
155
181
  }),
156
182
  });
157
183
 
158
- return (
159
- <AssistantRuntimeProvider runtime={runtime}>
160
- <main className="grid h-dvh grid-cols-[200px_1fr] gap-x-2 px-4 py-4">
161
- <ThreadList />
162
- <Thread />
163
- </main>
164
- </AssistantRuntimeProvider>
165
- );
166
- }
184
+ // Rest of the component...
185
+ };
167
186
  ```
168
187
 
169
- Replace `"http://localhost:4111/api/agents/chefAgent/stream"` with the actual URL if your Mastra server runs on a different port or host, or if your agent has a different name.
188
+ Replace `"http://localhost:4111/chat/chefAgent"` with the actual URL if your Mastra server runs on a different port or host, or if your agent has a different name.
170
189
 
171
190
  Now, the assistant-ui frontend will send chat requests directly to your running Mastra server.
172
191
 
@@ -0,0 +1,174 @@
1
+ ---
2
+ title: Reasoning
3
+ ---
4
+
5
+ import { Steps, Step } from "fumadocs-ui/components/steps";
6
+ import { Tab, Tabs } from "fumadocs-ui/components/tabs";
7
+
8
+ ## Overview
9
+
10
+ The Reasoning component displays AI reasoning or thinking messages in a collapsible UI. Consecutive reasoning message parts are automatically grouped together with smooth animations and a shimmer effect while streaming.
11
+
12
+ ## Getting Started
13
+
14
+ <Steps>
15
+ <Step>
16
+
17
+ ### Add `reasoning`
18
+
19
+ <Tabs items={["assistant-ui", "shadcn (namespace)", "shadcn"]}>
20
+ <Tab>
21
+
22
+ ```sh
23
+ npx assistant-ui@latest add reasoning
24
+ ```
25
+
26
+ </Tab>
27
+ <Tab>
28
+
29
+ ```sh
30
+ npx shadcn@latest add @assistant-ui/reasoning
31
+ ```
32
+
33
+ </Tab>
34
+ <Tab>
35
+
36
+ ```sh
37
+ npx shadcn@latest add "https://r.assistant-ui.com/reasoning"
38
+ ```
39
+
40
+ </Tab>
41
+ </Tabs>
42
+
43
+ This adds a `/components/assistant-ui/reasoning.tsx` file to your project, which you can adjust as needed.
44
+
45
+ </Step>
46
+ <Step>
47
+
48
+ ### Use in your application
49
+
50
+ Pass the `Reasoning` and `ReasoningGroup` components to the `MessagePrimitive.Parts` component:
51
+
52
+ ```tsx title="/app/components/assistant-ui/thread.tsx" {2,10-11}
53
+ import { MessagePrimitive } from "@assistant-ui/react";
54
+ import { Reasoning, ReasoningGroup } from "@/components/assistant-ui/reasoning";
55
+
56
+ const AssistantMessage: FC = () => {
57
+ return (
58
+ <MessagePrimitive.Root className="...">
59
+ <div className="...">
60
+ <MessagePrimitive.Parts
61
+ components={{
62
+ Reasoning: Reasoning,
63
+ ReasoningGroup: ReasoningGroup
64
+ }}
65
+ />
66
+ </div>
67
+ <AssistantActionBar />
68
+
69
+ <BranchPicker className="..." />
70
+ </MessagePrimitive.Root>
71
+ );
72
+ };
73
+ ```
74
+
75
+ </Step>
76
+ </Steps>
77
+
78
+ ## How It Works
79
+
80
+ The component consists of two parts:
81
+
82
+ 1. **Reasoning**: Renders individual reasoning message part content
83
+ 2. **ReasoningGroup**: Wraps consecutive reasoning parts in a collapsible container
84
+
85
+ Consecutive reasoning parts are automatically grouped together by the `ReasoningGroup` component, similar to how `ToolGroup` handles tool calls.
86
+
87
+ ### Reasoning
88
+
89
+ The Reasoning component doesn't accept additional props—it renders the reasoning text content with markdown support.
90
+
91
+ ## Examples
92
+
93
+ ### Basic Usage
94
+
95
+ ```tsx title="/app/components/assistant-ui/thread.tsx"
96
+ <MessagePrimitive.Parts
97
+ components={{
98
+ Reasoning,
99
+ ReasoningGroup
100
+ }}
101
+ />
102
+ ```
103
+
104
+ ### Custom Styling
105
+
106
+ Since the component is copied to your project, you can customize it directly by modifying the `reasoning.tsx` file. The internal components (`ReasoningRoot`, `ReasoningTrigger`, `ReasoningContent`, `ReasoningText`) accept `className` props for styling:
107
+
108
+ ```tsx title="/components/assistant-ui/reasoning.tsx"
109
+ const ReasoningGroupImpl: ReasoningGroupComponent = ({
110
+ // ... existing code ...
111
+ return (
112
+ <ReasoningRoot className="rounded-lg border bg-muted/50 p-4">
113
+ <ReasoningTrigger
114
+ active={isReasoningStreaming}
115
+ className="font-semibold text-foreground"
116
+ />
117
+ <ReasoningContent
118
+ aria-busy={isReasoningStreaming}
119
+ className="mt-2"
120
+ >
121
+ <ReasoningText className="text-base">{children}</ReasoningText>
122
+ </ReasoningContent>
123
+ </ReasoningRoot>
124
+ );
125
+ };
126
+ ```
127
+
128
+ You can also customize the individual internal components:
129
+
130
+ ```tsx title="/components/assistant-ui/reasoning.tsx"
131
+ const ReasoningRoot: FC<PropsWithChildren<{ className?: string }>> = ({
132
+ // ... existing code ...
133
+ return (
134
+ <Collapsible
135
+ // ...
136
+ className={cn("aui-reasoning-root mb-4 w-full rounded-lg border bg-muted/50 p-4", className)}
137
+ // ...
138
+ >
139
+ {children}
140
+ </Collapsible>
141
+ );
142
+ };
143
+
144
+ const ReasoningTrigger: FC<{ active: boolean; className?: string }> = ({
145
+ // ... existing code ...
146
+ <CollapsibleTrigger
147
+ className={cn(
148
+ "aui-reasoning-trigger group/trigger -mb-2 flex max-w-[75%] items-center gap-2 py-2 text-sm font-semibold text-foreground transition-colors hover:text-foreground",
149
+ className,
150
+ )}
151
+ >
152
+ {/* ... existing content ... */}
153
+ </CollapsibleTrigger>
154
+ );
155
+ ```
156
+
157
+ ## Technical Details
158
+
159
+ ### Scroll Lock
160
+
161
+ The component uses the `useScrollLock` hook (exported from `@assistant-ui/react`) to prevent page jumps when collapsing the reasoning section. This maintains the scroll position during the collapse animation.
162
+
163
+ ### Animation Timing
164
+
165
+ The component uses CSS custom properties for animation timing:
166
+ - `--animation-duration`: Controls expand/collapse animation (default: 200ms)
167
+ - `--shimmer-duration`: Controls the shimmer effect speed (default: 1000ms)
168
+
169
+ These can be customized by modifying the CSS variables in your component.
170
+
171
+ ## Related Components
172
+
173
+ - [ToolGroup](/docs/ui/ToolGroup) - Similar grouping pattern for tool calls
174
+ - [PartGrouping](/docs/ui/PartGrouping) - Experimental API for grouping message parts
@@ -2,9 +2,9 @@ import { fileURLToPath } from 'url';
2
2
  import { dirname, join } from 'path';
3
3
 
4
4
  // src/constants.ts
5
- var __dirname = dirname(fileURLToPath(import.meta.url));
6
- var ROOT_DIR = join(__dirname, "../../../");
7
- var PACKAGE_DIR = join(__dirname, "../");
5
+ var __dirname$1 = dirname(fileURLToPath(import.meta.url));
6
+ var ROOT_DIR = join(__dirname$1, "../../../");
7
+ var PACKAGE_DIR = join(__dirname$1, "../");
8
8
  var EXAMPLES_PATH = join(ROOT_DIR, "examples");
9
9
  var DOCS_BASE = join(PACKAGE_DIR, ".docs");
10
10
  var DOCS_PATH = join(DOCS_BASE, "raw/docs");
@@ -377,9 +377,9 @@ var examplesTools = {
377
377
  }
378
378
  }
379
379
  };
380
- var __dirname = dirname(fileURLToPath(import.meta.url));
380
+ var __dirname$1 = dirname(fileURLToPath(import.meta.url));
381
381
  var packageJson = JSON.parse(
382
- readFileSync(join(__dirname, "../package.json"), "utf-8")
382
+ readFileSync(join(__dirname$1, "../package.json"), "utf-8")
383
383
  );
384
384
  var server = new McpServer({
385
385
  name: "assistant-ui-docs",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@assistant-ui/mcp-docs-server",
3
- "version": "0.1.13",
3
+ "version": "0.1.15",
4
4
  "description": "MCP server for assistant-ui documentation and examples",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -8,17 +8,17 @@
8
8
  "assistant-ui-mcp": "./dist/stdio.js"
9
9
  },
10
10
  "dependencies": {
11
- "@modelcontextprotocol/sdk": "^1.20.2",
12
- "zod": "^4.1.12",
11
+ "@modelcontextprotocol/sdk": "^1.23.0",
12
+ "zod": "^4.1.13",
13
13
  "gray-matter": "^4.0.3",
14
14
  "cross-env": "^10.1.0"
15
15
  },
16
16
  "devDependencies": {
17
- "@types/node": "^24.10.0",
18
- "tsup": "^8.5.0",
17
+ "@types/node": "^24.10.1",
18
+ "tsup": "^8.5.1",
19
19
  "tsx": "^4.20.6",
20
20
  "typescript": "^5.9.3",
21
- "vitest": "^4.0.6"
21
+ "vitest": "^4.0.14"
22
22
  },
23
23
  "files": [
24
24
  "dist",
@@ -26,7 +26,15 @@
26
26
  "README.md"
27
27
  ],
28
28
  "publishConfig": {
29
- "access": "public"
29
+ "access": "public",
30
+ "provenance": true
31
+ },
32
+ "repository": {
33
+ "type": "git",
34
+ "url": "https://github.com/assistant-ui/assistant-ui/tree/main/packages/mcp-docs-server"
35
+ },
36
+ "bugs": {
37
+ "url": "https://github.com/assistant-ui/assistant-ui/issues"
30
38
  },
31
39
  "scripts": {
32
40
  "clean": "tsx scripts/clean.mts",