@assistant-ui/mcp-docs-server 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/.docs/organized/code-examples/local-ollama.md +1135 -0
  2. package/.docs/organized/code-examples/search-agent-for-e-commerce.md +1721 -0
  3. package/.docs/organized/code-examples/with-ai-sdk.md +1081 -0
  4. package/.docs/organized/code-examples/with-cloud.md +1164 -0
  5. package/.docs/organized/code-examples/with-external-store.md +1064 -0
  6. package/.docs/organized/code-examples/with-ffmpeg.md +1305 -0
  7. package/.docs/organized/code-examples/with-langgraph.md +1819 -0
  8. package/.docs/organized/code-examples/with-openai-assistants.md +1175 -0
  9. package/.docs/organized/code-examples/with-react-hook-form.md +1727 -0
  10. package/.docs/organized/code-examples/with-vercel-ai-rsc.md +1157 -0
  11. package/.docs/raw/blog/2024-07-29-hello/index.mdx +65 -0
  12. package/.docs/raw/blog/2024-09-11/index.mdx +10 -0
  13. package/.docs/raw/blog/2024-12-15/index.mdx +10 -0
  14. package/.docs/raw/blog/2025-01-31-changelog/index.mdx +129 -0
  15. package/.docs/raw/docs/about-assistantui.mdx +44 -0
  16. package/.docs/raw/docs/api-reference/context-providers/AssistantRuntimeProvider.mdx +30 -0
  17. package/.docs/raw/docs/api-reference/context-providers/TextContentPartProvider.mdx +26 -0
  18. package/.docs/raw/docs/api-reference/integrations/react-hook-form.mdx +103 -0
  19. package/.docs/raw/docs/api-reference/integrations/vercel-ai-sdk.mdx +145 -0
  20. package/.docs/raw/docs/api-reference/overview.mdx +583 -0
  21. package/.docs/raw/docs/api-reference/primitives/ActionBar.mdx +264 -0
  22. package/.docs/raw/docs/api-reference/primitives/AssistantModal.mdx +129 -0
  23. package/.docs/raw/docs/api-reference/primitives/Attachment.mdx +96 -0
  24. package/.docs/raw/docs/api-reference/primitives/BranchPicker.mdx +87 -0
  25. package/.docs/raw/docs/api-reference/primitives/Composer.mdx +204 -0
  26. package/.docs/raw/docs/api-reference/primitives/ContentPart.mdx +173 -0
  27. package/.docs/raw/docs/api-reference/primitives/Error.mdx +70 -0
  28. package/.docs/raw/docs/api-reference/primitives/Message.mdx +181 -0
  29. package/.docs/raw/docs/api-reference/primitives/Thread.mdx +197 -0
  30. package/.docs/raw/docs/api-reference/primitives/composition.mdx +21 -0
  31. package/.docs/raw/docs/api-reference/runtimes/AssistantRuntime.mdx +33 -0
  32. package/.docs/raw/docs/api-reference/runtimes/AttachmentRuntime.mdx +46 -0
  33. package/.docs/raw/docs/api-reference/runtimes/ComposerRuntime.mdx +69 -0
  34. package/.docs/raw/docs/api-reference/runtimes/ContentPartRuntime.mdx +22 -0
  35. package/.docs/raw/docs/api-reference/runtimes/MessageRuntime.mdx +49 -0
  36. package/.docs/raw/docs/api-reference/runtimes/ThreadListItemRuntime.mdx +32 -0
  37. package/.docs/raw/docs/api-reference/runtimes/ThreadListRuntime.mdx +31 -0
  38. package/.docs/raw/docs/api-reference/runtimes/ThreadRuntime.mdx +48 -0
  39. package/.docs/raw/docs/architecture.mdx +92 -0
  40. package/.docs/raw/docs/cloud/authorization.mdx +152 -0
  41. package/.docs/raw/docs/cloud/overview.mdx +55 -0
  42. package/.docs/raw/docs/cloud/persistence/ai-sdk.mdx +54 -0
  43. package/.docs/raw/docs/cloud/persistence/langgraph.mdx +123 -0
  44. package/.docs/raw/docs/concepts/architecture.mdx +19 -0
  45. package/.docs/raw/docs/concepts/runtime-layer.mdx +163 -0
  46. package/.docs/raw/docs/concepts/why.mdx +9 -0
  47. package/.docs/raw/docs/copilots/make-assistant-readable.mdx +71 -0
  48. package/.docs/raw/docs/copilots/make-assistant-tool-ui.mdx +76 -0
  49. package/.docs/raw/docs/copilots/make-assistant-tool.mdx +117 -0
  50. package/.docs/raw/docs/copilots/model-context.mdx +135 -0
  51. package/.docs/raw/docs/copilots/motivation.mdx +191 -0
  52. package/.docs/raw/docs/copilots/use-assistant-instructions.mdx +62 -0
  53. package/.docs/raw/docs/getting-started.mdx +1133 -0
  54. package/.docs/raw/docs/guides/Attachments.mdx +640 -0
  55. package/.docs/raw/docs/guides/Branching.mdx +59 -0
  56. package/.docs/raw/docs/guides/Editing.mdx +56 -0
  57. package/.docs/raw/docs/guides/Speech.mdx +43 -0
  58. package/.docs/raw/docs/guides/ToolUI.mdx +663 -0
  59. package/.docs/raw/docs/guides/Tools.mdx +496 -0
  60. package/.docs/raw/docs/index.mdx +7 -0
  61. package/.docs/raw/docs/legacy/styled/AssistantModal.mdx +85 -0
  62. package/.docs/raw/docs/legacy/styled/Decomposition.mdx +633 -0
  63. package/.docs/raw/docs/legacy/styled/Markdown.mdx +86 -0
  64. package/.docs/raw/docs/legacy/styled/Scrollbar.mdx +71 -0
  65. package/.docs/raw/docs/legacy/styled/Thread.mdx +84 -0
  66. package/.docs/raw/docs/legacy/styled/ThreadWidth.mdx +21 -0
  67. package/.docs/raw/docs/mcp-docs-server.mdx +324 -0
  68. package/.docs/raw/docs/migrations/deprecation-policy.mdx +41 -0
  69. package/.docs/raw/docs/migrations/v0-7.mdx +188 -0
  70. package/.docs/raw/docs/migrations/v0-8.mdx +160 -0
  71. package/.docs/raw/docs/migrations/v0-9.mdx +75 -0
  72. package/.docs/raw/docs/react-compatibility.mdx +208 -0
  73. package/.docs/raw/docs/runtimes/ai-sdk/rsc.mdx +226 -0
  74. package/.docs/raw/docs/runtimes/ai-sdk/use-assistant-hook.mdx +195 -0
  75. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-hook.mdx +138 -0
  76. package/.docs/raw/docs/runtimes/ai-sdk/use-chat.mdx +136 -0
  77. package/.docs/raw/docs/runtimes/custom/external-store.mdx +1624 -0
  78. package/.docs/raw/docs/runtimes/custom/local.mdx +1185 -0
  79. package/.docs/raw/docs/runtimes/helicone.mdx +60 -0
  80. package/.docs/raw/docs/runtimes/langgraph/index.mdx +320 -0
  81. package/.docs/raw/docs/runtimes/langgraph/tutorial/index.mdx +11 -0
  82. package/.docs/raw/docs/runtimes/langgraph/tutorial/introduction.mdx +28 -0
  83. package/.docs/raw/docs/runtimes/langgraph/tutorial/part-1.mdx +120 -0
  84. package/.docs/raw/docs/runtimes/langgraph/tutorial/part-2.mdx +336 -0
  85. package/.docs/raw/docs/runtimes/langgraph/tutorial/part-3.mdx +385 -0
  86. package/.docs/raw/docs/runtimes/langserve.mdx +126 -0
  87. package/.docs/raw/docs/runtimes/mastra/full-stack-integration.mdx +218 -0
  88. package/.docs/raw/docs/runtimes/mastra/overview.mdx +17 -0
  89. package/.docs/raw/docs/runtimes/mastra/separate-server-integration.mdx +196 -0
  90. package/.docs/raw/docs/runtimes/pick-a-runtime.mdx +222 -0
  91. package/.docs/raw/docs/ui/AssistantModal.mdx +46 -0
  92. package/.docs/raw/docs/ui/AssistantSidebar.mdx +42 -0
  93. package/.docs/raw/docs/ui/Attachment.mdx +82 -0
  94. package/.docs/raw/docs/ui/Markdown.mdx +72 -0
  95. package/.docs/raw/docs/ui/Mermaid.mdx +79 -0
  96. package/.docs/raw/docs/ui/Scrollbar.mdx +59 -0
  97. package/.docs/raw/docs/ui/SyntaxHighlighting.mdx +253 -0
  98. package/.docs/raw/docs/ui/Thread.mdx +47 -0
  99. package/.docs/raw/docs/ui/ThreadList.mdx +49 -0
  100. package/.docs/raw/docs/ui/ToolFallback.mdx +64 -0
  101. package/.docs/raw/docs/ui/primitives/Thread.mdx +197 -0
  102. package/LICENSE +21 -0
  103. package/README.md +128 -0
  104. package/dist/chunk-C7O7EFKU.js +38 -0
  105. package/dist/chunk-CZCDQ3YH.js +420 -0
  106. package/dist/index.js +1 -0
  107. package/dist/prepare-docs/prepare.js +199 -0
  108. package/dist/stdio.js +8 -0
  109. package/package.json +43 -0
package/.docs/raw/docs/runtimes/helicone.mdx
@@ -0,0 +1,60 @@
+ ---
+ title: Helicone
+ ---
+ 
+ Helicone acts as a proxy for your OpenAI API calls, enabling detailed logging and monitoring. To integrate, update your API base URL and add the `Helicone-Auth` header.
+ 
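+ For example, with the plain `openai` Node SDK (a minimal sketch; the two integrations below are covered in detail):
+ 
+ ```ts
+ import OpenAI from "openai";
+ 
+ // Route all OpenAI traffic through Helicone by overriding the base URL
+ // and attaching the Helicone-Auth header to every request.
+ const openai = new OpenAI({
+   baseURL: "https://oai.helicone.ai/v1",
+   defaultHeaders: {
+     "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}`,
+   },
+ });
+ ```
+ 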
+ ## AI SDK by Vercel
+ 
+ 1. **Set Environment Variables:**
+ 
+    - `HELICONE_API_KEY`
+    - `OPENAI_API_KEY`
+ 
+ 2. **Configure the OpenAI client:**
+ 
+ ```ts
+ import { createOpenAI } from "@ai-sdk/openai";
+ import { streamText } from "ai";
+ 
+ const openai = createOpenAI({
+   baseURL: "https://oai.helicone.ai/v1",
+   headers: {
+     "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}`,
+   },
+ });
+ 
+ export async function POST(req: Request) {
+   const { prompt } = await req.json();
+   const result = streamText({
+     model: openai("gpt-4o"),
+     prompt,
+   });
+   // streamText returns a result object rather than a Response - convert it
+   // before returning from the route handler.
+   return result.toDataStreamResponse();
+ }
+ ```
+ 
+ ## LangChain Integration (Python)
+ 
+ 1. **Set Environment Variables:**
+ 
+    - `HELICONE_API_KEY`
+    - `OPENAI_API_KEY`
+ 
+ 2. **Configure ChatOpenAI:**
+ 
+ ```python
+ import os
+ 
+ from langchain_openai import ChatOpenAI
+ 
+ llm = ChatOpenAI(
+     model="gpt-3.5-turbo",
+     temperature=0,
+     openai_api_base="https://oai.helicone.ai/v1",
+     openai_api_key=os.environ["OPENAI_API_KEY"],
+     # Helicone authenticates via a header attached to every request
+     default_headers={"Helicone-Auth": f"Bearer {os.environ['HELICONE_API_KEY']}"},
+ )
+ ```
+ 
+ ## Summary
+ 
+ Update your API base URL to `https://oai.helicone.ai/v1` and add the `Helicone-Auth` header with your Helicone API key in either your Vercel AI SDK or LangChain configuration.
package/.docs/raw/docs/runtimes/langgraph/index.mdx
@@ -0,0 +1,320 @@
+ ---
+ title: Getting Started
+ ---
+ 
+ ## Requirements
+ 
+ You need a LangGraph Cloud API server. You can start a server locally via [LangGraph Studio](https://github.com/langchain-ai/langgraph-studio) or use [LangSmith](https://www.langchain.com/langsmith) for a hosted version.
+ 
+ The state of the graph you are using must have a `messages` key containing a list of LangChain-like messages, as sketched below.
+ 
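+ For example, a LangGraph.js graph built on `MessagesAnnotation` satisfies this requirement (a minimal sketch, not part of the template):
+ 
+ ```ts
+ import { StateGraph, MessagesAnnotation } from "@langchain/langgraph";
+ 
+ // MessagesAnnotation defines a state with a `messages` key whose reducer
+ // appends incoming LangChain messages - the shape this integration expects.
+ const graph = new StateGraph(MessagesAnnotation)
+   .addNode("agent", async (state) => {
+     // Call your model here and return the new messages to append.
+     return { messages: [] };
+   })
+   .addEdge("__start__", "agent")
+   .compile();
+ ```
+ 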
+ ## New project from template
+ 
+ import { Steps, Step } from "fumadocs-ui/components/steps";
+ 
+ <Steps>
+ <Step>
+ ### Create a new project based on the LangGraph assistant-ui template
+ 
+ ```sh
+ npx create-assistant-ui@latest -t langgraph my-app
+ ```
+ 
+ </Step>
+ <Step>
+ ### Set environment variables
+ 
+ Create a `.env.local` file in your project with the following variables:
+ 
+ ```sh
+ # LANGCHAIN_API_KEY=your_api_key # for production
+ # LANGGRAPH_API_URL=your_api_url # for production
+ NEXT_PUBLIC_LANGGRAPH_API_URL=your_api_url # for development (no api key required)
+ NEXT_PUBLIC_LANGGRAPH_ASSISTANT_ID=your_graph_id
+ ```
+ 
+ </Step>
+ </Steps>
+ 
+ ## Installation in existing React project
+ 
+ <Steps>
+ <Step>
+ 
+ ### Install dependencies
+ 
+ ```sh npm2yarn
+ npm install @assistant-ui/react @assistant-ui/react-ui @assistant-ui/react-langgraph @langchain/langgraph-sdk
+ ```
+ 
+ </Step>
+ <Step>
+ 
+ ### Setup a proxy backend endpoint (optional, for production)
+ 
+ <Callout type="warn">
+   This example forwards every request to the LangGraph server directly from the
+   browser. For production use-cases, you should limit the API calls to the
+   subset of endpoints that you need and perform authorization checks.
+ </Callout>
+ 
+ ```tsx twoslash title="@/app/api/[...path]/route.ts"
+ import { NextRequest, NextResponse } from "next/server";
+ 
+ export const runtime = "edge";
+ 
+ function getCorsHeaders() {
+   return {
+     "Access-Control-Allow-Origin": "*",
+     "Access-Control-Allow-Methods": "GET, POST, PUT, PATCH, DELETE, OPTIONS",
+     "Access-Control-Allow-Headers": "*",
+   };
+ }
+ 
+ async function handleRequest(req: NextRequest, method: string) {
+   try {
+     const path = req.nextUrl.pathname.replace(/^\/?api\//, "");
+     const url = new URL(req.url);
+     const searchParams = new URLSearchParams(url.search);
+     searchParams.delete("_path");
+     searchParams.delete("nxtP_path");
+     const queryString = searchParams.toString()
+       ? `?${searchParams.toString()}`
+       : "";
+ 
+     const options: RequestInit = {
+       method,
+       headers: {
+         "x-api-key": process.env["LANGCHAIN_API_KEY"] || "",
+       },
+     };
+ 
+     if (["POST", "PUT", "PATCH"].includes(method)) {
+       options.body = await req.text();
+     }
+ 
+     const res = await fetch(
+       `${process.env["LANGGRAPH_API_URL"]}/${path}${queryString}`,
+       options,
+     );
+ 
+     return new NextResponse(res.body, {
+       status: res.status,
+       statusText: res.statusText,
+       headers: {
+         // Headers is not a plain object - convert it before spreading.
+         ...Object.fromEntries(res.headers),
+         ...getCorsHeaders(),
+       },
+     });
+   } catch (e: any) {
+     return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
+   }
+ }
+ 
+ export const GET = (req: NextRequest) => handleRequest(req, "GET");
+ export const POST = (req: NextRequest) => handleRequest(req, "POST");
+ export const PUT = (req: NextRequest) => handleRequest(req, "PUT");
+ export const PATCH = (req: NextRequest) => handleRequest(req, "PATCH");
+ export const DELETE = (req: NextRequest) => handleRequest(req, "DELETE");
+ 
+ // Handle CORS preflight requests
+ export const OPTIONS = () => {
+   return new NextResponse(null, {
+     status: 204,
+     headers: {
+       ...getCorsHeaders(),
+     },
+   });
+ };
+ ```
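+ 
+ Per the callout above, a production proxy could gate requests to the endpoints you actually use before forwarding (a minimal sketch; the allowlist patterns are illustrative):
+ 
+ ```tsx
+ // Hypothetical allowlist of LangGraph endpoints this app needs.
+ const ALLOWED_PATHS = [
+   /^threads$/,
+   /^threads\/[^/]+\/state$/,
+   /^threads\/[^/]+\/runs\/stream$/,
+ ];
+ 
+ function isAllowed(path: string) {
+   // Reject any request that does not match a known endpoint pattern.
+   return ALLOWED_PATHS.some((re) => re.test(path));
+ }
+ ```
+ 
+ You would call `isAllowed(path)` inside `handleRequest` before the `fetch` and return a 403 response otherwise.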
+ 
+ </Step>
+ <Step>
+ 
+ ### Setup helper functions
+ 
+ ```tsx twoslash include chatApi title="@/lib/chatApi.ts"
+ // @filename: /lib/chatApi.ts
+ 
+ // ---cut---
+ import { Client, type ThreadState } from "@langchain/langgraph-sdk";
+ import { LangChainMessage } from "@assistant-ui/react-langgraph";
+ 
+ const createClient = () => {
+   const apiUrl = process.env["NEXT_PUBLIC_LANGGRAPH_API_URL"] || "/api";
+   return new Client({
+     apiUrl,
+   });
+ };
+ 
+ export const createThread = async () => {
+   const client = createClient();
+   return client.threads.create();
+ };
+ 
+ export const getThreadState = async (
+   threadId: string,
+ ): Promise<ThreadState<{ messages: LangChainMessage[] }>> => {
+   const client = createClient();
+   return client.threads.getState(threadId);
+ };
+ 
+ export const sendMessage = async (params: {
+   threadId: string;
+   messages: LangChainMessage[];
+ }) => {
+   const client = createClient();
+   return client.runs.stream(
+     params.threadId,
+     process.env["NEXT_PUBLIC_LANGGRAPH_ASSISTANT_ID"]!,
+     {
+       input: {
+         messages: params.messages,
+       },
+       streamMode: "messages",
+     },
+   );
+ };
+ ```
+ 
+ </Step>
+ <Step>
+ 
+ ### Define a `MyAssistant` component
+ 
+ ```tsx twoslash include MyAssistant title="@/components/MyAssistant.tsx"
+ // @filename: /components/MyAssistant.tsx
+ // @include: chatApi
+ 
+ // ---cut---
+ "use client";
+ 
+ import { useRef } from "react";
+ import { Thread } from "@/components/assistant-ui";
+ import { AssistantRuntimeProvider } from "@assistant-ui/react";
+ import { useLangGraphRuntime } from "@assistant-ui/react-langgraph";
+ 
+ import { createThread, getThreadState, sendMessage } from "@/lib/chatApi";
+ 
+ export function MyAssistant() {
+   const threadIdRef = useRef<string | undefined>();
+   const runtime = useLangGraphRuntime({
+     threadId: threadIdRef.current,
+     stream: async (messages) => {
+       if (!threadIdRef.current) {
+         const { thread_id } = await createThread();
+         threadIdRef.current = thread_id;
+       }
+       const threadId = threadIdRef.current;
+       return sendMessage({
+         threadId,
+         messages,
+       });
+     },
+     onSwitchToNewThread: async () => {
+       const { thread_id } = await createThread();
+       threadIdRef.current = thread_id;
+     },
+     onSwitchToThread: async (threadId) => {
+       const state = await getThreadState(threadId);
+       threadIdRef.current = threadId;
+       return {
+         messages: state.values.messages,
+         interrupts: state.tasks[0]?.interrupts,
+       };
+     },
+   });
+ 
+   return (
+     <AssistantRuntimeProvider runtime={runtime}>
+       <Thread />
+     </AssistantRuntimeProvider>
+   );
+ }
+ ```
+ 
+ </Step>
+ <Step>
+ 
+ ### Use the `MyAssistant` component
+ 
+ ```tsx twoslash title="@/app/page.tsx" {2,8}
+ // @include: MyAssistant
+ // @filename: /app/page.tsx
+ // ---cut---
+ import { MyAssistant } from "@/components/MyAssistant";
+ 
+ export default function Home() {
+   return (
+     <main className="h-dvh">
+       <MyAssistant />
+     </main>
+   );
+ }
+ ```
+ 
+ </Step>
+ <Step>
+ 
+ ### Setup environment variables
+ 
+ Create a `.env.local` file in your project with the following variables:
+ 
+ ```sh
+ # LANGCHAIN_API_KEY=your_api_key # for production
+ # LANGGRAPH_API_URL=your_api_url # for production
+ NEXT_PUBLIC_LANGGRAPH_API_URL=your_api_url # for development (no api key required)
+ NEXT_PUBLIC_LANGGRAPH_ASSISTANT_ID=your_graph_id
+ ```
+ 
+ </Step>
+ <Step>
+ 
+ ### Setup UI components
+ 
+ Follow the [UI Components](/docs/ui/shadcn-ui/Thread) guide to set up the UI components.
+ 
+ </Step>
+ </Steps>
+ 
+ ## Advanced APIs
+ 
+ ### Message Accumulator
+ 
+ The `LangGraphMessageAccumulator` accumulates message chunks streamed from the server, replicating the `messages` state client-side.
+ 
+ ```typescript
+ import {
+   LangGraphMessageAccumulator,
+   appendLangChainChunk,
+ } from "@assistant-ui/react-langgraph";
+ 
+ const accumulator = new LangGraphMessageAccumulator({
+   appendMessage: appendLangChainChunk,
+ });
+ 
+ // Add new chunks from the server (inside your stream event loop)
+ if (event.event === "messages/partial") accumulator.addMessages(event.data);
+ ```
+ 
+ ### Message Conversion
+ 
+ Use `convertLangChainMessages` to transform LangChain messages to assistant-ui format:
+ 
+ ```typescript
+ import { convertLangChainMessages } from "@assistant-ui/react-langgraph";
+ 
+ const threadMessage = convertLangChainMessages(langChainMessage);
+ ```
+ 
+ ## Interrupt Persistence
+ 
+ LangGraph supports interrupting the execution flow to request user input or handle specific interactions. These interrupts can be persisted and restored when switching between threads: if a user switches away from a thread mid-interaction (for example, while waiting for user approval), the interaction state is preserved when they return to that thread.
+ 
+ To handle interrupts in your application (see the sketch below):
+ 
+ 1. Make sure your thread state type includes the `interrupts` field
+ 2. Return the interrupts from `onSwitchToThread` along with the messages
+ 3. The runtime will automatically restore the interrupt state when switching threads
+ 
+ This feature is particularly useful for applications that require user approval flows, multi-step forms, or any other interactive elements that might span multiple thread switches.
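+ 
+ A minimal sketch of steps 1-2, reusing the `getThreadState` helper defined above (the runtime handles step 3 for you):
+ 
+ ```typescript
+ import { getThreadState } from "@/lib/chatApi";
+ 
+ const onSwitchToThread = async (threadId: string) => {
+   const state = await getThreadState(threadId);
+   return {
+     messages: state.values.messages,
+     // Pending interrupts (e.g. an approval request) are returned alongside
+     // the messages so the approval UI reappears on the restored thread.
+     interrupts: state.tasks[0]?.interrupts,
+   };
+ };
+ ```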
package/.docs/raw/docs/runtimes/langgraph/tutorial/index.mdx
@@ -0,0 +1,11 @@
+ ---
+ title: "Introduction"
+ ---
+ 
+ import { redirect } from "next/navigation";
+ 
+ <>
+   {redirect(
+     "/docs/runtimes/langgraph/tutorial/introduction",
+   )}
+ </>
package/.docs/raw/docs/runtimes/langgraph/tutorial/introduction.mdx
@@ -0,0 +1,28 @@
+ ---
+ title: "Introduction"
+ ---
+ 
+ In this tutorial, we will build a stockbroker assistant using LangChain.js, LangGraph.js, and assistant-ui.
+ 
+ We will go through the necessary steps to integrate assistant-ui with a LangGraph Cloud endpoint.
+ Code snippets focus on the setup of the frontend, but we will highlight relevant sections of the backend code as well.
+ 
+ This agent leverages the following features:
+ 
+ - 🚄 Streaming of messages from LangGraph state to assistant-ui
+ - 💅 Rich text rendering using Markdown
+ - 🛠️ Generative UI: mapping tool calls to tool UIs
+ - 🔁 Approval UI: confirming tool calls before execution (human-in-the-loop)
+ 
+ ## Prerequisites
+ 
+ - Node.js 18.x or higher
+ 
+ ## Final Result
+ 
+ - Demo: https://assistant-ui-stockbroker.vercel.app/
+ - Source Code: https://github.com/assistant-ui/assistant-ui-stockbroker
+ 
+ ## Get Started
+ 
+ Begin Part 1 of the tutorial by [setting up the frontend](/docs/runtimes/langgraph/tutorial/part-1).
package/.docs/raw/docs/runtimes/langgraph/tutorial/part-1.mdx
@@ -0,0 +1,120 @@
+ ---
+ title: "Part 1: Setup frontend"
+ ---
+ 
+ ## Create a new project
+ 
+ Run the following command to create a new Next.js project with the LangGraph assistant-ui template:
+ 
+ ```sh
+ npx create-assistant-ui@latest -t langgraph my-app
+ cd my-app
+ ```
+ 
+ You should see the following files in your project:
+ 
+ import { File, Folder, Files } from "fumadocs-ui/components/files";
+ 
+ <Files>
+   <Folder name="my-app" defaultOpen>
+     <Folder name="app" defaultOpen>
+       <Folder name="api" defaultOpen>
+         <Folder name="[...path]" defaultOpen>
+           <File name="route.ts" />
+         </Folder>
+       </Folder>
+       <File name="globals.css" />
+       <File name="layout.tsx" />
+       <File name="MyRuntimeProvider.tsx" />
+       <File name="page.tsx" />
+     </Folder>
+     <Folder name="lib">
+       <File name="chatApi.ts" />
+     </Folder>
+     <File name="next.config.ts" />
+     <File name="package.json" />
+     <File name="postcss.config.mjs" />
+     <File name="tailwind.config.ts" />
+     <File name="tsconfig.json" />
+   </Folder>
+ </Files>
+ 
+ ### Setup environment variables
+ 
+ Create a `.env.local` file in your project with the following variables:
+ 
+ ```sh title="@/.env.local"
+ LANGGRAPH_API_URL=https://assistant-ui-stockbroker.vercel.app/api
+ NEXT_PUBLIC_LANGGRAPH_ASSISTANT_ID=stockbroker
+ ```
+ 
+ This connects the frontend to a LangGraph Cloud endpoint running under
+ `https://assistant-ui-stockbroker.vercel.app/api`.
+ This endpoint runs the LangGraph agent defined [in this repository](https://github.com/assistant-ui/assistant-ui-stockbroker/blob/main/backend).
+ 
+ ### Start the server
+ 
+ You can start the server by running the following command:
+ 
+ ```sh
+ npm run dev
+ ```
+ 
+ The server will start, and you can view the frontend by opening a browser tab at http://localhost:3000.
+ 
+ You should be able to chat with the assistant and see LLM responses streaming in real time.
+ 
+ ## Explore features
+ 
+ ### Streaming
+ 
+ Streaming message support is enabled by default. The LangGraph integration includes message handling that efficiently manages streaming responses:
+ 
+ - Messages are accumulated and updated in real time using `LangGraphMessageAccumulator`
+ - Partial message chunks are automatically merged using `appendLangChainChunk`
+ - The runtime handles all the complexity of managing streaming state
+ 
+ This means you'll see tokens appear smoothly as they're generated by the LLM, with proper handling of both text content and tool calls, as sketched below.
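+ 
+ A rough sketch of what the runtime does internally, reusing the `sendMessage` helper and accumulator APIs from the Getting Started page (the `streamReply` name is illustrative):
+ 
+ ```ts
+ import {
+   LangGraphMessageAccumulator,
+   appendLangChainChunk,
+   LangChainMessage,
+ } from "@assistant-ui/react-langgraph";
+ import { sendMessage } from "@/lib/chatApi";
+ 
+ export async function streamReply(threadId: string, messages: LangChainMessage[]) {
+   const accumulator = new LangGraphMessageAccumulator({
+     appendMessage: appendLangChainChunk,
+   });
+   // Each `messages/partial` event carries chunks that are merged into the
+   // accumulated client-side messages state.
+   for await (const event of await sendMessage({ threadId, messages })) {
+     if (event.event === "messages/partial") accumulator.addMessages(event.data);
+   }
+ }
+ ```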
+ 
+ ### Markdown support
+ 
+ Rich text rendering using Markdown is enabled by default.
+ 
+ ## Add conversation starter messages
+ 
+ To help users understand what the assistant can do, we can add some conversation starter messages.
+ 
+ import Image from "next/image";
+ import starter from "./images/conversation-starters.png";
+ 
+ <Image
+   src={starter}
+   alt="Conversation starters"
+   width={600}
+   className="mx-auto rounded-lg border shadow"
+ />
+ 
+ ```tsx title="@/app/page.tsx" {9-21}
+ import { Thread } from "@assistant-ui/react-ui";
+ // MarkdownText is assumed to come from the template (created via makeMarkdownText)
+ import { MarkdownText } from "@/components/ui/markdown-text";
+ 
+ export default function Home() {
+   return (
+     <div className="flex h-full flex-col">
+       <Thread
+         welcome={{
+           suggestions: [
+             {
+               prompt: "How much revenue did Apple make last year?",
+             },
+             {
+               prompt: "Is McDonald's profitable?",
+             },
+             {
+               prompt: "What's the current stock price of Tesla?",
+             },
+           ],
+         }}
+         assistantMessage={{ components: { Text: MarkdownText } }}
+       />
+     </div>
+   );
+ }
+ ```