@assistant-ui/mcp-docs-server 0.1.22 → 0.1.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/.docs/organized/code-examples/waterfall.md +801 -0
  2. package/.docs/organized/code-examples/with-ag-ui.md +39 -27
  3. package/.docs/organized/code-examples/with-ai-sdk-v6.md +39 -29
  4. package/.docs/organized/code-examples/with-artifacts.md +467 -0
  5. package/.docs/organized/code-examples/with-assistant-transport.md +32 -25
  6. package/.docs/organized/code-examples/with-chain-of-thought.md +42 -33
  7. package/.docs/organized/code-examples/with-cloud-standalone.md +674 -0
  8. package/.docs/organized/code-examples/with-cloud.md +35 -28
  9. package/.docs/organized/code-examples/with-custom-thread-list.md +35 -28
  10. package/.docs/organized/code-examples/with-elevenlabs-scribe.md +42 -31
  11. package/.docs/organized/code-examples/with-expo.md +2012 -0
  12. package/.docs/organized/code-examples/with-external-store.md +32 -26
  13. package/.docs/organized/code-examples/with-ffmpeg.md +32 -28
  14. package/.docs/organized/code-examples/with-langgraph.md +97 -39
  15. package/.docs/organized/code-examples/with-parent-id-grouping.md +33 -26
  16. package/.docs/organized/code-examples/with-react-hook-form.md +63 -61
  17. package/.docs/organized/code-examples/with-react-router.md +38 -31
  18. package/.docs/organized/code-examples/with-store.md +17 -25
  19. package/.docs/organized/code-examples/with-tanstack.md +36 -26
  20. package/.docs/organized/code-examples/with-tap-runtime.md +11 -25
  21. package/.docs/raw/docs/(docs)/cli.mdx +13 -6
  22. package/.docs/raw/docs/(docs)/guides/attachments.mdx +26 -3
  23. package/.docs/raw/docs/(docs)/guides/chain-of-thought.mdx +5 -5
  24. package/.docs/raw/docs/(docs)/guides/context-api.mdx +53 -52
  25. package/.docs/raw/docs/(docs)/guides/dictation.mdx +0 -2
  26. package/.docs/raw/docs/(docs)/guides/message-timing.mdx +169 -0
  27. package/.docs/raw/docs/(docs)/guides/quoting.mdx +327 -0
  28. package/.docs/raw/docs/(docs)/guides/speech.mdx +0 -1
  29. package/.docs/raw/docs/(docs)/index.mdx +12 -2
  30. package/.docs/raw/docs/(docs)/installation.mdx +8 -2
  31. package/.docs/raw/docs/(docs)/llm.mdx +9 -7
  32. package/.docs/raw/docs/(reference)/api-reference/primitives/action-bar-more.mdx +1 -1
  33. package/.docs/raw/docs/(reference)/api-reference/primitives/action-bar.mdx +2 -2
  34. package/.docs/raw/docs/(reference)/api-reference/primitives/assistant-if.mdx +27 -27
  35. package/.docs/raw/docs/(reference)/api-reference/primitives/composer.mdx +60 -0
  36. package/.docs/raw/docs/(reference)/api-reference/primitives/message-part.mdx +78 -4
  37. package/.docs/raw/docs/(reference)/api-reference/primitives/message.mdx +32 -0
  38. package/.docs/raw/docs/(reference)/api-reference/primitives/selection-toolbar.mdx +61 -0
  39. package/.docs/raw/docs/(reference)/api-reference/primitives/thread.mdx +1 -1
  40. package/.docs/raw/docs/(reference)/legacy/styled/assistant-modal.mdx +1 -6
  41. package/.docs/raw/docs/(reference)/legacy/styled/decomposition.mdx +2 -2
  42. package/.docs/raw/docs/(reference)/legacy/styled/markdown.mdx +1 -6
  43. package/.docs/raw/docs/(reference)/legacy/styled/thread.mdx +1 -5
  44. package/.docs/raw/docs/(reference)/migrations/v0-12.mdx +17 -17
  45. package/.docs/raw/docs/cloud/ai-sdk-assistant-ui.mdx +209 -0
  46. package/.docs/raw/docs/cloud/ai-sdk.mdx +296 -0
  47. package/.docs/raw/docs/cloud/authorization.mdx +178 -79
  48. package/.docs/raw/docs/cloud/{persistence/langgraph.mdx → langgraph.mdx} +2 -2
  49. package/.docs/raw/docs/cloud/overview.mdx +29 -39
  50. package/.docs/raw/docs/react-native/adapters.mdx +118 -0
  51. package/.docs/raw/docs/react-native/custom-backend.mdx +210 -0
  52. package/.docs/raw/docs/react-native/hooks.mdx +364 -0
  53. package/.docs/raw/docs/react-native/index.mdx +332 -0
  54. package/.docs/raw/docs/react-native/primitives.mdx +653 -0
  55. package/.docs/raw/docs/runtimes/ai-sdk/v6.mdx +60 -15
  56. package/.docs/raw/docs/runtimes/assistant-transport.mdx +103 -0
  57. package/.docs/raw/docs/runtimes/custom/external-store.mdx +25 -2
  58. package/.docs/raw/docs/runtimes/data-stream.mdx +1 -3
  59. package/.docs/raw/docs/runtimes/langgraph/index.mdx +113 -9
  60. package/.docs/raw/docs/runtimes/pick-a-runtime.mdx +1 -4
  61. package/.docs/raw/docs/ui/attachment.mdx +4 -2
  62. package/.docs/raw/docs/ui/context-display.mdx +147 -0
  63. package/.docs/raw/docs/ui/message-timing.mdx +92 -0
  64. package/.docs/raw/docs/ui/part-grouping.mdx +1 -1
  65. package/.docs/raw/docs/ui/reasoning.mdx +4 -4
  66. package/.docs/raw/docs/ui/scrollbar.mdx +2 -2
  67. package/.docs/raw/docs/ui/syntax-highlighting.mdx +55 -50
  68. package/.docs/raw/docs/ui/thread.mdx +16 -9
  69. package/dist/index.d.ts +1 -1
  70. package/dist/index.d.ts.map +1 -1
  71. package/package.json +3 -3
  72. package/src/tools/tests/integration.test.ts +2 -2
  73. package/src/tools/tests/json-parsing.test.ts +1 -1
  74. package/src/tools/tests/mcp-protocol.test.ts +1 -3
  75. package/.docs/raw/docs/cloud/persistence/ai-sdk.mdx +0 -108
@@ -101,6 +101,59 @@ export default function Home() {
101
101
  </Step>
102
102
  </Steps>
103
103
 
104
+ ## Tracking Token Usage
105
+
106
+ assistant-ui exports a `useThreadTokenUsage` hook to access thread-level token usage on the client.
107
+ <Steps>
108
+ <Step>
109
+ Use `messageMetadata` in your Next.js route to attach `usage` from `finish` and `modelId` from `finish-step`.
110
+ ```tsx
111
+ import { streamText, convertToModelMessages } from "ai";
112
+ import { frontendTools } from "@assistant-ui/react-ai-sdk";
113
+ export async function POST(req: Request) {
114
+ const { messages, tools, modelName } = await req.json();
115
+ const result = streamText({
116
+ model: getModel(modelName),
117
+ messages: await convertToModelMessages(messages),
118
+ tools: frontendTools(tools),
119
+ });
120
+ return result.toUIMessageStreamResponse({
121
+ messageMetadata: ({ part }) => {
122
+ if (part.type === "finish") {
123
+ return {
124
+ usage: part.totalUsage,
125
+ };
126
+ }
127
+ if (part.type === "finish-step") {
128
+ return {
129
+ modelId: part.response.modelId,
130
+ };
131
+ }
132
+ return undefined;
133
+ },
134
+ });
135
+ }
136
+ ```
137
+ </Step>
138
+ <Step>
139
+ Use `useThreadTokenUsage` to render token usage on the client.
140
+
141
+ ```tsx
142
+ "use client";
143
+
144
+ import { useThreadTokenUsage } from "@assistant-ui/react-ai-sdk";
145
+
146
+ export function TokenCounter() {
147
+ const usage = useThreadTokenUsage();
148
+
149
+ if (!usage) return null;
150
+
151
+ return <div>{usage.totalTokens} total tokens</div>;
152
+ }
153
+ ```
154
+ </Step>
155
+ </Steps>
156
+
104
157
  ## Key Changes from v5
105
158
 
106
159
  | Feature | v5 | v6 |
@@ -119,35 +172,27 @@ Creates a runtime integrated with AI SDK's `useChat` hook.
119
172
  ```tsx
120
173
  import { useChatRuntime } from "@assistant-ui/react-ai-sdk";
121
174
 
122
- const runtime = useChatRuntime({
123
- api: "/api/chat", // optional, defaults to "/api/chat"
124
- });
175
+ const runtime = useChatRuntime();
125
176
  ```
126
177
 
127
178
  ### Custom API URL
128
179
 
129
- ```tsx
130
- const runtime = useChatRuntime({
131
- api: "/my-custom-api/chat",
132
- });
133
- ```
134
-
135
- ### Forwarding System Messages and Frontend Tools
136
-
137
- Use `AssistantChatTransport` to automatically forward system messages and frontend tools to your backend:
180
+ To use a different endpoint, pass a custom `AssistantChatTransport`:
138
181
 
139
182
  ```tsx
140
- "use client";
141
-
142
183
  import { useChatRuntime, AssistantChatTransport } from "@assistant-ui/react-ai-sdk";
143
184
 
144
185
  const runtime = useChatRuntime({
145
186
  transport: new AssistantChatTransport({
146
- api: "/api/chat",
187
+ api: "/my-custom-api/chat",
147
188
  }),
148
189
  });
149
190
  ```
150
191
 
192
+ ### System Messages and Frontend Tools
193
+
194
+ `AssistantChatTransport` (used by default) automatically forwards system messages and frontend tools to your backend. To consume them, update your backend route:
195
+
151
196
  Backend route with system/tools forwarding:
152
197
 
153
198
  ```tsx
@@ -114,6 +114,28 @@ async def chat_endpoint(request: ChatRequest):
114
114
 
115
115
  The state snapshots are automatically streamed to the frontend using the operations described in [Streaming Protocol](#streaming-protocol).
116
116
 
117
+ > **Cancellation:** `create_run` exposes `controller.is_cancelled` and `controller.cancelled_event`.
118
+ > If the response stream is closed early (for example user cancel or client disconnect),
119
+ > these are set so your backend loop can exit cooperatively.
120
+ > `controller.cancelled_event` is a read-only signal object with `wait()` and `is_set()`.
121
+ > `create_run` gives callbacks a ~50ms cooperative shutdown window before forced task cancellation.
122
+ > Callback exceptions that happen during early-close cleanup are not re-raised to the stream consumer,
123
+ > but are logged with traceback at warning level for debugging.
124
+ > Put critical cleanup in `finally` blocks, since forced cancellation may happen after the grace window.
125
+ >
126
+ > ```python
127
+ > async def run_callback(controller: RunController):
128
+ > while not controller.is_cancelled:
129
+ > # Long-running work / model loop
130
+ > await asyncio.sleep(0.05)
131
+ > ```
132
+ >
133
+ > ```python
134
+ > async def run_callback(controller: RunController):
135
+ > await controller.cancelled_event.wait()
136
+ > # cancellation-aware shutdown path
137
+ > ```
138
+
117
139
  ### Backend Reference Implementation
118
140
 
119
141
  <Tabs items={["Minimal", "Example", "LangGraph"]}>
@@ -314,6 +336,8 @@ The `useAssistantTransportRuntime` hook is used to configure the runtime. It acc
314
336
  converter: (state: T, connectionMetadata: ConnectionMetadata) => AssistantTransportState,
315
337
  headers?: Record<string, string> | (() => Promise<Record<string, string>>),
316
338
  body?: object,
339
+ prepareSendCommandsRequest?: (body: SendCommandsRequestBody) => Record<string, unknown> | Promise<Record<string, unknown>>,
340
+ capabilities?: { edit?: boolean },
317
341
  onResponse?: (response: Response) => void,
318
342
  onFinish?: () => void,
319
343
  onError?: (error: Error) => void,
@@ -490,6 +514,85 @@ const runtime = useAssistantTransportRuntime({
490
514
  });
491
515
  ```
492
516
 
517
+ ### Transforming the Request Body
518
+
519
+ Use `prepareSendCommandsRequest` to transform the entire request body before it is sent to the backend. This receives the fully assembled body object and returns the (potentially transformed) body.
520
+
521
+ ```typescript
522
+ const runtime = useAssistantTransportRuntime({
523
+ // ... other options
524
+ prepareSendCommandsRequest: (body) => ({
525
+ ...body,
526
+ trackingId: crypto.randomUUID(),
527
+ }),
528
+ });
529
+ ```
530
+
531
+ This is useful for adding tracking IDs, transforming commands, or injecting metadata that depends on the assembled request:
532
+
533
+ ```typescript
534
+ const runtime = useAssistantTransportRuntime({
535
+ // ... other options
536
+ prepareSendCommandsRequest: (body) => ({
537
+ ...body,
538
+ commands: body.commands.map((cmd) =>
539
+ cmd.type === "add-message"
540
+ ? { ...cmd, trackingId: crypto.randomUUID() }
541
+ : cmd,
542
+ ),
543
+ }),
544
+ });
545
+ ```
546
+
547
+ ## Editing Messages
548
+
549
+ By default, editing messages is disabled. To enable it, set `capabilities.edit` to `true`:
550
+
551
+ ```typescript
552
+ const runtime = useAssistantTransportRuntime({
553
+ // ... other options
554
+ capabilities: {
555
+ edit: true,
556
+ },
557
+ });
558
+ ```
559
+
560
+ `add-message` commands always include `parentId` and `sourceId` fields:
561
+
562
+ ```typescript
563
+ {
564
+ type: "add-message",
565
+ message: { role: "user", parts: [...] },
566
+ parentId: "msg-3", // The message after which this message should be inserted
567
+ sourceId: "msg-4", // The ID of the message being replaced (null for new messages)
568
+ }
569
+ ```
570
+
571
+ ### Backend Handling
572
+
573
+ When the backend receives an `add-message` command with a `parentId`, it should:
574
+
575
+ 1. Truncate all messages after the message with `parentId`
576
+ 2. Append the new message
577
+ 3. Stream the updated state back to the frontend
578
+
579
+ ```python
580
+ for command in request.commands:
581
+ if command.type == "add-message":
582
+ if hasattr(command, "parentId") and command.parentId is not None:
583
+ # Find the parent message index and truncate
584
+ parent_idx = next(
585
+ i for i, m in enumerate(messages) if m.id == command.parentId
586
+ )
587
+ messages = messages[:parent_idx + 1]
588
+ # Append the new message
589
+ messages.append(command.message)
590
+ ```
591
+
592
+ <Callout type="info">
593
+ `parentId` and `sourceId` are always included on `add-message` commands. For new messages, `sourceId` will be `null`.
594
+ </Callout>
595
+
493
596
  ## Resuming from a Sync Server
494
597
 
495
598
  <Callout type="info">
@@ -1157,6 +1157,29 @@ const ToolUI = makeAssistantToolUI({
1157
1157
  });
1158
1158
  ```
1159
1159
 
1160
+ ### Binding External Messages Manually
1161
+
1162
+ Use `bindExternalStoreMessage` to attach your original message to a `ThreadMessage` or message part object. This is useful when you construct `ThreadMessage` objects yourself (outside of the built-in message converter) and want `getExternalStoreMessages` to work with them.
1163
+
1164
+ ```tsx
1165
+ import {
1166
+ bindExternalStoreMessage,
1167
+ getExternalStoreMessages,
1168
+ } from "@assistant-ui/react";
1169
+
1170
+ // Attach your original message to a ThreadMessage
1171
+ bindExternalStoreMessage(threadMessage, originalMessage);
1172
+
1173
+ // Later, retrieve it
1174
+ const original = getExternalStoreMessages(threadMessage);
1175
+ ```
1176
+
1177
+ <Callout type="warn">
1178
+ This API is experimental and may change without notice.
1179
+ </Callout>
1180
+
1181
+ `bindExternalStoreMessage` is a no-op if the target already has a bound message. It mutates the target object in place.
1182
+
1160
1183
  ## Debugging
1161
1184
 
1162
1185
  ### Common Debugging Scenarios
@@ -1503,7 +1526,7 @@ A flexible message format that can be converted to assistant-ui's internal forma
1503
1526
  {
1504
1527
  name: "content",
1505
1528
  type: "string | readonly MessagePart[]",
1506
- description: "Message content as string or structured message parts",
1529
+ description: "Message content as string or structured message parts. Supports `data-*` prefixed types (e.g. `{ type: \"data-workflow\", data: {...} }`) which are automatically converted to DataMessagePart.",
1507
1530
  required: true,
1508
1531
  },
1509
1532
  {
@@ -1525,7 +1548,7 @@ A flexible message format that can be converted to assistant-ui's internal forma
1525
1548
  {
1526
1549
  name: "attachments",
1527
1550
  type: "readonly CompleteAttachment[]",
1528
- description: "File attachments (user messages only)",
1551
+ description: "File attachments (user messages only). Attachment `type` accepts custom strings beyond \"image\" | \"document\" | \"file\", and `contentType` is optional.",
1529
1552
  },
1530
1553
  {
1531
1554
  name: "metadata",
@@ -342,9 +342,7 @@ const runtime = useDataStreamRuntime({
342
342
 
343
343
  ## Examples
344
344
 
345
- - **[Basic Data Stream Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-data-stream)** - Simple streaming chat
346
- - **[Tool Integration Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-data-stream-tools)** - Frontend and backend tools
347
- - **[Authentication Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-data-stream-auth)** - Secure endpoints
345
+ Explore our [examples repository](https://github.com/assistant-ui/assistant-ui/tree/main/examples) for implementation references.
348
346
 
349
347
  ## API Reference
350
348
 
@@ -158,19 +158,21 @@ export const getThreadState = async (
158
158
 
159
159
  export const sendMessage = async (params: {
160
160
  threadId: string;
161
- messages: LangChainMessage;
161
+ messages: LangChainMessage[];
162
162
  config?: LangGraphSendMessageConfig;
163
163
  }) => {
164
164
  const client = createClient();
165
+ const { checkpointId, ...restConfig } = params.config ?? {};
165
166
  return client.runs.stream(
166
167
  params.threadId,
167
168
  process.env["NEXT_PUBLIC_LANGGRAPH_ASSISTANT_ID"]!,
168
169
  {
169
- input: {
170
- messages: params.messages,
171
- },
172
- streamMode: "messages",
173
- ...params.config
170
+ input: params.messages.length > 0
171
+ ? { messages: params.messages }
172
+ : null,
173
+ streamMode: "messages-tuple",
174
+ ...(checkpointId && { checkpoint_id: checkpointId }),
175
+ ...restConfig,
174
176
  },
175
177
  );
176
178
  };
@@ -196,7 +198,7 @@ import { createThread, getThreadState, sendMessage } from "@/lib/chatApi";
196
198
 
197
199
  export function MyAssistant() {
198
200
  const runtime = useLangGraphRuntime({
199
- stream: async (messages, { initialize, config }) => {
201
+ stream: async (messages, { initialize, ...config }) => {
200
202
  const { externalId } = await initialize();
201
203
  if (!externalId) throw new Error("Thread not found");
202
204
  return sendMessage({
@@ -300,6 +302,44 @@ import { convertLangChainMessages } from "@assistant-ui/react-langgraph";
300
302
  const threadMessage = convertLangChainMessages(langChainMessage);
301
303
  ```
302
304
 
305
+ ### Event Handlers
306
+
307
+ You can listen to streaming events by passing `eventHandlers` to `useLangGraphRuntime`:
308
+
309
+ ```typescript
310
+ const runtime = useLangGraphRuntime({
311
+ stream: async (messages, { initialize, ...config }) => { /* ... */ },
312
+ eventHandlers: {
313
+ onMessageChunk: (chunk, metadata) => {
314
+ // Fired for each chunk in messages-tuple mode
315
+ // metadata contains langgraph_step, langgraph_node, ls_model_name, etc.
316
+ },
317
+ onValues: (values) => {
318
+ // Fired when a "values" event is received
319
+ },
320
+ onUpdates: (updates) => {
321
+ // Fired when an "updates" event is received
322
+ },
323
+ onMetadata: (metadata) => { /* thread metadata */ },
324
+ onError: (error) => { /* stream errors */ },
325
+ onCustomEvent: (type, data) => { /* custom events */ },
326
+ },
327
+ });
328
+ ```
329
+
330
+ ### Message Metadata
331
+
332
+ When using `streamMode: "messages-tuple"`, each chunk includes metadata from the LangGraph server. Access accumulated metadata per message with the `useLangGraphMessageMetadata` hook:
333
+
334
+ ```typescript
335
+ import { useLangGraphMessageMetadata } from "@assistant-ui/react-langgraph";
336
+
337
+ function MyComponent() {
338
+ const metadata = useLangGraphMessageMetadata();
339
+ // Map<string, LangGraphTupleMetadata> keyed by message ID
340
+ }
341
+ ```
342
+
303
343
  ## Thread Management
304
344
 
305
345
  ### Basic Thread Support
@@ -308,7 +348,7 @@ The `useLangGraphRuntime` hook now includes built-in thread management capabilit
308
348
 
309
349
  ```typescript
310
350
  const runtime = useLangGraphRuntime({
311
- stream: async (messages, { initialize, config }) => {
351
+ stream: async (messages, { initialize, ...config }) => {
312
352
  // initialize() creates or loads a thread and returns its IDs
313
353
  const { remoteId, externalId } = await initialize();
314
354
  // Use externalId (your backend's thread ID) for API calls
@@ -343,7 +383,71 @@ const runtime = useLangGraphRuntime({
343
383
  });
344
384
  ```
345
385
 
346
- See the [Cloud Persistence guide](/docs/cloud/persistence/langgraph) for detailed setup instructions.
386
+ See the [Cloud Persistence guide](/docs/cloud/langgraph) for detailed setup instructions.
387
+
388
+ ## Message Editing & Regeneration
389
+
390
+ LangGraph uses server-side checkpoints for state management. To support message editing (branching) and regeneration, you need to provide a `getCheckpointId` callback that resolves the appropriate checkpoint for server-side forking.
391
+
392
+ ```typescript
393
+ const runtime = useLangGraphRuntime({
394
+ stream: async (messages, { initialize, ...config }) => {
395
+ const { externalId } = await initialize();
396
+ if (!externalId) throw new Error("Thread not found");
397
+ return sendMessage({ threadId: externalId, messages, config });
398
+ },
399
+ create: async () => {
400
+ const { thread_id } = await createThread();
401
+ return { externalId: thread_id };
402
+ },
403
+ load: async (externalId) => {
404
+ const state = await getThreadState(externalId);
405
+ return {
406
+ messages: state.values.messages,
407
+ interrupts: state.tasks[0]?.interrupts,
408
+ };
409
+ },
410
+ getCheckpointId: async (threadId, parentMessages) => {
411
+ const client = createClient();
412
+ // Get the thread state history and find the checkpoint
413
+ // that matches the parent messages by exact message ID sequence.
414
+ // If IDs are missing, return null and skip edit/reload for safety.
415
+ const history = await client.threads.getHistory(threadId);
416
+ for (const state of history) {
417
+ const stateMessages = state.values.messages;
418
+ if (!stateMessages || stateMessages.length !== parentMessages.length) {
419
+ continue;
420
+ }
421
+
422
+ const hasStableIds =
423
+ parentMessages.every((message) => typeof message.id === "string") &&
424
+ stateMessages.every((message) => typeof message.id === "string");
425
+ if (!hasStableIds) {
426
+ continue;
427
+ }
428
+
429
+ const isMatch = parentMessages.every(
430
+ (message, index) => message.id === stateMessages[index]?.id,
431
+ );
432
+
433
+ if (isMatch) {
434
+ return state.checkpoint.checkpoint_id ?? null;
435
+ }
436
+ }
437
+ return null;
438
+ },
439
+ });
440
+ ```
441
+
442
+ When `getCheckpointId` is provided:
443
+ - **Edit buttons** appear on user messages, allowing users to edit and resend from that point
444
+ - **Regenerate buttons** appear on assistant messages, allowing users to regenerate the response
445
+
446
+ The resolved `checkpointId` is passed to your `stream` callback via `config.checkpointId`. Your `sendMessage` helper should map it to the LangGraph SDK's `checkpoint_id` parameter (see the helper function in the setup section above).
447
+
448
+ <Callout type="info">
449
+ Without `getCheckpointId`, the edit and regenerate buttons will not appear. This is intentional — simply truncating client-side messages without forking from the correct server-side checkpoint would produce incorrect state.
450
+ </Callout>
347
451
 
348
452
  ## Interrupt Persistence
349
453
 
@@ -130,9 +130,7 @@ Pre-built integrations can always be replaced with a custom `LocalRuntime` or `E
130
130
  import { useChatRuntime } from "@assistant-ui/react-ai-sdk";
131
131
 
132
132
  export function MyAssistant() {
133
- const runtime = useChatRuntime({
134
- api: "/api/chat",
135
- });
133
+ const runtime = useChatRuntime();
136
134
 
137
135
  return (
138
136
  <AssistantRuntimeProvider runtime={runtime}>
@@ -189,7 +187,6 @@ Explore our implementation examples:
189
187
  - **[External Store Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-external-store)** - `ExternalStoreRuntime` with custom state
190
188
  - **[Assistant Cloud Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-cloud)** - Multi-thread with cloud persistence
191
189
  - **[LangGraph Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-langgraph)** - Agent workflows
192
- - **[OpenAI Assistants Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-openai-assistants)** - OpenAI Assistants API
193
190
 
194
191
  ## Common Pitfalls to Avoid
195
192
 
@@ -209,9 +209,9 @@ Attachments have the following structure:
209
209
  ```typescript
210
210
  type Attachment = {
211
211
  id: string;
212
- type: "image" | "document" | "file";
212
+ type: "image" | "document" | "file" | (string & {});
213
213
  name: string;
214
- contentType: string;
214
+ contentType?: string;
215
215
  file?: File;
216
216
  status:
217
217
  | { type: "running" | "requires-action" | "incomplete"; progress?: number }
@@ -219,6 +219,8 @@ type Attachment = {
219
219
  };
220
220
  ```
221
221
 
222
+ The `type` field accepts custom strings (e.g. `"data-workflow"`) beyond the built-in types. When an unknown type is encountered, the generic `Attachment` component is used as a fallback. The `contentType` field is optional — it can be omitted for non-file attachments where a MIME type is not meaningful.
223
+
222
224
  ## Related Components
223
225
 
224
226
  - [Thread](/docs/ui/thread) - Main chat interface that displays attachments
@@ -0,0 +1,147 @@
1
+ ---
2
+ title: Context Display
3
+ description: Visualize token usage relative to a model's context window — ring, bar, or text — with a detailed hover popover.
4
+ ---
5
+
6
+ import { ContextDisplaySample } from "@/components/docs/samples/context-display";
7
+
8
+ <ContextDisplaySample />
9
+
10
+ <Callout type="info">
11
+ This component requires server-side setup to [forward token usage metadata](#forward-token-usage-from-your-route-handler). Without it, ContextDisplay will show 0 usage and no breakdown data.
12
+ </Callout>
13
+
14
+ ## Getting Started
15
+
16
+ <Steps>
17
+ <Step>
18
+
19
+ ### Add `context-display`
20
+
21
+ <InstallCommand shadcn={["context-display"]} />
22
+
23
+ This adds a `/components/assistant-ui/context-display.tsx` file to your project.
24
+
25
+ </Step>
26
+ <Step>
27
+
28
+ ### Forward token usage from your route handler
29
+
30
+ Use `messageMetadata` in your Next.js route to attach `usage` from `finish` and `modelId` from `finish-step`:
31
+
32
+ ```tsx title="app/api/chat/route.ts"
33
+ import { streamText, convertToModelMessages } from "ai";
34
+
35
+ export async function POST(req: Request) {
36
+ const { messages, modelName } = await req.json();
37
+ const result = streamText({
38
+ model: getModel(modelName),
39
+ messages: await convertToModelMessages(messages),
40
+ });
41
+ return result.toUIMessageStreamResponse({
42
+ messageMetadata: ({ part }) => {
43
+ if (part.type === "finish") {
44
+ return {
45
+ usage: part.totalUsage,
46
+ };
47
+ }
48
+ if (part.type === "finish-step") {
49
+ return {
50
+ modelId: part.response.modelId,
51
+ };
52
+ }
53
+ return undefined;
54
+ },
55
+ });
56
+ }
57
+ ```
58
+
59
+ </Step>
60
+ <Step>
61
+
62
+ ### Use in your application
63
+
64
+ Pick a variant and place it in your thread footer, composer, or sidebar. Pass `modelContextWindow` with your model's token limit.
65
+
66
+ ```tsx title="/components/assistant-ui/thread.tsx" {1,8}
67
+ import { ContextDisplay } from "@/components/assistant-ui/context-display";
68
+
69
+ const ThreadFooter: FC = () => {
70
+ return (
71
+ <div className="flex items-center justify-end px-3 py-1.5">
72
+ <ContextDisplay.Bar modelContextWindow={128000} />
73
+ </div>
74
+ );
75
+ };
76
+ ```
77
+
78
+ </Step>
79
+ </Steps>
80
+
81
+ ## Variants
82
+
83
+ Three preset variants are available, each wrapping the shared tooltip popover:
84
+
85
+ ```tsx
86
+ // SVG donut ring (default, compact)
87
+ <ContextDisplay.Ring modelContextWindow={128000} />
88
+
89
+ // Horizontal progress bar with label
90
+ <ContextDisplay.Bar modelContextWindow={128000} />
91
+
92
+ // Minimal monospace text
93
+ <ContextDisplay.Text modelContextWindow={128000} />
94
+ ```
95
+
96
+ All presets accept `className` for styling overrides and `side` to control tooltip placement (`"top"`, `"bottom"`, `"left"`, `"right"`).
97
+
98
+ ## Composable API
99
+
100
+ For custom visualizations, use the building blocks directly:
101
+
102
+ ```tsx
103
+ import { ContextDisplay } from "@/components/assistant-ui/context-display";
104
+
105
+ <ContextDisplay.Root modelContextWindow={128000}>
106
+ <ContextDisplay.Trigger aria-label="Context usage">
107
+ <MyCustomGauge />
108
+ </ContextDisplay.Trigger>
109
+ <ContextDisplay.Content side="top" />
110
+ </ContextDisplay.Root>
111
+ ```
112
+
113
+ | Component | Description |
114
+ |-----------|-------------|
115
+ | `Root` | Uses provided `usage` when supplied, otherwise fetches token usage internally; provides shared context and wraps children in a tooltip |
116
+ | `Trigger` | Button that opens the tooltip on hover |
117
+ | `Content` | Tooltip popover with the token breakdown (Usage %, Input, Cached, Output, Reasoning, Total) |
118
+
119
+ ## API Reference
120
+
121
+ ### Preset Props
122
+
123
+ All preset variants (`Ring`, `Bar`, `Text`) share the same props:
124
+
125
+ | Prop | Type | Default | Description |
126
+ |------|------|---------|-------------|
127
+ | `modelContextWindow` | `number` | — | Maximum token limit of the current model (required) |
128
+ | `className` | `string` | — | Additional class names on the trigger button |
129
+ | `side` | `"top" \| "bottom" \| "left" \| "right"` | `"top"` | Tooltip placement |
130
+ | `usage` | `ThreadTokenUsage` | — | Optional externally-provided usage data (skips internal usage fetch when provided) |
131
+
132
+ ### Color Thresholds
133
+
134
+ Ring and Bar share the same severity colors:
135
+
136
+ | Level | Threshold | Ring | Bar |
137
+ |-------|-----------|------|-----|
138
+ | Low | `< 65%` | `stroke-emerald-500` | `bg-emerald-500` |
139
+ | Warning | `65% – 85%` | `stroke-amber-500` | `bg-amber-500` |
140
+ | Critical | `> 85%` | `stroke-red-500` | `bg-red-500` |
141
+
142
+ Text displays numeric values only — no severity color.
143
+
144
+ ## Related
145
+
146
+ - [Message Timing](/docs/ui/message-timing) — Streaming performance stats (TTFT, tok/s)
147
+ - [Thread](/docs/ui/thread) — The thread component where ContextDisplay is typically placed