@assistant-ui/mcp-docs-server 0.1.21 → 0.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74) hide show
  1. package/.docs/organized/code-examples/waterfall.md +801 -0
  2. package/.docs/organized/code-examples/with-ag-ui.md +38 -26
  3. package/.docs/organized/code-examples/with-ai-sdk-v6.md +38 -28
  4. package/.docs/organized/code-examples/with-artifacts.md +467 -0
  5. package/.docs/organized/code-examples/with-assistant-transport.md +31 -24
  6. package/.docs/organized/code-examples/with-chain-of-thought.md +607 -0
  7. package/.docs/organized/code-examples/with-cloud-standalone.md +675 -0
  8. package/.docs/organized/code-examples/with-cloud.md +34 -27
  9. package/.docs/organized/code-examples/with-custom-thread-list.md +34 -27
  10. package/.docs/organized/code-examples/with-elevenlabs-scribe.md +41 -30
  11. package/.docs/organized/code-examples/with-expo.md +2031 -0
  12. package/.docs/organized/code-examples/with-external-store.md +32 -25
  13. package/.docs/organized/code-examples/with-ffmpeg.md +31 -27
  14. package/.docs/organized/code-examples/with-langgraph.md +96 -38
  15. package/.docs/organized/code-examples/with-parent-id-grouping.md +32 -25
  16. package/.docs/organized/code-examples/with-react-hook-form.md +63 -58
  17. package/.docs/organized/code-examples/with-react-router.md +38 -30
  18. package/.docs/organized/code-examples/with-store.md +16 -24
  19. package/.docs/organized/code-examples/with-tanstack.md +36 -26
  20. package/.docs/organized/code-examples/with-tap-runtime.md +10 -24
  21. package/.docs/raw/docs/(docs)/cli.mdx +13 -6
  22. package/.docs/raw/docs/(docs)/guides/attachments.mdx +26 -3
  23. package/.docs/raw/docs/(docs)/guides/chain-of-thought.mdx +162 -0
  24. package/.docs/raw/docs/(docs)/guides/context-api.mdx +53 -52
  25. package/.docs/raw/docs/(docs)/guides/dictation.mdx +0 -2
  26. package/.docs/raw/docs/(docs)/guides/message-timing.mdx +169 -0
  27. package/.docs/raw/docs/(docs)/guides/quoting.mdx +327 -0
  28. package/.docs/raw/docs/(docs)/guides/speech.mdx +0 -1
  29. package/.docs/raw/docs/(docs)/index.mdx +13 -3
  30. package/.docs/raw/docs/(docs)/installation.mdx +8 -2
  31. package/.docs/raw/docs/(docs)/llm.mdx +10 -8
  32. package/.docs/raw/docs/(reference)/api-reference/primitives/action-bar-more.mdx +1 -1
  33. package/.docs/raw/docs/(reference)/api-reference/primitives/action-bar.mdx +2 -2
  34. package/.docs/raw/docs/(reference)/api-reference/primitives/assistant-if.mdx +27 -27
  35. package/.docs/raw/docs/(reference)/api-reference/primitives/composer.mdx +60 -0
  36. package/.docs/raw/docs/(reference)/api-reference/primitives/message-part.mdx +78 -4
  37. package/.docs/raw/docs/(reference)/api-reference/primitives/message.mdx +32 -0
  38. package/.docs/raw/docs/(reference)/api-reference/primitives/selection-toolbar.mdx +61 -0
  39. package/.docs/raw/docs/(reference)/api-reference/primitives/thread.mdx +1 -1
  40. package/.docs/raw/docs/(reference)/legacy/styled/assistant-modal.mdx +1 -6
  41. package/.docs/raw/docs/(reference)/legacy/styled/decomposition.mdx +2 -2
  42. package/.docs/raw/docs/(reference)/legacy/styled/markdown.mdx +1 -6
  43. package/.docs/raw/docs/(reference)/legacy/styled/thread.mdx +1 -5
  44. package/.docs/raw/docs/(reference)/migrations/v0-12.mdx +17 -17
  45. package/.docs/raw/docs/cloud/ai-sdk-assistant-ui.mdx +205 -0
  46. package/.docs/raw/docs/cloud/ai-sdk.mdx +292 -0
  47. package/.docs/raw/docs/cloud/authorization.mdx +178 -79
  48. package/.docs/raw/docs/cloud/{persistence/langgraph.mdx → langgraph.mdx} +2 -2
  49. package/.docs/raw/docs/cloud/overview.mdx +29 -39
  50. package/.docs/raw/docs/react-native/adapters.mdx +118 -0
  51. package/.docs/raw/docs/react-native/custom-backend.mdx +210 -0
  52. package/.docs/raw/docs/react-native/hooks.mdx +364 -0
  53. package/.docs/raw/docs/react-native/index.mdx +332 -0
  54. package/.docs/raw/docs/react-native/primitives.mdx +653 -0
  55. package/.docs/raw/docs/runtimes/ai-sdk/v6.mdx +7 -15
  56. package/.docs/raw/docs/runtimes/assistant-transport.mdx +103 -0
  57. package/.docs/raw/docs/runtimes/custom/external-store.mdx +25 -2
  58. package/.docs/raw/docs/runtimes/data-stream.mdx +1 -3
  59. package/.docs/raw/docs/runtimes/langgraph/index.mdx +113 -9
  60. package/.docs/raw/docs/runtimes/pick-a-runtime.mdx +1 -4
  61. package/.docs/raw/docs/ui/attachment.mdx +4 -2
  62. package/.docs/raw/docs/ui/message-timing.mdx +92 -0
  63. package/.docs/raw/docs/ui/part-grouping.mdx +1 -1
  64. package/.docs/raw/docs/ui/reasoning.mdx +4 -4
  65. package/.docs/raw/docs/ui/scrollbar.mdx +2 -2
  66. package/.docs/raw/docs/ui/syntax-highlighting.mdx +55 -50
  67. package/.docs/raw/docs/ui/thread.mdx +16 -9
  68. package/dist/index.d.ts +1 -1
  69. package/dist/index.d.ts.map +1 -1
  70. package/package.json +3 -3
  71. package/src/tools/tests/integration.test.ts +2 -2
  72. package/src/tools/tests/json-parsing.test.ts +1 -1
  73. package/src/tools/tests/mcp-protocol.test.ts +1 -3
  74. package/.docs/raw/docs/cloud/persistence/ai-sdk.mdx +0 -108
@@ -114,6 +114,28 @@ async def chat_endpoint(request: ChatRequest):
114
114
 
115
115
  The state snapshots are automatically streamed to the frontend using the operations described in [Streaming Protocol](#streaming-protocol).
116
116
 
117
+ > **Cancellation:** `create_run` exposes `controller.is_cancelled` and `controller.cancelled_event`.
118
+ > If the response stream is closed early (for example user cancel or client disconnect),
119
+ > these are set so your backend loop can exit cooperatively.
120
+ > `controller.cancelled_event` is a read-only signal object with `wait()` and `is_set()`.
121
+ > `create_run` gives callbacks a ~50ms cooperative shutdown window before forced task cancellation.
122
+ > Callback exceptions that happen during early-close cleanup are not re-raised to the stream consumer,
123
+ > but are logged with traceback at warning level for debugging.
124
+ > Put critical cleanup in `finally` blocks, since forced cancellation may happen after the grace window.
125
+ >
126
+ > ```python
127
+ > async def run_callback(controller: RunController):
128
+ > while not controller.is_cancelled:
129
+ > # Long-running work / model loop
130
+ > await asyncio.sleep(0.05)
131
+ > ```
132
+ >
133
+ > ```python
134
+ > async def run_callback(controller: RunController):
135
+ > await controller.cancelled_event.wait()
136
+ > # cancellation-aware shutdown path
137
+ > ```
138
+
117
139
  ### Backend Reference Implementation
118
140
 
119
141
  <Tabs items={["Minimal", "Example", "LangGraph"]}>
@@ -314,6 +336,8 @@ The `useAssistantTransportRuntime` hook is used to configure the runtime. It acc
314
336
  converter: (state: T, connectionMetadata: ConnectionMetadata) => AssistantTransportState,
315
337
  headers?: Record<string, string> | (() => Promise<Record<string, string>>),
316
338
  body?: object,
339
+ prepareSendCommandsRequest?: (body: SendCommandsRequestBody) => Record<string, unknown> | Promise<Record<string, unknown>>,
340
+ capabilities?: { edit?: boolean },
317
341
  onResponse?: (response: Response) => void,
318
342
  onFinish?: () => void,
319
343
  onError?: (error: Error) => void,
@@ -490,6 +514,85 @@ const runtime = useAssistantTransportRuntime({
490
514
  });
491
515
  ```
492
516
 
517
+ ### Transforming the Request Body
518
+
519
+ Use `prepareSendCommandsRequest` to transform the entire request body before it is sent to the backend. This receives the fully assembled body object and returns the (potentially transformed) body.
520
+
521
+ ```typescript
522
+ const runtime = useAssistantTransportRuntime({
523
+ // ... other options
524
+ prepareSendCommandsRequest: (body) => ({
525
+ ...body,
526
+ trackingId: crypto.randomUUID(),
527
+ }),
528
+ });
529
+ ```
530
+
531
+ This is useful for adding tracking IDs, transforming commands, or injecting metadata that depends on the assembled request:
532
+
533
+ ```typescript
534
+ const runtime = useAssistantTransportRuntime({
535
+ // ... other options
536
+ prepareSendCommandsRequest: (body) => ({
537
+ ...body,
538
+ commands: body.commands.map((cmd) =>
539
+ cmd.type === "add-message"
540
+ ? { ...cmd, trackingId: crypto.randomUUID() }
541
+ : cmd,
542
+ ),
543
+ }),
544
+ });
545
+ ```
546
+
547
+ ## Editing Messages
548
+
549
+ By default, editing messages is disabled. To enable it, set `capabilities.edit` to `true`:
550
+
551
+ ```typescript
552
+ const runtime = useAssistantTransportRuntime({
553
+ // ... other options
554
+ capabilities: {
555
+ edit: true,
556
+ },
557
+ });
558
+ ```
559
+
560
+ `add-message` commands always include `parentId` and `sourceId` fields:
561
+
562
+ ```typescript
563
+ {
564
+ type: "add-message",
565
+ message: { role: "user", parts: [...] },
566
+ parentId: "msg-3", // The message after which this message should be inserted
567
+ sourceId: "msg-4", // The ID of the message being replaced (null for new messages)
568
+ }
569
+ ```
570
+
571
+ ### Backend Handling
572
+
573
+ When the backend receives an `add-message` command with a `parentId`, it should:
574
+
575
+ 1. Truncate all messages after the message with `parentId`
576
+ 2. Append the new message
577
+ 3. Stream the updated state back to the frontend
578
+
579
+ ```python
580
+ for command in request.commands:
581
+ if command.type == "add-message":
582
+ if hasattr(command, "parentId") and command.parentId is not None:
583
+ # Find the parent message index and truncate
584
+ parent_idx = next(
585
+ i for i, m in enumerate(messages) if m.id == command.parentId
586
+ )
587
+ messages = messages[:parent_idx + 1]
588
+ # Append the new message
589
+ messages.append(command.message)
590
+ ```
591
+
592
+ <Callout type="info">
593
+ `parentId` and `sourceId` are always included on `add-message` commands. For new messages, `sourceId` will be `null`.
594
+ </Callout>
595
+
493
596
  ## Resuming from a Sync Server
494
597
 
495
598
  <Callout type="info">
@@ -1157,6 +1157,29 @@ const ToolUI = makeAssistantToolUI({
1157
1157
  });
1158
1158
  ```
1159
1159
 
1160
+ ### Binding External Messages Manually
1161
+
1162
+ Use `bindExternalStoreMessage` to attach your original message to a `ThreadMessage` or message part object. This is useful when you construct `ThreadMessage` objects yourself (outside of the built-in message converter) and want `getExternalStoreMessages` to work with them.
1163
+
1164
+ ```tsx
1165
+ import {
1166
+ bindExternalStoreMessage,
1167
+ getExternalStoreMessages,
1168
+ } from "@assistant-ui/react";
1169
+
1170
+ // Attach your original message to a ThreadMessage
1171
+ bindExternalStoreMessage(threadMessage, originalMessage);
1172
+
1173
+ // Later, retrieve it
1174
+ const original = getExternalStoreMessages(threadMessage);
1175
+ ```
1176
+
1177
+ <Callout type="warn">
1178
+ This API is experimental and may change without notice.
1179
+ </Callout>
1180
+
1181
+ `bindExternalStoreMessage` is a no-op if the target already has a bound message. It mutates the target object in place.
1182
+
1160
1183
  ## Debugging
1161
1184
 
1162
1185
  ### Common Debugging Scenarios
@@ -1503,7 +1526,7 @@ A flexible message format that can be converted to assistant-ui's internal forma
1503
1526
  {
1504
1527
  name: "content",
1505
1528
  type: "string | readonly MessagePart[]",
1506
- description: "Message content as string or structured message parts",
1529
+ description: "Message content as string or structured message parts. Supports `data-*` prefixed types (e.g. `{ type: \"data-workflow\", data: {...} }`) which are automatically converted to DataMessagePart.",
1507
1530
  required: true,
1508
1531
  },
1509
1532
  {
@@ -1525,7 +1548,7 @@ A flexible message format that can be converted to assistant-ui's internal forma
1525
1548
  {
1526
1549
  name: "attachments",
1527
1550
  type: "readonly CompleteAttachment[]",
1528
- description: "File attachments (user messages only)",
1551
+ description: "File attachments (user messages only). Attachment `type` accepts custom strings beyond \"image\" | \"document\" | \"file\", and `contentType` is optional.",
1529
1552
  },
1530
1553
  {
1531
1554
  name: "metadata",
@@ -342,9 +342,7 @@ const runtime = useDataStreamRuntime({
342
342
 
343
343
  ## Examples
344
344
 
345
- - **[Basic Data Stream Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-data-stream)** - Simple streaming chat
346
- - **[Tool Integration Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-data-stream-tools)** - Frontend and backend tools
347
- - **[Authentication Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-data-stream-auth)** - Secure endpoints
345
+ Explore our [examples repository](https://github.com/assistant-ui/assistant-ui/tree/main/examples) for implementation references.
348
346
 
349
347
  ## API Reference
350
348
 
@@ -158,19 +158,21 @@ export const getThreadState = async (
158
158
 
159
159
  export const sendMessage = async (params: {
160
160
  threadId: string;
161
- messages: LangChainMessage;
161
+ messages: LangChainMessage[];
162
162
  config?: LangGraphSendMessageConfig;
163
163
  }) => {
164
164
  const client = createClient();
165
+ const { checkpointId, ...restConfig } = params.config ?? {};
165
166
  return client.runs.stream(
166
167
  params.threadId,
167
168
  process.env["NEXT_PUBLIC_LANGGRAPH_ASSISTANT_ID"]!,
168
169
  {
169
- input: {
170
- messages: params.messages,
171
- },
172
- streamMode: "messages",
173
- ...params.config
170
+ input: params.messages.length > 0
171
+ ? { messages: params.messages }
172
+ : null,
173
+ streamMode: "messages-tuple",
174
+ ...(checkpointId && { checkpoint_id: checkpointId }),
175
+ ...restConfig,
174
176
  },
175
177
  );
176
178
  };
@@ -196,7 +198,7 @@ import { createThread, getThreadState, sendMessage } from "@/lib/chatApi";
196
198
 
197
199
  export function MyAssistant() {
198
200
  const runtime = useLangGraphRuntime({
199
- stream: async (messages, { initialize, config }) => {
201
+ stream: async (messages, { initialize, ...config }) => {
200
202
  const { externalId } = await initialize();
201
203
  if (!externalId) throw new Error("Thread not found");
202
204
  return sendMessage({
@@ -300,6 +302,44 @@ import { convertLangChainMessages } from "@assistant-ui/react-langgraph";
300
302
  const threadMessage = convertLangChainMessages(langChainMessage);
301
303
  ```
302
304
 
305
+ ### Event Handlers
306
+
307
+ You can listen to streaming events by passing `eventHandlers` to `useLangGraphRuntime`:
308
+
309
+ ```typescript
310
+ const runtime = useLangGraphRuntime({
311
+ stream: async (messages, { initialize, ...config }) => { /* ... */ },
312
+ eventHandlers: {
313
+ onMessageChunk: (chunk, metadata) => {
314
+ // Fired for each chunk in messages-tuple mode
315
+ // metadata contains langgraph_step, langgraph_node, ls_model_name, etc.
316
+ },
317
+ onValues: (values) => {
318
+ // Fired when a "values" event is received
319
+ },
320
+ onUpdates: (updates) => {
321
+ // Fired when an "updates" event is received
322
+ },
323
+ onMetadata: (metadata) => { /* thread metadata */ },
324
+ onError: (error) => { /* stream errors */ },
325
+ onCustomEvent: (type, data) => { /* custom events */ },
326
+ },
327
+ });
328
+ ```
329
+
330
+ ### Message Metadata
331
+
332
+ When using `streamMode: "messages-tuple"`, each chunk includes metadata from the LangGraph server. Access accumulated metadata per message with the `useLangGraphMessageMetadata` hook:
333
+
334
+ ```typescript
335
+ import { useLangGraphMessageMetadata } from "@assistant-ui/react-langgraph";
336
+
337
+ function MyComponent() {
338
+ const metadata = useLangGraphMessageMetadata();
339
+ // Map<string, LangGraphTupleMetadata> keyed by message ID
340
+ }
341
+ ```
342
+
303
343
  ## Thread Management
304
344
 
305
345
  ### Basic Thread Support
@@ -308,7 +348,7 @@ The `useLangGraphRuntime` hook now includes built-in thread management capabilit
308
348
 
309
349
  ```typescript
310
350
  const runtime = useLangGraphRuntime({
311
- stream: async (messages, { initialize, config }) => {
351
+ stream: async (messages, { initialize, ...config }) => {
312
352
  // initialize() creates or loads a thread and returns its IDs
313
353
  const { remoteId, externalId } = await initialize();
314
354
  // Use externalId (your backend's thread ID) for API calls
@@ -343,7 +383,71 @@ const runtime = useLangGraphRuntime({
343
383
  });
344
384
  ```
345
385
 
346
- See the [Cloud Persistence guide](/docs/cloud/persistence/langgraph) for detailed setup instructions.
386
+ See the [Cloud Persistence guide](/docs/cloud/langgraph) for detailed setup instructions.
387
+
388
+ ## Message Editing & Regeneration
389
+
390
+ LangGraph uses server-side checkpoints for state management. To support message editing (branching) and regeneration, you need to provide a `getCheckpointId` callback that resolves the appropriate checkpoint for server-side forking.
391
+
392
+ ```typescript
393
+ const runtime = useLangGraphRuntime({
394
+ stream: async (messages, { initialize, ...config }) => {
395
+ const { externalId } = await initialize();
396
+ if (!externalId) throw new Error("Thread not found");
397
+ return sendMessage({ threadId: externalId, messages, config });
398
+ },
399
+ create: async () => {
400
+ const { thread_id } = await createThread();
401
+ return { externalId: thread_id };
402
+ },
403
+ load: async (externalId) => {
404
+ const state = await getThreadState(externalId);
405
+ return {
406
+ messages: state.values.messages,
407
+ interrupts: state.tasks[0]?.interrupts,
408
+ };
409
+ },
410
+ getCheckpointId: async (threadId, parentMessages) => {
411
+ const client = createClient();
412
+ // Get the thread state history and find the checkpoint
413
+ // that matches the parent messages by exact message ID sequence.
414
+ // If IDs are missing, return null and skip edit/reload for safety.
415
+ const history = await client.threads.getHistory(threadId);
416
+ for (const state of history) {
417
+ const stateMessages = state.values.messages;
418
+ if (!stateMessages || stateMessages.length !== parentMessages.length) {
419
+ continue;
420
+ }
421
+
422
+ const hasStableIds =
423
+ parentMessages.every((message) => typeof message.id === "string") &&
424
+ stateMessages.every((message) => typeof message.id === "string");
425
+ if (!hasStableIds) {
426
+ continue;
427
+ }
428
+
429
+ const isMatch = parentMessages.every(
430
+ (message, index) => message.id === stateMessages[index]?.id,
431
+ );
432
+
433
+ if (isMatch) {
434
+ return state.checkpoint.checkpoint_id ?? null;
435
+ }
436
+ }
437
+ return null;
438
+ },
439
+ });
440
+ ```
441
+
442
+ When `getCheckpointId` is provided:
443
+ - **Edit buttons** appear on user messages, allowing users to edit and resend from that point
444
+ - **Regenerate buttons** appear on assistant messages, allowing users to regenerate the response
445
+
446
+ The resolved `checkpointId` is passed to your `stream` callback via `config.checkpointId`. Your `sendMessage` helper should map it to the LangGraph SDK's `checkpoint_id` parameter (see the helper function in the setup section above).
447
+
448
+ <Callout type="info">
449
+ Without `getCheckpointId`, the edit and regenerate buttons will not appear. This is intentional — simply truncating client-side messages without forking from the correct server-side checkpoint would produce incorrect state.
450
+ </Callout>
347
451
 
348
452
  ## Interrupt Persistence
349
453
 
@@ -130,9 +130,7 @@ Pre-built integrations can always be replaced with a custom `LocalRuntime` or `E
130
130
  import { useChatRuntime } from "@assistant-ui/react-ai-sdk";
131
131
 
132
132
  export function MyAssistant() {
133
- const runtime = useChatRuntime({
134
- api: "/api/chat",
135
- });
133
+ const runtime = useChatRuntime();
136
134
 
137
135
  return (
138
136
  <AssistantRuntimeProvider runtime={runtime}>
@@ -189,7 +187,6 @@ Explore our implementation examples:
189
187
  - **[External Store Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-external-store)** - `ExternalStoreRuntime` with custom state
190
188
  - **[Assistant Cloud Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-cloud)** - Multi-thread with cloud persistence
191
189
  - **[LangGraph Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-langgraph)** - Agent workflows
192
- - **[OpenAI Assistants Example](https://github.com/assistant-ui/assistant-ui/tree/main/examples/with-openai-assistants)** - OpenAI Assistants API
193
190
 
194
191
  ## Common Pitfalls to Avoid
195
192
 
@@ -209,9 +209,9 @@ Attachments have the following structure:
209
209
  ```typescript
210
210
  type Attachment = {
211
211
  id: string;
212
- type: "image" | "document" | "file";
212
+ type: "image" | "document" | "file" | (string & {});
213
213
  name: string;
214
- contentType: string;
214
+ contentType?: string;
215
215
  file?: File;
216
216
  status:
217
217
  | { type: "running" | "requires-action" | "incomplete"; progress?: number }
@@ -219,6 +219,8 @@ type Attachment = {
219
219
  };
220
220
  ```
221
221
 
222
+ The `type` field accepts custom strings (e.g. `"data-workflow"`) beyond the built-in types. When an unknown type is encountered, the generic `Attachment` component is used as a fallback. The `contentType` field is optional — it can be omitted for non-file attachments where a MIME type is not meaningful.
223
+
222
224
  ## Related Components
223
225
 
224
226
  - [Thread](/docs/ui/thread) - Main chat interface that displays attachments
@@ -0,0 +1,92 @@
1
+ ---
2
+ title: Message Timing
3
+ description: Display streaming performance stats — TTFT, total time, tok/s, and chunk count — as a badge with a hover popover.
4
+ ---
5
+
6
+ import { MessageTimingSample } from "@/components/docs/samples/message-timing";
7
+
8
+ <MessageTimingSample />
9
+
10
+ <Callout type="warn">
11
+ This component is experimental. The API and displayed metrics may change in future versions. When used with the Vercel AI SDK, token counts and tok/s are **estimated** client-side and may be inaccurate — see [Accuracy](#accuracy) below.
12
+ </Callout>
13
+
14
+ ## Getting Started
15
+
16
+ <Steps>
17
+ <Step>
18
+
19
+ ### Add `message-timing`
20
+
21
+ <InstallCommand shadcn={["message-timing"]} />
22
+
23
+ This adds a `/components/assistant-ui/message-timing.tsx` file to your project.
24
+
25
+ </Step>
26
+ <Step>
27
+
28
+ ### Use in your application
29
+
30
+ Place `MessageTiming` inside `ActionBarPrimitive.Root` in your `thread.tsx`. It inherits the action bar's auto-hide behaviour and only renders after the stream completes.
31
+
32
+ ```tsx title="/components/assistant-ui/thread.tsx" {2,12}
33
+ import { ActionBarPrimitive } from "@assistant-ui/react";
34
+ import { MessageTiming } from "@/components/assistant-ui/message-timing";
35
+
36
+ const AssistantActionBar: FC = () => {
37
+ return (
38
+ <ActionBarPrimitive.Root
39
+ hideWhenRunning
40
+ autohide="not-last"
41
+ >
42
+ <ActionBarPrimitive.Copy />
43
+ <ActionBarPrimitive.Reload />
44
+ <MessageTiming />
45
+ </ActionBarPrimitive.Root>
46
+ );
47
+ };
48
+ ```
49
+
50
+ </Step>
51
+ </Steps>
52
+
53
+ ## What It Shows
54
+
55
+ The badge displays `totalStreamTime` inline and reveals a popover on hover with the full breakdown:
56
+
57
+ | Metric | Description |
58
+ |--------|-------------|
59
+ | **First token** | Time from request start to first text chunk (TTFT) |
60
+ | **Total** | Total wall-clock time from start to stream end |
61
+ | **Speed** | Output tokens per second (hidden for very short messages) |
62
+ | **Chunks** | Number of stream chunks received |
63
+
64
+ ## Accuracy
65
+
66
+ Timing accuracy depends on how your backend is connected.
67
+
68
+ ### assistant-stream (accurate)
69
+
70
+ When using `assistant-stream` on the backend, token counts come directly from the model's usage data sent in `step-finish` chunks. The `tokensPerSecond` metric is exact whenever your backend reports `outputTokens`.
71
+
72
+ ### Vercel AI SDK (estimated)
73
+
74
+ When using the AI SDK integration (`useChatRuntime`), token counts are **estimated** client-side using a 4-characters-per-token approximation. This can overcount significantly for short messages.
75
+
76
+ ## API Reference
77
+
78
+ ### `MessageTiming` component
79
+
80
+ | Prop | Type | Default | Description |
81
+ |------|------|---------|-------------|
82
+ | `className` | `string` | — | Additional class names on the root element |
83
+ | `side` | `"top" \| "right" \| "bottom" \| "left"` | `"right"` | Side of the tooltip relative to the badge |
84
+
85
+ Renders `null` until `totalStreamTime` is available (i.e., while streaming or for user messages).
86
+
87
+ For the underlying `useMessageTiming()` hook, field definitions, and runtime-specific setup (LocalRuntime, ExternalStore, etc.), see the [Message Timing guide](/docs/guides/message-timing).
88
+
89
+ ## Related
90
+
91
+ - [Message Timing guide](/docs/guides/message-timing) — `useMessageTiming()` hook, runtime support table, and custom timing UI
92
+ - [Thread](/docs/ui/thread) — The action bar context that `MessageTiming` is typically placed inside
@@ -517,7 +517,7 @@ import { useAuiState } from "@assistant-ui/react";
517
517
  const DynamicGroup: FC<
518
518
  PropsWithChildren<{ groupKey: string | undefined; indices: number[] }>
519
519
  > = ({ groupKey, indices, children }) => {
520
- const parts = useAuiState(({ message }) => message.content);
520
+ const parts = useAuiState((s) => s.message.content);
521
521
  const groupParts = indices.map((i) => parts[i]);
522
522
 
523
523
  // Analyze group content
@@ -90,11 +90,11 @@ const ReasoningGroupImpl: ReasoningGroupComponent = ({
90
90
  startIndex,
91
91
  endIndex,
92
92
  }) => {
93
- const isReasoningStreaming = useAuiState(({ message }) => {
94
- if (message.status?.type !== "running") return false;
95
- const lastIndex = message.parts.length - 1;
93
+ const isReasoningStreaming = useAuiState((s) => {
94
+ if (s.message.status?.type !== "running") return false;
95
+ const lastIndex = s.message.parts.length - 1;
96
96
  if (lastIndex < 0) return false;
97
- const lastType = message.parts[lastIndex]?.type;
97
+ const lastType = s.message.parts[lastIndex]?.type;
98
98
  if (lastType !== "reasoning") return false;
99
99
  return lastIndex >= startIndex && lastIndex <= endIndex;
100
100
  });
@@ -7,7 +7,7 @@ import { ScrollbarSample } from "@/components/docs/samples/scrollbar";
7
7
 
8
8
  <ScrollbarSample />
9
9
 
10
- If you want to show a custom scrollbar UI of the `ThreadPrimitive.Viewport` in place of the system default, you can integrate `@radix-ui/react-scroll-area`.
10
+ If you want to show a custom scrollbar UI of the `ThreadPrimitive.Viewport` in place of the system default, you can integrate `radix-ui`'s Scroll Area.
11
11
  An example implementation of this is [shadcn/ui's Scroll Area](https://ui.shadcn.com/docs/components/scroll-area).
12
12
 
13
13
  <Steps>
@@ -48,7 +48,7 @@ Add the following CSS to your `globals.css`:
48
48
  The resulting MyThread component should look like this:
49
49
 
50
50
  ```tsx {1-2,6,8,12-13,15}
51
- import * as ScrollAreaPrimitive from "@radix-ui/react-scroll-area";
51
+ import { ScrollArea as ScrollAreaPrimitive } from "radix-ui";
52
52
  import { ScrollBar } from "@/components/ui/scroll-area";
53
53
 
54
54
  const MyThread: FC = () => {
@@ -5,10 +5,10 @@ description: Code block syntax highlighting with react-shiki or react-syntax-hig
5
5
 
6
6
  import { SyntaxHighlightingSample } from "@/components/docs/samples/syntax-highlighting";
7
7
 
8
- <SyntaxHighlightingSample />
9
-
10
8
  <Callout type="warn">Syntax highlighting is not enabled in markdown by default.</Callout>
11
9
 
10
+ <SyntaxHighlightingSample />
11
+
12
12
  <Callout type="info">
13
13
  `assistant-ui` provides two options for syntax highlighting:
14
14
  - **react-shiki** (recommended for performance & dynamic language support)
@@ -53,11 +53,61 @@ export const defaultComponents = memoizeMarkdownComponents({
53
53
  See [react-shiki documentation](https://github.com/AVGVSTVS96/react-shiki#props) for all available options.
54
54
 
55
55
  Key options:
56
- - `theme` - Shiki theme (default: `"github-dark"`)
56
+ - `theme` - Shiki theme or multi-theme object (`{ light, dark, ... }`)
57
57
  - `language` - Language for highlighting (default: `"text"`)
58
+ - `defaultColor` - Default color mode (`string | false`, e.g. `light-dark()`)
58
59
  - `delay` - Delay between highlights, useful for streaming (default: `0`)
59
- - `customLanguages` - Custom languages to preload
60
- - `codeToHastOptions` - Options passed to Shiki's `codeToHast`
60
+ - `customLanguages` - Custom languages to preload for dynamic support
61
+ - `codeToHastOptions` - All other options accepted by Shiki's [`codeToHast`](https://github.com/shikijs/shiki/blob/main/packages/types/src/options.ts#L121)
62
+
63
+ ### Dual/multi theme support
64
+
65
+ To use multiple themes, pass a theme object:
66
+
67
+ ```tsx title="/components/assistant-ui/shiki-highlighter.tsx"
68
+ <ShikiHighlighter
69
+ /* ... */
70
+ theme={{
71
+ light: "github-light",
72
+ dark: "github-dark",
73
+ }}
74
+ defaultColor="light-dark()"
75
+ /* ... */
76
+ >
77
+ ```
78
+
79
+ > **Note:** The `shiki-highlighter` component sets `defaultColor="light-dark()"` automatically.
80
+ > Only set this manually if using `ShikiHighlighter` directly.
81
+
82
+ With `defaultColor="light-dark()"`, theme switching is automatic based on your site's `color-scheme`.
83
+ No custom Shiki CSS overrides are required.
84
+
85
+ Set `color-scheme` on your app root:
86
+
87
+ System-based (follows OS/browser preference):
88
+
89
+ ```css title="globals.css"
90
+ :root {
91
+ color-scheme: light dark;
92
+ }
93
+ ```
94
+
95
+ Class-based theme switching:
96
+
97
+ ```css title="globals.css"
98
+ :root {
99
+ color-scheme: light;
100
+ }
101
+ :root.dark {
102
+ color-scheme: dark;
103
+ }
104
+ ```
105
+
106
+ If you need broader support for older browsers, you can still use the manual CSS-variable switching approach from the Shiki dual-theme docs.
107
+
108
+ For more information:
109
+ - [react-shiki multi-theme/reactive themes](https://github.com/AVGVSTVS96/react-shiki)
110
+ - [Shiki dual themes + `light-dark()`](https://shiki.style/guide/dual-themes)
61
111
 
62
112
  ### Bundle Optimization
63
113
 
@@ -100,51 +150,6 @@ const customHighlighter = await createHighlighterCore({
100
150
  For more information, see [react-shiki - bundle options](https://github.com/avgvstvs96/react-shiki#bundle-options).
101
151
  </Callout>
102
152
 
103
- ### Dual/multi theme support
104
-
105
- To use multiple theme modes, pass an object with your multi-theme configuration to the `theme` prop in the `ShikiHighlighter` component:
106
-
107
- ```tsx title="/components/assistant-ui/shiki-highlighter.tsx"
108
- <ShikiHighlighter
109
- /* ... */
110
- theme={{
111
- light: "github-light",
112
- dark: "github-dark",
113
- }}
114
- /* ... */
115
- >
116
- ```
117
-
118
- To make themes responsive to your site's theme mode, add one of the following CSS snippets to your project:
119
-
120
- ```css title="shiki.css"
121
- /* for class based dark mode */
122
- html.dark .shiki,
123
- html.dark .shiki span {
124
- color: var(--shiki-dark) !important;
125
- background-color: var(--shiki-dark-bg) !important;
126
- /* Optional, if you also want font styles */
127
- font-style: var(--shiki-dark-font-style) !important;
128
- font-weight: var(--shiki-dark-font-weight) !important;
129
- text-decoration: var(--shiki-dark-text-decoration) !important;
130
- }
131
-
132
- /* for query based dark mode */
133
- @media (prefers-color-scheme: dark) {
134
- .shiki,
135
- .shiki span {
136
- color: var(--shiki-dark) !important;
137
- background-color: var(--shiki-dark-bg) !important;
138
- /* Optional, if you also want font styles */
139
- font-style: var(--shiki-dark-font-style) !important;
140
- font-weight: var(--shiki-dark-font-weight) !important;
141
- text-decoration: var(--shiki-dark-text-decoration) !important;
142
- }
143
- }
144
- ```
145
-
146
- For more information, see [Shiki's documentation on dual and multi themes](https://shiki.style/guide/dual-themes).
147
-
148
153
  ---
149
154
 
150
155
  ## react-syntax-highlighter