@assistant-ui/mcp-docs-server 0.1.9 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,9 +2,9 @@
 title: Chat History for LangGraph Cloud
 ---
 
-import { Steps, Step } from 'fumadocs-ui/components/steps';
-import { Callout } from 'fumadocs-ui/components/callout';
-import { Tab, Tabs } from 'fumadocs-ui/components/tabs';
+import { Steps, Step } from "fumadocs-ui/components/steps";
+import { Callout } from "fumadocs-ui/components/callout";
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
 ## Overview
 
@@ -13,7 +13,8 @@ assistant-cloud provides thread management and persistent chat history for appli
 ## Prerequisites
 
 <Callout type="info">
-You need an assistant-cloud account to follow this guide. [Sign up here](https://cloud.assistant-ui.com/) to get started.
+You need an assistant-cloud account to follow this guide. [Sign up
+here](https://cloud.assistant-ui.com/) to get started.
 </Callout>
 
 ## Setup Guide
@@ -75,40 +76,12 @@ Create a runtime provider that integrates LangGraph with assistant-cloud. Choose
 import {
   AssistantCloud,
   AssistantRuntimeProvider,
-  useCloudThreadListRuntime,
-  useThreadListItemRuntime,
 } from "@assistant-ui/react";
 import { useLangGraphRuntime } from "@assistant-ui/react-langgraph";
 import { createThread, getThreadState, sendMessage } from "@/lib/chatApi";
 import { LangChainMessage } from "@assistant-ui/react-langgraph";
 import { useMemo } from "react";
 
-const useMyLangGraphRuntime = () => {
-  const threadListItemRuntime = useThreadListItemRuntime();
-
-  const runtime = useLangGraphRuntime({
-    stream: async function* (messages) {
-      const { externalId } = await threadListItemRuntime.initialize();
-      if (!externalId) throw new Error("Thread not found");
-
-      return sendMessage({
-        threadId: externalId,
-        messages,
-      });
-    },
-
-    onSwitchToThread: async (externalId) => {
-      const state = await getThreadState(externalId);
-      return {
-        messages:
-          (state.values as { messages?: LangChainMessage[] }).messages ?? [],
-      };
-    },
-  });
-
-  return runtime;
-};
-
 export function MyRuntimeProvider({
   children,
 }: Readonly<{
@@ -123,13 +96,28 @@ export function MyRuntimeProvider({
     [],
   );
 
-  const runtime = useCloudThreadListRuntime({
+  const runtime = useLangGraphRuntime({
     cloud,
-    runtimeHook: useMyLangGraphRuntime,
+    stream: async function* (messages, { initialize }) {
+      const { externalId } = await initialize();
+      if (!externalId) throw new Error("Thread not found");
+
+      return sendMessage({
+        threadId: externalId,
+        messages,
+      });
+    },
     create: async () => {
       const { thread_id } = await createThread();
       return { externalId: thread_id };
     },
+    load: async (externalId) => {
+      const state = await getThreadState(externalId);
+      return {
+        messages:
+          (state.values as { messages?: LangChainMessage[] }).messages ?? [],
+      };
+    },
   });
 
   return (
@@ -150,8 +138,6 @@ export function MyRuntimeProvider({
 import {
   AssistantCloud,
   AssistantRuntimeProvider,
-  useCloudThreadListRuntime,
-  useThreadListItemRuntime,
 } from "@assistant-ui/react";
 import { useLangGraphRuntime } from "@assistant-ui/react-langgraph";
 import { createThread, getThreadState, sendMessage } from "@/lib/chatApi";
@@ -159,32 +145,6 @@ import { LangChainMessage } from "@assistant-ui/react-langgraph";
 import { useAuth } from "@clerk/nextjs";
 import { useMemo } from "react";
 
-const useMyLangGraphRuntime = () => {
-  const threadListItemRuntime = useThreadListItemRuntime();
-
-  const runtime = useLangGraphRuntime({
-    stream: async function* (messages) {
-      const { externalId } = await threadListItemRuntime.initialize();
-      if (!externalId) throw new Error("Thread not found");
-
-      return sendMessage({
-        threadId: externalId,
-        messages,
-      });
-    },
-
-    onSwitchToThread: async (externalId) => {
-      const state = await getThreadState(externalId);
-      return {
-        messages:
-          (state.values as { messages?: LangChainMessage[] }).messages ?? []
-      };
-    },
-  });
-
-  return runtime;
-};
-
 export function MyRuntimeProvider({
   children,
 }: Readonly<{
@@ -201,13 +161,28 @@ export function MyRuntimeProvider({
     [getToken],
   );
 
-  const runtime = useCloudThreadListRuntime({
+  const runtime = useLangGraphRuntime({
     cloud,
-    runtimeHook: useMyLangGraphRuntime,
+    stream: async function* (messages, { initialize }) {
+      const { externalId } = await initialize();
+      if (!externalId) throw new Error("Thread not found");
+
+      return sendMessage({
+        threadId: externalId,
+        messages,
+      });
+    },
     create: async () => {
       const { thread_id } = await createThread();
       return { externalId: thread_id };
     },
+    load: async (externalId) => {
+      const state = await getThreadState(externalId);
+      return {
+        messages:
+          (state.values as { messages?: LangChainMessage[] }).messages ?? [],
+      };
+    },
   });
 
   return (
@@ -219,7 +194,8 @@ export function MyRuntimeProvider({
 ```
 
 <Callout type="info">
-For Clerk authentication, configure the `"assistant-ui"` token template in your Clerk dashboard.
+For Clerk authentication, configure the `"assistant-ui"` token template in
+your Clerk dashboard.
 </Callout>
 
 </Tab>
@@ -227,7 +203,7 @@ export function MyRuntimeProvider({
 </Tabs>
 
 <Callout type="info">
-The `useMyLangGraphRuntime` hook is extracted into a separate function because it will be mounted multiple times, once per active thread.
+The `useLangGraphRuntime` hook now directly accepts `cloud`, `create`, and `load` parameters for simplified thread management. The runtime handles thread lifecycle internally.
 </Callout>
 
 </Step>
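
Read together, the hunks above replace the `useCloudThreadListRuntime` / `runtimeHook` indirection with a single call. Assembled from the added lines (inside the provider component, where `cloud` is the memoized `AssistantCloud` instance and the `chatApi` helpers come from the imports shown), the new setup reads:

```tsx
// Consolidated from the added lines above: useLangGraphRuntime now takes
// cloud, create, and load directly instead of a separate runtime hook.
const runtime = useLangGraphRuntime({
  cloud,
  stream: async function* (messages, { initialize }) {
    const { externalId } = await initialize();
    if (!externalId) throw new Error("Thread not found");
    return sendMessage({ threadId: externalId, messages });
  },
  // Map a newly created assistant-cloud thread to a LangGraph thread
  create: async () => {
    const { thread_id } = await createThread();
    return { externalId: thread_id };
  },
  // Restore messages when switching back to an existing thread
  load: async (externalId) => {
    const state = await getThreadState(externalId);
    return {
      messages:
        (state.values as { messages?: LangChainMessage[] }).messages ?? [],
    };
  },
});
```
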
@@ -283,8 +283,7 @@ const ThreadWelcomeSuggestions: FC = () => {
   <ThreadPrimitive.Suggestion
     className="aui-thread-welcome-suggestion"
     prompt="What is the weather in Tokyo?"
-    method="replace"
-    autoSend
+    send
   >
     <span className="aui-thread-welcome-suggestion-text">
       What is the weather in Tokyo?
@@ -293,8 +292,7 @@ const ThreadWelcomeSuggestions: FC = () => {
   <ThreadPrimitive.Suggestion
     className="aui-thread-welcome-suggestion"
     prompt="What is assistant-ui?"
-    method="replace"
-    autoSend
+    send
   >
     <span className="aui-thread-welcome-suggestion-text">
       What is assistant-ui?
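
Both suggestion hunks make the same substitution: the `method="replace"` / `autoSend` pair collapses into a single `send` prop. Taken from the added lines, the updated markup is:

```tsx
<ThreadPrimitive.Suggestion
  className="aui-thread-welcome-suggestion"
  prompt="What is the weather in Tokyo?"
  send
>
  <span className="aui-thread-welcome-suggestion-text">
    What is the weather in Tokyo?
  </span>
</ThreadPrimitive.Suggestion>
```
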
@@ -378,17 +378,42 @@ const DatePickerToolUI = makeAssistantToolUI<
 
 ### Multi-Step Interactions
 
-Build complex workflows with multiple user interactions:
+Build complex workflows with human-in-the-loop patterns for multi-step user interactions:
 
 ```tsx
-const ApprovalToolUI = makeAssistantToolUI<
-  { action: string; details: any },
-  { approved: boolean; reason?: string }
->({
+const DeleteProjectTool = makeAssistantTool({
+  toolName: "deleteProject",
+  execute: async ({ projectId }, { human }) => {
+    const response = await human({ action: "deleteProject", details: { projectId } });
+    if (!response.approved) throw new Error("Project deletion cancelled");
+
+    await deleteProject(projectId);
+    return { success: true };
+  },
+});
+
+const ApprovalTool = makeAssistantTool({
+  ...tool({
+    description: "Request user approval for an action",
+    parameters: z.object({
+      action: z.string(),
+      details: z.any(),
+    }),
+    execute: async ({ action, details }, { human }) => {
+      // Request approval from user
+      const response = await human({ action, details });
+
+      return {
+        approved: response.approved,
+        reason: response.reason,
+      };
+    },
+  }),
   toolName: "requestApproval",
-  render: ({ args, result, addResult }) => {
+  render: ({ args, result, interrupt, resume }) => {
     const [reason, setReason] = useState("");
 
+    // Show result after approval/rejection
    if (result) {
       return (
         <div className={result.approved ? "text-green-600" : "text-red-600"}>
@@ -397,41 +422,53 @@ const ApprovalToolUI = makeAssistantToolUI<
       );
     }
 
-    return (
-      <div className="rounded border-2 border-yellow-400 p-4">
-        <h4 className="font-bold">Approval Required</h4>
-        <p className="my-2">{args.action}</p>
-        <pre className="rounded bg-gray-100 p-2 text-sm">
-          {JSON.stringify(args.details, null, 2)}
-        </pre>
-
-        <div className="mt-4 flex gap-2">
-          <button
-            onClick={() => addResult({ approved: true })}
-            className="rounded bg-green-500 px-4 py-2 text-white"
-          >
-            Approve
-          </button>
-          <button
-            onClick={() => addResult({ approved: false, reason })}
-            className="rounded bg-red-500 px-4 py-2 text-white"
-          >
-            Reject
-          </button>
-          <input
-            type="text"
-            placeholder="Rejection reason..."
-            value={reason}
-            onChange={(e) => setReason(e.target.value)}
-            className="flex-1 rounded border px-2"
-          />
+    // Show approval UI when waiting for user input
+    if (interrupt) {
+      return (
+        <div className="rounded border-2 border-yellow-400 p-4">
+          <h4 className="font-bold">Approval Required</h4>
+          <p className="my-2">{interrupt.payload.action}</p>
+          <pre className="rounded bg-gray-100 p-2 text-sm">
+            {JSON.stringify(interrupt.payload.details, null, 2)}
+          </pre>
+
+          <div className="mt-4 flex gap-2">
+            <button
+              onClick={() => resume({ approved: true })}
+              className="rounded bg-green-500 px-4 py-2 text-white"
+            >
+              Approve
+            </button>
+            <button
+              onClick={() => resume({ approved: false, reason })}
+              className="rounded bg-red-500 px-4 py-2 text-white"
+            >
+              Reject
+            </button>
+            <input
+              type="text"
+              placeholder="Rejection reason..."
+              value={reason}
+              onChange={(e) => setReason(e.target.value)}
+              className="flex-1 rounded border px-2"
+            />
+          </div>
         </div>
-      </div>
-    );
+      );
+    }
+
+    return <div>Processing...</div>;
   },
 });
 ```
 
+<Callout type="tip">
+Use tool human input (`human()` / `resume()`) for workflows that need to
+pause tool execution and wait for user input. Use `addResult()` for "human
+tools" where the AI requests a tool call but the entire execution happens
+through user interaction.
+</Callout>
+
 ## Advanced Features
 
 ### Tool Status Handling
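
The tip above names two patterns, but the hunk only shows the `human()` / `resume()` side. For contrast, a minimal sketch of the `addResult()` side: a "human tool" with no `execute()`, where the user's click supplies the result directly (the `askUser` tool name is illustrative, not part of the documented API):

```tsx
import { makeAssistantToolUI } from "@assistant-ui/react";

// Sketch of the addResult() pattern: the AI requests the tool call, and the
// entire "execution" is the user's interaction with the rendered UI.
const AskUserTool = makeAssistantToolUI<{ question: string }, string>({
  toolName: "askUser", // illustrative name
  render: ({ args, result, addResult }) => {
    // Once a result exists, show it instead of the buttons
    if (result) return <div>You answered: {result}</div>;
    return (
      <div>
        <p>{args.question}</p>
        <button onClick={() => addResult("yes")}>Yes</button>
        <button onClick={() => addResult("no")}>No</button>
      </div>
    );
  },
});
```
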
@@ -589,14 +626,52 @@ type ToolUIRenderProps<TArgs, TResult> = {
   toolName: string;
   toolCallId: string;
 
-  // Interactive callback
+  // Interactive callbacks
   addResult: (result: TResult) => void;
+  resume: (payload: unknown) => void;
+
+  // Interrupt state
+  interrupt?: { type: "human"; payload: unknown }; // Payload from context.human()
 
   // Optional artifact data
   artifact?: unknown;
 };
 ```
 
+### Human Input Handling
+
+When a tool calls `human()` during execution, the payload becomes available in the render function as `interrupt.payload`:
+
+```tsx
+const ConfirmationToolUI = makeAssistantToolUI<
+  { action: string },
+  { confirmed: boolean }
+>({
+  toolName: "confirmAction",
+  render: ({ args, result, interrupt, resume }) => {
+    // Tool is waiting for user input
+    if (interrupt) {
+      return (
+        <div className="confirmation-dialog">
+          <p>Confirm: {interrupt.payload.message}</p>
+          <button onClick={() => resume(true)}>Yes</button>
+          <button onClick={() => resume(false)}>No</button>
+        </div>
+      );
+    }
+
+    // Tool completed
+    if (result) {
+      return <div>Action {result.confirmed ? "confirmed" : "cancelled"}</div>;
+    }
+
+    return <div>Processing...</div>;
+  },
+});
+```
+
+Learn more about tool human input in the [Tools Guide](/docs/guides/Tools#tool-human-input).
+
 ## Best Practices
 
 ### 1. Handle All Status States
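
One caveat against the type above: `interrupt.payload` is declared `unknown`, so examples that read `interrupt.payload.message` directly assume a cast. A hedged sketch of a narrowing guard, assuming the `{ message: string }` payload shape used in the `confirmAction` example:

```tsx
// Narrowing helper for interrupt.payload, which ToolUIRenderProps types as
// `unknown`. The { message: string } shape is an assumption: it is whatever
// your own execute() passed to human().
function getConfirmMessage(payload: unknown): string | undefined {
  if (
    typeof payload === "object" &&
    payload !== null &&
    typeof (payload as { message?: unknown }).message === "string"
  ) {
    return (payload as { message: string }).message;
  }
  return undefined;
}
```
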
@@ -359,13 +359,167 @@ const RefundTool = makeAssistantTool({
 });
 ```
 
+### Tool Human Input
+
+Tools can pause their execution to request user input or approval before continuing. This is useful for:
+
+- Requesting user confirmation for sensitive operations
+- Collecting additional information mid-execution
+- Implementing progressive disclosure workflows
+- Building interactive, multi-step tool experiences
+
+```tsx
+import { makeAssistantTool, tool } from "@assistant-ui/react";
+import { z } from "zod";
+
+const confirmationTool = tool({
+  description: "Send an email with confirmation",
+  parameters: z.object({
+    to: z.string(),
+    subject: z.string(),
+    body: z.string(),
+  }),
+  execute: async ({ to, subject, body }, { human }) => {
+    // Request user confirmation before sending
+    const confirmed = await human({
+      type: "confirmation",
+      action: "send-email",
+      details: { to, subject },
+    });
+
+    if (!confirmed) {
+      return {
+        status: "cancelled",
+        message: "Email sending cancelled by user",
+      };
+    }
+
+    // Proceed with sending the email
+    await sendEmail({ to, subject, body });
+    return { status: "sent", message: `Email sent to ${to}` };
+  },
+});
+
+const EmailTool = makeAssistantTool({
+  ...confirmationTool,
+  toolName: "sendEmail",
+  render: ({ args, result, interrupt, resume }) => {
+    // The interrupt payload is available when the tool is waiting for user input
+    if (interrupt) {
+      return (
+        <div className="confirmation-dialog">
+          <h3>Confirm Email</h3>
+          <p>Send email to: {interrupt.payload.details.to}</p>
+          <p>Subject: {interrupt.payload.details.subject}</p>
+          <div className="actions">
+            <button onClick={() => resume(true)}>Confirm</button>
+            <button onClick={() => resume(false)}>Cancel</button>
+          </div>
+        </div>
+      );
+    }
+
+    // Show the result after completion
+    if (result) {
+      return (
+        <div className="email-result">
+          <p>{result.message}</p>
+        </div>
+      );
+    }
+
+    // Show loading state
+    return <div>Preparing email...</div>;
+  },
+});
+```
+
+#### Human Input Behavior
+
+- **Payload**: The object passed to `human()` is available in the `render` function as `interrupt.payload`
+- **Type**: The `interrupt` object has the structure `{ type: "human", payload: ... }`
+- **Resume**: Call `resume(payload)` to continue execution - the payload becomes the resolved value of the `human()` call
+- **Multiple Requests**: If `human()` is called multiple times, previous requests are automatically rejected with an error
+- **Cancellation**: If the tool execution is aborted (e.g., user cancels the message), all pending requests are rejected
+
+#### Advanced Human Input Patterns
+
+You can use human input for complex multi-step interactions:
+
+```tsx
+const wizardTool = tool({
+  description: "Multi-step data processing wizard",
+  parameters: z.object({
+    dataSource: z.string(),
+  }),
+  execute: async ({ dataSource }, { human }) => {
+    // Step 1: Load data
+    const data = await loadData(dataSource);
+
+    // Step 2: Request user to select columns
+    const selectedColumns = await human({
+      type: "column-selection",
+      availableColumns: data.columns,
+    });
+
+    // Step 3: Request processing options
+    const options = await human({
+      type: "processing-options",
+      columns: selectedColumns,
+    });
+
+    // Step 4: Process data with user selections
+    const result = await processData(data, selectedColumns, options);
+    return result;
+  },
+});
+
+const WizardTool = makeAssistantTool({
+  ...wizardTool,
+  toolName: "dataWizard",
+  render: ({ args, result, interrupt, resume }) => {
+    if (interrupt?.payload.type === "column-selection") {
+      return (
+        <ColumnSelector
+          columns={interrupt.payload.availableColumns}
+          onSelect={(cols) => resume(cols)}
+        />
+      );
+    }
+
+    if (interrupt?.payload.type === "processing-options") {
+      return (
+        <ProcessingOptions
+          columns={interrupt.payload.columns}
+          onConfirm={(opts) => resume(opts)}
+        />
+      );
+    }
+
+    if (result) {
+      return <ResultDisplay data={result} />;
+    }
+
+    return <div>Loading...</div>;
+  },
+});
+```
+
+<Callout type="note">
+When a tool calls `human()` multiple times (e.g., for multi-step
+workflows), each new request automatically rejects any previous pending
+request. Make sure to handle potential errors if you need to support
+cancellation of earlier steps.
+</Callout>
+
 ### MCP (Model Context Protocol) Tools
 
 Integration with MCP servers using AI SDK v5's experimental MCP support:
 
 <Callout type="warning">
-MCP support in AI SDK v5 is experimental. The API may change in future releases.
-Make sure to install the MCP SDK: `npm install @modelcontextprotocol/sdk`
+MCP support in AI SDK v5 is experimental. The API may change in future
+releases. Make sure to install the MCP SDK: `npm install
+@modelcontextprotocol/sdk`
 </Callout>
 
 ```tsx
@@ -414,15 +568,13 @@ import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
 // HTTP transport
 const httpClient = await experimental_createMCPClient({
   transport: new StreamableHTTPClientTransport(
-    new URL("http://localhost:3000/mcp")
+    new URL("http://localhost:3000/mcp"),
   ),
 });
 
 // Server-Sent Events transport
 const sseClient = await experimental_createMCPClient({
-  transport: new SSEClientTransport(
-    new URL("http://localhost:3000/sse")
-  ),
+  transport: new SSEClientTransport(new URL("http://localhost:3000/sse")),
 });
 ```
 
@@ -560,9 +712,21 @@ Tools receive additional context during execution:
 execute: async (args, context) => {
   // context.abortSignal - AbortSignal for cancellation
   // context.toolCallId - Unique identifier for this invocation
+  // context.human - Function to request human input
+
+  // Example: Request user confirmation
+  const userResponse = await context.human({
+    message: "Are you sure?",
+  });
 };
 ```
 
+The execution context provides:
+
+- **`abortSignal`**: An `AbortSignal` that triggers when the tool execution is cancelled
+- **`toolCallId`**: A unique identifier for this specific tool invocation
+- **`human`**: A function that pauses execution and requests user input. The payload passed to `human()` becomes available in the render function, and the value passed to `resume()` becomes the resolved value of the `human()` call
+
 ## Runtime Integration
 
 Each integration handles tools differently:
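
A closing note tying the `human` bullet here to the behavior notes earlier in the diff: because a superseding `human()` call or an aborted run rejects a pending request, long-running tools may want to guard each `await human(...)`. A sketch under those assumptions (the tool name and payload shape are illustrative):

```tsx
import { tool } from "@assistant-ui/react";
import { z } from "zod";

// Sketch: wrap each human() await, since a newer request or a cancelled run
// rejects the pending promise per the behavior notes above.
const guardedStep = tool({
  description: "Wizard step with cancellable human input",
  parameters: z.object({ dataSource: z.string() }),
  execute: async ({ dataSource }, { human }) => {
    try {
      const columns = await human({ type: "column-selection", dataSource });
      return { status: "ok", columns };
    } catch {
      // Rejected: the run was aborted or a newer human() request replaced this one
      return { status: "cancelled" };
    }
  },
});
```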