@assistant-ui/mcp-docs-server 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/.docs/organized/code-examples/with-ai-sdk-v5.md +54 -19
  2. package/.docs/organized/code-examples/with-cloud.md +17 -24
  3. package/.docs/organized/code-examples/with-external-store.md +6 -6
  4. package/.docs/organized/code-examples/with-ffmpeg.md +18 -21
  5. package/.docs/organized/code-examples/with-langgraph.md +7 -8
  6. package/.docs/organized/code-examples/with-parent-id-grouping.md +9 -6
  7. package/.docs/organized/code-examples/with-react-hook-form.md +16 -21
  8. package/.docs/raw/docs/api-reference/overview.mdx +1 -4
  9. package/.docs/raw/docs/getting-started.mdx +33 -33
  10. package/.docs/raw/docs/guides/Attachments.mdx +1 -102
  11. package/.docs/raw/docs/guides/Latex.mdx +42 -16
  12. package/.docs/raw/docs/guides/ToolUI.mdx +3 -3
  13. package/.docs/raw/docs/guides/Tools.mdx +101 -84
  14. package/.docs/raw/docs/runtimes/ai-sdk/use-chat.mdx +134 -55
  15. package/.docs/raw/docs/runtimes/ai-sdk/v4-legacy.mdx +182 -0
  16. package/.docs/raw/docs/runtimes/custom/local.mdx +1 -1
  17. package/.docs/raw/docs/runtimes/langgraph/index.mdx +0 -1
  18. package/.docs/raw/docs/runtimes/langserve.mdx +9 -11
  19. package/.docs/raw/docs/runtimes/mastra/separate-server-integration.mdx +2 -2
  20. package/dist/{chunk-JS4PWCVA.js → chunk-L4K23SWI.js} +1 -1
  21. package/dist/index.js +1 -1
  22. package/dist/stdio.js +1 -1
  23. package/package.json +6 -6
  24. package/.docs/organized/code-examples/local-ollama.md +0 -1135
  25. package/.docs/organized/code-examples/search-agent-for-e-commerce.md +0 -1721
  26. package/.docs/organized/code-examples/with-ai-sdk.md +0 -1081
  27. package/.docs/organized/code-examples/with-openai-assistants.md +0 -1175
  28. package/.docs/raw/docs/runtimes/ai-sdk/rsc.mdx +0 -226
  29. package/.docs/raw/docs/runtimes/ai-sdk/use-assistant-hook.mdx +0 -195
  30. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-hook.mdx +0 -138
  31. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-v5.mdx +0 -129
@@ -17,7 +17,10 @@ Tools in assistant-ui are functions that the LLM can call to perform specific ta
  When tools are executed, you can display custom generative UI components that provide rich, interactive visualizations of the tool's execution and results. Learn more in the [Generative UI guide](/docs/guides/ToolUI).

  <Callout type="tip">
- If you haven't provided a custom UI for a tool, assistant-ui offers a [`ToolFallback`](/docs/ui/ToolFallback) component that you can add to your codebase to render a default UI for tool executions. You can customize this by creating your own Tool UI component for the tool's name.
+ If you haven't provided a custom UI for a tool, assistant-ui offers a
+ [`ToolFallback`](/docs/ui/ToolFallback) component that you can add to your
+ codebase to render a default UI for tool executions. You can customize this by
+ creating your own Tool UI component for the tool's name.
  </Callout>

  ## Tool Creation Methods
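The callout in the hunk above references `ToolFallback` without showing how it is wired up. A minimal sketch, assuming your assistant message component renders parts through `MessagePrimitive.Content` and that its `components.tools.Fallback` slot is the hook for default tool UIs (the import path follows the shadcn-style registry layout and may differ in your project):

```tsx
import { MessagePrimitive } from "@assistant-ui/react";
import { ToolFallback } from "@/components/assistant-ui/tool-fallback";

// Render all message parts; any tool call without a dedicated tool UI
// falls back to the generic ToolFallback card.
const AssistantMessageContent = () => (
  <MessagePrimitive.Content
    components={{ tools: { Fallback: ToolFallback } }}
  />
);
```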
@@ -42,19 +45,19 @@ const weatherTool = tool({
  description: "Get current weather for a location",
  parameters: z.object({
  location: z.string().describe("City name or zip code"),
- unit: z.enum(["celsius", "fahrenheit"]).default("celsius")
+ unit: z.enum(["celsius", "fahrenheit"]).default("celsius"),
  }),
  execute: async ({ location, unit }) => {
  // Tool execution logic
  const weather = await fetchWeatherAPI(location, unit);
  return weather;
- }
+ },
  });

  // Create the component
  const WeatherTool = makeAssistantTool({
  ...weatherTool,
- toolName: "getWeather"
+ toolName: "getWeather",
  });

  // Place the tool component inside AssistantRuntimeProvider
@@ -69,7 +72,10 @@ function App() {
  ```

  <Callout type="tip">
- When using server-side runtimes like Vercel AI SDK, you can pass client-defined tools to your backend using `frontendTools`. See the [Client-Defined Tools with frontendTools](#client-defined-tools-with-frontendtools) section below.
+ When using server-side runtimes like Vercel AI SDK, you can pass
+ client-defined tools to your backend using `frontendTools`. See the
+ [Client-Defined Tools with
+ frontendTools](#client-defined-tools-with-frontendtools) section below.
  </Callout>

  ### 2. Using `useAssistantTool` Hook
@@ -82,12 +88,12 @@ import { z } from "zod";

  function DynamicTools() {
  const [dataSource, setDataSource] = useState<"local" | "cloud">("local");
-
+
  useAssistantTool({
  toolName: "searchData",
  description: "Search through the selected data source",
  parameters: z.object({
- query: z.string()
+ query: z.string(),
  }),
  execute: async ({ query }) => {
  if (dataSource === "local") {
@@ -97,9 +103,9 @@ function DynamicTools() {
  }
  },
  // Re-register when data source changes
- enabled: true
+ enabled: true,
  });
-
+
  return null;
  }
  ```
@@ -109,22 +115,27 @@ function DynamicTools() {
  Create generative UI components for tools that are defined elsewhere. This is UI-only - the tool's execution logic must be registered separately (e.g., in your backend, MCP server, or another component):

  <Callout type="note">
- This creates only the UI component. The actual tool execution happens where you've defined it (typically in your API route with server-based runtimes like Vercel AI SDK).
+ This creates only the UI component. The actual tool execution happens where
+ you've defined it (typically in your API route with server-based runtimes like
+ Vercel AI SDK).
  </Callout>

  ```tsx
  import { makeAssistantToolUI, AssistantToolUI } from "@assistant-ui/react";

- const SearchResultsUI = makeAssistantToolUI<{
- query: string;
- }, {
- results: Array<{
- id: string;
- url: string;
- title: string;
- snippet: string;
- }>;
- }>({
+ const SearchResultsUI = makeAssistantToolUI<
+ {
+ query: string;
+ },
+ {
+ results: Array<{
+ id: string;
+ url: string;
+ title: string;
+ snippet: string;
+ }>;
+ }
+ >({
  toolName: "webSearch", // Must match the registered tool's name
  render: ({ args, result }) => {
  return (
@@ -138,7 +149,7 @@ const SearchResultsUI = makeAssistantToolUI<{
  ))}
  </div>
  );
- }
+ },
  });

  // Place the tool component inside AssistantRuntimeProvider
@@ -164,16 +175,16 @@ import { z } from "zod";
  function MyComponent() {
  const runtime = useAssistantRuntime();
  const [isCreativeMode, setIsCreativeMode] = useState(false);
-
+
  useEffect(() => {
  const calculateTool = tool({
  description: "Perform mathematical calculations",
  parameters: z.object({
- expression: z.string()
+ expression: z.string(),
  }),
  execute: async ({ expression }) => {
  return eval(expression); // Note: Use proper math parser in production
- }
+ },
  });

  // Register tools with model configuration
@@ -182,18 +193,19 @@ function MyComponent() {
  tools: { calculate: calculateTool },
  callSettings: {
  temperature: isCreativeMode ? 0.9 : 0.2,
- maxTokens: 1000
+ maxTokens: 1000,
  },
- priority: 10 // Higher priority overrides other providers
- })
+ priority: 10, // Higher priority overrides other providers
+ }),
  });
  }, [runtime, isCreativeMode]);
-
+
  return <div>{/* Your component */}</div>;
  }
  ```

  Use this approach when you need:
+
  - Dynamic model parameters (temperature, maxTokens, etc.)
  - Priority-based context merging
  - Multiple context types in one registration
@@ -208,18 +220,18 @@ Tools that execute in the browser, accessing client-side resources:
  const screenshotTool = tool({
  description: "Capture a screenshot of the current page",
  parameters: z.object({
- selector: z.string().optional()
+ selector: z.string().optional(),
  }),
  execute: async ({ selector }) => {
  const element = selector ? document.querySelector(selector) : document.body;
  const screenshot = await captureElement(element);
  return { dataUrl: screenshot };
- }
+ },
  });

  const ScreenshotTool = makeAssistantTool({
  ...screenshotTool,
- toolName: "screenshot"
+ toolName: "screenshot",
  });
  ```
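The `captureElement` helper in the hunk above is left undefined in the docs. One minimal way to sketch it, assuming you are willing to pull in `html2canvas` (any DOM-to-image approach would do):

```ts
import html2canvas from "html2canvas";

// Hypothetical helper for the screenshot tool above: rasterize a DOM element
// and return it as a base64-encoded PNG data URL.
async function captureElement(element: Element): Promise<string> {
  const canvas = await html2canvas(element as HTMLElement);
  return canvas.toDataURL("image/png");
}
```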

@@ -231,27 +243,27 @@ Tools that trigger server-side operations:
  // Backend route (AI SDK)
  export async function POST(req: Request) {
  const { messages } = await req.json();
-
+
  const result = streamText({
  model: openai("gpt-4o"),
- messages,
+ messages: convertToModelMessages(messages),
  tools: {
  queryDatabase: {
  description: "Query the application database",
- parameters: z.object({
+ inputSchema: z.object({
  query: z.string(),
- table: z.string()
+ table: z.string(),
  }),
  execute: async ({ query, table }) => {
  // Server-side database access
  const results = await db.query(query, { table });
  return results;
- }
- }
- }
+ },
+ },
+ },
  });
-
- return result.toDataStreamResponse();
+
+ return result.toUIMessageStreamResponse();
  }
  ```
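The hunk above shows only the changed lines; taken together, the route now follows AI SDK v5 conventions (`inputSchema` instead of `parameters`, `convertToModelMessages`, `toUIMessageStreamResponse`). A self-contained sketch of the resulting route, with `db` standing in for your own data layer:

```ts
// app/api/chat/route.ts - pieced together from the diff above (AI SDK v5)
import { openai } from "@ai-sdk/openai";
import { convertToModelMessages, streamText, tool } from "ai";
import { z } from "zod";
import { db } from "@/lib/db"; // placeholder for your own database client

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    model: openai("gpt-4o"),
    // UIMessages from the client are converted to model messages in v5
    messages: convertToModelMessages(messages),
    tools: {
      queryDatabase: tool({
        description: "Query the application database",
        inputSchema: z.object({
          query: z.string(),
          table: z.string(),
        }),
        execute: async ({ query, table }) => {
          return await db.query(query, { table });
        },
      }),
    },
  });

  // Streams UI messages (including tool calls and results) to the client
  return result.toUIMessageStreamResponse();
}
```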

@@ -266,16 +278,16 @@ import { makeAssistantTool, tool } from "@assistant-ui/react";
  const calculateTool = tool({
  description: "Perform calculations",
  parameters: z.object({
- expression: z.string()
+ expression: z.string(),
  }),
  execute: async ({ expression }) => {
  return eval(expression); // Note: Use proper math parser in production
- }
+ },
  });

  const CalculateTool = makeAssistantTool({
  ...calculateTool,
- toolName: "calculate"
+ toolName: "calculate",
  });

  // Backend: Use frontendTools to receive client tools
@@ -283,29 +295,32 @@ import { frontendTools } from "@assistant-ui/react-ai-sdk";

  export async function POST(req: Request) {
  const { messages, tools } = await req.json();
-
+
  const result = streamText({
  model: openai("gpt-4o"),
- messages,
+ messages: convertToModelMessages(messages),
  tools: {
  ...frontendTools(tools), // Client-defined tools
  // Additional server-side tools
  queryDatabase: {
  description: "Query the application database",
- parameters: z.object({ query: z.string() }),
+ inputSchema: z.object({ query: z.string() }),
  execute: async ({ query }) => {
  return await db.query(query);
- }
- }
- }
+ },
+ },
+ },
  });
-
- return result.toDataStreamResponse();
+
+ return result.toUIMessageStreamResponse();
  }
  ```

  <Callout type="note">
- The `frontendTools` utility is currently only available for the Vercel AI SDK integration. Other adapters like LangGraph follow a server-side tool definition model and don't yet implement client tool serialization. Learn more in the [Vercel AI SDK integration guide](/docs/runtimes/ai-sdk/use-chat-hook).
+ The `frontendTools` utility is currently only available for the Vercel AI SDK
+ integration. Other adapters like LangGraph follow a server-side tool
+ definition model and don't yet implement client tool serialization. Learn more
+ in the [Vercel AI SDK integration guide](/docs/runtimes/ai-sdk/use-chat-hook).
  </Callout>

  ### Human-in-the-Loop Tools
@@ -321,26 +336,26 @@ const refundTool = tool({
  parameters: z.object({
  orderId: z.string(),
  amount: z.number(),
- reason: z.string()
+ reason: z.string(),
  }),
  execute: async ({ orderId, amount, reason }) => {
  // Wait for human approval
  const approved = await requestHumanApproval({
  action: "refund",
- details: { orderId, amount, reason }
+ details: { orderId, amount, reason },
  });
-
+
  if (!approved) {
  throw new Error("Refund rejected by administrator");
  }
-
+
  return await processRefund(orderId, amount);
- }
+ },
  });

  const RefundTool = makeAssistantTool({
  ...refundTool,
- toolName: "requestRefund"
+ toolName: "requestRefund",
  });
  ```
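`requestHumanApproval` and `processRefund` above are placeholders the docs never define. A rough sketch of the approval side only (the function name and call shape are taken from the example; the dialog mechanism is up to you, shown here with a bare `window.confirm`):

```ts
// Hypothetical approval helper for the refund tool above. In a real app this
// would open a modal or notify an administrator and resolve when they respond.
type ApprovalRequest = {
  action: string;
  details: Record<string, unknown>;
};

async function requestHumanApproval({
  action,
  details,
}: ApprovalRequest): Promise<boolean> {
  return window.confirm(
    `Approve ${action}?\n\n${JSON.stringify(details, null, 2)}`,
  );
}
```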

@@ -356,15 +371,15 @@ const mcpClient = createMCPClient({
  servers: {
  github: {
  command: "npx",
- args: ["@modelcontextprotocol/server-github"]
- }
- }
+ args: ["@modelcontextprotocol/server-github"],
+ },
+ },
  });

  // Tools are automatically available through the runtime
  const runtime = useChatRuntime({
  api: "/api/chat",
- tools: await mcpClient.getTools()
+ tools: await mcpClient.getTools(),
  });
  ```

@@ -381,33 +396,33 @@ const travelPlannerTool = tool({
  destination: z.string(),
  dates: z.object({
  start: z.string(),
- end: z.string()
- })
+ end: z.string(),
+ }),
  }),
  execute: async ({ destination, dates }) => {
  // Execute multiple operations
  const weather = await getWeatherAPI(destination);
- const hotels = await searchHotelsAPI({
+ const hotels = await searchHotelsAPI({
  location: destination,
- dates
+ dates,
  });
  const activities = await findActivitiesAPI({
  location: destination,
- weather: weather.forecast
+ weather: weather.forecast,
  });
-
+
  return {
  weather,
  hotels,
  activities,
- itinerary: generateItinerary({ weather, hotels, activities })
+ itinerary: generateItinerary({ weather, hotels, activities }),
  };
- }
+ },
  });

  const TravelPlannerTool = makeAssistantTool({
  ...travelPlannerTool,
- toolName: "planTrip"
+ toolName: "planTrip",
  });
  ```

@@ -419,20 +434,20 @@ Tools that appear based on context:
  function ConditionalTools() {
  const { user } = useAuth();
  const { subscription } = useSubscription();
-
+
  // Premium features
  useAssistantTool({
  toolName: "advancedAnalysis",
  description: "Perform advanced data analysis",
  parameters: z.object({
- dataset: z.string()
+ dataset: z.string(),
  }),
  execute: async (args) => {
  // Premium analysis logic
  },
- enabled: subscription?.tier === "premium"
+ enabled: subscription?.tier === "premium",
  });
-
+
  // Role-based tools
  useAssistantTool({
  toolName: "adminPanel",
@@ -441,7 +456,7 @@ function ConditionalTools() {
  execute: async () => {
  // Admin actions
  },
- enabled: user?.role === "admin"
+ enabled: user?.role === "admin",
  });
  }
  ```
@@ -454,12 +469,12 @@ Robust error handling and recovery:
  const resilientTool = tool({
  description: "Fetch data with retry logic",
  parameters: z.object({
- endpoint: z.string()
+ endpoint: z.string(),
  }),
  execute: async ({ endpoint }, { abortSignal }) => {
  const maxRetries = 3;
  let lastError;
-
+
  for (let i = 0; i < maxRetries; i++) {
  try {
  const response = await fetch(endpoint, { signal: abortSignal });
@@ -468,17 +483,19 @@ const resilientTool = tool({
  } catch (error) {
  lastError = error;
  if (abortSignal.aborted) throw error; // Don't retry on abort
- await new Promise(resolve => setTimeout(resolve, 1000 * i));
+ await new Promise((resolve) => setTimeout(resolve, 1000 * i));
  }
  }
-
- throw new Error(`Failed after ${maxRetries} attempts: ${lastError.message}`);
- }
+
+ throw new Error(
+ `Failed after ${maxRetries} attempts: ${lastError.message}`,
+ );
+ },
  });

  const ResilientTool = makeAssistantTool({
  ...resilientTool,
- toolName: "fetchWithRetries"
+ toolName: "fetchWithRetries",
  });
  ```

@@ -500,7 +517,7 @@ Tools receive additional context during execution:
  execute: async (args, context) => {
  // context.abortSignal - AbortSignal for cancellation
  // context.toolCallId - Unique identifier for this invocation
- }
+ };
  ```

  ## Runtime Integration
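The final hunk above shows the execution-context signature only as a fragment. A small illustrative sketch of how that second argument might be used: `abortSignal` is forwarded to `fetch` so cancelling the run also cancels the in-flight request, and `toolCallId` tags the log line (the tool name and endpoint are made up for illustration):

```ts
import { tool } from "@assistant-ui/react";
import { z } from "zod";

// Illustrative only: forwards the runtime's AbortSignal and logs per tool call.
const statusTool = tool({
  description: "Check the status of an external service",
  parameters: z.object({
    service: z.string(),
  }),
  execute: async ({ service }, { abortSignal, toolCallId }) => {
    console.log(`[${toolCallId}] checking ${service}`);
    const res = await fetch(`https://status.example.com/${service}`, {
      signal: abortSignal, // cancelling the run aborts the request
    });
    return await res.json();
  },
});
```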