@assistant-ui/mcp-docs-server 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.docs/organized/code-examples/with-ai-sdk-v5.md +15 -13
  2. package/.docs/organized/code-examples/with-cloud.md +19 -25
  3. package/.docs/organized/code-examples/with-external-store.md +9 -7
  4. package/.docs/organized/code-examples/with-ffmpeg.md +21 -21
  5. package/.docs/organized/code-examples/with-langgraph.md +72 -46
  6. package/.docs/organized/code-examples/with-parent-id-grouping.md +9 -7
  7. package/.docs/organized/code-examples/with-react-hook-form.md +19 -21
  8. package/.docs/raw/docs/api-reference/integrations/react-data-stream.mdx +194 -0
  9. package/.docs/raw/docs/api-reference/overview.mdx +7 -4
  10. package/.docs/raw/docs/api-reference/primitives/Composer.mdx +31 -0
  11. package/.docs/raw/docs/api-reference/primitives/Message.mdx +108 -3
  12. package/.docs/raw/docs/api-reference/primitives/Thread.mdx +59 -0
  13. package/.docs/raw/docs/api-reference/primitives/ThreadList.mdx +128 -0
  14. package/.docs/raw/docs/api-reference/primitives/ThreadListItem.mdx +160 -0
  15. package/.docs/raw/docs/api-reference/runtimes/AssistantRuntime.mdx +0 -11
  16. package/.docs/raw/docs/api-reference/runtimes/ComposerRuntime.mdx +3 -3
  17. package/.docs/raw/docs/copilots/assistant-frame.mdx +397 -0
  18. package/.docs/raw/docs/getting-started.mdx +53 -52
  19. package/.docs/raw/docs/guides/Attachments.mdx +7 -115
  20. package/.docs/raw/docs/guides/ToolUI.mdx +3 -3
  21. package/.docs/raw/docs/guides/Tools.mdx +152 -92
  22. package/.docs/raw/docs/guides/context-api.mdx +574 -0
  23. package/.docs/raw/docs/migrations/v0-12.mdx +125 -0
  24. package/.docs/raw/docs/runtimes/ai-sdk/use-chat.mdx +134 -55
  25. package/.docs/raw/docs/runtimes/ai-sdk/v4-legacy.mdx +182 -0
  26. package/.docs/raw/docs/runtimes/custom/local.mdx +16 -3
  27. package/.docs/raw/docs/runtimes/data-stream.mdx +287 -0
  28. package/.docs/raw/docs/runtimes/langgraph/index.mdx +0 -1
  29. package/.docs/raw/docs/runtimes/langserve.mdx +9 -11
  30. package/.docs/raw/docs/runtimes/pick-a-runtime.mdx +5 -0
  31. package/.docs/raw/docs/ui/ThreadList.mdx +54 -16
  32. package/dist/{chunk-L4K23SWI.js → chunk-NVNFQ5ZO.js} +4 -1
  33. package/dist/index.js +1 -1
  34. package/dist/prepare-docs/prepare.js +1 -1
  35. package/dist/stdio.js +1 -1
  36. package/package.json +7 -7
  37. package/.docs/organized/code-examples/local-ollama.md +0 -1135
  38. package/.docs/organized/code-examples/search-agent-for-e-commerce.md +0 -1721
  39. package/.docs/organized/code-examples/with-ai-sdk.md +0 -1082
  40. package/.docs/organized/code-examples/with-openai-assistants.md +0 -1175
  41. package/.docs/raw/docs/concepts/architecture.mdx +0 -19
  42. package/.docs/raw/docs/concepts/runtime-layer.mdx +0 -163
  43. package/.docs/raw/docs/concepts/why.mdx +0 -9
  44. package/.docs/raw/docs/runtimes/ai-sdk/rsc.mdx +0 -226
  45. package/.docs/raw/docs/runtimes/ai-sdk/use-assistant-hook.mdx +0 -195
  46. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-hook.mdx +0 -138
  47. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-v5.mdx +0 -212
@@ -22,7 +22,10 @@ import { Card, Cards } from "fumadocs-ui/components/card";
 # Create a new project with the default template
 npx assistant-ui@latest create
 
-# Or start with a template:
+# Or choose one of the following templates:
+# Assistant Cloud for baked in persistence and thread management
+npx assistant-ui@latest create -t cloud
+
 # LangGraph
 npx assistant-ui@latest create -t langgraph
 
@@ -686,11 +689,11 @@ import { ComponentPropsWithoutRef, forwardRef } from "react";
 import {
   Tooltip,
   TooltipContent,
-  TooltipProvider,
   TooltipTrigger,
 } from "@/components/ui/tooltip";
 import { Button } from "@/components/ui/button";
 import { cn } from "@/lib/utils";
+import { Slottable } from "@radix-ui/react-slot";
 
 export type TooltipIconButtonProps = ComponentPropsWithoutRef<typeof Button> & {
   tooltip: string;
@@ -702,23 +705,21 @@ export const TooltipIconButton = forwardRef<
   TooltipIconButtonProps
 >(({ children, tooltip, side = "bottom", className, ...rest }, ref) => {
   return (
-    <TooltipProvider>
-      <Tooltip>
-        <TooltipTrigger asChild>
-          <Button
-            variant="ghost"
-            size="icon"
-            {...rest}
-            className={cn("", className)}
-            ref={ref}
-          >
-            {children}
-            <span className="aui-sr-only">{tooltip}</span>
-          </Button>
-        </TooltipTrigger>
-        <TooltipContent side={side}>{tooltip}</TooltipContent>
-      </Tooltip>
-    </TooltipProvider>
+    <Tooltip>
+      <TooltipTrigger asChild>
+        <Button
+          variant="ghost"
+          size="icon"
+          {...rest}
+          className={cn("aui-button-icon", className)}
+          ref={ref}
+        >
+          <Slottable>{children}</Slottable>
+          <span className="aui-sr-only">{tooltip}</span>
+        </Button>
+      </TooltipTrigger>
+      <TooltipContent side={side}>{tooltip}</TooltipContent>
+    </Tooltip>
   );
 });
 
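The rewrite drops the per-button `TooltipProvider` (presumably a single provider now lives higher in the component tree) and wraps `{children}` in Radix's `Slottable`, so slot composition can target the icon child while the sr-only label is preserved. A hypothetical usage sketch — the import paths and `CopyIcon` are assumptions for illustration, not part of the diff:

```tsx
import { CopyIcon } from "lucide-react";
// Path assumed from the assistant-ui template layout
import { TooltipIconButton } from "@/components/assistant-ui/tooltip-icon-button";

// Renders a ghost icon button; "Copy" appears as a tooltip and is
// also exposed to screen readers via the hidden span.
export const CopyMessageButton = () => (
  <TooltipIconButton
    tooltip="Copy"
    onClick={() => navigator.clipboard.writeText("…")}
  >
    <CopyIcon />
  </TooltipIconButton>
);
```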
@@ -810,7 +811,7 @@ Add an API endpoint:
 <Tabs id="provider" items={["OpenAI", "Anthropic", "Azure", "AWS", "Gemini", "GCP", "Groq", "Fireworks", "Cohere", "Ollama", "Chrome AI"]}>
 ```ts title="/app/api/chat/route.ts" tab="OpenAI"
 import { openai } from "@ai-sdk/openai";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -818,15 +819,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: openai("gpt-4o-mini"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Anthropic"
 import { anthropic } from "@ai-sdk/anthropic";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -834,15 +835,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: anthropic("claude-3-5-sonnet-20240620"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Azure"
 import { azure } from "@ai-sdk/azure";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -850,15 +851,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: azure("your-deployment-name"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="AWS"
 import { bedrock } from "@ai-sdk/amazon-bedrock";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -866,15 +867,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: bedrock("anthropic.claude-3-5-sonnet-20240620-v1:0"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Gemini"
 import { google } from "@ai-sdk/google";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -882,15 +883,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: google("gemini-2.0-flash"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="GCP"
 import { vertex } from "@ai-sdk/google-vertex";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -898,15 +899,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: vertex("gemini-1.5-pro"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Groq"
 import { createOpenAI } from "@ai-sdk/openai";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -919,15 +920,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: groq("llama3-70b-8192"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Fireworks"
 import { createOpenAI } from "@ai-sdk/openai";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -940,15 +941,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: fireworks("accounts/fireworks/models/firefunction-v2"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Cohere"
 import { cohere } from "@ai-sdk/cohere";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -956,15 +957,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: cohere("command-r-plus"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Ollama"
 import { ollama } from "ollama-ai-provider";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -972,15 +973,15 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: ollama("llama3"),
-    messages,
+    messages: convertToModelMessages(messages),
   });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
 ```ts title="/app/api/chat/route.ts" tab="Chrome AI"
 import { chromeai } from "chrome-ai";
-import { streamText } from "ai";
+import { convertToModelMessages, streamText } from "ai";
 
 export const maxDuration = 30;
 
@@ -988,9 +989,9 @@ export async function POST(req: Request) {
   const { messages } = await req.json();
   const result = streamText({
     model: chromeai(),
-    messages,
+    messages: convertToModelMessages(messages),
  });
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
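Every provider tab above receives the same two-line AI SDK v5 migration: incoming UI messages are converted with `convertToModelMessages` before being handed to `streamText`, and the response switches from the v4 data stream protocol (`toDataStreamResponse`) to the UI message stream (`toUIMessageStreamResponse`). For reference, a sketch of the OpenAI route as it reads once this diff is applied, assembled from the hunks above:

```ts
// /app/api/chat/route.ts (OpenAI tab, post-diff)
import { openai } from "@ai-sdk/openai";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages } = await req.json();
  const result = streamText({
    model: openai("gpt-4o-mini"),
    // v5: UIMessages from the client must be converted to ModelMessages
    messages: convertToModelMessages(messages),
  });
  // v5: stream UI message chunks back to the client
  return result.toUIMessageStreamResponse();
}
```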
@@ -41,29 +41,22 @@ This adds `/components/assistant-ui/attachment.tsx` to your project.
 </Step>
 <Step>
 
-### Configure Attachment Adapter
+### Set up Runtime (No Configuration Required)
 
-Set up an attachment adapter in your runtime provider:
+For `useChatRuntime`, attachments work automatically without additional configuration:
 
 ```tsx title="/app/MyRuntimeProvider.tsx"
 import { useChatRuntime } from "@assistant-ui/react-ai-sdk";
-import {
-  CompositeAttachmentAdapter,
-  SimpleImageAttachmentAdapter,
-  SimpleTextAttachmentAdapter,
-} from "@assistant-ui/react";
 
 const runtime = useChatRuntime({
   api: "/api/chat",
-  adapters: {
-    attachments: new CompositeAttachmentAdapter([
-      new SimpleImageAttachmentAdapter(),
-      new SimpleTextAttachmentAdapter(),
-    ]),
-  },
 });
 ```
 
+<Callout type="info">
+  **Note:** The AI SDK runtime handles attachments automatically. For other runtimes like `useLocalRuntime`, you may still need to configure attachment adapters as shown in the [Creating Custom Attachment Adapters](#creating-custom-attachment-adapters) section below.
+</Callout>
+
 </Step>
 <Step>
 
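The new Callout defers non-AI-SDK runtimes to manual adapter configuration. A minimal sketch of that path for `useLocalRuntime`, reusing the adapter classes this diff removes from the AI SDK example (`MyModelAdapter` stands in for your own model adapter):

```tsx
import {
  useLocalRuntime,
  CompositeAttachmentAdapter,
  SimpleImageAttachmentAdapter,
  SimpleTextAttachmentAdapter,
} from "@assistant-ui/react";
import { MyModelAdapter } from "./my-model-adapter"; // placeholder

// The composite adapter dispatches each attached file to the first
// child adapter that accepts its content type.
const runtime = useLocalRuntime(MyModelAdapter, {
  adapters: {
    attachments: new CompositeAttachmentAdapter([
      new SimpleImageAttachmentAdapter(),
      new SimpleTextAttachmentAdapter(),
    ]),
  },
});
```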
@@ -353,36 +346,6 @@ const runtime = useLocalRuntime(MyModelAdapter, {
 });
 ```
 
-### With Vercel AI SDK
-
-If you're using the Vercel AI SDK, images are handled automatically through experimental attachments:
-
-```tsx
-// In your API route
-import { streamText } from "ai";
-import { openai } from "@ai-sdk/openai";
-
-export async function POST(req: Request) {
-  const { messages } = await req.json();
-
-  const result = streamText({
-    model: openai("gpt-4-vision-preview"),
-    messages: messages.map((msg) => {
-      if (msg.experimental_attachments?.length) {
-        // Images are automatically formatted for the model
-        return {
-          ...msg,
-          experimental_attachments: msg.experimental_attachments,
-        };
-      }
-      return msg;
-    }),
-  });
-
-  return result.toDataStreamResponse();
-}
-```
-
 ## Advanced Features
 
 ### Progress Updates
@@ -540,78 +503,7 @@ const handleMultipleFiles = async (files: FileList) => {
 
 ### With Vercel AI SDK
 
-Process attachments in your API route:
-
-```tsx title="/app/api/chat/route.ts"
-import { streamText } from "ai";
-import { openai } from "@ai-sdk/openai";
-
-export async function POST(req: Request) {
-  const { messages } = await req.json();
-
-  // Process messages with attachments
-  const processedMessages = messages.map((msg) => {
-    if (msg.role === "user" && msg.experimental_attachments) {
-      // Handle attachments
-      const attachmentContent = msg.experimental_attachments
-        .map((att) => {
-          if (att.contentType.startsWith("image/")) {
-            return `[Image: ${att.name}]`;
-          }
-          return att.content;
-        })
-        .join("\n");
-
-      return {
-        ...msg,
-        content: `${msg.content}\n\nAttachments:\n${attachmentContent}`,
-      };
-    }
-    return msg;
-  });
-
-  const result = streamText({
-    model: openai("gpt-4o"),
-    messages: processedMessages,
-  });
-
-  return result.toDataStreamResponse();
-}
-```
-
-### Custom Backend Handling
-
-Implement your own attachment processing:
-
-```tsx
-// In your attachment adapter
-class ServerUploadAdapter implements AttachmentAdapter {
-  async send(attachment: PendingAttachment): Promise<CompleteAttachment> {
-    const formData = new FormData();
-    formData.append("file", attachment.file);
-
-    const response = await fetch("/api/upload", {
-      method: "POST",
-      body: formData,
-    });
-
-    const { url, id } = await response.json();
-
-    return {
-      id,
-      type: attachment.type,
-      name: attachment.name,
-      content: [
-        {
-          type: "image",
-          url,
-        },
-      ],
-      status: { type: "complete" },
-    };
-  }
-}
-```
+Attachments are sent to the backend as file content parts.
 
 ## Runtime Support
 
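The replacement sentence compresses two removed examples into the v5 default: files ride along as content parts of the user message rather than `experimental_attachments`. Purely as an illustration — the field names here follow the AI SDK v5 UIMessage file part and should be checked against the installed `ai` version — an image attachment reaches the endpoint roughly as:

```ts
// Hypothetical POST body excerpt (shape assumed, not taken from the diff)
const userMessage = {
  role: "user",
  parts: [
    { type: "text", text: "What is in this image?" },
    // File part: a media type plus a URL (often a data URL for small uploads)
    { type: "file", mediaType: "image/png", url: "data:image/png;base64,..." },
  ],
};
```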
@@ -201,11 +201,11 @@ export async function POST(req: Request) {
 
   const result = streamText({
     model: openai("gpt-4o"),
-    messages,
+    messages: convertToModelMessages(messages),
     tools: {
       getWeather: tool({
         description: "Get current weather for a location",
-        parameters: z.object({
+        inputSchema: z.object({
           location: z.string(),
           unit: z.enum(["celsius", "fahrenheit"]),
         }),
@@ -222,7 +222,7 @@ export async function POST(req: Request) {
     },
   });
 
-  return result.toDataStreamResponse();
+  return result.toUIMessageStreamResponse();
 }
 ```
 
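These final two hunks carry the route's tool definition through the same v5 rename: `parameters` becomes `inputSchema`, and the stream again returns UI messages. A sketch of the complete post-diff handler — the `execute` body is illustrative, since the diff elides everything between the schema and the closing braces:

```ts
import { openai } from "@ai-sdk/openai";
import { convertToModelMessages, streamText, tool } from "ai";
import { z } from "zod";

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    model: openai("gpt-4o"),
    messages: convertToModelMessages(messages),
    tools: {
      getWeather: tool({
        description: "Get current weather for a location",
        // v5: `parameters` is now `inputSchema`
        inputSchema: z.object({
          location: z.string(),
          unit: z.enum(["celsius", "fahrenheit"]),
        }),
        // Illustrative stub — not shown in the diff
        execute: async ({ location, unit }) => ({
          location,
          temperature: unit === "celsius" ? 22 : 72,
        }),
      }),
    },
  });

  return result.toUIMessageStreamResponse();
}
```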