@assistant-ui/mcp-docs-server 0.1.6 → 0.1.7

Files changed (25)
  1. package/.docs/organized/code-examples/with-ai-sdk-v5.md +12 -12
  2. package/.docs/organized/code-examples/with-cloud.md +16 -24
  3. package/.docs/organized/code-examples/with-external-store.md +6 -6
  4. package/.docs/organized/code-examples/with-ffmpeg.md +18 -20
  5. package/.docs/organized/code-examples/with-langgraph.md +6 -8
  6. package/.docs/organized/code-examples/with-parent-id-grouping.md +6 -6
  7. package/.docs/organized/code-examples/with-react-hook-form.md +16 -20
  8. package/.docs/raw/docs/api-reference/overview.mdx +1 -4
  9. package/.docs/raw/docs/getting-started.mdx +33 -33
  10. package/.docs/raw/docs/guides/Attachments.mdx +1 -102
  11. package/.docs/raw/docs/guides/ToolUI.mdx +3 -3
  12. package/.docs/raw/docs/guides/Tools.mdx +101 -84
  13. package/.docs/raw/docs/runtimes/ai-sdk/use-chat.mdx +134 -55
  14. package/.docs/raw/docs/runtimes/ai-sdk/v4-legacy.mdx +182 -0
  15. package/.docs/raw/docs/runtimes/langgraph/index.mdx +0 -1
  16. package/.docs/raw/docs/runtimes/langserve.mdx +9 -11
  17. package/package.json +6 -6
  18. package/.docs/organized/code-examples/local-ollama.md +0 -1135
  19. package/.docs/organized/code-examples/search-agent-for-e-commerce.md +0 -1721
  20. package/.docs/organized/code-examples/with-ai-sdk.md +0 -1082
  21. package/.docs/organized/code-examples/with-openai-assistants.md +0 -1175
  22. package/.docs/raw/docs/runtimes/ai-sdk/rsc.mdx +0 -226
  23. package/.docs/raw/docs/runtimes/ai-sdk/use-assistant-hook.mdx +0 -195
  24. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-hook.mdx +0 -138
  25. package/.docs/raw/docs/runtimes/ai-sdk/use-chat-v5.mdx +0 -212
@@ -810,7 +810,7 @@ Add an API endpoint:
  <Tabs id="provider" items={["OpenAI", "Anthropic", "Azure", "AWS", "Gemini", "GCP", "Groq", "Fireworks", "Cohere", "Ollama", "Chrome AI"]}>
  ```ts title="/app/api/chat/route.ts" tab="OpenAI"
  import { openai } from "@ai-sdk/openai";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -818,15 +818,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: openai("gpt-4o-mini"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Anthropic"
  import { anthropic } from "@ai-sdk/anthropic";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -834,15 +834,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: anthropic("claude-3-5-sonnet-20240620"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Azure"
  import { azure } from "@ai-sdk/azure";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -850,15 +850,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: azure("your-deployment-name"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="AWS"
  import { bedrock } from "@ai-sdk/amazon-bedrock";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -866,15 +866,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: bedrock("anthropic.claude-3-5-sonnet-20240620-v1:0"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Gemini"
  import { google } from "@ai-sdk/google";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -882,15 +882,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: google("gemini-2.0-flash"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="GCP"
  import { vertex } from "@ai-sdk/google-vertex";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -898,15 +898,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: vertex("gemini-1.5-pro"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Groq"
  import { createOpenAI } from "@ai-sdk/openai";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -919,15 +919,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: groq("llama3-70b-8192"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Fireworks"
  import { createOpenAI } from "@ai-sdk/openai";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -940,15 +940,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: fireworks("accounts/fireworks/models/firefunction-v2"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Cohere"
  import { cohere } from "@ai-sdk/cohere";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -956,15 +956,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: cohere("command-r-plus"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Ollama"
  import { ollama } from "ollama-ai-provider";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -972,15 +972,15 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: ollama("llama3"),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```

  ```ts title="/app/api/chat/route.ts" tab="Chrome AI"
  import { chromeai } from "chrome-ai";
- import { streamText } from "ai";
+ import { convertToModelMessages, streamText } from "ai";

  export const maxDuration = 30;

@@ -988,9 +988,9 @@ export async function POST(req: Request) {
    const { messages } = await req.json();
    const result = streamText({
      model: chromeai(),
-     messages,
+     messages: convertToModelMessages(messages),
    });
-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```
 
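Taken together, the provider hunks above all apply the same AI SDK v4 → v5 migration: incoming UI messages are converted with `convertToModelMessages` before reaching `streamText`, and the result is streamed back with `toUIMessageStreamResponse` instead of `toDataStreamResponse`. Assembled from the OpenAI hunk, the fully migrated route reads roughly as follows (a sketch for orientation, not part of the diff itself):

```ts
// Sketch of /app/api/chat/route.ts after the migration (OpenAI tab)
import { openai } from "@ai-sdk/openai";
import { convertToModelMessages, streamText } from "ai";

export const maxDuration = 30;

export async function POST(req: Request) {
  // In v5 the client sends parts-based UIMessage objects
  const { messages } = await req.json();
  const result = streamText({
    model: openai("gpt-4o-mini"),
    // Convert UI messages into provider-ready model messages
    messages: convertToModelMessages(messages),
  });
  // v5 replacement for toDataStreamResponse()
  return result.toUIMessageStreamResponse();
}
```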
@@ -353,36 +353,6 @@ const runtime = useLocalRuntime(MyModelAdapter, {
  });
  ```

- ### With Vercel AI SDK
-
- If you're using the Vercel AI SDK, images are handled automatically through experimental attachments:
-
- ```tsx
- // In your API route
- import { streamText } from "ai";
- import { openai } from "@ai-sdk/openai";
-
- export async function POST(req: Request) {
-   const { messages } = await req.json();
-
-   const result = streamText({
-     model: openai("gpt-4-vision-preview"),
-     messages: messages.map((msg) => {
-       if (msg.experimental_attachments?.length) {
-         // Images are automatically formatted for the model
-         return {
-           ...msg,
-           experimental_attachments: msg.experimental_attachments,
-         };
-       }
-       return msg;
-     }),
-   });
-
-   return result.toDataStreamResponse();
- }
- ```
-
  ## Advanced Features

  ### Progress Updates
@@ -540,78 +510,7 @@ const handleMultipleFiles = async (files: FileList) => {

  ### With Vercel AI SDK

- Process attachments in your API route:
-
- ```tsx title="/app/api/chat/route.ts"
- import { streamText } from "ai";
- import { openai } from "@ai-sdk/openai";
-
- export async function POST(req: Request) {
-   const { messages } = await req.json();
-
-   // Process messages with attachments
-   const processedMessages = messages.map((msg) => {
-     if (msg.role === "user" && msg.experimental_attachments) {
-       // Handle attachments
-       const attachmentContent = msg.experimental_attachments
-         .map((att) => {
-           if (att.contentType.startsWith("image/")) {
-             return `[Image: ${att.name}]`;
-           }
-           return att.content;
-         })
-         .join("\n");
-
-       return {
-         ...msg,
-         content: `${msg.content}\n\nAttachments:\n${attachmentContent}`,
-       };
-     }
-     return msg;
-   });
-
-   const result = streamText({
-     model: openai("gpt-4o"),
-     messages: processedMessages,
-   });
-
-   return result.toDataStreamResponse();
- }
- ```
-
- ### Custom Backend Handling
-
- Implement your own attachment processing:
-
- ```tsx
- // In your attachment adapter
- class ServerUploadAdapter implements AttachmentAdapter {
-   async send(attachment: PendingAttachment): Promise<CompleteAttachment> {
-     const formData = new FormData();
-     formData.append("file", attachment.file);
-
-     const response = await fetch("/api/upload", {
-       method: "POST",
-       body: formData,
-     });
-
-     const { url, id } = await response.json();
-
-     return {
-       id,
-       type: attachment.type,
-       name: attachment.name,
-       content: [
-         {
-           type: "image",
-           url,
-         },
-       ],
-       status: { type: "complete" },
-     };
-   }
- }
- ```
+ Attachments are sent to the backend as file content parts.

  ## Runtime Support
 
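The one-line replacement reflects how attachments work in AI SDK v5: instead of `experimental_attachments` being processed by hand in the route, attachments arrive as `file` parts on the UI message, and `convertToModelMessages` forwards them to the model. A minimal sketch of the shape the backend now receives (the id and data URL are placeholder values):

```ts
// Sketch: a user message carrying an image as a v5 file part
import { convertToModelMessages, type UIMessage } from "ai";

const message: UIMessage = {
  id: "msg_1", // placeholder id
  role: "user",
  parts: [
    { type: "text", text: "What is in this picture?" },
    // File parts replace the old experimental_attachments field
    { type: "file", mediaType: "image/png", url: "data:image/png;base64,..." },
  ],
};

// No per-message attachment handling needed in the route:
const modelMessages = convertToModelMessages([message]);
```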
@@ -201,11 +201,11 @@ export async function POST(req: Request) {

    const result = streamText({
      model: openai("gpt-4o"),
-     messages,
+     messages: convertToModelMessages(messages),
      tools: {
        getWeather: tool({
          description: "Get current weather for a location",
-         parameters: z.object({
+         inputSchema: z.object({
            location: z.string(),
            unit: z.enum(["celsius", "fahrenheit"]),
          }),
@@ -222,7 +222,7 @@ export async function POST(req: Request) {
      },
    });

-   return result.toDataStreamResponse();
+   return result.toUIMessageStreamResponse();
  }
  ```
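The Tools.mdx hunks carry the same renames into the tool-calling example: `parameters` becomes `inputSchema` in v5 tool definitions, and the response again switches to `toUIMessageStreamResponse`. With the `execute` body that the hunk elides filled in by a hypothetical stand-in, the resulting tool definition looks like this:

```ts
import { tool } from "ai";
import { z } from "zod";

// AI SDK v5 tool definition: `inputSchema` replaces v4's `parameters`
const getWeather = tool({
  description: "Get current weather for a location",
  inputSchema: z.object({
    location: z.string(),
    unit: z.enum(["celsius", "fahrenheit"]),
  }),
  // Hypothetical stand-in; the real execute body is outside the hunk
  execute: async ({ location, unit }) => {
    return { location, temperature: 20, unit };
  },
});
```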